[qemu/pbrook.git] / target-arm / translate.c
1 /*
2 * ARM translation
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include <stdarg.h>
22 #include <stdlib.h>
23 #include <stdio.h>
24 #include <string.h>
25 #include <inttypes.h>
27 #include "cpu.h"
28 #include "disas.h"
29 #include "tcg-op.h"
30 #include "qemu-log.h"
32 #include "helper.h"
33 #define GEN_HELPER 1
34 #include "helper.h"
36 #define ENABLE_ARCH_4T arm_feature(env, ARM_FEATURE_V4T)
37 #define ENABLE_ARCH_5 arm_feature(env, ARM_FEATURE_V5)
38 /* currently all emulated v5 cores are also v5TE, so don't bother */
39 #define ENABLE_ARCH_5TE arm_feature(env, ARM_FEATURE_V5)
40 #define ENABLE_ARCH_5J 0
41 #define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
42 #define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
43 #define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
44 #define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
46 #define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
48 /* internal defines */
49 typedef struct DisasContext {
50 target_ulong pc;
51 int is_jmp;
52 /* Nonzero if this instruction has been conditionally skipped. */
53 int condjmp;
54 /* The label that will be jumped to when the instruction is skipped. */
55 int condlabel;
56 /* Thumb-2 conditional execution bits. */
57 int condexec_mask;
58 int condexec_cond;
59 struct TranslationBlock *tb;
60 int singlestep_enabled;
61 int thumb;
62 int bswap_code;
63 #if !defined(CONFIG_USER_ONLY)
64 int user;
65 #endif
66 int vfp_enabled;
67 int vec_len;
68 int vec_stride;
69 } DisasContext;
71 static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
73 #if defined(CONFIG_USER_ONLY)
74 #define IS_USER(s) 1
75 #else
76 #define IS_USER(s) (s->user)
77 #endif
79 /* These instructions trap after executing, so defer them until after the
80 conditional execution state has been updated. */
81 #define DISAS_WFI 4
82 #define DISAS_SWI 5
84 static TCGv_ptr cpu_env;
85 /* We reuse the same 64-bit temporaries for efficiency. */
86 static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
87 static TCGv_i32 cpu_R[16];
88 static TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
89 static TCGv_i32 cpu_exclusive_addr;
90 static TCGv_i32 cpu_exclusive_val;
91 static TCGv_i32 cpu_exclusive_high;
92 #ifdef CONFIG_USER_ONLY
93 static TCGv_i32 cpu_exclusive_test;
94 static TCGv_i32 cpu_exclusive_info;
95 #endif
97 /* FIXME: These should be removed. */
98 static TCGv cpu_F0s, cpu_F1s;
99 static TCGv_i64 cpu_F0d, cpu_F1d;
101 #include "gen-icount.h"
103 static const char *regnames[] =
104 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
105 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
107 /* initialize TCG globals. */
108 void arm_translate_init(void)
110 int i;
112 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
114 for (i = 0; i < 16; i++) {
115 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
116 offsetof(CPUARMState, regs[i]),
117 regnames[i]);
119 cpu_CF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, CF), "CF");
120 cpu_NF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, NF), "NF");
121 cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
122 cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");
124 cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
125 offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
126 cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
127 offsetof(CPUARMState, exclusive_val), "exclusive_val");
128 cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
129 offsetof(CPUARMState, exclusive_high), "exclusive_high");
130 #ifdef CONFIG_USER_ONLY
131 cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
132 offsetof(CPUARMState, exclusive_test), "exclusive_test");
133 cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
134 offsetof(CPUARMState, exclusive_info), "exclusive_info");
135 #endif
137 #define GEN_HELPER 2
138 #include "helper.h"
141 static inline TCGv load_cpu_offset(int offset)
143 TCGv tmp = tcg_temp_new_i32();
144 tcg_gen_ld_i32(tmp, cpu_env, offset);
145 return tmp;
148 #define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
150 static inline void store_cpu_offset(TCGv var, int offset)
152 tcg_gen_st_i32(var, cpu_env, offset);
153 tcg_temp_free_i32(var);
156 #define store_cpu_field(var, name) \
157 store_cpu_offset(var, offsetof(CPUARMState, name))
159 /* Set a variable to the value of a CPU register. */
160 static void load_reg_var(DisasContext *s, TCGv var, int reg)
162 if (reg == 15) {
163 uint32_t addr;
164 /* Reading the PC yields the current instruction's address plus 8 (ARM) or 4 (Thumb); s->pc has already been advanced past this instruction, so only one more instruction width needs to be added. */
165 if (s->thumb)
166 addr = (long)s->pc + 2;
167 else
168 addr = (long)s->pc + 4;
169 tcg_gen_movi_i32(var, addr);
170 } else {
171 tcg_gen_mov_i32(var, cpu_R[reg]);
175 /* Create a new temporary and set it to the value of a CPU register. */
176 static inline TCGv load_reg(DisasContext *s, int reg)
178 TCGv tmp = tcg_temp_new_i32();
179 load_reg_var(s, tmp, reg);
180 return tmp;
183 /* Set a CPU register. The source must be a temporary and will be
184 marked as dead. */
185 static void store_reg(DisasContext *s, int reg, TCGv var)
187 if (reg == 15) {
188 tcg_gen_andi_i32(var, var, ~1);
189 s->is_jmp = DISAS_JUMP;
191 tcg_gen_mov_i32(cpu_R[reg], var);
192 tcg_temp_free_i32(var);
195 /* Value extensions. */
196 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
197 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
198 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
199 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
201 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
202 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
205 static inline void gen_set_cpsr(TCGv var, uint32_t mask)
207 TCGv tmp_mask = tcg_const_i32(mask);
208 gen_helper_cpsr_write(cpu_env, var, tmp_mask);
209 tcg_temp_free_i32(tmp_mask);
211 /* Set NZCV flags from the high 4 bits of var. */
212 #define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
214 static void gen_exception(int excp)
216 TCGv tmp = tcg_temp_new_i32();
217 tcg_gen_movi_i32(tmp, excp);
218 gen_helper_exception(cpu_env, tmp);
219 tcg_temp_free_i32(tmp);
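/* Dual signed 16x16->32 multiply: on return a holds the product of the two
   low halfwords and b the product of the two high halfwords (used by the
   SMUAD/SMUSD/SMLAD family).  Both inputs are overwritten. */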
222 static void gen_smul_dual(TCGv a, TCGv b)
224 TCGv tmp1 = tcg_temp_new_i32();
225 TCGv tmp2 = tcg_temp_new_i32();
226 tcg_gen_ext16s_i32(tmp1, a);
227 tcg_gen_ext16s_i32(tmp2, b);
228 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
229 tcg_temp_free_i32(tmp2);
230 tcg_gen_sari_i32(a, a, 16);
231 tcg_gen_sari_i32(b, b, 16);
232 tcg_gen_mul_i32(b, b, a);
233 tcg_gen_mov_i32(a, tmp1);
234 tcg_temp_free_i32(tmp1);
237 /* Byteswap each halfword. */
238 static void gen_rev16(TCGv var)
240 TCGv tmp = tcg_temp_new_i32();
241 tcg_gen_shri_i32(tmp, var, 8);
242 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
243 tcg_gen_shli_i32(var, var, 8);
244 tcg_gen_andi_i32(var, var, 0xff00ff00);
245 tcg_gen_or_i32(var, var, tmp);
246 tcg_temp_free_i32(tmp);
249 /* Byteswap low halfword and sign extend. */
250 static void gen_revsh(TCGv var)
252 tcg_gen_ext16u_i32(var, var);
253 tcg_gen_bswap16_i32(var, var);
254 tcg_gen_ext16s_i32(var, var);
257 /* Unsigned bitfield extract. */
258 static void gen_ubfx(TCGv var, int shift, uint32_t mask)
260 if (shift)
261 tcg_gen_shri_i32(var, var, shift);
262 tcg_gen_andi_i32(var, var, mask);
265 /* Signed bitfield extract. */
266 static void gen_sbfx(TCGv var, int shift, int width)
268 uint32_t signbit;
270 if (shift)
271 tcg_gen_sari_i32(var, var, shift);
272 if (shift + width < 32) {
273 signbit = 1u << (width - 1);
274 tcg_gen_andi_i32(var, var, (1u << width) - 1);
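/* (x ^ signbit) - signbit sign-extends the masked width-bit field. */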
275 tcg_gen_xori_i32(var, var, signbit);
276 tcg_gen_subi_i32(var, var, signbit);
280 /* Return (b << 32) + a. Mark inputs as dead. */
281 static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b)
283 TCGv_i64 tmp64 = tcg_temp_new_i64();
285 tcg_gen_extu_i32_i64(tmp64, b);
286 tcg_temp_free_i32(b);
287 tcg_gen_shli_i64(tmp64, tmp64, 32);
288 tcg_gen_add_i64(a, tmp64, a);
290 tcg_temp_free_i64(tmp64);
291 return a;
294 /* Return (b << 32) - a. Mark inputs as dead. */
295 static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b)
297 TCGv_i64 tmp64 = tcg_temp_new_i64();
299 tcg_gen_extu_i32_i64(tmp64, b);
300 tcg_temp_free_i32(b);
301 tcg_gen_shli_i64(tmp64, tmp64, 32);
302 tcg_gen_sub_i64(a, tmp64, a);
304 tcg_temp_free_i64(tmp64);
305 return a;
308 /* FIXME: Most targets have native widening multiplication.
309 It would be good to use that instead of a full wide multiply. */
310 /* 32x32->64 multiply. Marks inputs as dead. */
311 static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
313 TCGv_i64 tmp1 = tcg_temp_new_i64();
314 TCGv_i64 tmp2 = tcg_temp_new_i64();
316 tcg_gen_extu_i32_i64(tmp1, a);
317 tcg_temp_free_i32(a);
318 tcg_gen_extu_i32_i64(tmp2, b);
319 tcg_temp_free_i32(b);
320 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
321 tcg_temp_free_i64(tmp2);
322 return tmp1;
325 static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
327 TCGv_i64 tmp1 = tcg_temp_new_i64();
328 TCGv_i64 tmp2 = tcg_temp_new_i64();
330 tcg_gen_ext_i32_i64(tmp1, a);
331 tcg_temp_free_i32(a);
332 tcg_gen_ext_i32_i64(tmp2, b);
333 tcg_temp_free_i32(b);
334 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
335 tcg_temp_free_i64(tmp2);
336 return tmp1;
339 /* Swap low and high halfwords. */
340 static void gen_swap_half(TCGv var)
342 TCGv tmp = tcg_temp_new_i32();
343 tcg_gen_shri_i32(tmp, var, 16);
344 tcg_gen_shli_i32(var, var, 16);
345 tcg_gen_or_i32(var, var, tmp);
346 tcg_temp_free_i32(tmp);
349 /* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
350 tmp = (t0 ^ t1) & 0x8000;
351 t0 &= ~0x8000;
352 t1 &= ~0x8000;
353 t0 = (t0 + t1) ^ tmp; */
356 static void gen_add16(TCGv t0, TCGv t1)
358 TCGv tmp = tcg_temp_new_i32();
359 tcg_gen_xor_i32(tmp, t0, t1);
360 tcg_gen_andi_i32(tmp, tmp, 0x8000);
361 tcg_gen_andi_i32(t0, t0, ~0x8000);
362 tcg_gen_andi_i32(t1, t1, ~0x8000);
363 tcg_gen_add_i32(t0, t0, t1);
364 tcg_gen_xor_i32(t0, t0, tmp);
365 tcg_temp_free_i32(tmp);
366 tcg_temp_free_i32(t1);
369 /* Set CF to the top bit of var. */
370 static void gen_set_CF_bit31(TCGv var)
372 tcg_gen_shri_i32(cpu_CF, var, 31);
375 /* Set N and Z flags from var. */
376 static inline void gen_logic_CC(TCGv var)
378 tcg_gen_mov_i32(cpu_NF, var);
379 tcg_gen_mov_i32(cpu_ZF, var);
382 /* T0 += T1 + CF. */
383 static void gen_adc(TCGv t0, TCGv t1)
385 tcg_gen_add_i32(t0, t0, t1);
386 tcg_gen_add_i32(t0, t0, cpu_CF);
389 /* dest = T0 + T1 + CF. */
390 static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
392 tcg_gen_add_i32(dest, t0, t1);
393 tcg_gen_add_i32(dest, dest, cpu_CF);
396 /* dest = T0 - T1 + CF - 1. */
397 static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
399 tcg_gen_sub_i32(dest, t0, t1);
400 tcg_gen_add_i32(dest, dest, cpu_CF);
401 tcg_gen_subi_i32(dest, dest, 1);
404 /* dest = T0 + T1. Compute C, N, V and Z flags */
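/* C is set when the 32-bit sum wraps (unsigned result < t0).  V is set when
   both operands have the same sign but the result's sign differs:
   VF = (result ^ t0) & ~(t0 ^ t1), with the flag kept in bit 31. */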
405 static void gen_add_CC(TCGv dest, TCGv t0, TCGv t1)
407 TCGv tmp;
408 tcg_gen_add_i32(cpu_NF, t0, t1);
409 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
410 tcg_gen_setcond_i32(TCG_COND_LTU, cpu_CF, cpu_NF, t0);
411 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
412 tmp = tcg_temp_new_i32();
413 tcg_gen_xor_i32(tmp, t0, t1);
414 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
415 tcg_temp_free_i32(tmp);
416 tcg_gen_mov_i32(dest, cpu_NF);
419 /* dest = T0 - T1. Compute C, N, V and Z flags */
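/* For subtraction C acts as NOT-borrow: set when t0 >= t1 unsigned.  V is
   set when the operands differ in sign and the result's sign differs from
   t0: VF = (result ^ t0) & (t0 ^ t1), with the flag kept in bit 31. */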
420 static void gen_sub_CC(TCGv dest, TCGv t0, TCGv t1)
422 TCGv tmp;
423 tcg_gen_sub_i32(cpu_NF, t0, t1);
424 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
425 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
426 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
427 tmp = tcg_temp_new_i32();
428 tcg_gen_xor_i32(tmp, t0, t1);
429 tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
430 tcg_temp_free_i32(tmp);
431 tcg_gen_mov_i32(dest, cpu_NF);
434 #define GEN_SHIFT(name) \
435 static void gen_##name(TCGv dest, TCGv t0, TCGv t1) \
437 TCGv tmp1, tmp2, tmp3; \
438 tmp1 = tcg_temp_new_i32(); \
439 tcg_gen_andi_i32(tmp1, t1, 0xff); \
440 tmp2 = tcg_const_i32(0); \
441 tmp3 = tcg_const_i32(0x1f); \
442 tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
443 tcg_temp_free_i32(tmp3); \
444 tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
445 tcg_gen_##name##_i32(dest, tmp2, tmp1); \
446 tcg_temp_free_i32(tmp2); \
447 tcg_temp_free_i32(tmp1); \
449 GEN_SHIFT(shl)
450 GEN_SHIFT(shr)
451 #undef GEN_SHIFT
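/* GEN_SHIFT expands to gen_shl() and gen_shr() above.  Register-specified
   shifts use only the bottom byte of t1; the movcond substitutes a zero
   source operand whenever that byte exceeds 31, so LSL/LSR by 32 or more
   correctly produce 0. */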
453 static void gen_sar(TCGv dest, TCGv t0, TCGv t1)
455 TCGv tmp1, tmp2;
456 tmp1 = tcg_temp_new_i32();
457 tcg_gen_andi_i32(tmp1, t1, 0xff);
458 tmp2 = tcg_const_i32(0x1f);
459 tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
460 tcg_temp_free_i32(tmp2);
461 tcg_gen_sar_i32(dest, t0, tmp1);
462 tcg_temp_free_i32(tmp1);
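/* ASR by register clamps the count to 31, since shifting right by 32 or
   more simply replicates the sign bit. */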
465 /* FIXME: Implement this natively. */
466 #define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)
468 static void shifter_out_im(TCGv var, int shift)
470 if (shift == 0) {
471 tcg_gen_andi_i32(cpu_CF, var, 1);
472 } else {
473 tcg_gen_shri_i32(cpu_CF, var, shift);
474 if (shift != 31) {
475 tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
480 /* Shift by immediate. Includes special handling for shift == 0. */
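/* In the immediate shift encodings a count of 0 is special: LSR #0 and
   ASR #0 encode a shift of 32, and ROR #0 encodes RRX (rotate right by one
   through the carry flag). */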
481 static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
483 switch (shiftop) {
484 case 0: /* LSL */
485 if (shift != 0) {
486 if (flags)
487 shifter_out_im(var, 32 - shift);
488 tcg_gen_shli_i32(var, var, shift);
490 break;
491 case 1: /* LSR */
492 if (shift == 0) {
493 if (flags) {
494 tcg_gen_shri_i32(cpu_CF, var, 31);
496 tcg_gen_movi_i32(var, 0);
497 } else {
498 if (flags)
499 shifter_out_im(var, shift - 1);
500 tcg_gen_shri_i32(var, var, shift);
502 break;
503 case 2: /* ASR */
504 if (shift == 0)
505 shift = 32;
506 if (flags)
507 shifter_out_im(var, shift - 1);
508 if (shift == 32)
509 shift = 31;
510 tcg_gen_sari_i32(var, var, shift);
511 break;
512 case 3: /* ROR/RRX */
513 if (shift != 0) {
514 if (flags)
515 shifter_out_im(var, shift - 1);
516 tcg_gen_rotri_i32(var, var, shift); break;
517 } else {
518 TCGv tmp = tcg_temp_new_i32();
519 tcg_gen_shli_i32(tmp, cpu_CF, 31);
520 if (flags)
521 shifter_out_im(var, 0);
522 tcg_gen_shri_i32(var, var, 1);
523 tcg_gen_or_i32(var, var, tmp);
524 tcg_temp_free_i32(tmp);
529 static inline void gen_arm_shift_reg(TCGv var, int shiftop,
530 TCGv shift, int flags)
532 if (flags) {
533 switch (shiftop) {
534 case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
535 case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
536 case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
537 case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
539 } else {
540 switch (shiftop) {
541 case 0:
542 gen_shl(var, var, shift);
543 break;
544 case 1:
545 gen_shr(var, var, shift);
546 break;
547 case 2:
548 gen_sar(var, var, shift);
549 break;
550 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
551 tcg_gen_rotr_i32(var, var, shift); break;
554 tcg_temp_free_i32(shift);
557 #define PAS_OP(pfx) \
558 switch (op2) { \
559 case 0: gen_pas_helper(glue(pfx,add16)); break; \
560 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
561 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
562 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
563 case 4: gen_pas_helper(glue(pfx,add8)); break; \
564 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
566 static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
568 TCGv_ptr tmp;
570 switch (op1) {
571 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
572 case 1:
573 tmp = tcg_temp_new_ptr();
574 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
575 PAS_OP(s)
576 tcg_temp_free_ptr(tmp);
577 break;
578 case 5:
579 tmp = tcg_temp_new_ptr();
580 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
581 PAS_OP(u)
582 tcg_temp_free_ptr(tmp);
583 break;
584 #undef gen_pas_helper
585 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
586 case 2:
587 PAS_OP(q);
588 break;
589 case 3:
590 PAS_OP(sh);
591 break;
592 case 6:
593 PAS_OP(uq);
594 break;
595 case 7:
596 PAS_OP(uh);
597 break;
598 #undef gen_pas_helper
601 #undef PAS_OP
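/* For the ARM encoding op1 selects the variant (1 = signed modulo, 2 =
   signed saturating, 3 = signed halving, 5/6/7 = the unsigned equivalents)
   and op2 selects the operation (add16, addsubx, subaddx, sub16, add8,
   sub8); only the modulo variants need the pointer to the GE flags. */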
603 /* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
604 #define PAS_OP(pfx) \
605 switch (op1) { \
606 case 0: gen_pas_helper(glue(pfx,add8)); break; \
607 case 1: gen_pas_helper(glue(pfx,add16)); break; \
608 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
609 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
610 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
611 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
613 static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
615 TCGv_ptr tmp;
617 switch (op2) {
618 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
619 case 0:
620 tmp = tcg_temp_new_ptr();
621 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
622 PAS_OP(s)
623 tcg_temp_free_ptr(tmp);
624 break;
625 case 4:
626 tmp = tcg_temp_new_ptr();
627 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
628 PAS_OP(u)
629 tcg_temp_free_ptr(tmp);
630 break;
631 #undef gen_pas_helper
632 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
633 case 1:
634 PAS_OP(q);
635 break;
636 case 2:
637 PAS_OP(sh);
638 break;
639 case 5:
640 PAS_OP(uq);
641 break;
642 case 6:
643 PAS_OP(uh);
644 break;
645 #undef gen_pas_helper
648 #undef PAS_OP
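/* gen_test_cc() branches to 'label' when ARM condition code 'cc' holds.
   cpu_ZF holds a value that is zero iff the Z flag is set, cpu_NF and
   cpu_VF hold their flag in bit 31, and cpu_CF holds 0 or 1. */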
650 static void gen_test_cc(int cc, int label)
652 TCGv tmp;
653 int inv;
655 switch (cc) {
656 case 0: /* eq: Z */
657 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
658 break;
659 case 1: /* ne: !Z */
660 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
661 break;
662 case 2: /* cs: C */
663 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_CF, 0, label);
664 break;
665 case 3: /* cc: !C */
666 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
667 break;
668 case 4: /* mi: N */
669 tcg_gen_brcondi_i32(TCG_COND_LT, cpu_NF, 0, label);
670 break;
671 case 5: /* pl: !N */
672 tcg_gen_brcondi_i32(TCG_COND_GE, cpu_NF, 0, label);
673 break;
674 case 6: /* vs: V */
675 tcg_gen_brcondi_i32(TCG_COND_LT, cpu_VF, 0, label);
676 break;
677 case 7: /* vc: !V */
678 tcg_gen_brcondi_i32(TCG_COND_GE, cpu_VF, 0, label);
679 break;
680 case 8: /* hi: C && !Z */
681 inv = gen_new_label();
682 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, inv);
683 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
684 gen_set_label(inv);
685 break;
686 case 9: /* ls: !C || Z */
687 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
688 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
689 break;
690 case 10: /* ge: N == V -> N ^ V == 0 */
691 tmp = tcg_temp_new_i32();
692 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
693 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
694 tcg_temp_free_i32(tmp);
695 break;
696 case 11: /* lt: N != V -> N ^ V != 0 */
697 tmp = tcg_temp_new_i32();
698 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
699 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
700 tcg_temp_free_i32(tmp);
701 break;
702 case 12: /* gt: !Z && N == V */
703 inv = gen_new_label();
704 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, inv);
705 tmp = tcg_temp_new_i32();
706 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
707 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
708 tcg_temp_free_i32(tmp);
709 gen_set_label(inv);
710 break;
711 case 13: /* le: Z || N != V */
712 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
713 tmp = tcg_temp_new_i32();
714 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
715 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
716 tcg_temp_free_i32(tmp);
717 break;
718 default:
719 fprintf(stderr, "Bad condition code 0x%x\n", cc);
720 abort();
724 static const uint8_t table_logic_cc[16] = {
725 1, /* and */
726 1, /* xor */
727 0, /* sub */
728 0, /* rsb */
729 0, /* add */
730 0, /* adc */
731 0, /* sbc */
732 0, /* rsc */
733 1, /* andl */
734 1, /* xorl */
735 0, /* cmp */
736 0, /* cmn */
737 1, /* orr */
738 1, /* mov */
739 1, /* bic */
740 1, /* mvn */
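/* 1 for the logical data-processing ops (AND, EOR, TST, TEQ, ORR, MOV,
   BIC, MVN), whose flag update is N and Z from the result (C comes from
   the shifter); 0 for the arithmetic ops, which compute N, Z, C and V. */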
743 /* Set PC and Thumb state from an immediate address. */
744 static inline void gen_bx_im(DisasContext *s, uint32_t addr)
746 TCGv tmp;
748 s->is_jmp = DISAS_UPDATE;
749 if (s->thumb != (addr & 1)) {
750 tmp = tcg_temp_new_i32();
751 tcg_gen_movi_i32(tmp, addr & 1);
752 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
753 tcg_temp_free_i32(tmp);
755 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
758 /* Set PC and Thumb state from var. var is marked as dead. */
759 static inline void gen_bx(DisasContext *s, TCGv var)
761 s->is_jmp = DISAS_UPDATE;
762 tcg_gen_andi_i32(cpu_R[15], var, ~1);
763 tcg_gen_andi_i32(var, var, 1);
764 store_cpu_field(var, thumb);
767 /* Variant of store_reg which uses branch&exchange logic when storing
768 to r15 in ARM architecture v7 and above. The source must be a temporary
769 and will be marked as dead. */
770 static inline void store_reg_bx(CPUARMState *env, DisasContext *s,
771 int reg, TCGv var)
773 if (reg == 15 && ENABLE_ARCH_7) {
774 gen_bx(s, var);
775 } else {
776 store_reg(s, reg, var);
780 /* Variant of store_reg which uses branch&exchange logic when storing
781 * to r15 in ARM architecture v5T and above. This is used for storing
782 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
783 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
784 static inline void store_reg_from_load(CPUARMState *env, DisasContext *s,
785 int reg, TCGv var)
787 if (reg == 15 && ENABLE_ARCH_5) {
788 gen_bx(s, var);
789 } else {
790 store_reg(s, reg, var);
794 static inline TCGv gen_ld8s(TCGv addr, int index)
796 TCGv tmp = tcg_temp_new_i32();
797 tcg_gen_qemu_ld8s(tmp, addr, index);
798 return tmp;
800 static inline TCGv gen_ld8u(TCGv addr, int index)
802 TCGv tmp = tcg_temp_new_i32();
803 tcg_gen_qemu_ld8u(tmp, addr, index);
804 return tmp;
806 static inline TCGv gen_ld16s(TCGv addr, int index)
808 TCGv tmp = tcg_temp_new_i32();
809 tcg_gen_qemu_ld16s(tmp, addr, index);
810 return tmp;
812 static inline TCGv gen_ld16u(TCGv addr, int index)
814 TCGv tmp = tcg_temp_new_i32();
815 tcg_gen_qemu_ld16u(tmp, addr, index);
816 return tmp;
818 static inline TCGv gen_ld32(TCGv addr, int index)
820 TCGv tmp = tcg_temp_new_i32();
821 tcg_gen_qemu_ld32u(tmp, addr, index);
822 return tmp;
824 static inline TCGv_i64 gen_ld64(TCGv addr, int index)
826 TCGv_i64 tmp = tcg_temp_new_i64();
827 tcg_gen_qemu_ld64(tmp, addr, index);
828 return tmp;
830 static inline void gen_st8(TCGv val, TCGv addr, int index)
832 tcg_gen_qemu_st8(val, addr, index);
833 tcg_temp_free_i32(val);
835 static inline void gen_st16(TCGv val, TCGv addr, int index)
837 tcg_gen_qemu_st16(val, addr, index);
838 tcg_temp_free_i32(val);
840 static inline void gen_st32(TCGv val, TCGv addr, int index)
842 tcg_gen_qemu_st32(val, addr, index);
843 tcg_temp_free_i32(val);
845 static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
847 tcg_gen_qemu_st64(val, addr, index);
848 tcg_temp_free_i64(val);
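/* The 'index' argument to these load/store helpers is the softmmu MMU
   index for the access; IS_USER(s) gives the index for the translated
   code's current privilege level. */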
851 static inline void gen_set_pc_im(uint32_t val)
853 tcg_gen_movi_i32(cpu_R[15], val);
856 /* Force a TB lookup after an instruction that changes the CPU state. */
857 static inline void gen_lookup_tb(DisasContext *s)
859 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
860 s->is_jmp = DISAS_UPDATE;
863 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
864 TCGv var)
866 int val, rm, shift, shiftop;
867 TCGv offset;
869 if (!(insn & (1 << 25))) {
870 /* immediate */
871 val = insn & 0xfff;
872 if (!(insn & (1 << 23)))
873 val = -val;
874 if (val != 0)
875 tcg_gen_addi_i32(var, var, val);
876 } else {
877 /* shift/register */
878 rm = (insn) & 0xf;
879 shift = (insn >> 7) & 0x1f;
880 shiftop = (insn >> 5) & 3;
881 offset = load_reg(s, rm);
882 gen_arm_shift_im(offset, shiftop, shift, 0);
883 if (!(insn & (1 << 23)))
884 tcg_gen_sub_i32(var, var, offset);
885 else
886 tcg_gen_add_i32(var, var, offset);
887 tcg_temp_free_i32(offset);
891 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
892 int extra, TCGv var)
894 int val, rm;
895 TCGv offset;
897 if (insn & (1 << 22)) {
898 /* immediate */
899 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
900 if (!(insn & (1 << 23)))
901 val = -val;
902 val += extra;
903 if (val != 0)
904 tcg_gen_addi_i32(var, var, val);
905 } else {
906 /* register */
907 if (extra)
908 tcg_gen_addi_i32(var, var, extra);
909 rm = (insn) & 0xf;
910 offset = load_reg(s, rm);
911 if (!(insn & (1 << 23)))
912 tcg_gen_sub_i32(var, var, offset);
913 else
914 tcg_gen_add_i32(var, var, offset);
915 tcg_temp_free_i32(offset);
919 static TCGv_ptr get_fpstatus_ptr(int neon)
921 TCGv_ptr statusptr = tcg_temp_new_ptr();
922 int offset;
923 if (neon) {
924 offset = offsetof(CPUARMState, vfp.standard_fp_status);
925 } else {
926 offset = offsetof(CPUARMState, vfp.fp_status);
928 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
929 return statusptr;
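/* Neon arithmetic uses the "standard FPSCR value" (flush-to-zero,
   default-NaN, round-to-nearest), kept in a separate float_status from the
   FPSCR-controlled status used by VFP instructions. */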
932 #define VFP_OP2(name) \
933 static inline void gen_vfp_##name(int dp) \
935 TCGv_ptr fpst = get_fpstatus_ptr(0); \
936 if (dp) { \
937 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
938 } else { \
939 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
941 tcg_temp_free_ptr(fpst); \
944 VFP_OP2(add)
945 VFP_OP2(sub)
946 VFP_OP2(mul)
947 VFP_OP2(div)
949 #undef VFP_OP2
951 static inline void gen_vfp_F1_mul(int dp)
953 /* Like gen_vfp_mul() but put result in F1 */
954 TCGv_ptr fpst = get_fpstatus_ptr(0);
955 if (dp) {
956 gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
957 } else {
958 gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
960 tcg_temp_free_ptr(fpst);
963 static inline void gen_vfp_F1_neg(int dp)
965 /* Like gen_vfp_neg() but put result in F1 */
966 if (dp) {
967 gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
968 } else {
969 gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
973 static inline void gen_vfp_abs(int dp)
975 if (dp)
976 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
977 else
978 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
981 static inline void gen_vfp_neg(int dp)
983 if (dp)
984 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
985 else
986 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
989 static inline void gen_vfp_sqrt(int dp)
991 if (dp)
992 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
993 else
994 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
997 static inline void gen_vfp_cmp(int dp)
999 if (dp)
1000 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1001 else
1002 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1005 static inline void gen_vfp_cmpe(int dp)
1007 if (dp)
1008 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1009 else
1010 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1013 static inline void gen_vfp_F1_ld0(int dp)
1015 if (dp)
1016 tcg_gen_movi_i64(cpu_F1d, 0);
1017 else
1018 tcg_gen_movi_i32(cpu_F1s, 0);
1021 #define VFP_GEN_ITOF(name) \
1022 static inline void gen_vfp_##name(int dp, int neon) \
1024 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1025 if (dp) { \
1026 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
1027 } else { \
1028 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1030 tcg_temp_free_ptr(statusptr); \
1033 VFP_GEN_ITOF(uito)
1034 VFP_GEN_ITOF(sito)
1035 #undef VFP_GEN_ITOF
1037 #define VFP_GEN_FTOI(name) \
1038 static inline void gen_vfp_##name(int dp, int neon) \
1040 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1041 if (dp) { \
1042 gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
1043 } else { \
1044 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1046 tcg_temp_free_ptr(statusptr); \
1049 VFP_GEN_FTOI(toui)
1050 VFP_GEN_FTOI(touiz)
1051 VFP_GEN_FTOI(tosi)
1052 VFP_GEN_FTOI(tosiz)
1053 #undef VFP_GEN_FTOI
1055 #define VFP_GEN_FIX(name) \
1056 static inline void gen_vfp_##name(int dp, int shift, int neon) \
1058 TCGv tmp_shift = tcg_const_i32(shift); \
1059 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1060 if (dp) { \
1061 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, statusptr); \
1062 } else { \
1063 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, statusptr); \
1065 tcg_temp_free_i32(tmp_shift); \
1066 tcg_temp_free_ptr(statusptr); \
1068 VFP_GEN_FIX(tosh)
1069 VFP_GEN_FIX(tosl)
1070 VFP_GEN_FIX(touh)
1071 VFP_GEN_FIX(toul)
1072 VFP_GEN_FIX(shto)
1073 VFP_GEN_FIX(slto)
1074 VFP_GEN_FIX(uhto)
1075 VFP_GEN_FIX(ulto)
1076 #undef VFP_GEN_FIX
1078 static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
1080 if (dp)
1081 tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
1082 else
1083 tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
1086 static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
1088 if (dp)
1089 tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
1090 else
1091 tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
1094 static inline long
1095 vfp_reg_offset (int dp, int reg)
1097 if (dp)
1098 return offsetof(CPUARMState, vfp.regs[reg]);
1099 else if (reg & 1) {
1100 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1101 + offsetof(CPU_DoubleU, l.upper);
1102 } else {
1103 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1104 + offsetof(CPU_DoubleU, l.lower);
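/* Single-precision register s<2n> occupies the low word of d<n> and
   s<2n+1> the high word, hence the l.lower/l.upper split on even/odd
   register numbers. */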
1108 /* Return the offset of a 32-bit piece of a NEON register.
1109 zero is the least significant end of the register. */
1110 static inline long
1111 neon_reg_offset (int reg, int n)
1113 int sreg;
1114 sreg = reg * 2 + n;
1115 return vfp_reg_offset(0, sreg);
1118 static TCGv neon_load_reg(int reg, int pass)
1120 TCGv tmp = tcg_temp_new_i32();
1121 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1122 return tmp;
1125 static void neon_store_reg(int reg, int pass, TCGv var)
1127 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
1128 tcg_temp_free_i32(var);
1131 static inline void neon_load_reg64(TCGv_i64 var, int reg)
1133 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1136 static inline void neon_store_reg64(TCGv_i64 var, int reg)
1138 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1141 #define tcg_gen_ld_f32 tcg_gen_ld_i32
1142 #define tcg_gen_ld_f64 tcg_gen_ld_i64
1143 #define tcg_gen_st_f32 tcg_gen_st_i32
1144 #define tcg_gen_st_f64 tcg_gen_st_i64
1146 static inline void gen_mov_F0_vreg(int dp, int reg)
1148 if (dp)
1149 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1150 else
1151 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1154 static inline void gen_mov_F1_vreg(int dp, int reg)
1156 if (dp)
1157 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
1158 else
1159 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
1162 static inline void gen_mov_vreg_F0(int dp, int reg)
1164 if (dp)
1165 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1166 else
1167 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1170 #define ARM_CP_RW_BIT (1 << 20)
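/* Bit 20 is the coprocessor L bit: set for transfers from the coprocessor
   to the ARM core (MRC/MRRC/loads), clear for the opposite direction. */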
1172 static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
1174 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1177 static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
1179 tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1182 static inline TCGv iwmmxt_load_creg(int reg)
1184 TCGv var = tcg_temp_new_i32();
1185 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1186 return var;
1189 static inline void iwmmxt_store_creg(int reg, TCGv var)
1191 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1192 tcg_temp_free_i32(var);
1195 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1197 iwmmxt_store_reg(cpu_M0, rn);
1200 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1202 iwmmxt_load_reg(cpu_M0, rn);
1205 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1207 iwmmxt_load_reg(cpu_V1, rn);
1208 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1211 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1213 iwmmxt_load_reg(cpu_V1, rn);
1214 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1217 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1219 iwmmxt_load_reg(cpu_V1, rn);
1220 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1223 #define IWMMXT_OP(name) \
1224 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1226 iwmmxt_load_reg(cpu_V1, rn); \
1227 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1230 #define IWMMXT_OP_ENV(name) \
1231 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1233 iwmmxt_load_reg(cpu_V1, rn); \
1234 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1237 #define IWMMXT_OP_ENV_SIZE(name) \
1238 IWMMXT_OP_ENV(name##b) \
1239 IWMMXT_OP_ENV(name##w) \
1240 IWMMXT_OP_ENV(name##l)
1242 #define IWMMXT_OP_ENV1(name) \
1243 static inline void gen_op_iwmmxt_##name##_M0(void) \
1245 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1248 IWMMXT_OP(maddsq)
1249 IWMMXT_OP(madduq)
1250 IWMMXT_OP(sadb)
1251 IWMMXT_OP(sadw)
1252 IWMMXT_OP(mulslw)
1253 IWMMXT_OP(mulshw)
1254 IWMMXT_OP(mululw)
1255 IWMMXT_OP(muluhw)
1256 IWMMXT_OP(macsw)
1257 IWMMXT_OP(macuw)
1259 IWMMXT_OP_ENV_SIZE(unpackl)
1260 IWMMXT_OP_ENV_SIZE(unpackh)
1262 IWMMXT_OP_ENV1(unpacklub)
1263 IWMMXT_OP_ENV1(unpackluw)
1264 IWMMXT_OP_ENV1(unpacklul)
1265 IWMMXT_OP_ENV1(unpackhub)
1266 IWMMXT_OP_ENV1(unpackhuw)
1267 IWMMXT_OP_ENV1(unpackhul)
1268 IWMMXT_OP_ENV1(unpacklsb)
1269 IWMMXT_OP_ENV1(unpacklsw)
1270 IWMMXT_OP_ENV1(unpacklsl)
1271 IWMMXT_OP_ENV1(unpackhsb)
1272 IWMMXT_OP_ENV1(unpackhsw)
1273 IWMMXT_OP_ENV1(unpackhsl)
1275 IWMMXT_OP_ENV_SIZE(cmpeq)
1276 IWMMXT_OP_ENV_SIZE(cmpgtu)
1277 IWMMXT_OP_ENV_SIZE(cmpgts)
1279 IWMMXT_OP_ENV_SIZE(mins)
1280 IWMMXT_OP_ENV_SIZE(minu)
1281 IWMMXT_OP_ENV_SIZE(maxs)
1282 IWMMXT_OP_ENV_SIZE(maxu)
1284 IWMMXT_OP_ENV_SIZE(subn)
1285 IWMMXT_OP_ENV_SIZE(addn)
1286 IWMMXT_OP_ENV_SIZE(subu)
1287 IWMMXT_OP_ENV_SIZE(addu)
1288 IWMMXT_OP_ENV_SIZE(subs)
1289 IWMMXT_OP_ENV_SIZE(adds)
1291 IWMMXT_OP_ENV(avgb0)
1292 IWMMXT_OP_ENV(avgb1)
1293 IWMMXT_OP_ENV(avgw0)
1294 IWMMXT_OP_ENV(avgw1)
1296 IWMMXT_OP(msadb)
1298 IWMMXT_OP_ENV(packuw)
1299 IWMMXT_OP_ENV(packul)
1300 IWMMXT_OP_ENV(packuq)
1301 IWMMXT_OP_ENV(packsw)
1302 IWMMXT_OP_ENV(packsl)
1303 IWMMXT_OP_ENV(packsq)
1305 static void gen_op_iwmmxt_set_mup(void)
1307 TCGv tmp;
1308 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1309 tcg_gen_ori_i32(tmp, tmp, 2);
1310 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1313 static void gen_op_iwmmxt_set_cup(void)
1315 TCGv tmp;
1316 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1317 tcg_gen_ori_i32(tmp, tmp, 1);
1318 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1321 static void gen_op_iwmmxt_setpsr_nz(void)
1323 TCGv tmp = tcg_temp_new_i32();
1324 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1325 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1328 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1330 iwmmxt_load_reg(cpu_V1, rn);
1331 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
1332 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
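/* Decode the addressing mode of an iwMMXt load/store into 'dest'.  Bit 24
   selects pre- vs post-indexing, bit 23 up/down, bit 21 base writeback; the
   8-bit immediate is scaled by 4 when bit 8 is set.  Returns 1 for an
   invalid combination. */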
1335 static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
1337 int rd;
1338 uint32_t offset;
1339 TCGv tmp;
1341 rd = (insn >> 16) & 0xf;
1342 tmp = load_reg(s, rd);
1344 offset = (insn & 0xff) << ((insn >> 7) & 2);
1345 if (insn & (1 << 24)) {
1346 /* Pre indexed */
1347 if (insn & (1 << 23))
1348 tcg_gen_addi_i32(tmp, tmp, offset);
1349 else
1350 tcg_gen_addi_i32(tmp, tmp, -offset);
1351 tcg_gen_mov_i32(dest, tmp);
1352 if (insn & (1 << 21))
1353 store_reg(s, rd, tmp);
1354 else
1355 tcg_temp_free_i32(tmp);
1356 } else if (insn & (1 << 21)) {
1357 /* Post indexed */
1358 tcg_gen_mov_i32(dest, tmp);
1359 if (insn & (1 << 23))
1360 tcg_gen_addi_i32(tmp, tmp, offset);
1361 else
1362 tcg_gen_addi_i32(tmp, tmp, -offset);
1363 store_reg(s, rd, tmp);
1364 } else if (!(insn & (1 << 23)))
1365 return 1;
1366 return 0;
1369 static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
1371 int rd = (insn >> 0) & 0xf;
1372 TCGv tmp;
1374 if (insn & (1 << 8)) {
1375 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
1376 return 1;
1377 } else {
1378 tmp = iwmmxt_load_creg(rd);
1380 } else {
1381 tmp = tcg_temp_new_i32();
1382 iwmmxt_load_reg(cpu_V0, rd);
1383 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
1385 tcg_gen_andi_i32(tmp, tmp, mask);
1386 tcg_gen_mov_i32(dest, tmp);
1387 tcg_temp_free_i32(tmp);
1388 return 0;
1391 /* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
1392 (i.e. an undefined instruction). */
1393 static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
1395 int rd, wrd;
1396 int rdhi, rdlo, rd0, rd1, i;
1397 TCGv addr;
1398 TCGv tmp, tmp2, tmp3;
1400 if ((insn & 0x0e000e00) == 0x0c000000) {
1401 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1402 wrd = insn & 0xf;
1403 rdlo = (insn >> 12) & 0xf;
1404 rdhi = (insn >> 16) & 0xf;
1405 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
1406 iwmmxt_load_reg(cpu_V0, wrd);
1407 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
1408 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1409 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
1410 } else { /* TMCRR */
1411 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1412 iwmmxt_store_reg(cpu_V0, wrd);
1413 gen_op_iwmmxt_set_mup();
1415 return 0;
1418 wrd = (insn >> 12) & 0xf;
1419 addr = tcg_temp_new_i32();
1420 if (gen_iwmmxt_address(s, insn, addr)) {
1421 tcg_temp_free_i32(addr);
1422 return 1;
1424 if (insn & ARM_CP_RW_BIT) {
1425 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
1426 tmp = tcg_temp_new_i32();
1427 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
1428 iwmmxt_store_creg(wrd, tmp);
1429 } else {
1430 i = 1;
1431 if (insn & (1 << 8)) {
1432 if (insn & (1 << 22)) { /* WLDRD */
1433 tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
1434 i = 0;
1435 } else { /* WLDRW wRd */
1436 tmp = gen_ld32(addr, IS_USER(s));
1438 } else {
1439 if (insn & (1 << 22)) { /* WLDRH */
1440 tmp = gen_ld16u(addr, IS_USER(s));
1441 } else { /* WLDRB */
1442 tmp = gen_ld8u(addr, IS_USER(s));
1445 if (i) {
1446 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1447 tcg_temp_free_i32(tmp);
1449 gen_op_iwmmxt_movq_wRn_M0(wrd);
1451 } else {
1452 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
1453 tmp = iwmmxt_load_creg(wrd);
1454 gen_st32(tmp, addr, IS_USER(s));
1455 } else {
1456 gen_op_iwmmxt_movq_M0_wRn(wrd);
1457 tmp = tcg_temp_new_i32();
1458 if (insn & (1 << 8)) {
1459 if (insn & (1 << 22)) { /* WSTRD */
1460 tcg_temp_free_i32(tmp);
1461 tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
1462 } else { /* WSTRW wRd */
1463 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1464 gen_st32(tmp, addr, IS_USER(s));
1466 } else {
1467 if (insn & (1 << 22)) { /* WSTRH */
1468 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1469 gen_st16(tmp, addr, IS_USER(s));
1470 } else { /* WSTRB */
1471 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1472 gen_st8(tmp, addr, IS_USER(s));
1477 tcg_temp_free_i32(addr);
1478 return 0;
1481 if ((insn & 0x0f000000) != 0x0e000000)
1482 return 1;
1484 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1485 case 0x000: /* WOR */
1486 wrd = (insn >> 12) & 0xf;
1487 rd0 = (insn >> 0) & 0xf;
1488 rd1 = (insn >> 16) & 0xf;
1489 gen_op_iwmmxt_movq_M0_wRn(rd0);
1490 gen_op_iwmmxt_orq_M0_wRn(rd1);
1491 gen_op_iwmmxt_setpsr_nz();
1492 gen_op_iwmmxt_movq_wRn_M0(wrd);
1493 gen_op_iwmmxt_set_mup();
1494 gen_op_iwmmxt_set_cup();
1495 break;
1496 case 0x011: /* TMCR */
1497 if (insn & 0xf)
1498 return 1;
1499 rd = (insn >> 12) & 0xf;
1500 wrd = (insn >> 16) & 0xf;
1501 switch (wrd) {
1502 case ARM_IWMMXT_wCID:
1503 case ARM_IWMMXT_wCASF:
1504 break;
1505 case ARM_IWMMXT_wCon:
1506 gen_op_iwmmxt_set_cup();
1507 /* Fall through. */
1508 case ARM_IWMMXT_wCSSF:
1509 tmp = iwmmxt_load_creg(wrd);
1510 tmp2 = load_reg(s, rd);
1511 tcg_gen_andc_i32(tmp, tmp, tmp2);
1512 tcg_temp_free_i32(tmp2);
1513 iwmmxt_store_creg(wrd, tmp);
1514 break;
1515 case ARM_IWMMXT_wCGR0:
1516 case ARM_IWMMXT_wCGR1:
1517 case ARM_IWMMXT_wCGR2:
1518 case ARM_IWMMXT_wCGR3:
1519 gen_op_iwmmxt_set_cup();
1520 tmp = load_reg(s, rd);
1521 iwmmxt_store_creg(wrd, tmp);
1522 break;
1523 default:
1524 return 1;
1526 break;
1527 case 0x100: /* WXOR */
1528 wrd = (insn >> 12) & 0xf;
1529 rd0 = (insn >> 0) & 0xf;
1530 rd1 = (insn >> 16) & 0xf;
1531 gen_op_iwmmxt_movq_M0_wRn(rd0);
1532 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1533 gen_op_iwmmxt_setpsr_nz();
1534 gen_op_iwmmxt_movq_wRn_M0(wrd);
1535 gen_op_iwmmxt_set_mup();
1536 gen_op_iwmmxt_set_cup();
1537 break;
1538 case 0x111: /* TMRC */
1539 if (insn & 0xf)
1540 return 1;
1541 rd = (insn >> 12) & 0xf;
1542 wrd = (insn >> 16) & 0xf;
1543 tmp = iwmmxt_load_creg(wrd);
1544 store_reg(s, rd, tmp);
1545 break;
1546 case 0x300: /* WANDN */
1547 wrd = (insn >> 12) & 0xf;
1548 rd0 = (insn >> 0) & 0xf;
1549 rd1 = (insn >> 16) & 0xf;
1550 gen_op_iwmmxt_movq_M0_wRn(rd0);
1551 tcg_gen_neg_i64(cpu_M0, cpu_M0);
1552 gen_op_iwmmxt_andq_M0_wRn(rd1);
1553 gen_op_iwmmxt_setpsr_nz();
1554 gen_op_iwmmxt_movq_wRn_M0(wrd);
1555 gen_op_iwmmxt_set_mup();
1556 gen_op_iwmmxt_set_cup();
1557 break;
1558 case 0x200: /* WAND */
1559 wrd = (insn >> 12) & 0xf;
1560 rd0 = (insn >> 0) & 0xf;
1561 rd1 = (insn >> 16) & 0xf;
1562 gen_op_iwmmxt_movq_M0_wRn(rd0);
1563 gen_op_iwmmxt_andq_M0_wRn(rd1);
1564 gen_op_iwmmxt_setpsr_nz();
1565 gen_op_iwmmxt_movq_wRn_M0(wrd);
1566 gen_op_iwmmxt_set_mup();
1567 gen_op_iwmmxt_set_cup();
1568 break;
1569 case 0x810: case 0xa10: /* WMADD */
1570 wrd = (insn >> 12) & 0xf;
1571 rd0 = (insn >> 0) & 0xf;
1572 rd1 = (insn >> 16) & 0xf;
1573 gen_op_iwmmxt_movq_M0_wRn(rd0);
1574 if (insn & (1 << 21))
1575 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1576 else
1577 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1578 gen_op_iwmmxt_movq_wRn_M0(wrd);
1579 gen_op_iwmmxt_set_mup();
1580 break;
1581 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1582 wrd = (insn >> 12) & 0xf;
1583 rd0 = (insn >> 16) & 0xf;
1584 rd1 = (insn >> 0) & 0xf;
1585 gen_op_iwmmxt_movq_M0_wRn(rd0);
1586 switch ((insn >> 22) & 3) {
1587 case 0:
1588 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1589 break;
1590 case 1:
1591 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1592 break;
1593 case 2:
1594 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1595 break;
1596 case 3:
1597 return 1;
1599 gen_op_iwmmxt_movq_wRn_M0(wrd);
1600 gen_op_iwmmxt_set_mup();
1601 gen_op_iwmmxt_set_cup();
1602 break;
1603 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1604 wrd = (insn >> 12) & 0xf;
1605 rd0 = (insn >> 16) & 0xf;
1606 rd1 = (insn >> 0) & 0xf;
1607 gen_op_iwmmxt_movq_M0_wRn(rd0);
1608 switch ((insn >> 22) & 3) {
1609 case 0:
1610 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1611 break;
1612 case 1:
1613 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1614 break;
1615 case 2:
1616 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1617 break;
1618 case 3:
1619 return 1;
1621 gen_op_iwmmxt_movq_wRn_M0(wrd);
1622 gen_op_iwmmxt_set_mup();
1623 gen_op_iwmmxt_set_cup();
1624 break;
1625 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1626 wrd = (insn >> 12) & 0xf;
1627 rd0 = (insn >> 16) & 0xf;
1628 rd1 = (insn >> 0) & 0xf;
1629 gen_op_iwmmxt_movq_M0_wRn(rd0);
1630 if (insn & (1 << 22))
1631 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1632 else
1633 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1634 if (!(insn & (1 << 20)))
1635 gen_op_iwmmxt_addl_M0_wRn(wrd);
1636 gen_op_iwmmxt_movq_wRn_M0(wrd);
1637 gen_op_iwmmxt_set_mup();
1638 break;
1639 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1640 wrd = (insn >> 12) & 0xf;
1641 rd0 = (insn >> 16) & 0xf;
1642 rd1 = (insn >> 0) & 0xf;
1643 gen_op_iwmmxt_movq_M0_wRn(rd0);
1644 if (insn & (1 << 21)) {
1645 if (insn & (1 << 20))
1646 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1647 else
1648 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1649 } else {
1650 if (insn & (1 << 20))
1651 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1652 else
1653 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1655 gen_op_iwmmxt_movq_wRn_M0(wrd);
1656 gen_op_iwmmxt_set_mup();
1657 break;
1658 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1659 wrd = (insn >> 12) & 0xf;
1660 rd0 = (insn >> 16) & 0xf;
1661 rd1 = (insn >> 0) & 0xf;
1662 gen_op_iwmmxt_movq_M0_wRn(rd0);
1663 if (insn & (1 << 21))
1664 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1665 else
1666 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1667 if (!(insn & (1 << 20))) {
1668 iwmmxt_load_reg(cpu_V1, wrd);
1669 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1671 gen_op_iwmmxt_movq_wRn_M0(wrd);
1672 gen_op_iwmmxt_set_mup();
1673 break;
1674 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1675 wrd = (insn >> 12) & 0xf;
1676 rd0 = (insn >> 16) & 0xf;
1677 rd1 = (insn >> 0) & 0xf;
1678 gen_op_iwmmxt_movq_M0_wRn(rd0);
1679 switch ((insn >> 22) & 3) {
1680 case 0:
1681 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1682 break;
1683 case 1:
1684 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1685 break;
1686 case 2:
1687 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1688 break;
1689 case 3:
1690 return 1;
1692 gen_op_iwmmxt_movq_wRn_M0(wrd);
1693 gen_op_iwmmxt_set_mup();
1694 gen_op_iwmmxt_set_cup();
1695 break;
1696 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1697 wrd = (insn >> 12) & 0xf;
1698 rd0 = (insn >> 16) & 0xf;
1699 rd1 = (insn >> 0) & 0xf;
1700 gen_op_iwmmxt_movq_M0_wRn(rd0);
1701 if (insn & (1 << 22)) {
1702 if (insn & (1 << 20))
1703 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1704 else
1705 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1706 } else {
1707 if (insn & (1 << 20))
1708 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1709 else
1710 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1712 gen_op_iwmmxt_movq_wRn_M0(wrd);
1713 gen_op_iwmmxt_set_mup();
1714 gen_op_iwmmxt_set_cup();
1715 break;
1716 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1717 wrd = (insn >> 12) & 0xf;
1718 rd0 = (insn >> 16) & 0xf;
1719 rd1 = (insn >> 0) & 0xf;
1720 gen_op_iwmmxt_movq_M0_wRn(rd0);
1721 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1722 tcg_gen_andi_i32(tmp, tmp, 7);
1723 iwmmxt_load_reg(cpu_V1, rd1);
1724 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
1725 tcg_temp_free_i32(tmp);
1726 gen_op_iwmmxt_movq_wRn_M0(wrd);
1727 gen_op_iwmmxt_set_mup();
1728 break;
1729 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1730 if (((insn >> 6) & 3) == 3)
1731 return 1;
1732 rd = (insn >> 12) & 0xf;
1733 wrd = (insn >> 16) & 0xf;
1734 tmp = load_reg(s, rd);
1735 gen_op_iwmmxt_movq_M0_wRn(wrd);
1736 switch ((insn >> 6) & 3) {
1737 case 0:
1738 tmp2 = tcg_const_i32(0xff);
1739 tmp3 = tcg_const_i32((insn & 7) << 3);
1740 break;
1741 case 1:
1742 tmp2 = tcg_const_i32(0xffff);
1743 tmp3 = tcg_const_i32((insn & 3) << 4);
1744 break;
1745 case 2:
1746 tmp2 = tcg_const_i32(0xffffffff);
1747 tmp3 = tcg_const_i32((insn & 1) << 5);
1748 break;
1749 default:
1750 TCGV_UNUSED(tmp2);
1751 TCGV_UNUSED(tmp3);
1753 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
1754 tcg_temp_free(tmp3);
1755 tcg_temp_free(tmp2);
1756 tcg_temp_free_i32(tmp);
1757 gen_op_iwmmxt_movq_wRn_M0(wrd);
1758 gen_op_iwmmxt_set_mup();
1759 break;
1760 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1761 rd = (insn >> 12) & 0xf;
1762 wrd = (insn >> 16) & 0xf;
1763 if (rd == 15 || ((insn >> 22) & 3) == 3)
1764 return 1;
1765 gen_op_iwmmxt_movq_M0_wRn(wrd);
1766 tmp = tcg_temp_new_i32();
1767 switch ((insn >> 22) & 3) {
1768 case 0:
1769 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1770 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1771 if (insn & 8) {
1772 tcg_gen_ext8s_i32(tmp, tmp);
1773 } else {
1774 tcg_gen_andi_i32(tmp, tmp, 0xff);
1776 break;
1777 case 1:
1778 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1779 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1780 if (insn & 8) {
1781 tcg_gen_ext16s_i32(tmp, tmp);
1782 } else {
1783 tcg_gen_andi_i32(tmp, tmp, 0xffff);
1785 break;
1786 case 2:
1787 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1788 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1789 break;
1791 store_reg(s, rd, tmp);
1792 break;
1793 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
1794 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1795 return 1;
1796 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1797 switch ((insn >> 22) & 3) {
1798 case 0:
1799 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
1800 break;
1801 case 1:
1802 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
1803 break;
1804 case 2:
1805 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
1806 break;
1808 tcg_gen_shli_i32(tmp, tmp, 28);
1809 gen_set_nzcv(tmp);
1810 tcg_temp_free_i32(tmp);
1811 break;
1812 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
1813 if (((insn >> 6) & 3) == 3)
1814 return 1;
1815 rd = (insn >> 12) & 0xf;
1816 wrd = (insn >> 16) & 0xf;
1817 tmp = load_reg(s, rd);
1818 switch ((insn >> 6) & 3) {
1819 case 0:
1820 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
1821 break;
1822 case 1:
1823 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
1824 break;
1825 case 2:
1826 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
1827 break;
1829 tcg_temp_free_i32(tmp);
1830 gen_op_iwmmxt_movq_wRn_M0(wrd);
1831 gen_op_iwmmxt_set_mup();
1832 break;
1833 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
1834 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1835 return 1;
1836 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1837 tmp2 = tcg_temp_new_i32();
1838 tcg_gen_mov_i32(tmp2, tmp);
1839 switch ((insn >> 22) & 3) {
1840 case 0:
1841 for (i = 0; i < 7; i ++) {
1842 tcg_gen_shli_i32(tmp2, tmp2, 4);
1843 tcg_gen_and_i32(tmp, tmp, tmp2);
1845 break;
1846 case 1:
1847 for (i = 0; i < 3; i ++) {
1848 tcg_gen_shli_i32(tmp2, tmp2, 8);
1849 tcg_gen_and_i32(tmp, tmp, tmp2);
1851 break;
1852 case 2:
1853 tcg_gen_shli_i32(tmp2, tmp2, 16);
1854 tcg_gen_and_i32(tmp, tmp, tmp2);
1855 break;
1857 gen_set_nzcv(tmp);
1858 tcg_temp_free_i32(tmp2);
1859 tcg_temp_free_i32(tmp);
1860 break;
1861 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1862 wrd = (insn >> 12) & 0xf;
1863 rd0 = (insn >> 16) & 0xf;
1864 gen_op_iwmmxt_movq_M0_wRn(rd0);
1865 switch ((insn >> 22) & 3) {
1866 case 0:
1867 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
1868 break;
1869 case 1:
1870 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
1871 break;
1872 case 2:
1873 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
1874 break;
1875 case 3:
1876 return 1;
1878 gen_op_iwmmxt_movq_wRn_M0(wrd);
1879 gen_op_iwmmxt_set_mup();
1880 break;
1881 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
1882 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1883 return 1;
1884 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1885 tmp2 = tcg_temp_new_i32();
1886 tcg_gen_mov_i32(tmp2, tmp);
1887 switch ((insn >> 22) & 3) {
1888 case 0:
1889 for (i = 0; i < 7; i ++) {
1890 tcg_gen_shli_i32(tmp2, tmp2, 4);
1891 tcg_gen_or_i32(tmp, tmp, tmp2);
1893 break;
1894 case 1:
1895 for (i = 0; i < 3; i ++) {
1896 tcg_gen_shli_i32(tmp2, tmp2, 8);
1897 tcg_gen_or_i32(tmp, tmp, tmp2);
1899 break;
1900 case 2:
1901 tcg_gen_shli_i32(tmp2, tmp2, 16);
1902 tcg_gen_or_i32(tmp, tmp, tmp2);
1903 break;
1905 gen_set_nzcv(tmp);
1906 tcg_temp_free_i32(tmp2);
1907 tcg_temp_free_i32(tmp);
1908 break;
1909 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1910 rd = (insn >> 12) & 0xf;
1911 rd0 = (insn >> 16) & 0xf;
1912 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
1913 return 1;
1914 gen_op_iwmmxt_movq_M0_wRn(rd0);
1915 tmp = tcg_temp_new_i32();
1916 switch ((insn >> 22) & 3) {
1917 case 0:
1918 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
1919 break;
1920 case 1:
1921 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
1922 break;
1923 case 2:
1924 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
1925 break;
1927 store_reg(s, rd, tmp);
1928 break;
1929 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1930 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1931 wrd = (insn >> 12) & 0xf;
1932 rd0 = (insn >> 16) & 0xf;
1933 rd1 = (insn >> 0) & 0xf;
1934 gen_op_iwmmxt_movq_M0_wRn(rd0);
1935 switch ((insn >> 22) & 3) {
1936 case 0:
1937 if (insn & (1 << 21))
1938 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
1939 else
1940 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
1941 break;
1942 case 1:
1943 if (insn & (1 << 21))
1944 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
1945 else
1946 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
1947 break;
1948 case 2:
1949 if (insn & (1 << 21))
1950 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
1951 else
1952 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
1953 break;
1954 case 3:
1955 return 1;
1957 gen_op_iwmmxt_movq_wRn_M0(wrd);
1958 gen_op_iwmmxt_set_mup();
1959 gen_op_iwmmxt_set_cup();
1960 break;
1961 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
1962 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
1963 wrd = (insn >> 12) & 0xf;
1964 rd0 = (insn >> 16) & 0xf;
1965 gen_op_iwmmxt_movq_M0_wRn(rd0);
1966 switch ((insn >> 22) & 3) {
1967 case 0:
1968 if (insn & (1 << 21))
1969 gen_op_iwmmxt_unpacklsb_M0();
1970 else
1971 gen_op_iwmmxt_unpacklub_M0();
1972 break;
1973 case 1:
1974 if (insn & (1 << 21))
1975 gen_op_iwmmxt_unpacklsw_M0();
1976 else
1977 gen_op_iwmmxt_unpackluw_M0();
1978 break;
1979 case 2:
1980 if (insn & (1 << 21))
1981 gen_op_iwmmxt_unpacklsl_M0();
1982 else
1983 gen_op_iwmmxt_unpacklul_M0();
1984 break;
1985 case 3:
1986 return 1;
1988 gen_op_iwmmxt_movq_wRn_M0(wrd);
1989 gen_op_iwmmxt_set_mup();
1990 gen_op_iwmmxt_set_cup();
1991 break;
1992 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
1993 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
1994 wrd = (insn >> 12) & 0xf;
1995 rd0 = (insn >> 16) & 0xf;
1996 gen_op_iwmmxt_movq_M0_wRn(rd0);
1997 switch ((insn >> 22) & 3) {
1998 case 0:
1999 if (insn & (1 << 21))
2000 gen_op_iwmmxt_unpackhsb_M0();
2001 else
2002 gen_op_iwmmxt_unpackhub_M0();
2003 break;
2004 case 1:
2005 if (insn & (1 << 21))
2006 gen_op_iwmmxt_unpackhsw_M0();
2007 else
2008 gen_op_iwmmxt_unpackhuw_M0();
2009 break;
2010 case 2:
2011 if (insn & (1 << 21))
2012 gen_op_iwmmxt_unpackhsl_M0();
2013 else
2014 gen_op_iwmmxt_unpackhul_M0();
2015 break;
2016 case 3:
2017 return 1;
2019 gen_op_iwmmxt_movq_wRn_M0(wrd);
2020 gen_op_iwmmxt_set_mup();
2021 gen_op_iwmmxt_set_cup();
2022 break;
2023 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2024 case 0x214: case 0x614: case 0xa14: case 0xe14:
2025 if (((insn >> 22) & 3) == 0)
2026 return 1;
2027 wrd = (insn >> 12) & 0xf;
2028 rd0 = (insn >> 16) & 0xf;
2029 gen_op_iwmmxt_movq_M0_wRn(rd0);
2030 tmp = tcg_temp_new_i32();
2031 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2032 tcg_temp_free_i32(tmp);
2033 return 1;
2035 switch ((insn >> 22) & 3) {
2036 case 1:
2037 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
2038 break;
2039 case 2:
2040 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
2041 break;
2042 case 3:
2043 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
2044 break;
2046 tcg_temp_free_i32(tmp);
2047 gen_op_iwmmxt_movq_wRn_M0(wrd);
2048 gen_op_iwmmxt_set_mup();
2049 gen_op_iwmmxt_set_cup();
2050 break;
2051 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2052 case 0x014: case 0x414: case 0x814: case 0xc14:
2053 if (((insn >> 22) & 3) == 0)
2054 return 1;
2055 wrd = (insn >> 12) & 0xf;
2056 rd0 = (insn >> 16) & 0xf;
2057 gen_op_iwmmxt_movq_M0_wRn(rd0);
2058 tmp = tcg_temp_new_i32();
2059 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2060 tcg_temp_free_i32(tmp);
2061 return 1;
2063 switch ((insn >> 22) & 3) {
2064 case 1:
2065 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
2066 break;
2067 case 2:
2068 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
2069 break;
2070 case 3:
2071 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
2072 break;
2074 tcg_temp_free_i32(tmp);
2075 gen_op_iwmmxt_movq_wRn_M0(wrd);
2076 gen_op_iwmmxt_set_mup();
2077 gen_op_iwmmxt_set_cup();
2078 break;
2079 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2080 case 0x114: case 0x514: case 0x914: case 0xd14:
2081 if (((insn >> 22) & 3) == 0)
2082 return 1;
2083 wrd = (insn >> 12) & 0xf;
2084 rd0 = (insn >> 16) & 0xf;
2085 gen_op_iwmmxt_movq_M0_wRn(rd0);
2086 tmp = tcg_temp_new_i32();
2087 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2088 tcg_temp_free_i32(tmp);
2089 return 1;
2091 switch ((insn >> 22) & 3) {
2092 case 1:
2093 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
2094 break;
2095 case 2:
2096 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
2097 break;
2098 case 3:
2099 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
2100 break;
2102 tcg_temp_free_i32(tmp);
2103 gen_op_iwmmxt_movq_wRn_M0(wrd);
2104 gen_op_iwmmxt_set_mup();
2105 gen_op_iwmmxt_set_cup();
2106 break;
2107 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2108 case 0x314: case 0x714: case 0xb14: case 0xf14:
2109 if (((insn >> 22) & 3) == 0)
2110 return 1;
2111 wrd = (insn >> 12) & 0xf;
2112 rd0 = (insn >> 16) & 0xf;
2113 gen_op_iwmmxt_movq_M0_wRn(rd0);
2114 tmp = tcg_temp_new_i32();
2115 switch ((insn >> 22) & 3) {
2116 case 1:
2117 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2118 tcg_temp_free_i32(tmp);
2119 return 1;
2121 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
2122 break;
2123 case 2:
2124 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2125 tcg_temp_free_i32(tmp);
2126 return 1;
2128 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
2129 break;
2130 case 3:
2131 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2132 tcg_temp_free_i32(tmp);
2133 return 1;
2135 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
2136 break;
2138 tcg_temp_free_i32(tmp);
2139 gen_op_iwmmxt_movq_wRn_M0(wrd);
2140 gen_op_iwmmxt_set_mup();
2141 gen_op_iwmmxt_set_cup();
2142 break;
2143 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2144 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2145 wrd = (insn >> 12) & 0xf;
2146 rd0 = (insn >> 16) & 0xf;
2147 rd1 = (insn >> 0) & 0xf;
2148 gen_op_iwmmxt_movq_M0_wRn(rd0);
2149 switch ((insn >> 22) & 3) {
2150 case 0:
2151 if (insn & (1 << 21))
2152 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2153 else
2154 gen_op_iwmmxt_minub_M0_wRn(rd1);
2155 break;
2156 case 1:
2157 if (insn & (1 << 21))
2158 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2159 else
2160 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2161 break;
2162 case 2:
2163 if (insn & (1 << 21))
2164 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2165 else
2166 gen_op_iwmmxt_minul_M0_wRn(rd1);
2167 break;
2168 case 3:
2169 return 1;
2171 gen_op_iwmmxt_movq_wRn_M0(wrd);
2172 gen_op_iwmmxt_set_mup();
2173 break;
2174 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2175 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2176 wrd = (insn >> 12) & 0xf;
2177 rd0 = (insn >> 16) & 0xf;
2178 rd1 = (insn >> 0) & 0xf;
2179 gen_op_iwmmxt_movq_M0_wRn(rd0);
2180 switch ((insn >> 22) & 3) {
2181 case 0:
2182 if (insn & (1 << 21))
2183 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2184 else
2185 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2186 break;
2187 case 1:
2188 if (insn & (1 << 21))
2189 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2190 else
2191 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2192 break;
2193 case 2:
2194 if (insn & (1 << 21))
2195 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2196 else
2197 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2198 break;
2199 case 3:
2200 return 1;
2202 gen_op_iwmmxt_movq_wRn_M0(wrd);
2203 gen_op_iwmmxt_set_mup();
2204 break;
2205 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2206 case 0x402: case 0x502: case 0x602: case 0x702:
2207 wrd = (insn >> 12) & 0xf;
2208 rd0 = (insn >> 16) & 0xf;
2209 rd1 = (insn >> 0) & 0xf;
2210 gen_op_iwmmxt_movq_M0_wRn(rd0);
2211 tmp = tcg_const_i32((insn >> 20) & 3);
2212 iwmmxt_load_reg(cpu_V1, rd1);
2213 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2214 tcg_temp_free(tmp);
2215 gen_op_iwmmxt_movq_wRn_M0(wrd);
2216 gen_op_iwmmxt_set_mup();
2217 break;
2218 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2219 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2220 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2221 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2222 wrd = (insn >> 12) & 0xf;
2223 rd0 = (insn >> 16) & 0xf;
2224 rd1 = (insn >> 0) & 0xf;
2225 gen_op_iwmmxt_movq_M0_wRn(rd0);
2226 switch ((insn >> 20) & 0xf) {
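/* insn[23:22] gives the element size (8/16/32 bits); insn[21:20] gives the variant: 0 = modulo (no saturation), 1 = unsigned saturating, 3 = signed saturating. */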
2227 case 0x0:
2228 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2229 break;
2230 case 0x1:
2231 gen_op_iwmmxt_subub_M0_wRn(rd1);
2232 break;
2233 case 0x3:
2234 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2235 break;
2236 case 0x4:
2237 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2238 break;
2239 case 0x5:
2240 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2241 break;
2242 case 0x7:
2243 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2244 break;
2245 case 0x8:
2246 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2247 break;
2248 case 0x9:
2249 gen_op_iwmmxt_subul_M0_wRn(rd1);
2250 break;
2251 case 0xb:
2252 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2253 break;
2254 default:
2255 return 1;
2257 gen_op_iwmmxt_movq_wRn_M0(wrd);
2258 gen_op_iwmmxt_set_mup();
2259 gen_op_iwmmxt_set_cup();
2260 break;
2261 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2262 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2263 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2264 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2265 wrd = (insn >> 12) & 0xf;
2266 rd0 = (insn >> 16) & 0xf;
2267 gen_op_iwmmxt_movq_M0_wRn(rd0);
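/* WSHUFH: the 8-bit shuffle control is split across insn[23:20] (high nibble) and insn[3:0] (low nibble). */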
2268 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2269 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
2270 tcg_temp_free(tmp);
2271 gen_op_iwmmxt_movq_wRn_M0(wrd);
2272 gen_op_iwmmxt_set_mup();
2273 gen_op_iwmmxt_set_cup();
2274 break;
2275 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2276 case 0x418: case 0x518: case 0x618: case 0x718:
2277 case 0x818: case 0x918: case 0xa18: case 0xb18:
2278 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2279 wrd = (insn >> 12) & 0xf;
2280 rd0 = (insn >> 16) & 0xf;
2281 rd1 = (insn >> 0) & 0xf;
2282 gen_op_iwmmxt_movq_M0_wRn(rd0);
2283 switch ((insn >> 20) & 0xf) {
2284 case 0x0:
2285 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2286 break;
2287 case 0x1:
2288 gen_op_iwmmxt_addub_M0_wRn(rd1);
2289 break;
2290 case 0x3:
2291 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2292 break;
2293 case 0x4:
2294 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2295 break;
2296 case 0x5:
2297 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2298 break;
2299 case 0x7:
2300 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2301 break;
2302 case 0x8:
2303 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2304 break;
2305 case 0x9:
2306 gen_op_iwmmxt_addul_M0_wRn(rd1);
2307 break;
2308 case 0xb:
2309 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2310 break;
2311 default:
2312 return 1;
2314 gen_op_iwmmxt_movq_wRn_M0(wrd);
2315 gen_op_iwmmxt_set_mup();
2316 gen_op_iwmmxt_set_cup();
2317 break;
2318 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2319 case 0x408: case 0x508: case 0x608: case 0x708:
2320 case 0x808: case 0x908: case 0xa08: case 0xb08:
2321 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2322 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2323 return 1;
2324 wrd = (insn >> 12) & 0xf;
2325 rd0 = (insn >> 16) & 0xf;
2326 rd1 = (insn >> 0) & 0xf;
2327 gen_op_iwmmxt_movq_M0_wRn(rd0);
2328 switch ((insn >> 22) & 3) {
2329 case 1:
2330 if (insn & (1 << 21))
2331 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2332 else
2333 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2334 break;
2335 case 2:
2336 if (insn & (1 << 21))
2337 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2338 else
2339 gen_op_iwmmxt_packul_M0_wRn(rd1);
2340 break;
2341 case 3:
2342 if (insn & (1 << 21))
2343 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2344 else
2345 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2346 break;
2348 gen_op_iwmmxt_movq_wRn_M0(wrd);
2349 gen_op_iwmmxt_set_mup();
2350 gen_op_iwmmxt_set_cup();
2351 break;
2352 case 0x201: case 0x203: case 0x205: case 0x207:
2353 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2354 case 0x211: case 0x213: case 0x215: case 0x217:
2355 case 0x219: case 0x21b: case 0x21d: case 0x21f:
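/* TMIA, TMIAPH, TMIAxy: multiply two core registers and accumulate into wRd. */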
2356 wrd = (insn >> 5) & 0xf;
2357 rd0 = (insn >> 12) & 0xf;
2358 rd1 = (insn >> 0) & 0xf;
2359 if (rd0 == 0xf || rd1 == 0xf)
2360 return 1;
2361 gen_op_iwmmxt_movq_M0_wRn(wrd);
2362 tmp = load_reg(s, rd0);
2363 tmp2 = load_reg(s, rd1);
2364 switch ((insn >> 16) & 0xf) {
2365 case 0x0: /* TMIA */
2366 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2367 break;
2368 case 0x8: /* TMIAPH */
2369 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2370 break;
2371 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2372 if (insn & (1 << 16))
2373 tcg_gen_shri_i32(tmp, tmp, 16);
2374 if (insn & (1 << 17))
2375 tcg_gen_shri_i32(tmp2, tmp2, 16);
2376 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2377 break;
2378 default:
2379 tcg_temp_free_i32(tmp2);
2380 tcg_temp_free_i32(tmp);
2381 return 1;
2383 tcg_temp_free_i32(tmp2);
2384 tcg_temp_free_i32(tmp);
2385 gen_op_iwmmxt_movq_wRn_M0(wrd);
2386 gen_op_iwmmxt_set_mup();
2387 break;
2388 default:
2389 return 1;
2392 return 0;
2395 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2396 (i.e. an undefined instruction). */
2396 (i.e. an undefined instruction). */
2397 static int disas_dsp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
2399 int acc, rd0, rd1, rdhi, rdlo;
2400 TCGv tmp, tmp2;
2402 if ((insn & 0x0ff00f10) == 0x0e200010) {
2403 /* Multiply with Internal Accumulate Format */
2404 rd0 = (insn >> 12) & 0xf;
2405 rd1 = insn & 0xf;
2406 acc = (insn >> 5) & 7;
2408 if (acc != 0)
2409 return 1;
2411 tmp = load_reg(s, rd0);
2412 tmp2 = load_reg(s, rd1);
2413 switch ((insn >> 16) & 0xf) {
2414 case 0x0: /* MIA */
2415 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2416 break;
2417 case 0x8: /* MIAPH */
2418 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2419 break;
2420 case 0xc: /* MIABB */
2421 case 0xd: /* MIABT */
2422 case 0xe: /* MIATB */
2423 case 0xf: /* MIATT */
2424 if (insn & (1 << 16))
2425 tcg_gen_shri_i32(tmp, tmp, 16);
2426 if (insn & (1 << 17))
2427 tcg_gen_shri_i32(tmp2, tmp2, 16);
2428 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2429 break;
2430 default:
2431 return 1;
2433 tcg_temp_free_i32(tmp2);
2434 tcg_temp_free_i32(tmp);
2436 gen_op_iwmmxt_movq_wRn_M0(acc);
2437 return 0;
2440 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2441 /* Internal Accumulator Access Format */
2442 rdhi = (insn >> 16) & 0xf;
2443 rdlo = (insn >> 12) & 0xf;
2444 acc = insn & 7;
2446 if (acc != 0)
2447 return 1;
2449 if (insn & ARM_CP_RW_BIT) { /* MRA */
2450 iwmmxt_load_reg(cpu_V0, acc);
2451 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2452 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2453 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
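/* The accumulator is only 40 bits wide, so just 8 bits of the high word are significant. */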
2454 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
2455 } else { /* MAR */
2456 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2457 iwmmxt_store_reg(cpu_V0, acc);
2459 return 0;
2462 return 1;
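/* VFP register number decoding: a single-precision register number is a 4-bit field plus one extra low bit; a double-precision number is the 4-bit field plus one extra high bit, which is only valid on VFP3 (pre-VFP3 cores UNDEF if it is set). */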
2465 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2466 #define VFP_SREG(insn, bigbit, smallbit) \
2467 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2468 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2469 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2470 reg = (((insn) >> (bigbit)) & 0x0f) \
2471 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2472 } else { \
2473 if (insn & (1 << (smallbit))) \
2474 return 1; \
2475 reg = ((insn) >> (bigbit)) & 0x0f; \
2476 }} while (0)
2478 #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2479 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2480 #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2481 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2482 #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2483 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2485 /* Move between integer and VFP cores. */
2486 static TCGv gen_vfp_mrs(void)
2488 TCGv tmp = tcg_temp_new_i32();
2489 tcg_gen_mov_i32(tmp, cpu_F0s);
2490 return tmp;
2493 static void gen_vfp_msr(TCGv tmp)
2495 tcg_gen_mov_i32(cpu_F0s, tmp);
2496 tcg_temp_free_i32(tmp);
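/* Replicate the byte at bit offset 'shift' of var into all four byte lanes. */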
2499 static void gen_neon_dup_u8(TCGv var, int shift)
2501 TCGv tmp = tcg_temp_new_i32();
2502 if (shift)
2503 tcg_gen_shri_i32(var, var, shift);
2504 tcg_gen_ext8u_i32(var, var);
2505 tcg_gen_shli_i32(tmp, var, 8);
2506 tcg_gen_or_i32(var, var, tmp);
2507 tcg_gen_shli_i32(tmp, var, 16);
2508 tcg_gen_or_i32(var, var, tmp);
2509 tcg_temp_free_i32(tmp);
2512 static void gen_neon_dup_low16(TCGv var)
2514 TCGv tmp = tcg_temp_new_i32();
2515 tcg_gen_ext16u_i32(var, var);
2516 tcg_gen_shli_i32(tmp, var, 16);
2517 tcg_gen_or_i32(var, var, tmp);
2518 tcg_temp_free_i32(tmp);
2521 static void gen_neon_dup_high16(TCGv var)
2523 TCGv tmp = tcg_temp_new_i32();
2524 tcg_gen_andi_i32(var, var, 0xffff0000);
2525 tcg_gen_shri_i32(tmp, var, 16);
2526 tcg_gen_or_i32(var, var, tmp);
2527 tcg_temp_free_i32(tmp);
2530 static TCGv gen_load_and_replicate(DisasContext *s, TCGv addr, int size)
2532 /* Load a single Neon element and replicate into a 32 bit TCG reg */
2533 TCGv tmp;
2534 switch (size) {
2535 case 0:
2536 tmp = gen_ld8u(addr, IS_USER(s));
2537 gen_neon_dup_u8(tmp, 0);
2538 break;
2539 case 1:
2540 tmp = gen_ld16u(addr, IS_USER(s));
2541 gen_neon_dup_low16(tmp);
2542 break;
2543 case 2:
2544 tmp = gen_ld32(addr, IS_USER(s));
2545 break;
2546 default: /* Avoid compiler warnings. */
2547 abort();
2549 return tmp;
2552 /* Disassemble a VFP instruction. Returns nonzero if an error occurred
2553 (i.e. an undefined instruction). */
2554 static int disas_vfp_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
2556 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2557 int dp, veclen;
2558 TCGv addr;
2559 TCGv tmp;
2560 TCGv tmp2;
2562 if (!arm_feature(env, ARM_FEATURE_VFP))
2563 return 1;
2565 if (!s->vfp_enabled) {
2566 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
2567 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2568 return 1;
2569 rn = (insn >> 16) & 0xf;
2570 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2571 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
2572 return 1;
2574 dp = ((insn & 0xf00) == 0xb00);
2575 switch ((insn >> 24) & 0xf) {
2576 case 0xe:
2577 if (insn & (1 << 4)) {
2578 /* single register transfer */
2579 rd = (insn >> 12) & 0xf;
2580 if (dp) {
2581 int size;
2582 int pass;
2584 VFP_DREG_N(rn, insn);
2585 if (insn & 0xf)
2586 return 1;
2587 if (insn & 0x00c00060
2588 && !arm_feature(env, ARM_FEATURE_NEON))
2589 return 1;
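/* Work out the element size and its bit offset within the selected 32-bit half ('pass') of the D register: bit 22 selects a byte element, bit 5 a halfword, otherwise a full word. */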
2591 pass = (insn >> 21) & 1;
2592 if (insn & (1 << 22)) {
2593 size = 0;
2594 offset = ((insn >> 5) & 3) * 8;
2595 } else if (insn & (1 << 5)) {
2596 size = 1;
2597 offset = (insn & (1 << 6)) ? 16 : 0;
2598 } else {
2599 size = 2;
2600 offset = 0;
2602 if (insn & ARM_CP_RW_BIT) {
2603 /* vfp->arm */
2604 tmp = neon_load_reg(rn, pass);
2605 switch (size) {
2606 case 0:
2607 if (offset)
2608 tcg_gen_shri_i32(tmp, tmp, offset);
2609 if (insn & (1 << 23))
2610 gen_uxtb(tmp);
2611 else
2612 gen_sxtb(tmp);
2613 break;
2614 case 1:
2615 if (insn & (1 << 23)) {
2616 if (offset) {
2617 tcg_gen_shri_i32(tmp, tmp, 16);
2618 } else {
2619 gen_uxth(tmp);
2621 } else {
2622 if (offset) {
2623 tcg_gen_sari_i32(tmp, tmp, 16);
2624 } else {
2625 gen_sxth(tmp);
2628 break;
2629 case 2:
2630 break;
2632 store_reg(s, rd, tmp);
2633 } else {
2634 /* arm->vfp */
2635 tmp = load_reg(s, rd);
2636 if (insn & (1 << 23)) {
2637 /* VDUP */
2638 if (size == 0) {
2639 gen_neon_dup_u8(tmp, 0);
2640 } else if (size == 1) {
2641 gen_neon_dup_low16(tmp);
2643 for (n = 0; n <= pass * 2; n++) {
2644 tmp2 = tcg_temp_new_i32();
2645 tcg_gen_mov_i32(tmp2, tmp);
2646 neon_store_reg(rn, n, tmp2);
2648 neon_store_reg(rn, n, tmp);
2649 } else {
2650 /* VMOV */
2651 switch (size) {
2652 case 0:
2653 tmp2 = neon_load_reg(rn, pass);
2654 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
2655 tcg_temp_free_i32(tmp2);
2656 break;
2657 case 1:
2658 tmp2 = neon_load_reg(rn, pass);
2659 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
2660 tcg_temp_free_i32(tmp2);
2661 break;
2662 case 2:
2663 break;
2665 neon_store_reg(rn, pass, tmp);
2668 } else { /* !dp */
2669 if ((insn & 0x6f) != 0x00)
2670 return 1;
2671 rn = VFP_SREG_N(insn);
2672 if (insn & ARM_CP_RW_BIT) {
2673 /* vfp->arm */
2674 if (insn & (1 << 21)) {
2675 /* system register */
2676 rn >>= 1;
2678 switch (rn) {
2679 case ARM_VFP_FPSID:
2680 /* VFP2 allows access to FPSID from userspace.
2681 VFP3 restricts all id registers to privileged
2682 accesses. */
2683 if (IS_USER(s)
2684 && arm_feature(env, ARM_FEATURE_VFP3))
2685 return 1;
2686 tmp = load_cpu_field(vfp.xregs[rn]);
2687 break;
2688 case ARM_VFP_FPEXC:
2689 if (IS_USER(s))
2690 return 1;
2691 tmp = load_cpu_field(vfp.xregs[rn]);
2692 break;
2693 case ARM_VFP_FPINST:
2694 case ARM_VFP_FPINST2:
2695 /* Not present in VFP3. */
2696 if (IS_USER(s)
2697 || arm_feature(env, ARM_FEATURE_VFP3))
2698 return 1;
2699 tmp = load_cpu_field(vfp.xregs[rn]);
2700 break;
2701 case ARM_VFP_FPSCR:
2702 if (rd == 15) {
2703 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2704 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2705 } else {
2706 tmp = tcg_temp_new_i32();
2707 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2709 break;
2710 case ARM_VFP_MVFR0:
2711 case ARM_VFP_MVFR1:
2712 if (IS_USER(s)
2713 || !arm_feature(env, ARM_FEATURE_MVFR))
2714 return 1;
2715 tmp = load_cpu_field(vfp.xregs[rn]);
2716 break;
2717 default:
2718 return 1;
2720 } else {
2721 gen_mov_F0_vreg(0, rn);
2722 tmp = gen_vfp_mrs();
2724 if (rd == 15) {
2725 /* Set the 4 flag bits in the CPSR. */
2726 gen_set_nzcv(tmp);
2727 tcg_temp_free_i32(tmp);
2728 } else {
2729 store_reg(s, rd, tmp);
2731 } else {
2732 /* arm->vfp */
2733 tmp = load_reg(s, rd);
2734 if (insn & (1 << 21)) {
2735 rn >>= 1;
2736 /* system register */
2737 switch (rn) {
2738 case ARM_VFP_FPSID:
2739 case ARM_VFP_MVFR0:
2740 case ARM_VFP_MVFR1:
2741 /* Writes are ignored. */
2742 break;
2743 case ARM_VFP_FPSCR:
2744 gen_helper_vfp_set_fpscr(cpu_env, tmp);
2745 tcg_temp_free_i32(tmp);
2746 gen_lookup_tb(s);
2747 break;
2748 case ARM_VFP_FPEXC:
2749 if (IS_USER(s))
2750 return 1;
2751 /* TODO: VFP subarchitecture support.
2752 * For now, keep the EN bit only */
2753 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
2754 store_cpu_field(tmp, vfp.xregs[rn]);
2755 gen_lookup_tb(s);
2756 break;
2757 case ARM_VFP_FPINST:
2758 case ARM_VFP_FPINST2:
2759 store_cpu_field(tmp, vfp.xregs[rn]);
2760 break;
2761 default:
2762 return 1;
2764 } else {
2765 gen_vfp_msr(tmp);
2766 gen_mov_vreg_F0(0, rn);
2770 } else {
2771 /* data processing */
2772 /* The opcode is in bits 23, 21, 20 and 6. */
2773 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
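/* i.e. op[3] = insn[23], op[2:1] = insn[21:20], op[0] = insn[6]. */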
2774 if (dp) {
2775 if (op == 15) {
2776 /* rn is opcode */
2777 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2778 } else {
2779 /* rn is register number */
2780 VFP_DREG_N(rn, insn);
2783 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
2784 /* Integer or single precision destination. */
2785 rd = VFP_SREG_D(insn);
2786 } else {
2787 VFP_DREG_D(rd, insn);
2789 if (op == 15 &&
2790 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
2791 /* VCVT from int is always from S reg regardless of dp bit.
2792 * VCVT with immediate frac_bits has same format as SREG_M */
2794 rm = VFP_SREG_M(insn);
2795 } else {
2796 VFP_DREG_M(rm, insn);
2798 } else {
2799 rn = VFP_SREG_N(insn);
2800 if (op == 15 && rn == 15) {
2801 /* Double precision destination. */
2802 VFP_DREG_D(rd, insn);
2803 } else {
2804 rd = VFP_SREG_D(insn);
2806 /* NB that we implicitly rely on the encoding for the frac_bits
2807 * in VCVT of fixed to float being the same as that of an SREG_M */
2809 rm = VFP_SREG_M(insn);
2812 veclen = s->vec_len;
2813 if (op == 15 && rn > 3)
2814 veclen = 0;
2816 /* Shut up compiler warnings. */
2817 delta_m = 0;
2818 delta_d = 0;
2819 bank_mask = 0;
2821 if (veclen > 0) {
2822 if (dp)
2823 bank_mask = 0xc;
2824 else
2825 bank_mask = 0x18;
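/* Short-vector operations work on banks of registers: four doubles per bank (mask 0xc) or eight singles per bank (mask 0x18). */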
2827 /* Figure out what type of vector operation this is. */
2828 if ((rd & bank_mask) == 0) {
2829 /* scalar */
2830 veclen = 0;
2831 } else {
2832 if (dp)
2833 delta_d = (s->vec_stride >> 1) + 1;
2834 else
2835 delta_d = s->vec_stride + 1;
2837 if ((rm & bank_mask) == 0) {
2838 /* mixed scalar/vector */
2839 delta_m = 0;
2840 } else {
2841 /* vector */
2842 delta_m = delta_d;
2847 /* Load the initial operands. */
2848 if (op == 15) {
2849 switch (rn) {
2850 case 16:
2851 case 17:
2852 /* Integer source */
2853 gen_mov_F0_vreg(0, rm);
2854 break;
2855 case 8:
2856 case 9:
2857 /* Compare */
2858 gen_mov_F0_vreg(dp, rd);
2859 gen_mov_F1_vreg(dp, rm);
2860 break;
2861 case 10:
2862 case 11:
2863 /* Compare with zero */
2864 gen_mov_F0_vreg(dp, rd);
2865 gen_vfp_F1_ld0(dp);
2866 break;
2867 case 20:
2868 case 21:
2869 case 22:
2870 case 23:
2871 case 28:
2872 case 29:
2873 case 30:
2874 case 31:
2875 /* Source and destination the same. */
2876 gen_mov_F0_vreg(dp, rd);
2877 break;
2878 case 4:
2879 case 5:
2880 case 6:
2881 case 7:
2882 /* VCVTB, VCVTT: only present with the halfprec extension,
2883 * UNPREDICTABLE if bit 8 is set (we choose to UNDEF) */
2885 if (dp || !arm_feature(env, ARM_FEATURE_VFP_FP16)) {
2886 return 1;
2888 /* Otherwise fall through */
2889 default:
2890 /* One source operand. */
2891 gen_mov_F0_vreg(dp, rm);
2892 break;
2894 } else {
2895 /* Two source operands. */
2896 gen_mov_F0_vreg(dp, rn);
2897 gen_mov_F1_vreg(dp, rm);
2900 for (;;) {
2901 /* Perform the calculation. */
2902 switch (op) {
2903 case 0: /* VMLA: fd + (fn * fm) */
2904 /* Note that order of inputs to the add matters for NaNs */
2905 gen_vfp_F1_mul(dp);
2906 gen_mov_F0_vreg(dp, rd);
2907 gen_vfp_add(dp);
2908 break;
2909 case 1: /* VMLS: fd + -(fn * fm) */
2910 gen_vfp_mul(dp);
2911 gen_vfp_F1_neg(dp);
2912 gen_mov_F0_vreg(dp, rd);
2913 gen_vfp_add(dp);
2914 break;
2915 case 2: /* VNMLS: -fd + (fn * fm) */
2916 /* Note that it isn't valid to replace (-A + B) with (B - A)
2917 * or similar plausible looking simplifications
2918 * because this will give wrong results for NaNs. */
2920 gen_vfp_F1_mul(dp);
2921 gen_mov_F0_vreg(dp, rd);
2922 gen_vfp_neg(dp);
2923 gen_vfp_add(dp);
2924 break;
2925 case 3: /* VNMLA: -fd + -(fn * fm) */
2926 gen_vfp_mul(dp);
2927 gen_vfp_F1_neg(dp);
2928 gen_mov_F0_vreg(dp, rd);
2929 gen_vfp_neg(dp);
2930 gen_vfp_add(dp);
2931 break;
2932 case 4: /* mul: fn * fm */
2933 gen_vfp_mul(dp);
2934 break;
2935 case 5: /* nmul: -(fn * fm) */
2936 gen_vfp_mul(dp);
2937 gen_vfp_neg(dp);
2938 break;
2939 case 6: /* add: fn + fm */
2940 gen_vfp_add(dp);
2941 break;
2942 case 7: /* sub: fn - fm */
2943 gen_vfp_sub(dp);
2944 break;
2945 case 8: /* div: fn / fm */
2946 gen_vfp_div(dp);
2947 break;
2948 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
2949 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
2950 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
2951 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
2952 /* These are fused multiply-add, and must be done as one
2953 * floating point operation with no rounding between the
2954 * multiplication and addition steps.
2955 * NB that doing the negations here as separate steps is
2956 * correct: an input NaN should come out with its sign bit
2957 * flipped if it is a negated-input. */
2959 if (!arm_feature(env, ARM_FEATURE_VFP4)) {
2960 return 1;
2962 if (dp) {
2963 TCGv_ptr fpst;
2964 TCGv_i64 frd;
2965 if (op & 1) {
2966 /* VFNMS, VFMS */
2967 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
2969 frd = tcg_temp_new_i64();
2970 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
2971 if (op & 2) {
2972 /* VFNMA, VFNMS */
2973 gen_helper_vfp_negd(frd, frd);
2975 fpst = get_fpstatus_ptr(0);
2976 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
2977 cpu_F1d, frd, fpst);
2978 tcg_temp_free_ptr(fpst);
2979 tcg_temp_free_i64(frd);
2980 } else {
2981 TCGv_ptr fpst;
2982 TCGv_i32 frd;
2983 if (op & 1) {
2984 /* VFNMS, VFMS */
2985 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
2987 frd = tcg_temp_new_i32();
2988 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
2989 if (op & 2) {
2990 gen_helper_vfp_negs(frd, frd);
2992 fpst = get_fpstatus_ptr(0);
2993 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
2994 cpu_F1s, frd, fpst);
2995 tcg_temp_free_ptr(fpst);
2996 tcg_temp_free_i32(frd);
2998 break;
2999 case 14: /* fconst */
3000 if (!arm_feature(env, ARM_FEATURE_VFP3))
3001 return 1;
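/* VMOV immediate (VFP3): an 8-bit immediate (sign from insn[19], the rest from insn[18:16] and insn[3:0]) is expanded into a full single- or double-precision constant; for doubles only the top 32 bits are non-zero. */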
3003 n = (insn << 12) & 0x80000000;
3004 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3005 if (dp) {
3006 if (i & 0x40)
3007 i |= 0x3f80;
3008 else
3009 i |= 0x4000;
3010 n |= i << 16;
3011 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
3012 } else {
3013 if (i & 0x40)
3014 i |= 0x780;
3015 else
3016 i |= 0x800;
3017 n |= i << 19;
3018 tcg_gen_movi_i32(cpu_F0s, n);
3020 break;
3021 case 15: /* extension space */
3022 switch (rn) {
3023 case 0: /* cpy */
3024 /* no-op */
3025 break;
3026 case 1: /* abs */
3027 gen_vfp_abs(dp);
3028 break;
3029 case 2: /* neg */
3030 gen_vfp_neg(dp);
3031 break;
3032 case 3: /* sqrt */
3033 gen_vfp_sqrt(dp);
3034 break;
3035 case 4: /* vcvtb.f32.f16 */
3036 tmp = gen_vfp_mrs();
3037 tcg_gen_ext16u_i32(tmp, tmp);
3038 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3039 tcg_temp_free_i32(tmp);
3040 break;
3041 case 5: /* vcvtt.f32.f16 */
3042 tmp = gen_vfp_mrs();
3043 tcg_gen_shri_i32(tmp, tmp, 16);
3044 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3045 tcg_temp_free_i32(tmp);
3046 break;
3047 case 6: /* vcvtb.f16.f32 */
3048 tmp = tcg_temp_new_i32();
3049 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3050 gen_mov_F0_vreg(0, rd);
3051 tmp2 = gen_vfp_mrs();
3052 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3053 tcg_gen_or_i32(tmp, tmp, tmp2);
3054 tcg_temp_free_i32(tmp2);
3055 gen_vfp_msr(tmp);
3056 break;
3057 case 7: /* vcvtt.f16.f32 */
3058 tmp = tcg_temp_new_i32();
3059 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3060 tcg_gen_shli_i32(tmp, tmp, 16);
3061 gen_mov_F0_vreg(0, rd);
3062 tmp2 = gen_vfp_mrs();
3063 tcg_gen_ext16u_i32(tmp2, tmp2);
3064 tcg_gen_or_i32(tmp, tmp, tmp2);
3065 tcg_temp_free_i32(tmp2);
3066 gen_vfp_msr(tmp);
3067 break;
3068 case 8: /* cmp */
3069 gen_vfp_cmp(dp);
3070 break;
3071 case 9: /* cmpe */
3072 gen_vfp_cmpe(dp);
3073 break;
3074 case 10: /* cmpz */
3075 gen_vfp_cmp(dp);
3076 break;
3077 case 11: /* cmpez */
3078 gen_vfp_F1_ld0(dp);
3079 gen_vfp_cmpe(dp);
3080 break;
3081 case 15: /* single<->double conversion */
3082 if (dp)
3083 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
3084 else
3085 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
3086 break;
3087 case 16: /* fuito */
3088 gen_vfp_uito(dp, 0);
3089 break;
3090 case 17: /* fsito */
3091 gen_vfp_sito(dp, 0);
3092 break;
3093 case 20: /* fshto */
3094 if (!arm_feature(env, ARM_FEATURE_VFP3))
3095 return 1;
3096 gen_vfp_shto(dp, 16 - rm, 0);
3097 break;
3098 case 21: /* fslto */
3099 if (!arm_feature(env, ARM_FEATURE_VFP3))
3100 return 1;
3101 gen_vfp_slto(dp, 32 - rm, 0);
3102 break;
3103 case 22: /* fuhto */
3104 if (!arm_feature(env, ARM_FEATURE_VFP3))
3105 return 1;
3106 gen_vfp_uhto(dp, 16 - rm, 0);
3107 break;
3108 case 23: /* fulto */
3109 if (!arm_feature(env, ARM_FEATURE_VFP3))
3110 return 1;
3111 gen_vfp_ulto(dp, 32 - rm, 0);
3112 break;
3113 case 24: /* ftoui */
3114 gen_vfp_toui(dp, 0);
3115 break;
3116 case 25: /* ftouiz */
3117 gen_vfp_touiz(dp, 0);
3118 break;
3119 case 26: /* ftosi */
3120 gen_vfp_tosi(dp, 0);
3121 break;
3122 case 27: /* ftosiz */
3123 gen_vfp_tosiz(dp, 0);
3124 break;
3125 case 28: /* ftosh */
3126 if (!arm_feature(env, ARM_FEATURE_VFP3))
3127 return 1;
3128 gen_vfp_tosh(dp, 16 - rm, 0);
3129 break;
3130 case 29: /* ftosl */
3131 if (!arm_feature(env, ARM_FEATURE_VFP3))
3132 return 1;
3133 gen_vfp_tosl(dp, 32 - rm, 0);
3134 break;
3135 case 30: /* ftouh */
3136 if (!arm_feature(env, ARM_FEATURE_VFP3))
3137 return 1;
3138 gen_vfp_touh(dp, 16 - rm, 0);
3139 break;
3140 case 31: /* ftoul */
3141 if (!arm_feature(env, ARM_FEATURE_VFP3))
3142 return 1;
3143 gen_vfp_toul(dp, 32 - rm, 0);
3144 break;
3145 default: /* undefined */
3146 return 1;
3148 break;
3149 default: /* undefined */
3150 return 1;
3153 /* Write back the result. */
3154 if (op == 15 && (rn >= 8 && rn <= 11))
3155 ; /* Comparison, do nothing. */
3156 else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
3157 /* VCVT double to int: always integer result. */
3158 gen_mov_vreg_F0(0, rd);
3159 else if (op == 15 && rn == 15)
3160 /* conversion */
3161 gen_mov_vreg_F0(!dp, rd);
3162 else
3163 gen_mov_vreg_F0(dp, rd);
3165 /* break out of the loop if we have finished */
3166 if (veclen == 0)
3167 break;
3169 if (op == 15 && delta_m == 0) {
3170 /* single source one-many */
3171 while (veclen--) {
3172 rd = ((rd + delta_d) & (bank_mask - 1))
3173 | (rd & bank_mask);
3174 gen_mov_vreg_F0(dp, rd);
3176 break;
3178 /* Set up the next operands. */
3179 veclen--;
3180 rd = ((rd + delta_d) & (bank_mask - 1))
3181 | (rd & bank_mask);
3183 if (op == 15) {
3184 /* One source operand. */
3185 rm = ((rm + delta_m) & (bank_mask - 1))
3186 | (rm & bank_mask);
3187 gen_mov_F0_vreg(dp, rm);
3188 } else {
3189 /* Two source operands. */
3190 rn = ((rn + delta_d) & (bank_mask - 1))
3191 | (rn & bank_mask);
3192 gen_mov_F0_vreg(dp, rn);
3193 if (delta_m) {
3194 rm = ((rm + delta_m) & (bank_mask - 1))
3195 | (rm & bank_mask);
3196 gen_mov_F1_vreg(dp, rm);
3201 break;
3202 case 0xc:
3203 case 0xd:
3204 if ((insn & 0x03e00000) == 0x00400000) {
3205 /* two-register transfer */
3206 rn = (insn >> 16) & 0xf;
3207 rd = (insn >> 12) & 0xf;
3208 if (dp) {
3209 VFP_DREG_M(rm, insn);
3210 } else {
3211 rm = VFP_SREG_M(insn);
3214 if (insn & ARM_CP_RW_BIT) {
3215 /* vfp->arm */
3216 if (dp) {
3217 gen_mov_F0_vreg(0, rm * 2);
3218 tmp = gen_vfp_mrs();
3219 store_reg(s, rd, tmp);
3220 gen_mov_F0_vreg(0, rm * 2 + 1);
3221 tmp = gen_vfp_mrs();
3222 store_reg(s, rn, tmp);
3223 } else {
3224 gen_mov_F0_vreg(0, rm);
3225 tmp = gen_vfp_mrs();
3226 store_reg(s, rd, tmp);
3227 gen_mov_F0_vreg(0, rm + 1);
3228 tmp = gen_vfp_mrs();
3229 store_reg(s, rn, tmp);
3231 } else {
3232 /* arm->vfp */
3233 if (dp) {
3234 tmp = load_reg(s, rd);
3235 gen_vfp_msr(tmp);
3236 gen_mov_vreg_F0(0, rm * 2);
3237 tmp = load_reg(s, rn);
3238 gen_vfp_msr(tmp);
3239 gen_mov_vreg_F0(0, rm * 2 + 1);
3240 } else {
3241 tmp = load_reg(s, rd);
3242 gen_vfp_msr(tmp);
3243 gen_mov_vreg_F0(0, rm);
3244 tmp = load_reg(s, rn);
3245 gen_vfp_msr(tmp);
3246 gen_mov_vreg_F0(0, rm + 1);
3249 } else {
3250 /* Load/store */
3251 rn = (insn >> 16) & 0xf;
3252 if (dp)
3253 VFP_DREG_D(rd, insn);
3254 else
3255 rd = VFP_SREG_D(insn);
3256 if ((insn & 0x01200000) == 0x01000000) {
3257 /* Single load/store */
3258 offset = (insn & 0xff) << 2;
3259 if ((insn & (1 << 23)) == 0)
3260 offset = -offset;
3261 if (s->thumb && rn == 15) {
3262 /* This is actually UNPREDICTABLE */
3263 addr = tcg_temp_new_i32();
3264 tcg_gen_movi_i32(addr, s->pc & ~2);
3265 } else {
3266 addr = load_reg(s, rn);
3268 tcg_gen_addi_i32(addr, addr, offset);
3269 if (insn & (1 << 20)) {
3270 gen_vfp_ld(s, dp, addr);
3271 gen_mov_vreg_F0(dp, rd);
3272 } else {
3273 gen_mov_F0_vreg(dp, rd);
3274 gen_vfp_st(s, dp, addr);
3276 tcg_temp_free_i32(addr);
3277 } else {
3278 /* load/store multiple */
3279 int w = insn & (1 << 21);
3280 if (dp)
3281 n = (insn >> 1) & 0x7f;
3282 else
3283 n = insn & 0xff;
3285 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
3286 /* P == U, W == 1 => UNDEF */
3287 return 1;
3289 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
3290 /* UNPREDICTABLE cases for bad immediates: we choose to
3291 * UNDEF to avoid generating huge numbers of TCG ops */
3293 return 1;
3295 if (rn == 15 && w) {
3296 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
3297 return 1;
3300 if (s->thumb && rn == 15) {
3301 /* This is actually UNPREDICTABLE */
3302 addr = tcg_temp_new_i32();
3303 tcg_gen_movi_i32(addr, s->pc & ~2);
3304 } else {
3305 addr = load_reg(s, rn);
3307 if (insn & (1 << 24)) /* pre-decrement */
3308 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
3310 if (dp)
3311 offset = 8;
3312 else
3313 offset = 4;
3314 for (i = 0; i < n; i++) {
3315 if (insn & ARM_CP_RW_BIT) {
3316 /* load */
3317 gen_vfp_ld(s, dp, addr);
3318 gen_mov_vreg_F0(dp, rd + i);
3319 } else {
3320 /* store */
3321 gen_mov_F0_vreg(dp, rd + i);
3322 gen_vfp_st(s, dp, addr);
3324 tcg_gen_addi_i32(addr, addr, offset);
3326 if (w) {
3327 /* writeback */
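/* addr now points just past the transferred data: for decrement-before addressing step back to the lowest address; for the FLDMX/FSTMX encodings (odd count field) include one extra pad word. */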
3328 if (insn & (1 << 24))
3329 offset = -offset * n;
3330 else if (dp && (insn & 1))
3331 offset = 4;
3332 else
3333 offset = 0;
3335 if (offset != 0)
3336 tcg_gen_addi_i32(addr, addr, offset);
3337 store_reg(s, rn, addr);
3338 } else {
3339 tcg_temp_free_i32(addr);
3343 break;
3344 default:
3345 /* Should never happen. */
3346 return 1;
3348 return 0;
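/* Jump to 'dest': if it lies in the same guest page as this TB we can chain directly with goto_tb, otherwise set the PC and exit to the main loop. */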
3351 static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
3353 TranslationBlock *tb;
3355 tb = s->tb;
3356 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
3357 tcg_gen_goto_tb(n);
3358 gen_set_pc_im(dest);
3359 tcg_gen_exit_tb((tcg_target_long)tb + n);
3360 } else {
3361 gen_set_pc_im(dest);
3362 tcg_gen_exit_tb(0);
3366 static inline void gen_jmp (DisasContext *s, uint32_t dest)
3368 if (unlikely(s->singlestep_enabled)) {
3369 /* An indirect jump so that we still trigger the debug exception. */
3370 if (s->thumb)
3371 dest |= 1;
3372 gen_bx_im(s, dest);
3373 } else {
3374 gen_goto_tb(s, 0, dest);
3375 s->is_jmp = DISAS_TB_JUMP;
3379 static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
3381 if (x)
3382 tcg_gen_sari_i32(t0, t0, 16);
3383 else
3384 gen_sxth(t0);
3385 if (y)
3386 tcg_gen_sari_i32(t1, t1, 16);
3387 else
3388 gen_sxth(t1);
3389 tcg_gen_mul_i32(t0, t0, t1);
3392 /* Return the mask of PSR bits set by a MSR instruction. */
3393 static uint32_t msr_mask(CPUARMState *env, DisasContext *s, int flags, int spsr) {
3394 uint32_t mask;
3396 mask = 0;
3397 if (flags & (1 << 0))
3398 mask |= 0xff;
3399 if (flags & (1 << 1))
3400 mask |= 0xff00;
3401 if (flags & (1 << 2))
3402 mask |= 0xff0000;
3403 if (flags & (1 << 3))
3404 mask |= 0xff000000;
3406 /* Mask out undefined bits. */
3407 mask &= ~CPSR_RESERVED;
3408 if (!arm_feature(env, ARM_FEATURE_V4T))
3409 mask &= ~CPSR_T;
3410 if (!arm_feature(env, ARM_FEATURE_V5))
3411 mask &= ~CPSR_Q; /* V5TE in reality */
3412 if (!arm_feature(env, ARM_FEATURE_V6))
3413 mask &= ~(CPSR_E | CPSR_GE);
3414 if (!arm_feature(env, ARM_FEATURE_THUMB2))
3415 mask &= ~CPSR_IT;
3416 /* Mask out execution state bits. */
3417 if (!spsr)
3418 mask &= ~CPSR_EXEC;
3419 /* Mask out privileged bits. */
3420 if (IS_USER(s))
3421 mask &= CPSR_USER;
3422 return mask;
3425 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3426 static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
3428 TCGv tmp;
3429 if (spsr) {
3430 /* ??? This is also undefined in system mode. */
3431 if (IS_USER(s))
3432 return 1;
3434 tmp = load_cpu_field(spsr);
3435 tcg_gen_andi_i32(tmp, tmp, ~mask);
3436 tcg_gen_andi_i32(t0, t0, mask);
3437 tcg_gen_or_i32(tmp, tmp, t0);
3438 store_cpu_field(tmp, spsr);
3439 } else {
3440 gen_set_cpsr(t0, mask);
3442 tcg_temp_free_i32(t0);
3443 gen_lookup_tb(s);
3444 return 0;
3447 /* Returns nonzero if access to the PSR is not permitted. */
3448 static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3450 TCGv tmp;
3451 tmp = tcg_temp_new_i32();
3452 tcg_gen_movi_i32(tmp, val);
3453 return gen_set_psr(s, mask, spsr, tmp);
3456 /* Generate an old-style exception return. Marks pc as dead. */
3457 static void gen_exception_return(DisasContext *s, TCGv pc)
3459 TCGv tmp;
3460 store_reg(s, 15, pc);
3461 tmp = load_cpu_field(spsr);
3462 gen_set_cpsr(tmp, 0xffffffff);
3463 tcg_temp_free_i32(tmp);
3464 s->is_jmp = DISAS_UPDATE;
3467 /* Generate a v6 exception return. Marks both values as dead. */
3468 static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
3470 gen_set_cpsr(cpsr, 0xffffffff);
3471 tcg_temp_free_i32(cpsr);
3472 store_reg(s, 15, pc);
3473 s->is_jmp = DISAS_UPDATE;
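/* Flush the current Thumb IT (conditional execution) state out to the CPU state so that an exception taken by the following instruction sees the correct condexec bits. */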
3476 static inline void
3477 gen_set_condexec (DisasContext *s)
3479 if (s->condexec_mask) {
3480 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
3481 TCGv tmp = tcg_temp_new_i32();
3482 tcg_gen_movi_i32(tmp, val);
3483 store_cpu_field(tmp, condexec_bits);
3487 static void gen_exception_insn(DisasContext *s, int offset, int excp)
3489 gen_set_condexec(s);
3490 gen_set_pc_im(s->pc - offset);
3491 gen_exception(excp);
3492 s->is_jmp = DISAS_JUMP;
3495 static void gen_nop_hint(DisasContext *s, int val)
3497 switch (val) {
3498 case 3: /* wfi */
3499 gen_set_pc_im(s->pc);
3500 s->is_jmp = DISAS_WFI;
3501 break;
3502 case 2: /* wfe */
3503 case 4: /* sev */
3504 /* TODO: Implement SEV and WFE. May help SMP performance. */
3505 default: /* nop */
3506 break;
3510 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
3512 static inline void gen_neon_add(int size, TCGv t0, TCGv t1)
3514 switch (size) {
3515 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3516 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3517 case 2: tcg_gen_add_i32(t0, t0, t1); break;
3518 default: abort();
3522 static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
3524 switch (size) {
3525 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3526 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3527 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
3528 default: return;
3532 /* 32-bit pairwise ops end up the same as the elementwise versions. */
3533 #define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3534 #define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3535 #define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3536 #define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3538 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
3539 switch ((size << 1) | u) { \
3540 case 0: \
3541 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
3542 break; \
3543 case 1: \
3544 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
3545 break; \
3546 case 2: \
3547 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
3548 break; \
3549 case 3: \
3550 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
3551 break; \
3552 case 4: \
3553 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
3554 break; \
3555 case 5: \
3556 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
3557 break; \
3558 default: return 1; \
3559 }} while (0)
3561 #define GEN_NEON_INTEGER_OP(name) do { \
3562 switch ((size << 1) | u) { \
3563 case 0: \
3564 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
3565 break; \
3566 case 1: \
3567 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
3568 break; \
3569 case 2: \
3570 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
3571 break; \
3572 case 3: \
3573 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
3574 break; \
3575 case 4: \
3576 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
3577 break; \
3578 case 5: \
3579 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
3580 break; \
3581 default: return 1; \
3582 }} while (0)
3584 static TCGv neon_load_scratch(int scratch)
3586 TCGv tmp = tcg_temp_new_i32();
3587 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3588 return tmp;
3591 static void neon_store_scratch(int scratch, TCGv var)
3593 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3594 tcg_temp_free_i32(var);
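/* Load a scalar operand for Neon by-scalar operations: 'reg' encodes both the D register and the element index; 16-bit scalars are duplicated into both halves of the returned 32-bit value. */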
3597 static inline TCGv neon_get_scalar(int size, int reg)
3599 TCGv tmp;
3600 if (size == 1) {
3601 tmp = neon_load_reg(reg & 7, reg >> 4);
3602 if (reg & 8) {
3603 gen_neon_dup_high16(tmp);
3604 } else {
3605 gen_neon_dup_low16(tmp);
3607 } else {
3608 tmp = neon_load_reg(reg & 15, reg >> 4);
3610 return tmp;
3613 static int gen_neon_unzip(int rd, int rm, int size, int q)
3615 TCGv tmp, tmp2;
3616 if (!q && size == 2) {
3617 return 1;
3619 tmp = tcg_const_i32(rd);
3620 tmp2 = tcg_const_i32(rm);
3621 if (q) {
3622 switch (size) {
3623 case 0:
3624 gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
3625 break;
3626 case 1:
3627 gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
3628 break;
3629 case 2:
3630 gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
3631 break;
3632 default:
3633 abort();
3635 } else {
3636 switch (size) {
3637 case 0:
3638 gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
3639 break;
3640 case 1:
3641 gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
3642 break;
3643 default:
3644 abort();
3647 tcg_temp_free_i32(tmp);
3648 tcg_temp_free_i32(tmp2);
3649 return 0;
3652 static int gen_neon_zip(int rd, int rm, int size, int q)
3654 TCGv tmp, tmp2;
3655 if (!q && size == 2) {
3656 return 1;
3658 tmp = tcg_const_i32(rd);
3659 tmp2 = tcg_const_i32(rm);
3660 if (q) {
3661 switch (size) {
3662 case 0:
3663 gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
3664 break;
3665 case 1:
3666 gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
3667 break;
3668 case 2:
3669 gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
3670 break;
3671 default:
3672 abort();
3674 } else {
3675 switch (size) {
3676 case 0:
3677 gen_helper_neon_zip8(cpu_env, tmp, tmp2);
3678 break;
3679 case 1:
3680 gen_helper_neon_zip16(cpu_env, tmp, tmp2);
3681 break;
3682 default:
3683 abort();
3686 tcg_temp_free_i32(tmp);
3687 tcg_temp_free_i32(tmp2);
3688 return 0;
3691 static void gen_neon_trn_u8(TCGv t0, TCGv t1)
3693 TCGv rd, tmp;
3695 rd = tcg_temp_new_i32();
3696 tmp = tcg_temp_new_i32();
3698 tcg_gen_shli_i32(rd, t0, 8);
3699 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3700 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3701 tcg_gen_or_i32(rd, rd, tmp);
3703 tcg_gen_shri_i32(t1, t1, 8);
3704 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3705 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3706 tcg_gen_or_i32(t1, t1, tmp);
3707 tcg_gen_mov_i32(t0, rd);
3709 tcg_temp_free_i32(tmp);
3710 tcg_temp_free_i32(rd);
3713 static void gen_neon_trn_u16(TCGv t0, TCGv t1)
3715 TCGv rd, tmp;
3717 rd = tcg_temp_new_i32();
3718 tmp = tcg_temp_new_i32();
3720 tcg_gen_shli_i32(rd, t0, 16);
3721 tcg_gen_andi_i32(tmp, t1, 0xffff);
3722 tcg_gen_or_i32(rd, rd, tmp);
3723 tcg_gen_shri_i32(t1, t1, 16);
3724 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3725 tcg_gen_or_i32(t1, t1, tmp);
3726 tcg_gen_mov_i32(t0, rd);
3728 tcg_temp_free_i32(tmp);
3729 tcg_temp_free_i32(rd);
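/* Per-'type' parameters for the VLDn/VSTn multiple-structures forms: number of registers transferred, element interleave factor, and register spacing. */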
3733 static struct {
3734 int nregs;
3735 int interleave;
3736 int spacing;
3737 } neon_ls_element_type[11] = {
3738 {4, 4, 1},
3739 {4, 4, 2},
3740 {4, 1, 1},
3741 {4, 2, 1},
3742 {3, 3, 1},
3743 {3, 3, 2},
3744 {3, 1, 1},
3745 {1, 1, 1},
3746 {2, 2, 1},
3747 {2, 2, 2},
3748 {2, 1, 1}
3751 /* Translate a NEON load/store element instruction. Return nonzero if the
3752 instruction is invalid. */
3753 static int disas_neon_ls_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
3755 int rd, rn, rm;
3756 int op;
3757 int nregs;
3758 int interleave;
3759 int spacing;
3760 int stride;
3761 int size;
3762 int reg;
3763 int pass;
3764 int load;
3765 int shift;
3766 int n;
3767 TCGv addr;
3768 TCGv tmp;
3769 TCGv tmp2;
3770 TCGv_i64 tmp64;
3772 if (!s->vfp_enabled)
3773 return 1;
3774 VFP_DREG_D(rd, insn);
3775 rn = (insn >> 16) & 0xf;
3776 rm = insn & 0xf;
3777 load = (insn & (1 << 21)) != 0;
3778 if ((insn & (1 << 23)) == 0) {
3779 /* Load store all elements. */
3780 op = (insn >> 8) & 0xf;
3781 size = (insn >> 6) & 3;
3782 if (op > 10)
3783 return 1;
3784 /* Catch UNDEF cases for bad values of align field */
3785 switch (op & 0xc) {
3786 case 4:
3787 if (((insn >> 5) & 1) == 1) {
3788 return 1;
3790 break;
3791 case 8:
3792 if (((insn >> 4) & 3) == 3) {
3793 return 1;
3795 break;
3796 default:
3797 break;
3799 nregs = neon_ls_element_type[op].nregs;
3800 interleave = neon_ls_element_type[op].interleave;
3801 spacing = neon_ls_element_type[op].spacing;
3802 if (size == 3 && (interleave | spacing) != 1)
3803 return 1;
3804 addr = tcg_temp_new_i32();
3805 load_reg_var(s, addr, rn);
3806 stride = (1 << size) * interleave;
3807 for (reg = 0; reg < nregs; reg++) {
3808 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
3809 load_reg_var(s, addr, rn);
3810 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
3811 } else if (interleave == 2 && nregs == 4 && reg == 2) {
3812 load_reg_var(s, addr, rn);
3813 tcg_gen_addi_i32(addr, addr, 1 << size);
3815 if (size == 3) {
3816 if (load) {
3817 tmp64 = gen_ld64(addr, IS_USER(s));
3818 neon_store_reg64(tmp64, rd);
3819 tcg_temp_free_i64(tmp64);
3820 } else {
3821 tmp64 = tcg_temp_new_i64();
3822 neon_load_reg64(tmp64, rd);
3823 gen_st64(tmp64, addr, IS_USER(s));
3825 tcg_gen_addi_i32(addr, addr, stride);
3826 } else {
3827 for (pass = 0; pass < 2; pass++) {
3828 if (size == 2) {
3829 if (load) {
3830 tmp = gen_ld32(addr, IS_USER(s));
3831 neon_store_reg(rd, pass, tmp);
3832 } else {
3833 tmp = neon_load_reg(rd, pass);
3834 gen_st32(tmp, addr, IS_USER(s));
3836 tcg_gen_addi_i32(addr, addr, stride);
3837 } else if (size == 1) {
3838 if (load) {
3839 tmp = gen_ld16u(addr, IS_USER(s));
3840 tcg_gen_addi_i32(addr, addr, stride);
3841 tmp2 = gen_ld16u(addr, IS_USER(s));
3842 tcg_gen_addi_i32(addr, addr, stride);
3843 tcg_gen_shli_i32(tmp2, tmp2, 16);
3844 tcg_gen_or_i32(tmp, tmp, tmp2);
3845 tcg_temp_free_i32(tmp2);
3846 neon_store_reg(rd, pass, tmp);
3847 } else {
3848 tmp = neon_load_reg(rd, pass);
3849 tmp2 = tcg_temp_new_i32();
3850 tcg_gen_shri_i32(tmp2, tmp, 16);
3851 gen_st16(tmp, addr, IS_USER(s));
3852 tcg_gen_addi_i32(addr, addr, stride);
3853 gen_st16(tmp2, addr, IS_USER(s));
3854 tcg_gen_addi_i32(addr, addr, stride);
3856 } else /* size == 0 */ {
3857 if (load) {
3858 TCGV_UNUSED(tmp2);
3859 for (n = 0; n < 4; n++) {
3860 tmp = gen_ld8u(addr, IS_USER(s));
3861 tcg_gen_addi_i32(addr, addr, stride);
3862 if (n == 0) {
3863 tmp2 = tmp;
3864 } else {
3865 tcg_gen_shli_i32(tmp, tmp, n * 8);
3866 tcg_gen_or_i32(tmp2, tmp2, tmp);
3867 tcg_temp_free_i32(tmp);
3870 neon_store_reg(rd, pass, tmp2);
3871 } else {
3872 tmp2 = neon_load_reg(rd, pass);
3873 for (n = 0; n < 4; n++) {
3874 tmp = tcg_temp_new_i32();
3875 if (n == 0) {
3876 tcg_gen_mov_i32(tmp, tmp2);
3877 } else {
3878 tcg_gen_shri_i32(tmp, tmp2, n * 8);
3880 gen_st8(tmp, addr, IS_USER(s));
3881 tcg_gen_addi_i32(addr, addr, stride);
3883 tcg_temp_free_i32(tmp2);
3888 rd += spacing;
3890 tcg_temp_free_i32(addr);
3891 stride = nregs * 8;
3892 } else {
3893 size = (insn >> 10) & 3;
3894 if (size == 3) {
3895 /* Load single element to all lanes. */
3896 int a = (insn >> 4) & 1;
3897 if (!load) {
3898 return 1;
3900 size = (insn >> 6) & 3;
3901 nregs = ((insn >> 8) & 3) + 1;
3903 if (size == 3) {
3904 if (nregs != 4 || a == 0) {
3905 return 1;
3907 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
3908 size = 2;
3910 if (nregs == 1 && a == 1 && size == 0) {
3911 return 1;
3913 if (nregs == 3 && a == 1) {
3914 return 1;
3916 addr = tcg_temp_new_i32();
3917 load_reg_var(s, addr, rn);
3918 if (nregs == 1) {
3919 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
3920 tmp = gen_load_and_replicate(s, addr, size);
3921 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
3922 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
3923 if (insn & (1 << 5)) {
3924 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
3925 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
3927 tcg_temp_free_i32(tmp);
3928 } else {
3929 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
3930 stride = (insn & (1 << 5)) ? 2 : 1;
3931 for (reg = 0; reg < nregs; reg++) {
3932 tmp = gen_load_and_replicate(s, addr, size);
3933 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
3934 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
3935 tcg_temp_free_i32(tmp);
3936 tcg_gen_addi_i32(addr, addr, 1 << size);
3937 rd += stride;
3940 tcg_temp_free_i32(addr);
3941 stride = (1 << size) * nregs;
3942 } else {
3943 /* Single element. */
3944 int idx = (insn >> 4) & 0xf;
3945 pass = (insn >> 7) & 1;
3946 switch (size) {
3947 case 0:
3948 shift = ((insn >> 5) & 3) * 8;
3949 stride = 1;
3950 break;
3951 case 1:
3952 shift = ((insn >> 6) & 1) * 16;
3953 stride = (insn & (1 << 5)) ? 2 : 1;
3954 break;
3955 case 2:
3956 shift = 0;
3957 stride = (insn & (1 << 6)) ? 2 : 1;
3958 break;
3959 default:
3960 abort();
3962 nregs = ((insn >> 8) & 3) + 1;
3963 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
3964 switch (nregs) {
3965 case 1:
3966 if (((idx & (1 << size)) != 0) ||
3967 (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
3968 return 1;
3970 break;
3971 case 3:
3972 if ((idx & 1) != 0) {
3973 return 1;
3975 /* fall through */
3976 case 2:
3977 if (size == 2 && (idx & 2) != 0) {
3978 return 1;
3980 break;
3981 case 4:
3982 if ((size == 2) && ((idx & 3) == 3)) {
3983 return 1;
3985 break;
3986 default:
3987 abort();
3989 if ((rd + stride * (nregs - 1)) > 31) {
3990 /* Attempts to write off the end of the register file
3991 * are UNPREDICTABLE; we choose to UNDEF because otherwise
3992 * the neon_load_reg() would write off the end of the array. */
3994 return 1;
3996 addr = tcg_temp_new_i32();
3997 load_reg_var(s, addr, rn);
3998 for (reg = 0; reg < nregs; reg++) {
3999 if (load) {
4000 switch (size) {
4001 case 0:
4002 tmp = gen_ld8u(addr, IS_USER(s));
4003 break;
4004 case 1:
4005 tmp = gen_ld16u(addr, IS_USER(s));
4006 break;
4007 case 2:
4008 tmp = gen_ld32(addr, IS_USER(s));
4009 break;
4010 default: /* Avoid compiler warnings. */
4011 abort();
4013 if (size != 2) {
4014 tmp2 = neon_load_reg(rd, pass);
4015 tcg_gen_deposit_i32(tmp, tmp2, tmp,
4016 shift, size ? 16 : 8);
4017 tcg_temp_free_i32(tmp2);
4019 neon_store_reg(rd, pass, tmp);
4020 } else { /* Store */
4021 tmp = neon_load_reg(rd, pass);
4022 if (shift)
4023 tcg_gen_shri_i32(tmp, tmp, shift);
4024 switch (size) {
4025 case 0:
4026 gen_st8(tmp, addr, IS_USER(s));
4027 break;
4028 case 1:
4029 gen_st16(tmp, addr, IS_USER(s));
4030 break;
4031 case 2:
4032 gen_st32(tmp, addr, IS_USER(s));
4033 break;
4036 rd += stride;
4037 tcg_gen_addi_i32(addr, addr, 1 << size);
4039 tcg_temp_free_i32(addr);
4040 stride = nregs * (1 << size);
4043 if (rm != 15) {
4044 TCGv base;
4046 base = load_reg(s, rn);
4047 if (rm == 13) {
4048 tcg_gen_addi_i32(base, base, stride);
4049 } else {
4050 TCGv index;
4051 index = load_reg(s, rm);
4052 tcg_gen_add_i32(base, base, index);
4053 tcg_temp_free_i32(index);
4055 store_reg(s, rn, base);
4057 return 0;
4060 /* Bitwise select. dest = c ? t : f. Clobbers T and F. */
4061 static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
4063 tcg_gen_and_i32(t, t, c);
4064 tcg_gen_andc_i32(f, f, c);
4065 tcg_gen_or_i32(dest, t, f);
4068 static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
4070 switch (size) {
4071 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4072 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4073 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4074 default: abort();
4078 static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
4080 switch (size) {
4081 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4082 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4083 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
4084 default: abort();
4088 static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
4090 switch (size) {
4091 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4092 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4093 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
4094 default: abort();
4098 static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src)
4100 switch (size) {
4101 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
4102 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
4103 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
4104 default: abort();
4108 static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
4109 int q, int u)
4111 if (q) {
4112 if (u) {
4113 switch (size) {
4114 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4115 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4116 default: abort();
4118 } else {
4119 switch (size) {
4120 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4121 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4122 default: abort();
4125 } else {
4126 if (u) {
4127 switch (size) {
4128 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4129 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
4130 default: abort();
4132 } else {
4133 switch (size) {
4134 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4135 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4136 default: abort();
4142 static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
4144 if (u) {
4145 switch (size) {
4146 case 0: gen_helper_neon_widen_u8(dest, src); break;
4147 case 1: gen_helper_neon_widen_u16(dest, src); break;
4148 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4149 default: abort();
4151 } else {
4152 switch (size) {
4153 case 0: gen_helper_neon_widen_s8(dest, src); break;
4154 case 1: gen_helper_neon_widen_s16(dest, src); break;
4155 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4156 default: abort();
4159 tcg_temp_free_i32(src);
4162 static inline void gen_neon_addl(int size)
4164 switch (size) {
4165 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4166 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4167 case 2: tcg_gen_add_i64(CPU_V001); break;
4168 default: abort();
4172 static inline void gen_neon_subl(int size)
4174 switch (size) {
4175 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4176 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4177 case 2: tcg_gen_sub_i64(CPU_V001); break;
4178 default: abort();
4182 static inline void gen_neon_negl(TCGv_i64 var, int size)
4184 switch (size) {
4185 case 0: gen_helper_neon_negl_u16(var, var); break;
4186 case 1: gen_helper_neon_negl_u32(var, var); break;
4187 case 2: gen_helper_neon_negl_u64(var, var); break;
4188 default: abort();
4192 static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
4194 switch (size) {
4195 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4196 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
4197 default: abort();
4201 static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
4203 TCGv_i64 tmp;
4205 switch ((size << 1) | u) {
4206 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4207 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4208 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4209 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4210 case 4:
4211 tmp = gen_muls_i64_i32(a, b);
4212 tcg_gen_mov_i64(dest, tmp);
4213 tcg_temp_free_i64(tmp);
4214 break;
4215 case 5:
4216 tmp = gen_mulu_i64_i32(a, b);
4217 tcg_gen_mov_i64(dest, tmp);
4218 tcg_temp_free_i64(tmp);
4219 break;
4220 default: abort();
4223 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
4224 Don't forget to clean them now. */
4225 if (size < 2) {
4226 tcg_temp_free_i32(a);
4227 tcg_temp_free_i32(b);
4231 static void gen_neon_narrow_op(int op, int u, int size, TCGv dest, TCGv_i64 src)
4233 if (op) {
4234 if (u) {
4235 gen_neon_unarrow_sats(size, dest, src);
4236 } else {
4237 gen_neon_narrow(size, dest, src);
4239 } else {
4240 if (u) {
4241 gen_neon_narrow_satu(size, dest, src);
4242 } else {
4243 gen_neon_narrow_sats(size, dest, src);
4248 /* Symbolic constants for op fields for Neon 3-register same-length.
4249 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
4250 * table A7-9. */
4252 #define NEON_3R_VHADD 0
4253 #define NEON_3R_VQADD 1
4254 #define NEON_3R_VRHADD 2
4255 #define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
4256 #define NEON_3R_VHSUB 4
4257 #define NEON_3R_VQSUB 5
4258 #define NEON_3R_VCGT 6
4259 #define NEON_3R_VCGE 7
4260 #define NEON_3R_VSHL 8
4261 #define NEON_3R_VQSHL 9
4262 #define NEON_3R_VRSHL 10
4263 #define NEON_3R_VQRSHL 11
4264 #define NEON_3R_VMAX 12
4265 #define NEON_3R_VMIN 13
4266 #define NEON_3R_VABD 14
4267 #define NEON_3R_VABA 15
4268 #define NEON_3R_VADD_VSUB 16
4269 #define NEON_3R_VTST_VCEQ 17
4270 #define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
4271 #define NEON_3R_VMUL 19
4272 #define NEON_3R_VPMAX 20
4273 #define NEON_3R_VPMIN 21
4274 #define NEON_3R_VQDMULH_VQRDMULH 22
4275 #define NEON_3R_VPADD 23
4276 #define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
4277 #define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
4278 #define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
4279 #define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
4280 #define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
4281 #define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
4282 #define NEON_3R_VRECPS_VRSQRTS 31 /* float VRECPS, VRSQRTS */
4284 static const uint8_t neon_3r_sizes[] = {
4285 [NEON_3R_VHADD] = 0x7,
4286 [NEON_3R_VQADD] = 0xf,
4287 [NEON_3R_VRHADD] = 0x7,
4288 [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
4289 [NEON_3R_VHSUB] = 0x7,
4290 [NEON_3R_VQSUB] = 0xf,
4291 [NEON_3R_VCGT] = 0x7,
4292 [NEON_3R_VCGE] = 0x7,
4293 [NEON_3R_VSHL] = 0xf,
4294 [NEON_3R_VQSHL] = 0xf,
4295 [NEON_3R_VRSHL] = 0xf,
4296 [NEON_3R_VQRSHL] = 0xf,
4297 [NEON_3R_VMAX] = 0x7,
4298 [NEON_3R_VMIN] = 0x7,
4299 [NEON_3R_VABD] = 0x7,
4300 [NEON_3R_VABA] = 0x7,
4301 [NEON_3R_VADD_VSUB] = 0xf,
4302 [NEON_3R_VTST_VCEQ] = 0x7,
4303 [NEON_3R_VML] = 0x7,
4304 [NEON_3R_VMUL] = 0x7,
4305 [NEON_3R_VPMAX] = 0x7,
4306 [NEON_3R_VPMIN] = 0x7,
4307 [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
4308 [NEON_3R_VPADD] = 0x7,
4309 [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
4310 [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
4311 [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
4312 [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
4313 [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
4314 [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
4315 [NEON_3R_VRECPS_VRSQRTS] = 0x5, /* size bit 1 encodes op */
4318 /* Symbolic constants for op fields for Neon 2-register miscellaneous.
4319 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
4320 * table A7-13. */
4322 #define NEON_2RM_VREV64 0
4323 #define NEON_2RM_VREV32 1
4324 #define NEON_2RM_VREV16 2
4325 #define NEON_2RM_VPADDL 4
4326 #define NEON_2RM_VPADDL_U 5
4327 #define NEON_2RM_VCLS 8
4328 #define NEON_2RM_VCLZ 9
4329 #define NEON_2RM_VCNT 10
4330 #define NEON_2RM_VMVN 11
4331 #define NEON_2RM_VPADAL 12
4332 #define NEON_2RM_VPADAL_U 13
4333 #define NEON_2RM_VQABS 14
4334 #define NEON_2RM_VQNEG 15
4335 #define NEON_2RM_VCGT0 16
4336 #define NEON_2RM_VCGE0 17
4337 #define NEON_2RM_VCEQ0 18
4338 #define NEON_2RM_VCLE0 19
4339 #define NEON_2RM_VCLT0 20
4340 #define NEON_2RM_VABS 22
4341 #define NEON_2RM_VNEG 23
4342 #define NEON_2RM_VCGT0_F 24
4343 #define NEON_2RM_VCGE0_F 25
4344 #define NEON_2RM_VCEQ0_F 26
4345 #define NEON_2RM_VCLE0_F 27
4346 #define NEON_2RM_VCLT0_F 28
4347 #define NEON_2RM_VABS_F 30
4348 #define NEON_2RM_VNEG_F 31
4349 #define NEON_2RM_VSWP 32
4350 #define NEON_2RM_VTRN 33
4351 #define NEON_2RM_VUZP 34
4352 #define NEON_2RM_VZIP 35
4353 #define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
4354 #define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
4355 #define NEON_2RM_VSHLL 38
4356 #define NEON_2RM_VCVT_F16_F32 44
4357 #define NEON_2RM_VCVT_F32_F16 46
4358 #define NEON_2RM_VRECPE 56
4359 #define NEON_2RM_VRSQRTE 57
4360 #define NEON_2RM_VRECPE_F 58
4361 #define NEON_2RM_VRSQRTE_F 59
4362 #define NEON_2RM_VCVT_FS 60
4363 #define NEON_2RM_VCVT_FU 61
4364 #define NEON_2RM_VCVT_SF 62
4365 #define NEON_2RM_VCVT_UF 63
4367 static int neon_2rm_is_float_op(int op)
4369 /* Return true if this neon 2reg-misc op is float-to-float */
4370 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
4371 op >= NEON_2RM_VRECPE_F);
4374 /* Each entry in this array has bit n set if the insn allows
4375 * size value n (otherwise it will UNDEF). Since unallocated
4376  * op values will have no bits set, they always UNDEF.
4378 static const uint8_t neon_2rm_sizes[] = {
4379 [NEON_2RM_VREV64] = 0x7,
4380 [NEON_2RM_VREV32] = 0x3,
4381 [NEON_2RM_VREV16] = 0x1,
4382 [NEON_2RM_VPADDL] = 0x7,
4383 [NEON_2RM_VPADDL_U] = 0x7,
4384 [NEON_2RM_VCLS] = 0x7,
4385 [NEON_2RM_VCLZ] = 0x7,
4386 [NEON_2RM_VCNT] = 0x1,
4387 [NEON_2RM_VMVN] = 0x1,
4388 [NEON_2RM_VPADAL] = 0x7,
4389 [NEON_2RM_VPADAL_U] = 0x7,
4390 [NEON_2RM_VQABS] = 0x7,
4391 [NEON_2RM_VQNEG] = 0x7,
4392 [NEON_2RM_VCGT0] = 0x7,
4393 [NEON_2RM_VCGE0] = 0x7,
4394 [NEON_2RM_VCEQ0] = 0x7,
4395 [NEON_2RM_VCLE0] = 0x7,
4396 [NEON_2RM_VCLT0] = 0x7,
4397 [NEON_2RM_VABS] = 0x7,
4398 [NEON_2RM_VNEG] = 0x7,
4399 [NEON_2RM_VCGT0_F] = 0x4,
4400 [NEON_2RM_VCGE0_F] = 0x4,
4401 [NEON_2RM_VCEQ0_F] = 0x4,
4402 [NEON_2RM_VCLE0_F] = 0x4,
4403 [NEON_2RM_VCLT0_F] = 0x4,
4404 [NEON_2RM_VABS_F] = 0x4,
4405 [NEON_2RM_VNEG_F] = 0x4,
4406 [NEON_2RM_VSWP] = 0x1,
4407 [NEON_2RM_VTRN] = 0x7,
4408 [NEON_2RM_VUZP] = 0x7,
4409 [NEON_2RM_VZIP] = 0x7,
4410 [NEON_2RM_VMOVN] = 0x7,
4411 [NEON_2RM_VQMOVN] = 0x7,
4412 [NEON_2RM_VSHLL] = 0x7,
4413 [NEON_2RM_VCVT_F16_F32] = 0x2,
4414 [NEON_2RM_VCVT_F32_F16] = 0x2,
4415 [NEON_2RM_VRECPE] = 0x4,
4416 [NEON_2RM_VRSQRTE] = 0x4,
4417 [NEON_2RM_VRECPE_F] = 0x4,
4418 [NEON_2RM_VRSQRTE_F] = 0x4,
4419 [NEON_2RM_VCVT_FS] = 0x4,
4420 [NEON_2RM_VCVT_FU] = 0x4,
4421 [NEON_2RM_VCVT_SF] = 0x4,
4422 [NEON_2RM_VCVT_UF] = 0x4,
4425 /* Translate a NEON data processing instruction. Return nonzero if the
4426 instruction is invalid.
4427 We process data in a mixture of 32-bit and 64-bit chunks.
4428 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
4430 static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
4432 int op;
4433 int q;
4434 int rd, rn, rm;
4435 int size;
4436 int shift;
4437 int pass;
4438 int count;
4439 int pairwise;
4440 int u;
4441 uint32_t imm, mask;
4442 TCGv tmp, tmp2, tmp3, tmp4, tmp5;
4443 TCGv_i64 tmp64;
4445 if (!s->vfp_enabled)
4446 return 1;
4447 q = (insn & (1 << 6)) != 0;
4448 u = (insn >> 24) & 1;
4449 VFP_DREG_D(rd, insn);
4450 VFP_DREG_N(rn, insn);
4451 VFP_DREG_M(rm, insn);
4452 size = (insn >> 20) & 3;
4453 if ((insn & (1 << 23)) == 0) {
4454 /* Three register same length. */
4455 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
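/* op packs insn bits [11:8] into op[4:1] and bit [4] into op[0], matching
 * the NEON_3R_* values above: e.g. op == 9 (NEON_3R_VQSHL) corresponds to
 * bits [11:8] = 0100 with bit [4] = 1.
 */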
4456 /* Catch invalid op and bad size combinations: UNDEF */
4457 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
4458 return 1;
4460 /* All insns of this form UNDEF for either this condition or the
4461 * superset of cases "Q==1"; we catch the latter later.
4463 if (q && ((rd | rn | rm) & 1)) {
4464 return 1;
4466 if (size == 3 && op != NEON_3R_LOGIC) {
4467 /* 64-bit element instructions. */
4468 for (pass = 0; pass < (q ? 2 : 1); pass++) {
4469 neon_load_reg64(cpu_V0, rn + pass);
4470 neon_load_reg64(cpu_V1, rm + pass);
4471 switch (op) {
4472 case NEON_3R_VQADD:
4473 if (u) {
4474 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
4475 cpu_V0, cpu_V1);
4476 } else {
4477 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
4478 cpu_V0, cpu_V1);
4480 break;
4481 case NEON_3R_VQSUB:
4482 if (u) {
4483 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
4484 cpu_V0, cpu_V1);
4485 } else {
4486 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
4487 cpu_V0, cpu_V1);
4489 break;
4490 case NEON_3R_VSHL:
4491 if (u) {
4492 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4493 } else {
4494 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4496 break;
4497 case NEON_3R_VQSHL:
4498 if (u) {
4499 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4500 cpu_V1, cpu_V0);
4501 } else {
4502 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4503 cpu_V1, cpu_V0);
4505 break;
4506 case NEON_3R_VRSHL:
4507 if (u) {
4508 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
4509 } else {
4510 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4512 break;
4513 case NEON_3R_VQRSHL:
4514 if (u) {
4515 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4516 cpu_V1, cpu_V0);
4517 } else {
4518 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4519 cpu_V1, cpu_V0);
4521 break;
4522 case NEON_3R_VADD_VSUB:
4523 if (u) {
4524 tcg_gen_sub_i64(CPU_V001);
4525 } else {
4526 tcg_gen_add_i64(CPU_V001);
4528 break;
4529 default:
4530 abort();
4532 neon_store_reg64(cpu_V0, rd + pass);
4534 return 0;
4536 pairwise = 0;
4537 switch (op) {
4538 case NEON_3R_VSHL:
4539 case NEON_3R_VQSHL:
4540 case NEON_3R_VRSHL:
4541 case NEON_3R_VQRSHL:
4543 int rtmp;
4544 /* Shift instruction operands are reversed. */
4545 rtmp = rn;
4546 rn = rm;
4547 rm = rtmp;
4549 break;
4550 case NEON_3R_VPADD:
4551 if (u) {
4552 return 1;
4554 /* Fall through */
4555 case NEON_3R_VPMAX:
4556 case NEON_3R_VPMIN:
4557 pairwise = 1;
4558 break;
4559 case NEON_3R_FLOAT_ARITH:
4560 pairwise = (u && size < 2); /* if VPADD (float) */
4561 break;
4562 case NEON_3R_FLOAT_MINMAX:
4563 pairwise = u; /* if VPMIN/VPMAX (float) */
4564 break;
4565 case NEON_3R_FLOAT_CMP:
4566 if (!u && size) {
4567 /* no encoding for U=0 C=1x */
4568 return 1;
4570 break;
4571 case NEON_3R_FLOAT_ACMP:
4572 if (!u) {
4573 return 1;
4575 break;
4576 case NEON_3R_VRECPS_VRSQRTS:
4577 if (u) {
4578 return 1;
4580 break;
4581 case NEON_3R_VMUL:
4582 if (u && (size != 0)) {
4583 /* UNDEF on invalid size for polynomial subcase */
4584 return 1;
4586 break;
4587 case NEON_3R_VFM:
4588 if (!arm_feature(env, ARM_FEATURE_VFP4) || u) {
4589 return 1;
4591 break;
4592 default:
4593 break;
4596 if (pairwise && q) {
4597 /* All the pairwise insns UNDEF if Q is set */
4598 return 1;
4601 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4603 if (pairwise) {
4604 /* Pairwise. */
4605 if (pass < 1) {
4606 tmp = neon_load_reg(rn, 0);
4607 tmp2 = neon_load_reg(rn, 1);
4608 } else {
4609 tmp = neon_load_reg(rm, 0);
4610 tmp2 = neon_load_reg(rm, 1);
4612 } else {
4613 /* Elementwise. */
4614 tmp = neon_load_reg(rn, pass);
4615 tmp2 = neon_load_reg(rm, pass);
4617 switch (op) {
4618 case NEON_3R_VHADD:
4619 GEN_NEON_INTEGER_OP(hadd);
4620 break;
4621 case NEON_3R_VQADD:
4622 GEN_NEON_INTEGER_OP_ENV(qadd);
4623 break;
4624 case NEON_3R_VRHADD:
4625 GEN_NEON_INTEGER_OP(rhadd);
4626 break;
4627 case NEON_3R_LOGIC: /* Logic ops. */
4628 switch ((u << 2) | size) {
4629 case 0: /* VAND */
4630 tcg_gen_and_i32(tmp, tmp, tmp2);
4631 break;
4632 case 1: /* BIC */
4633 tcg_gen_andc_i32(tmp, tmp, tmp2);
4634 break;
4635 case 2: /* VORR */
4636 tcg_gen_or_i32(tmp, tmp, tmp2);
4637 break;
4638 case 3: /* VORN */
4639 tcg_gen_orc_i32(tmp, tmp, tmp2);
4640 break;
4641 case 4: /* VEOR */
4642 tcg_gen_xor_i32(tmp, tmp, tmp2);
4643 break;
4644 case 5: /* VBSL */
4645 tmp3 = neon_load_reg(rd, pass);
4646 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
4647 tcg_temp_free_i32(tmp3);
4648 break;
4649 case 6: /* VBIT */
4650 tmp3 = neon_load_reg(rd, pass);
4651 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
4652 tcg_temp_free_i32(tmp3);
4653 break;
4654 case 7: /* VBIF */
4655 tmp3 = neon_load_reg(rd, pass);
4656 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
4657 tcg_temp_free_i32(tmp3);
4658 break;
4660 break;
4661 case NEON_3R_VHSUB:
4662 GEN_NEON_INTEGER_OP(hsub);
4663 break;
4664 case NEON_3R_VQSUB:
4665 GEN_NEON_INTEGER_OP_ENV(qsub);
4666 break;
4667 case NEON_3R_VCGT:
4668 GEN_NEON_INTEGER_OP(cgt);
4669 break;
4670 case NEON_3R_VCGE:
4671 GEN_NEON_INTEGER_OP(cge);
4672 break;
4673 case NEON_3R_VSHL:
4674 GEN_NEON_INTEGER_OP(shl);
4675 break;
4676 case NEON_3R_VQSHL:
4677 GEN_NEON_INTEGER_OP_ENV(qshl);
4678 break;
4679 case NEON_3R_VRSHL:
4680 GEN_NEON_INTEGER_OP(rshl);
4681 break;
4682 case NEON_3R_VQRSHL:
4683 GEN_NEON_INTEGER_OP_ENV(qrshl);
4684 break;
4685 case NEON_3R_VMAX:
4686 GEN_NEON_INTEGER_OP(max);
4687 break;
4688 case NEON_3R_VMIN:
4689 GEN_NEON_INTEGER_OP(min);
4690 break;
4691 case NEON_3R_VABD:
4692 GEN_NEON_INTEGER_OP(abd);
4693 break;
4694 case NEON_3R_VABA:
4695 GEN_NEON_INTEGER_OP(abd);
4696 tcg_temp_free_i32(tmp2);
4697 tmp2 = neon_load_reg(rd, pass);
4698 gen_neon_add(size, tmp, tmp2);
4699 break;
4700 case NEON_3R_VADD_VSUB:
4701 if (!u) { /* VADD */
4702 gen_neon_add(size, tmp, tmp2);
4703 } else { /* VSUB */
4704 switch (size) {
4705 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4706 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4707 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
4708 default: abort();
4711 break;
4712 case NEON_3R_VTST_VCEQ:
4713 if (!u) { /* VTST */
4714 switch (size) {
4715 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4716 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4717 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
4718 default: abort();
4720 } else { /* VCEQ */
4721 switch (size) {
4722 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4723 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4724 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
4725 default: abort();
4728 break;
4729         case NEON_3R_VML: /* VMLA, VMLAL, VMLS, VMLSL */
4730 switch (size) {
4731 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4732 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4733 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
4734 default: abort();
4736 tcg_temp_free_i32(tmp2);
4737 tmp2 = neon_load_reg(rd, pass);
4738 if (u) { /* VMLS */
4739 gen_neon_rsb(size, tmp, tmp2);
4740 } else { /* VMLA */
4741 gen_neon_add(size, tmp, tmp2);
4743 break;
4744 case NEON_3R_VMUL:
4745 if (u) { /* polynomial */
4746 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
4747 } else { /* Integer */
4748 switch (size) {
4749 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4750 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4751 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
4752 default: abort();
4755 break;
4756 case NEON_3R_VPMAX:
4757 GEN_NEON_INTEGER_OP(pmax);
4758 break;
4759 case NEON_3R_VPMIN:
4760 GEN_NEON_INTEGER_OP(pmin);
4761 break;
4762 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
4763 if (!u) { /* VQDMULH */
4764 switch (size) {
4765 case 1:
4766 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
4767 break;
4768 case 2:
4769 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
4770 break;
4771 default: abort();
4773 } else { /* VQRDMULH */
4774 switch (size) {
4775 case 1:
4776 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
4777 break;
4778 case 2:
4779 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
4780 break;
4781 default: abort();
4784 break;
4785 case NEON_3R_VPADD:
4786 switch (size) {
4787 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4788 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4789 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
4790 default: abort();
4792 break;
4793 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
4795 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4796 switch ((u << 2) | size) {
4797 case 0: /* VADD */
4798 case 4: /* VPADD */
4799 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
4800 break;
4801 case 2: /* VSUB */
4802 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
4803 break;
4804 case 6: /* VABD */
4805 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
4806 break;
4807 default:
4808 abort();
4810 tcg_temp_free_ptr(fpstatus);
4811 break;
4813 case NEON_3R_FLOAT_MULTIPLY:
4815 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4816 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
4817 if (!u) {
4818 tcg_temp_free_i32(tmp2);
4819 tmp2 = neon_load_reg(rd, pass);
4820 if (size == 0) {
4821 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
4822 } else {
4823 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
4826 tcg_temp_free_ptr(fpstatus);
4827 break;
4829 case NEON_3R_FLOAT_CMP:
4831 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4832 if (!u) {
4833 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
4834 } else {
4835 if (size == 0) {
4836 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
4837 } else {
4838 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
4841 tcg_temp_free_ptr(fpstatus);
4842 break;
4844 case NEON_3R_FLOAT_ACMP:
4846 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4847 if (size == 0) {
4848 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
4849 } else {
4850 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
4852 tcg_temp_free_ptr(fpstatus);
4853 break;
4855 case NEON_3R_FLOAT_MINMAX:
4857 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4858 if (size == 0) {
4859 gen_helper_neon_max_f32(tmp, tmp, tmp2, fpstatus);
4860 } else {
4861 gen_helper_neon_min_f32(tmp, tmp, tmp2, fpstatus);
4863 tcg_temp_free_ptr(fpstatus);
4864 break;
4866 case NEON_3R_VRECPS_VRSQRTS:
4867 if (size == 0)
4868 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
4869 else
4870 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
4871 break;
4872 case NEON_3R_VFM:
4874 /* VFMA, VFMS: fused multiply-add */
4875 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4876 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
4877 if (size) {
4878 /* VFMS */
4879 gen_helper_vfp_negs(tmp, tmp);
4881 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
4882 tcg_temp_free_i32(tmp3);
4883 tcg_temp_free_ptr(fpstatus);
4884 break;
4886 default:
4887 abort();
4889 tcg_temp_free_i32(tmp2);
4891 /* Save the result. For elementwise operations we can put it
4892 straight into the destination register. For pairwise operations
4893 we have to be careful to avoid clobbering the source operands. */
4894 if (pairwise && rd == rm) {
4895 neon_store_scratch(pass, tmp);
4896 } else {
4897 neon_store_reg(rd, pass, tmp);
4900 } /* for pass */
4901 if (pairwise && rd == rm) {
4902 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4903 tmp = neon_load_scratch(pass);
4904 neon_store_reg(rd, pass, tmp);
4907 /* End of 3 register same size operations. */
4908 } else if (insn & (1 << 4)) {
4909 if ((insn & 0x00380080) != 0) {
4910 /* Two registers and shift. */
4911 op = (insn >> 8) & 0xf;
4912 if (insn & (1 << 7)) {
4913 /* 64-bit shift. */
4914 if (op > 7) {
4915 return 1;
4917 size = 3;
4918 } else {
4919 size = 2;
4920 while ((insn & (1 << (size + 19))) == 0)
4921 size--;
4923 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
4924 /* To avoid excessive duplication of ops we implement shift
4925 by immediate using the variable shift operations. */
4926 if (op < 8) {
4927 /* Shift by immediate:
4928 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4929 if (q && ((rd | rm) & 1)) {
4930 return 1;
4932 if (!u && (op == 4 || op == 6)) {
4933 return 1;
4935 /* Right shifts are encoded as N - shift, where N is the
4936 element size in bits. */
4937 if (op <= 4)
4938 shift = shift - (1 << (size + 3));
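/* Worked example: a 32-bit VSHR #7 has imm6 = 64 - 7 = 57; the low five
 * bits give shift = 25, and 25 - 32 = -7, so the variable shift helpers
 * below see a negative (i.e. rightward) shift count.
 */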
4939 if (size == 3) {
4940 count = q + 1;
4941 } else {
4942 count = q ? 4: 2;
4944 switch (size) {
4945 case 0:
4946 imm = (uint8_t) shift;
4947 imm |= imm << 8;
4948 imm |= imm << 16;
4949 break;
4950 case 1:
4951 imm = (uint16_t) shift;
4952 imm |= imm << 16;
4953 break;
4954 case 2:
4955 case 3:
4956 imm = shift;
4957 break;
4958 default:
4959 abort();
4962 for (pass = 0; pass < count; pass++) {
4963 if (size == 3) {
4964 neon_load_reg64(cpu_V0, rm + pass);
4965 tcg_gen_movi_i64(cpu_V1, imm);
4966 switch (op) {
4967 case 0: /* VSHR */
4968 case 1: /* VSRA */
4969 if (u)
4970 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4971 else
4972 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
4973 break;
4974 case 2: /* VRSHR */
4975 case 3: /* VRSRA */
4976 if (u)
4977 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
4978 else
4979 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
4980 break;
4981 case 4: /* VSRI */
4982 case 5: /* VSHL, VSLI */
4983 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4984 break;
4985 case 6: /* VQSHLU */
4986 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
4987 cpu_V0, cpu_V1);
4988 break;
4989 case 7: /* VQSHL */
4990 if (u) {
4991 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4992 cpu_V0, cpu_V1);
4993 } else {
4994 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4995 cpu_V0, cpu_V1);
4997 break;
4999 if (op == 1 || op == 3) {
5000 /* Accumulate. */
5001 neon_load_reg64(cpu_V1, rd + pass);
5002 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
5003 } else if (op == 4 || (op == 5 && u)) {
5004 /* Insert */
5005 neon_load_reg64(cpu_V1, rd + pass);
5006 uint64_t mask;
5007 if (shift < -63 || shift > 63) {
5008 mask = 0;
5009 } else {
5010 if (op == 4) {
5011 mask = 0xffffffffffffffffull >> -shift;
5012 } else {
5013 mask = 0xffffffffffffffffull << shift;
5016 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
5017 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5019 neon_store_reg64(cpu_V0, rd + pass);
5020 } else { /* size < 3 */
5021 /* Operands in T0 and T1. */
5022 tmp = neon_load_reg(rm, pass);
5023 tmp2 = tcg_temp_new_i32();
5024 tcg_gen_movi_i32(tmp2, imm);
5025 switch (op) {
5026 case 0: /* VSHR */
5027 case 1: /* VSRA */
5028 GEN_NEON_INTEGER_OP(shl);
5029 break;
5030 case 2: /* VRSHR */
5031 case 3: /* VRSRA */
5032 GEN_NEON_INTEGER_OP(rshl);
5033 break;
5034 case 4: /* VSRI */
5035 case 5: /* VSHL, VSLI */
5036 switch (size) {
5037 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
5038 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
5039 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
5040 default: abort();
5042 break;
5043 case 6: /* VQSHLU */
5044 switch (size) {
5045 case 0:
5046 gen_helper_neon_qshlu_s8(tmp, cpu_env,
5047 tmp, tmp2);
5048 break;
5049 case 1:
5050 gen_helper_neon_qshlu_s16(tmp, cpu_env,
5051 tmp, tmp2);
5052 break;
5053 case 2:
5054 gen_helper_neon_qshlu_s32(tmp, cpu_env,
5055 tmp, tmp2);
5056 break;
5057 default:
5058 abort();
5060 break;
5061 case 7: /* VQSHL */
5062 GEN_NEON_INTEGER_OP_ENV(qshl);
5063 break;
5065 tcg_temp_free_i32(tmp2);
5067 if (op == 1 || op == 3) {
5068 /* Accumulate. */
5069 tmp2 = neon_load_reg(rd, pass);
5070 gen_neon_add(size, tmp, tmp2);
5071 tcg_temp_free_i32(tmp2);
5072 } else if (op == 4 || (op == 5 && u)) {
5073 /* Insert */
5074 switch (size) {
5075 case 0:
5076 if (op == 4)
5077 mask = 0xff >> -shift;
5078 else
5079 mask = (uint8_t)(0xff << shift);
5080 mask |= mask << 8;
5081 mask |= mask << 16;
5082 break;
5083 case 1:
5084 if (op == 4)
5085 mask = 0xffff >> -shift;
5086 else
5087 mask = (uint16_t)(0xffff << shift);
5088 mask |= mask << 16;
5089 break;
5090 case 2:
5091 if (shift < -31 || shift > 31) {
5092 mask = 0;
5093 } else {
5094 if (op == 4)
5095 mask = 0xffffffffu >> -shift;
5096 else
5097 mask = 0xffffffffu << shift;
5099 break;
5100 default:
5101 abort();
5103 tmp2 = neon_load_reg(rd, pass);
5104 tcg_gen_andi_i32(tmp, tmp, mask);
5105 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
5106 tcg_gen_or_i32(tmp, tmp, tmp2);
5107 tcg_temp_free_i32(tmp2);
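/* VSRI/VSLI only write the bits selected by mask; destination bits outside
 * the mask are preserved, hence the read-modify-write sequence above.
 */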
5109 neon_store_reg(rd, pass, tmp);
5111 } /* for pass */
5112 } else if (op < 10) {
5113 /* Shift by immediate and narrow:
5114 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
5115 int input_unsigned = (op == 8) ? !u : u;
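/* For op == 8, U distinguishes VSHRN/VRSHRN from VQSHRUN/VQRSHRUN; the
 * latter take a signed input despite U being set, hence the inverted
 * sense of U here.
 */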
5116 if (rm & 1) {
5117 return 1;
5119 shift = shift - (1 << (size + 3));
5120 size++;
5121 if (size == 3) {
5122 tmp64 = tcg_const_i64(shift);
5123 neon_load_reg64(cpu_V0, rm);
5124 neon_load_reg64(cpu_V1, rm + 1);
5125 for (pass = 0; pass < 2; pass++) {
5126 TCGv_i64 in;
5127 if (pass == 0) {
5128 in = cpu_V0;
5129 } else {
5130 in = cpu_V1;
5132 if (q) {
5133 if (input_unsigned) {
5134 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
5135 } else {
5136 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
5138 } else {
5139 if (input_unsigned) {
5140 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
5141 } else {
5142 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
5145 tmp = tcg_temp_new_i32();
5146 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5147 neon_store_reg(rd, pass, tmp);
5148 } /* for pass */
5149 tcg_temp_free_i64(tmp64);
5150 } else {
5151 if (size == 1) {
5152 imm = (uint16_t)shift;
5153 imm |= imm << 16;
5154 } else {
5155 /* size == 2 */
5156 imm = (uint32_t)shift;
5158 tmp2 = tcg_const_i32(imm);
5159 tmp4 = neon_load_reg(rm + 1, 0);
5160 tmp5 = neon_load_reg(rm + 1, 1);
5161 for (pass = 0; pass < 2; pass++) {
5162 if (pass == 0) {
5163 tmp = neon_load_reg(rm, 0);
5164 } else {
5165 tmp = tmp4;
5167 gen_neon_shift_narrow(size, tmp, tmp2, q,
5168 input_unsigned);
5169 if (pass == 0) {
5170 tmp3 = neon_load_reg(rm, 1);
5171 } else {
5172 tmp3 = tmp5;
5174 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5175 input_unsigned);
5176 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
5177 tcg_temp_free_i32(tmp);
5178 tcg_temp_free_i32(tmp3);
5179 tmp = tcg_temp_new_i32();
5180 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5181 neon_store_reg(rd, pass, tmp);
5182 } /* for pass */
5183 tcg_temp_free_i32(tmp2);
5185 } else if (op == 10) {
5186 /* VSHLL, VMOVL */
5187 if (q || (rd & 1)) {
5188 return 1;
5190 tmp = neon_load_reg(rm, 0);
5191 tmp2 = neon_load_reg(rm, 1);
5192 for (pass = 0; pass < 2; pass++) {
5193 if (pass == 1)
5194 tmp = tmp2;
5196 gen_neon_widen(cpu_V0, tmp, size, u);
5198 if (shift != 0) {
5199 /* The shift is less than the width of the source
5200 type, so we can just shift the whole register. */
5201 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
5202 /* Widen the result of shift: we need to clear
5203 * the potential overflow bits resulting from
5204 * left bits of the narrow input appearing as
5205                  * right bits of the left neighbour narrow
5206 * input. */
5207 if (size < 2 || !u) {
5208 uint64_t imm64;
5209 if (size == 0) {
5210 imm = (0xffu >> (8 - shift));
5211 imm |= imm << 16;
5212 } else if (size == 1) {
5213 imm = 0xffff >> (16 - shift);
5214 } else {
5215 /* size == 2 */
5216 imm = 0xffffffff >> (32 - shift);
5218 if (size < 2) {
5219 imm64 = imm | (((uint64_t)imm) << 32);
5220 } else {
5221 imm64 = imm;
5223 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
5226 neon_store_reg64(cpu_V0, rd + pass);
5228 } else if (op >= 14) {
5229 /* VCVT fixed-point. */
5230 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5231 return 1;
5233 /* We have already masked out the must-be-1 top bit of imm6,
5234 * hence this 32-shift where the ARM ARM has 64-imm6.
5236 shift = 32 - shift;
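/* Worked example: imm6 = 0b110000 (48) encodes 16 fraction bits; the
 * masked value is 16 and 32 - 16 == 16, matching the ARM ARM's 64 - imm6.
 */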
5237 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5238 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
5239 if (!(op & 1)) {
5240 if (u)
5241 gen_vfp_ulto(0, shift, 1);
5242 else
5243 gen_vfp_slto(0, shift, 1);
5244 } else {
5245 if (u)
5246 gen_vfp_toul(0, shift, 1);
5247 else
5248 gen_vfp_tosl(0, shift, 1);
5250 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
5252 } else {
5253 return 1;
5255 } else { /* (insn & 0x00380080) == 0 */
5256 int invert;
5257 if (q && (rd & 1)) {
5258 return 1;
5261 op = (insn >> 8) & 0xf;
5262 /* One register and immediate. */
5263 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
5264 invert = (insn & (1 << 5)) != 0;
5265 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5266 * We choose to not special-case this and will behave as if a
5267 * valid constant encoding of 0 had been given.
5269 switch (op) {
5270 case 0: case 1:
5271 /* no-op */
5272 break;
5273 case 2: case 3:
5274 imm <<= 8;
5275 break;
5276 case 4: case 5:
5277 imm <<= 16;
5278 break;
5279 case 6: case 7:
5280 imm <<= 24;
5281 break;
5282 case 8: case 9:
5283 imm |= imm << 16;
5284 break;
5285 case 10: case 11:
5286 imm = (imm << 8) | (imm << 24);
5287 break;
5288 case 12:
5289 imm = (imm << 8) | 0xff;
5290 break;
5291 case 13:
5292 imm = (imm << 16) | 0xffff;
5293 break;
5294 case 14:
5295 imm |= (imm << 8) | (imm << 16) | (imm << 24);
5296 if (invert)
5297 imm = ~imm;
5298 break;
5299 case 15:
5300 if (invert) {
5301 return 1;
5303 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5304 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
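/* The expression above is the float case of the modified-immediate
 * expansion (AdvSIMDExpandImm, cmode == 1111): abcdefgh becomes the
 * single-precision value a:NOT(b):bbbbb:cdefgh:Zeros(19).
 */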
5305 break;
5307 if (invert)
5308 imm = ~imm;
5310 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5311 if (op & 1 && op < 12) {
5312 tmp = neon_load_reg(rd, pass);
5313 if (invert) {
5314 /* The immediate value has already been inverted, so
5315 BIC becomes AND. */
5316 tcg_gen_andi_i32(tmp, tmp, imm);
5317 } else {
5318 tcg_gen_ori_i32(tmp, tmp, imm);
5320 } else {
5321 /* VMOV, VMVN. */
5322 tmp = tcg_temp_new_i32();
5323 if (op == 14 && invert) {
5324 int n;
5325 uint32_t val;
5326 val = 0;
5327 for (n = 0; n < 4; n++) {
5328 if (imm & (1 << (n + (pass & 1) * 4)))
5329 val |= 0xff << (n * 8);
5331 tcg_gen_movi_i32(tmp, val);
5332 } else {
5333 tcg_gen_movi_i32(tmp, imm);
5336 neon_store_reg(rd, pass, tmp);
5339 } else { /* (insn & 0x00800010 == 0x00800000) */
5340 if (size != 3) {
5341 op = (insn >> 8) & 0xf;
5342 if ((insn & (1 << 6)) == 0) {
5343 /* Three registers of different lengths. */
5344 int src1_wide;
5345 int src2_wide;
5346 int prewiden;
5347 /* undefreq: bit 0 : UNDEF if size != 0
5348 * bit 1 : UNDEF if size == 0
5349 * bit 2 : UNDEF if U == 1
5350 * Note that [1:0] set implies 'always UNDEF'
5352 int undefreq;
5353 /* prewiden, src1_wide, src2_wide, undefreq */
5354 static const int neon_3reg_wide[16][4] = {
5355 {1, 0, 0, 0}, /* VADDL */
5356 {1, 1, 0, 0}, /* VADDW */
5357 {1, 0, 0, 0}, /* VSUBL */
5358 {1, 1, 0, 0}, /* VSUBW */
5359 {0, 1, 1, 0}, /* VADDHN */
5360 {0, 0, 0, 0}, /* VABAL */
5361 {0, 1, 1, 0}, /* VSUBHN */
5362 {0, 0, 0, 0}, /* VABDL */
5363 {0, 0, 0, 0}, /* VMLAL */
5364 {0, 0, 0, 6}, /* VQDMLAL */
5365 {0, 0, 0, 0}, /* VMLSL */
5366 {0, 0, 0, 6}, /* VQDMLSL */
5367 {0, 0, 0, 0}, /* Integer VMULL */
5368 {0, 0, 0, 2}, /* VQDMULL */
5369 {0, 0, 0, 5}, /* Polynomial VMULL */
5370 {0, 0, 0, 3}, /* Reserved: always UNDEF */
5373 prewiden = neon_3reg_wide[op][0];
5374 src1_wide = neon_3reg_wide[op][1];
5375 src2_wide = neon_3reg_wide[op][2];
5376 undefreq = neon_3reg_wide[op][3];
5378 if (((undefreq & 1) && (size != 0)) ||
5379 ((undefreq & 2) && (size == 0)) ||
5380 ((undefreq & 4) && u)) {
5381 return 1;
5383 if ((src1_wide && (rn & 1)) ||
5384 (src2_wide && (rm & 1)) ||
5385 (!src2_wide && (rd & 1))) {
5386 return 1;
5389 /* Avoid overlapping operands. Wide source operands are
5390 always aligned so will never overlap with wide
5391 destinations in problematic ways. */
5392 if (rd == rm && !src2_wide) {
5393 tmp = neon_load_reg(rm, 1);
5394 neon_store_scratch(2, tmp);
5395 } else if (rd == rn && !src1_wide) {
5396 tmp = neon_load_reg(rn, 1);
5397 neon_store_scratch(2, tmp);
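/* The high half of the narrow source is stashed in a scratch slot because
 * pass 0 writes a full 64-bit result to rd, which would otherwise clobber
 * it before pass 1 reads it.
 */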
5399 TCGV_UNUSED(tmp3);
5400 for (pass = 0; pass < 2; pass++) {
5401 if (src1_wide) {
5402 neon_load_reg64(cpu_V0, rn + pass);
5403 TCGV_UNUSED(tmp);
5404 } else {
5405 if (pass == 1 && rd == rn) {
5406 tmp = neon_load_scratch(2);
5407 } else {
5408 tmp = neon_load_reg(rn, pass);
5410 if (prewiden) {
5411 gen_neon_widen(cpu_V0, tmp, size, u);
5414 if (src2_wide) {
5415 neon_load_reg64(cpu_V1, rm + pass);
5416 TCGV_UNUSED(tmp2);
5417 } else {
5418 if (pass == 1 && rd == rm) {
5419 tmp2 = neon_load_scratch(2);
5420 } else {
5421 tmp2 = neon_load_reg(rm, pass);
5423 if (prewiden) {
5424 gen_neon_widen(cpu_V1, tmp2, size, u);
5427 switch (op) {
5428 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
5429 gen_neon_addl(size);
5430 break;
5431 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
5432 gen_neon_subl(size);
5433 break;
5434 case 5: case 7: /* VABAL, VABDL */
5435 switch ((size << 1) | u) {
5436 case 0:
5437 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5438 break;
5439 case 1:
5440 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5441 break;
5442 case 2:
5443 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5444 break;
5445 case 3:
5446 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5447 break;
5448 case 4:
5449 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5450 break;
5451 case 5:
5452 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5453 break;
5454 default: abort();
5456 tcg_temp_free_i32(tmp2);
5457 tcg_temp_free_i32(tmp);
5458 break;
5459 case 8: case 9: case 10: case 11: case 12: case 13:
5460 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
5461 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5462 break;
5463 case 14: /* Polynomial VMULL */
5464 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
5465 tcg_temp_free_i32(tmp2);
5466 tcg_temp_free_i32(tmp);
5467 break;
5468 default: /* 15 is RESERVED: caught earlier */
5469 abort();
5471 if (op == 13) {
5472 /* VQDMULL */
5473 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5474 neon_store_reg64(cpu_V0, rd + pass);
5475 } else if (op == 5 || (op >= 8 && op <= 11)) {
5476 /* Accumulate. */
5477 neon_load_reg64(cpu_V1, rd + pass);
5478 switch (op) {
5479 case 10: /* VMLSL */
5480 gen_neon_negl(cpu_V0, size);
5481 /* Fall through */
5482 case 5: case 8: /* VABAL, VMLAL */
5483 gen_neon_addl(size);
5484 break;
5485 case 9: case 11: /* VQDMLAL, VQDMLSL */
5486 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5487 if (op == 11) {
5488 gen_neon_negl(cpu_V0, size);
5490 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5491 break;
5492 default:
5493 abort();
5495 neon_store_reg64(cpu_V0, rd + pass);
5496 } else if (op == 4 || op == 6) {
5497 /* Narrowing operation. */
5498 tmp = tcg_temp_new_i32();
5499 if (!u) {
5500 switch (size) {
5501 case 0:
5502 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5503 break;
5504 case 1:
5505 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5506 break;
5507 case 2:
5508 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5509 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5510 break;
5511 default: abort();
5513 } else {
5514 switch (size) {
5515 case 0:
5516 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5517 break;
5518 case 1:
5519 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5520 break;
5521 case 2:
5522 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5523 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5524 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5525 break;
5526 default: abort();
5529 if (pass == 0) {
5530 tmp3 = tmp;
5531 } else {
5532 neon_store_reg(rd, 0, tmp3);
5533 neon_store_reg(rd, 1, tmp);
5535 } else {
5536 /* Write back the result. */
5537 neon_store_reg64(cpu_V0, rd + pass);
5540 } else {
5541 /* Two registers and a scalar. NB that for ops of this form
5542 * the ARM ARM labels bit 24 as Q, but it is in our variable
5543 * 'u', not 'q'.
5545 if (size == 0) {
5546 return 1;
5548 switch (op) {
5549         case 1: /* Floating point VMLA scalar */
5550 case 5: /* Floating point VMLS scalar */
5551 case 9: /* Floating point VMUL scalar */
5552 if (size == 1) {
5553 return 1;
5555 /* fall through */
5556 case 0: /* Integer VMLA scalar */
5557 case 4: /* Integer VMLS scalar */
5558 case 8: /* Integer VMUL scalar */
5559 case 12: /* VQDMULH scalar */
5560 case 13: /* VQRDMULH scalar */
5561 if (u && ((rd | rn) & 1)) {
5562 return 1;
5564 tmp = neon_get_scalar(size, rm);
5565 neon_store_scratch(0, tmp);
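/* Keep the extracted scalar in a scratch slot so it can be reloaded
 * unmodified for every pass of the loop below.
 */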
5566 for (pass = 0; pass < (u ? 4 : 2); pass++) {
5567 tmp = neon_load_scratch(0);
5568 tmp2 = neon_load_reg(rn, pass);
5569 if (op == 12) {
5570 if (size == 1) {
5571 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5572 } else {
5573 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5575 } else if (op == 13) {
5576 if (size == 1) {
5577 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5578 } else {
5579 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5581 } else if (op & 1) {
5582 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5583 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
5584 tcg_temp_free_ptr(fpstatus);
5585 } else {
5586 switch (size) {
5587 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5588 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5589 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
5590 default: abort();
5593 tcg_temp_free_i32(tmp2);
5594 if (op < 8) {
5595 /* Accumulate. */
5596 tmp2 = neon_load_reg(rd, pass);
5597 switch (op) {
5598 case 0:
5599 gen_neon_add(size, tmp, tmp2);
5600 break;
5601 case 1:
5603 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5604 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5605 tcg_temp_free_ptr(fpstatus);
5606 break;
5608 case 4:
5609 gen_neon_rsb(size, tmp, tmp2);
5610 break;
5611 case 5:
5613 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5614 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
5615 tcg_temp_free_ptr(fpstatus);
5616 break;
5618 default:
5619 abort();
5621 tcg_temp_free_i32(tmp2);
5623 neon_store_reg(rd, pass, tmp);
5625 break;
5626 case 3: /* VQDMLAL scalar */
5627 case 7: /* VQDMLSL scalar */
5628 case 11: /* VQDMULL scalar */
5629 if (u == 1) {
5630 return 1;
5632 /* fall through */
5633         case 2: /* VMLAL scalar */
5634 case 6: /* VMLSL scalar */
5635 case 10: /* VMULL scalar */
5636 if (rd & 1) {
5637 return 1;
5639 tmp2 = neon_get_scalar(size, rm);
5640 /* We need a copy of tmp2 because gen_neon_mull
5641 * deletes it during pass 0. */
5642 tmp4 = tcg_temp_new_i32();
5643 tcg_gen_mov_i32(tmp4, tmp2);
5644 tmp3 = neon_load_reg(rn, 1);
5646 for (pass = 0; pass < 2; pass++) {
5647 if (pass == 0) {
5648 tmp = neon_load_reg(rn, 0);
5649 } else {
5650 tmp = tmp3;
5651 tmp2 = tmp4;
5653 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5654 if (op != 11) {
5655 neon_load_reg64(cpu_V1, rd + pass);
5657 switch (op) {
5658 case 6:
5659 gen_neon_negl(cpu_V0, size);
5660 /* Fall through */
5661 case 2:
5662 gen_neon_addl(size);
5663 break;
5664 case 3: case 7:
5665 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5666 if (op == 7) {
5667 gen_neon_negl(cpu_V0, size);
5669 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5670 break;
5671 case 10:
5672 /* no-op */
5673 break;
5674 case 11:
5675 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5676 break;
5677 default:
5678 abort();
5680 neon_store_reg64(cpu_V0, rd + pass);
5684 break;
5685 default: /* 14 and 15 are RESERVED */
5686 return 1;
5689 } else { /* size == 3 */
5690 if (!u) {
5691 /* Extract. */
5692 imm = (insn >> 8) & 0xf;
5694 if (imm > 7 && !q)
5695 return 1;
5697 if (q && ((rd | rn | rm) & 1)) {
5698 return 1;
5701 if (imm == 0) {
5702 neon_load_reg64(cpu_V0, rn);
5703 if (q) {
5704 neon_load_reg64(cpu_V1, rn + 1);
5706 } else if (imm == 8) {
5707 neon_load_reg64(cpu_V0, rn + 1);
5708 if (q) {
5709 neon_load_reg64(cpu_V1, rm);
5711 } else if (q) {
5712 tmp64 = tcg_temp_new_i64();
5713 if (imm < 8) {
5714 neon_load_reg64(cpu_V0, rn);
5715 neon_load_reg64(tmp64, rn + 1);
5716 } else {
5717 neon_load_reg64(cpu_V0, rn + 1);
5718 neon_load_reg64(tmp64, rm);
5720 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
5721 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
5722 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5723 if (imm < 8) {
5724 neon_load_reg64(cpu_V1, rm);
5725 } else {
5726 neon_load_reg64(cpu_V1, rm + 1);
5727 imm -= 8;
5729 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5730 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5731 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
5732 tcg_temp_free_i64(tmp64);
5733 } else {
5734 /* BUGFIX */
5735 neon_load_reg64(cpu_V0, rn);
5736 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
5737 neon_load_reg64(cpu_V1, rm);
5738 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5739 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5741 neon_store_reg64(cpu_V0, rd);
5742 if (q) {
5743 neon_store_reg64(cpu_V1, rd + 1);
5745 } else if ((insn & (1 << 11)) == 0) {
5746 /* Two register misc. */
5747 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
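/* op[5:4] comes from insn bits [17:16] and op[3:0] from bits [10:7],
 * matching the NEON_2RM_* values above.
 */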
5748 size = (insn >> 18) & 3;
5749 /* UNDEF for unknown op values and bad op-size combinations */
5750 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
5751 return 1;
5753 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
5754 q && ((rm | rd) & 1)) {
5755 return 1;
5757 switch (op) {
5758 case NEON_2RM_VREV64:
5759 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5760 tmp = neon_load_reg(rm, pass * 2);
5761 tmp2 = neon_load_reg(rm, pass * 2 + 1);
5762 switch (size) {
5763 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5764 case 1: gen_swap_half(tmp); break;
5765 case 2: /* no-op */ break;
5766 default: abort();
5768 neon_store_reg(rd, pass * 2 + 1, tmp);
5769 if (size == 2) {
5770 neon_store_reg(rd, pass * 2, tmp2);
5771 } else {
5772 switch (size) {
5773 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5774 case 1: gen_swap_half(tmp2); break;
5775 default: abort();
5777 neon_store_reg(rd, pass * 2, tmp2);
5780 break;
5781 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
5782 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
5783 for (pass = 0; pass < q + 1; pass++) {
5784 tmp = neon_load_reg(rm, pass * 2);
5785 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5786 tmp = neon_load_reg(rm, pass * 2 + 1);
5787 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5788 switch (size) {
5789 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5790 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5791 case 2: tcg_gen_add_i64(CPU_V001); break;
5792 default: abort();
5794 if (op >= NEON_2RM_VPADAL) {
5795 /* Accumulate. */
5796 neon_load_reg64(cpu_V1, rd + pass);
5797 gen_neon_addl(size);
5799 neon_store_reg64(cpu_V0, rd + pass);
5801 break;
5802 case NEON_2RM_VTRN:
5803 if (size == 2) {
5804 int n;
5805 for (n = 0; n < (q ? 4 : 2); n += 2) {
5806 tmp = neon_load_reg(rm, n);
5807 tmp2 = neon_load_reg(rd, n + 1);
5808 neon_store_reg(rm, n, tmp2);
5809 neon_store_reg(rd, n + 1, tmp);
5811 } else {
5812 goto elementwise;
5814 break;
5815 case NEON_2RM_VUZP:
5816 if (gen_neon_unzip(rd, rm, size, q)) {
5817 return 1;
5819 break;
5820 case NEON_2RM_VZIP:
5821 if (gen_neon_zip(rd, rm, size, q)) {
5822 return 1;
5824 break;
5825 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
5826 /* also VQMOVUN; op field and mnemonics don't line up */
5827 if (rm & 1) {
5828 return 1;
5830 TCGV_UNUSED(tmp2);
5831 for (pass = 0; pass < 2; pass++) {
5832 neon_load_reg64(cpu_V0, rm + pass);
5833 tmp = tcg_temp_new_i32();
5834 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
5835 tmp, cpu_V0);
5836 if (pass == 0) {
5837 tmp2 = tmp;
5838 } else {
5839 neon_store_reg(rd, 0, tmp2);
5840 neon_store_reg(rd, 1, tmp);
5843 break;
5844 case NEON_2RM_VSHLL:
5845 if (q || (rd & 1)) {
5846 return 1;
5848 tmp = neon_load_reg(rm, 0);
5849 tmp2 = neon_load_reg(rm, 1);
5850 for (pass = 0; pass < 2; pass++) {
5851 if (pass == 1)
5852 tmp = tmp2;
5853 gen_neon_widen(cpu_V0, tmp, size, 1);
5854 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
5855 neon_store_reg64(cpu_V0, rd + pass);
5857 break;
5858 case NEON_2RM_VCVT_F16_F32:
5859 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5860 q || (rm & 1)) {
5861 return 1;
5863 tmp = tcg_temp_new_i32();
5864 tmp2 = tcg_temp_new_i32();
5865 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
5866 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5867 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
5868 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5869 tcg_gen_shli_i32(tmp2, tmp2, 16);
5870 tcg_gen_or_i32(tmp2, tmp2, tmp);
5871 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
5872 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5873 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
5874 neon_store_reg(rd, 0, tmp2);
5875 tmp2 = tcg_temp_new_i32();
5876 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5877 tcg_gen_shli_i32(tmp2, tmp2, 16);
5878 tcg_gen_or_i32(tmp2, tmp2, tmp);
5879 neon_store_reg(rd, 1, tmp2);
5880 tcg_temp_free_i32(tmp);
5881 break;
5882 case NEON_2RM_VCVT_F32_F16:
5883 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5884 q || (rd & 1)) {
5885 return 1;
5887 tmp3 = tcg_temp_new_i32();
5888 tmp = neon_load_reg(rm, 0);
5889 tmp2 = neon_load_reg(rm, 1);
5890 tcg_gen_ext16u_i32(tmp3, tmp);
5891 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5892 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
5893 tcg_gen_shri_i32(tmp3, tmp, 16);
5894 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5895 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
5896 tcg_temp_free_i32(tmp);
5897 tcg_gen_ext16u_i32(tmp3, tmp2);
5898 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5899 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
5900 tcg_gen_shri_i32(tmp3, tmp2, 16);
5901 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5902 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
5903 tcg_temp_free_i32(tmp2);
5904 tcg_temp_free_i32(tmp3);
5905 break;
5906 default:
5907 elementwise:
5908 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5909 if (neon_2rm_is_float_op(op)) {
5910 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5911 neon_reg_offset(rm, pass));
5912 TCGV_UNUSED(tmp);
5913 } else {
5914 tmp = neon_load_reg(rm, pass);
5916 switch (op) {
5917 case NEON_2RM_VREV32:
5918 switch (size) {
5919 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5920 case 1: gen_swap_half(tmp); break;
5921 default: abort();
5923 break;
5924 case NEON_2RM_VREV16:
5925 gen_rev16(tmp);
5926 break;
5927 case NEON_2RM_VCLS:
5928 switch (size) {
5929 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
5930 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
5931 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
5932 default: abort();
5934 break;
5935 case NEON_2RM_VCLZ:
5936 switch (size) {
5937 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
5938 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
5939 case 2: gen_helper_clz(tmp, tmp); break;
5940 default: abort();
5942 break;
5943 case NEON_2RM_VCNT:
5944 gen_helper_neon_cnt_u8(tmp, tmp);
5945 break;
5946 case NEON_2RM_VMVN:
5947 tcg_gen_not_i32(tmp, tmp);
5948 break;
5949 case NEON_2RM_VQABS:
5950 switch (size) {
5951 case 0:
5952 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
5953 break;
5954 case 1:
5955 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
5956 break;
5957 case 2:
5958 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
5959 break;
5960 default: abort();
5962 break;
5963 case NEON_2RM_VQNEG:
5964 switch (size) {
5965 case 0:
5966 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
5967 break;
5968 case 1:
5969 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
5970 break;
5971 case 2:
5972 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
5973 break;
5974 default: abort();
5976 break;
5977 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
5978 tmp2 = tcg_const_i32(0);
5979 switch(size) {
5980 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
5981 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
5982 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
5983 default: abort();
5985 tcg_temp_free(tmp2);
5986 if (op == NEON_2RM_VCLE0) {
5987 tcg_gen_not_i32(tmp, tmp);
5989 break;
5990 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
5991 tmp2 = tcg_const_i32(0);
5992 switch(size) {
5993 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
5994 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
5995 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
5996 default: abort();
5998 tcg_temp_free(tmp2);
5999 if (op == NEON_2RM_VCLT0) {
6000 tcg_gen_not_i32(tmp, tmp);
6002 break;
6003 case NEON_2RM_VCEQ0:
6004 tmp2 = tcg_const_i32(0);
6005 switch(size) {
6006 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6007 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6008 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
6009 default: abort();
6011 tcg_temp_free(tmp2);
6012 break;
6013 case NEON_2RM_VABS:
6014 switch(size) {
6015 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
6016 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
6017 case 2: tcg_gen_abs_i32(tmp, tmp); break;
6018 default: abort();
6020 break;
6021 case NEON_2RM_VNEG:
6022 tmp2 = tcg_const_i32(0);
6023 gen_neon_rsb(size, tmp, tmp2);
6024 tcg_temp_free(tmp2);
6025 break;
6026 case NEON_2RM_VCGT0_F:
6028 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6029 tmp2 = tcg_const_i32(0);
6030 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
6031 tcg_temp_free(tmp2);
6032 tcg_temp_free_ptr(fpstatus);
6033 break;
6035 case NEON_2RM_VCGE0_F:
6037 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6038 tmp2 = tcg_const_i32(0);
6039 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
6040 tcg_temp_free(tmp2);
6041 tcg_temp_free_ptr(fpstatus);
6042 break;
6044 case NEON_2RM_VCEQ0_F:
6046 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6047 tmp2 = tcg_const_i32(0);
6048 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
6049 tcg_temp_free(tmp2);
6050 tcg_temp_free_ptr(fpstatus);
6051 break;
6053 case NEON_2RM_VCLE0_F:
6055 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6056 tmp2 = tcg_const_i32(0);
6057 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
6058 tcg_temp_free(tmp2);
6059 tcg_temp_free_ptr(fpstatus);
6060 break;
6062 case NEON_2RM_VCLT0_F:
6064 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6065 tmp2 = tcg_const_i32(0);
6066 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
6067 tcg_temp_free(tmp2);
6068 tcg_temp_free_ptr(fpstatus);
6069 break;
6071 case NEON_2RM_VABS_F:
6072 gen_vfp_abs(0);
6073 break;
6074 case NEON_2RM_VNEG_F:
6075 gen_vfp_neg(0);
6076 break;
6077 case NEON_2RM_VSWP:
6078 tmp2 = neon_load_reg(rd, pass);
6079 neon_store_reg(rm, pass, tmp2);
6080 break;
6081 case NEON_2RM_VTRN:
6082 tmp2 = neon_load_reg(rd, pass);
6083 switch (size) {
6084 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6085 case 1: gen_neon_trn_u16(tmp, tmp2); break;
6086 default: abort();
6088 neon_store_reg(rm, pass, tmp2);
6089 break;
6090 case NEON_2RM_VRECPE:
6091 gen_helper_recpe_u32(tmp, tmp, cpu_env);
6092 break;
6093 case NEON_2RM_VRSQRTE:
6094 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
6095 break;
6096 case NEON_2RM_VRECPE_F:
6097 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
6098 break;
6099 case NEON_2RM_VRSQRTE_F:
6100 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
6101 break;
6102 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
6103 gen_vfp_sito(0, 1);
6104 break;
6105 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
6106 gen_vfp_uito(0, 1);
6107 break;
6108 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
6109 gen_vfp_tosiz(0, 1);
6110 break;
6111 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
6112 gen_vfp_touiz(0, 1);
6113 break;
6114 default:
6115 /* Reserved op values were caught by the
6116 * neon_2rm_sizes[] check earlier.
6118 abort();
6120 if (neon_2rm_is_float_op(op)) {
6121 tcg_gen_st_f32(cpu_F0s, cpu_env,
6122 neon_reg_offset(rd, pass));
6123 } else {
6124 neon_store_reg(rd, pass, tmp);
6127 break;
6129 } else if ((insn & (1 << 10)) == 0) {
6130 /* VTBL, VTBX. */
6131 int n = ((insn >> 8) & 3) + 1;
6132 if ((rn + n) > 32) {
6133 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6134 * helper function running off the end of the register file.
6136 return 1;
6138 n <<= 3;
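/* n is now the table length in bytes (8 bytes per D register). */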
6139 if (insn & (1 << 6)) {
6140 tmp = neon_load_reg(rd, 0);
6141 } else {
6142 tmp = tcg_temp_new_i32();
6143 tcg_gen_movi_i32(tmp, 0);
6145 tmp2 = neon_load_reg(rm, 0);
6146 tmp4 = tcg_const_i32(rn);
6147 tmp5 = tcg_const_i32(n);
6148 gen_helper_neon_tbl(tmp2, cpu_env, tmp2, tmp, tmp4, tmp5);
6149 tcg_temp_free_i32(tmp);
6150 if (insn & (1 << 6)) {
6151 tmp = neon_load_reg(rd, 1);
6152 } else {
6153 tmp = tcg_temp_new_i32();
6154 tcg_gen_movi_i32(tmp, 0);
6156 tmp3 = neon_load_reg(rm, 1);
6157 gen_helper_neon_tbl(tmp3, cpu_env, tmp3, tmp, tmp4, tmp5);
6158 tcg_temp_free_i32(tmp5);
6159 tcg_temp_free_i32(tmp4);
6160 neon_store_reg(rd, 0, tmp2);
6161 neon_store_reg(rd, 1, tmp3);
6162 tcg_temp_free_i32(tmp);
6163 } else if ((insn & 0x380) == 0) {
6164 /* VDUP */
6165 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
6166 return 1;
6168 if (insn & (1 << 19)) {
6169 tmp = neon_load_reg(rm, 1);
6170 } else {
6171 tmp = neon_load_reg(rm, 0);
6173 if (insn & (1 << 16)) {
6174 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
6175 } else if (insn & (1 << 17)) {
6176 if ((insn >> 18) & 1)
6177 gen_neon_dup_high16(tmp);
6178 else
6179 gen_neon_dup_low16(tmp);
6181 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6182 tmp2 = tcg_temp_new_i32();
6183 tcg_gen_mov_i32(tmp2, tmp);
6184 neon_store_reg(rd, pass, tmp2);
6186 tcg_temp_free_i32(tmp);
6187 } else {
6188 return 1;
6192 return 0;
6195 static int disas_coproc_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
6197 int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
6198 const ARMCPRegInfo *ri;
6199 ARMCPU *cpu = arm_env_get_cpu(env);
6201 cpnum = (insn >> 8) & 0xf;
6202 if (arm_feature(env, ARM_FEATURE_XSCALE)
6203 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
6204 return 1;
6206 /* First check for coprocessor space used for actual instructions */
6207 switch (cpnum) {
6208 case 0:
6209 case 1:
6210 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6211 return disas_iwmmxt_insn(env, s, insn);
6212 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
6213 return disas_dsp_insn(env, s, insn);
6215 return 1;
6216 case 10:
6217 case 11:
6218 return disas_vfp_insn (env, s, insn);
6219 default:
6220 break;
6223 /* Otherwise treat as a generic register access */
6224 is64 = (insn & (1 << 25)) == 0;
6225 if (!is64 && ((insn & (1 << 4)) == 0)) {
6226 /* cdp */
6227 return 1;
6230 crm = insn & 0xf;
6231 if (is64) {
6232 crn = 0;
6233 opc1 = (insn >> 4) & 0xf;
6234 opc2 = 0;
6235 rt2 = (insn >> 16) & 0xf;
6236 } else {
6237 crn = (insn >> 16) & 0xf;
6238 opc1 = (insn >> 21) & 7;
6239 opc2 = (insn >> 5) & 7;
6240 rt2 = 0;
6242 isread = (insn >> 20) & 1;
6243 rt = (insn >> 12) & 0xf;
6245 ri = get_arm_cp_reginfo(cpu,
6246 ENCODE_CP_REG(cpnum, is64, crn, crm, opc1, opc2));
6247 if (ri) {
6248 /* Check access permissions */
6249 if (!cp_access_ok(env, ri, isread)) {
6250 return 1;
6253 /* Handle special cases first */
6254 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
6255 case ARM_CP_NOP:
6256 return 0;
6257 case ARM_CP_WFI:
6258 if (isread) {
6259 return 1;
6261 gen_set_pc_im(s->pc);
6262 s->is_jmp = DISAS_WFI;
6263 return 0;
6264 default:
6265 break;
6268 if (isread) {
6269 /* Read */
6270 if (is64) {
6271 TCGv_i64 tmp64;
6272 TCGv_i32 tmp;
6273 if (ri->type & ARM_CP_CONST) {
6274 tmp64 = tcg_const_i64(ri->resetvalue);
6275 } else if (ri->readfn) {
6276 TCGv_ptr tmpptr;
6277 gen_set_pc_im(s->pc);
6278 tmp64 = tcg_temp_new_i64();
6279 tmpptr = tcg_const_ptr(ri);
6280 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
6281 tcg_temp_free_ptr(tmpptr);
6282 } else {
6283 tmp64 = tcg_temp_new_i64();
6284 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
6286 tmp = tcg_temp_new_i32();
6287 tcg_gen_trunc_i64_i32(tmp, tmp64);
6288 store_reg(s, rt, tmp);
6289 tcg_gen_shri_i64(tmp64, tmp64, 32);
6290 tmp = tcg_temp_new_i32();
6291 tcg_gen_trunc_i64_i32(tmp, tmp64);
6292 tcg_temp_free_i64(tmp64);
6293 store_reg(s, rt2, tmp);
6294 } else {
6295 TCGv tmp;
6296 if (ri->type & ARM_CP_CONST) {
6297 tmp = tcg_const_i32(ri->resetvalue);
6298 } else if (ri->readfn) {
6299 TCGv_ptr tmpptr;
6300 gen_set_pc_im(s->pc);
6301 tmp = tcg_temp_new_i32();
6302 tmpptr = tcg_const_ptr(ri);
6303 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
6304 tcg_temp_free_ptr(tmpptr);
6305 } else {
6306 tmp = load_cpu_offset(ri->fieldoffset);
6308 if (rt == 15) {
6309                 /* Destination register of r15 for 32-bit loads sets
6310 * the condition codes from the high 4 bits of the value
6312 gen_set_nzcv(tmp);
6313 tcg_temp_free_i32(tmp);
6314 } else {
6315 store_reg(s, rt, tmp);
6318 } else {
6319 /* Write */
6320 if (ri->type & ARM_CP_CONST) {
6321 /* If not forbidden by access permissions, treat as WI */
6322 return 0;
6325 if (is64) {
6326 TCGv tmplo, tmphi;
6327 TCGv_i64 tmp64 = tcg_temp_new_i64();
6328 tmplo = load_reg(s, rt);
6329 tmphi = load_reg(s, rt2);
6330 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
6331 tcg_temp_free_i32(tmplo);
6332 tcg_temp_free_i32(tmphi);
6333 if (ri->writefn) {
6334 TCGv_ptr tmpptr = tcg_const_ptr(ri);
6335 gen_set_pc_im(s->pc);
6336 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
6337 tcg_temp_free_ptr(tmpptr);
6338 } else {
6339 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
6341 tcg_temp_free_i64(tmp64);
6342 } else {
6343 if (ri->writefn) {
6344 TCGv tmp;
6345 TCGv_ptr tmpptr;
6346 gen_set_pc_im(s->pc);
6347 tmp = load_reg(s, rt);
6348 tmpptr = tcg_const_ptr(ri);
6349 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
6350 tcg_temp_free_ptr(tmpptr);
6351 tcg_temp_free_i32(tmp);
6352 } else {
6353 TCGv tmp = load_reg(s, rt);
6354 store_cpu_offset(tmp, ri->fieldoffset);
6357 /* We default to ending the TB on a coprocessor register write,
6358 * but allow this to be suppressed by the register definition
6359 * (usually only necessary to work around guest bugs).
6361 if (!(ri->type & ARM_CP_SUPPRESS_TB_END)) {
6362 gen_lookup_tb(s);
6365 return 0;
6368 return 1;
6372 /* Store a 64-bit value to a register pair. Clobbers val. */
6373 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
6375 TCGv tmp;
6376 tmp = tcg_temp_new_i32();
6377 tcg_gen_trunc_i64_i32(tmp, val);
6378 store_reg(s, rlow, tmp);
6379 tmp = tcg_temp_new_i32();
6380 tcg_gen_shri_i64(val, val, 32);
6381 tcg_gen_trunc_i64_i32(tmp, val);
6382 store_reg(s, rhigh, tmp);
6385 /* load a 32-bit value from a register and perform a 64-bit accumulate. */
6386 static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
6388 TCGv_i64 tmp;
6389 TCGv tmp2;
6391 /* Load value and extend to 64 bits. */
6392 tmp = tcg_temp_new_i64();
6393 tmp2 = load_reg(s, rlow);
6394 tcg_gen_extu_i32_i64(tmp, tmp2);
6395 tcg_temp_free_i32(tmp2);
6396 tcg_gen_add_i64(val, val, tmp);
6397 tcg_temp_free_i64(tmp);
6400 /* load and add a 64-bit value from a register pair. */
6401 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
6403 TCGv_i64 tmp;
6404 TCGv tmpl;
6405 TCGv tmph;
6407 /* Load 64-bit value rd:rn. */
6408 tmpl = load_reg(s, rlow);
6409 tmph = load_reg(s, rhigh);
6410 tmp = tcg_temp_new_i64();
6411 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
6412 tcg_temp_free_i32(tmpl);
6413 tcg_temp_free_i32(tmph);
6414 tcg_gen_add_i64(val, val, tmp);
6415 tcg_temp_free_i64(tmp);
6418 /* Set N and Z flags from a 64-bit value. */
6419 static void gen_logicq_cc(TCGv_i64 val)
6421 TCGv tmp = tcg_temp_new_i32();
6422 gen_helper_logicq_cc(tmp, val);
6423 gen_logic_CC(tmp);
6424 tcg_temp_free_i32(tmp);
6427 /* Load/Store exclusive instructions are implemented by remembering
6428 the value/address loaded, and seeing if these are the same
6429 when the store is performed. This should be sufficient to implement
6430 the architecturally mandated semantics, and avoids having to monitor
6431 regular stores.
6433 In system emulation mode only one CPU will be running at once, so
6434 this sequence is effectively atomic. In user emulation mode we
6435 throw an exception and handle the atomic operation elsewhere. */
6436 static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
6437 TCGv addr, int size)
6439 TCGv tmp;
6441 switch (size) {
6442 case 0:
6443 tmp = gen_ld8u(addr, IS_USER(s));
6444 break;
6445 case 1:
6446 tmp = gen_ld16u(addr, IS_USER(s));
6447 break;
6448 case 2:
6449 case 3:
6450 tmp = gen_ld32(addr, IS_USER(s));
6451 break;
6452 default:
6453 abort();
6455 tcg_gen_mov_i32(cpu_exclusive_val, tmp);
6456 store_reg(s, rt, tmp);
6457 if (size == 3) {
6458 TCGv tmp2 = tcg_temp_new_i32();
6459 tcg_gen_addi_i32(tmp2, addr, 4);
6460 tmp = gen_ld32(tmp2, IS_USER(s));
6461 tcg_temp_free_i32(tmp2);
6462 tcg_gen_mov_i32(cpu_exclusive_high, tmp);
6463 store_reg(s, rt2, tmp);
6465 tcg_gen_mov_i32(cpu_exclusive_addr, addr);
6468 static void gen_clrex(DisasContext *s)
6470 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6473 #ifdef CONFIG_USER_ONLY
6474 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6475 TCGv addr, int size)
6477 tcg_gen_mov_i32(cpu_exclusive_test, addr);
6478 tcg_gen_movi_i32(cpu_exclusive_info,
6479 size | (rd << 4) | (rt << 8) | (rt2 << 12));
6480 gen_exception_insn(s, 4, EXCP_STREX);
6482 #else
6483 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6484 TCGv addr, int size)
6486 TCGv tmp;
6487 int done_label;
6488 int fail_label;
6490 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
6491 [addr] = {Rt};
6492 {Rd} = 0;
6493 } else {
6494 {Rd} = 1;
6495 } */
6496 fail_label = gen_new_label();
6497 done_label = gen_new_label();
6498 tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
6499 switch (size) {
6500 case 0:
6501 tmp = gen_ld8u(addr, IS_USER(s));
6502 break;
6503 case 1:
6504 tmp = gen_ld16u(addr, IS_USER(s));
6505 break;
6506 case 2:
6507 case 3:
6508 tmp = gen_ld32(addr, IS_USER(s));
6509 break;
6510 default:
6511 abort();
6513 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
6514 tcg_temp_free_i32(tmp);
6515 if (size == 3) {
6516 TCGv tmp2 = tcg_temp_new_i32();
6517 tcg_gen_addi_i32(tmp2, addr, 4);
6518 tmp = gen_ld32(tmp2, IS_USER(s));
6519 tcg_temp_free_i32(tmp2);
6520 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
6521 tcg_temp_free_i32(tmp);
6523 tmp = load_reg(s, rt);
6524 switch (size) {
6525 case 0:
6526 gen_st8(tmp, addr, IS_USER(s));
6527 break;
6528 case 1:
6529 gen_st16(tmp, addr, IS_USER(s));
6530 break;
6531 case 2:
6532 case 3:
6533 gen_st32(tmp, addr, IS_USER(s));
6534 break;
6535 default:
6536 abort();
6538 if (size == 3) {
6539 tcg_gen_addi_i32(addr, addr, 4);
6540 tmp = load_reg(s, rt2);
6541 gen_st32(tmp, addr, IS_USER(s));
6543 tcg_gen_movi_i32(cpu_R[rd], 0);
6544 tcg_gen_br(done_label);
6545 gen_set_label(fail_label);
6546 tcg_gen_movi_i32(cpu_R[rd], 1);
6547 gen_set_label(done_label);
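/* Success or failure, the exclusive monitor is cleared: -1 marks no outstanding exclusive access (the same value gen_clrex stores). */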
6548 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6550 #endif
6552 static void disas_arm_insn(CPUARMState * env, DisasContext *s)
6554 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
6555 TCGv tmp;
6556 TCGv tmp2;
6557 TCGv tmp3;
6558 TCGv addr;
6559 TCGv_i64 tmp64;
6561 insn = arm_ldl_code(env, s->pc, s->bswap_code);
6562 s->pc += 4;
6564 /* M variants do not implement ARM mode. */
6565 if (IS_M(env))
6566 goto illegal_op;
6567 cond = insn >> 28;
6568 if (cond == 0xf){
6569 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
6570 * choose to UNDEF. In ARMv5 and above the space is used
6571 * for miscellaneous unconditional instructions. */
6573 ARCH(5);
6575 /* Unconditional instructions. */
6576 if (((insn >> 25) & 7) == 1) {
6577 /* NEON Data processing. */
6578 if (!arm_feature(env, ARM_FEATURE_NEON))
6579 goto illegal_op;
6581 if (disas_neon_data_insn(env, s, insn))
6582 goto illegal_op;
6583 return;
6585 if ((insn & 0x0f100000) == 0x04000000) {
6586 /* NEON load/store. */
6587 if (!arm_feature(env, ARM_FEATURE_NEON))
6588 goto illegal_op;
6590 if (disas_neon_ls_insn(env, s, insn))
6591 goto illegal_op;
6592 return;
6594 if (((insn & 0x0f30f000) == 0x0510f000) ||
6595 ((insn & 0x0f30f010) == 0x0710f000)) {
6596 if ((insn & (1 << 22)) == 0) {
6597 /* PLDW; v7MP */
6598 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6599 goto illegal_op;
6602 /* Otherwise PLD; v5TE+ */
6603 ARCH(5TE);
6604 return;
6606 if (((insn & 0x0f70f000) == 0x0450f000) ||
6607 ((insn & 0x0f70f010) == 0x0650f000)) {
6608 ARCH(7);
6609 return; /* PLI; V7 */
6611 if (((insn & 0x0f700000) == 0x04100000) ||
6612 ((insn & 0x0f700010) == 0x06100000)) {
6613 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6614 goto illegal_op;
6616 return; /* v7MP: Unallocated memory hint: must NOP */
6619 if ((insn & 0x0ffffdff) == 0x01010000) {
6620 ARCH(6);
6621 /* setend */
6622 if (((insn >> 9) & 1) != s->bswap_code) {
6623 /* Dynamic endianness switching not implemented. */
6624 goto illegal_op;
6626 return;
6627 } else if ((insn & 0x0fffff00) == 0x057ff000) {
6628 switch ((insn >> 4) & 0xf) {
6629 case 1: /* clrex */
6630 ARCH(6K);
6631 gen_clrex(s);
6632 return;
6633 case 4: /* dsb */
6634 case 5: /* dmb */
6635 case 6: /* isb */
6636 ARCH(7);
6637 /* We don't emulate caches, so these are no-ops. */
6638 return;
6639 default:
6640 goto illegal_op;
6642 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
6643 /* srs */
6644 int32_t offset;
6645 if (IS_USER(s))
6646 goto illegal_op;
6647 ARCH(6);
6648 op1 = (insn & 0x1f);
6649 addr = tcg_temp_new_i32();
6650 tmp = tcg_const_i32(op1);
6651 gen_helper_get_r13_banked(addr, cpu_env, tmp);
6652 tcg_temp_free_i32(tmp);
6653 i = (insn >> 23) & 3;
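/* Point addr at the lower word of the two-word block (LR, then SPSR), matching an STM of two registers in DA/IA/DB/IB mode. */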
6654 switch (i) {
6655 case 0: offset = -4; break; /* DA */
6656 case 1: offset = 0; break; /* IA */
6657 case 2: offset = -8; break; /* DB */
6658 case 3: offset = 4; break; /* IB */
6659 default: abort();
6661 if (offset)
6662 tcg_gen_addi_i32(addr, addr, offset);
6663 tmp = load_reg(s, 14);
6664 gen_st32(tmp, addr, 0);
6665 tmp = load_cpu_field(spsr);
6666 tcg_gen_addi_i32(addr, addr, 4);
6667 gen_st32(tmp, addr, 0);
6668 if (insn & (1 << 21)) {
6669 /* Base writeback. */
6670 switch (i) {
6671 case 0: offset = -8; break;
6672 case 1: offset = 4; break;
6673 case 2: offset = -4; break;
6674 case 3: offset = 0; break;
6675 default: abort();
6677 if (offset)
6678 tcg_gen_addi_i32(addr, addr, offset);
6679 tmp = tcg_const_i32(op1);
6680 gen_helper_set_r13_banked(cpu_env, tmp, addr);
6681 tcg_temp_free_i32(tmp);
6682 tcg_temp_free_i32(addr);
6683 } else {
6684 tcg_temp_free_i32(addr);
6686 return;
6687 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
6688 /* rfe */
6689 int32_t offset;
6690 if (IS_USER(s))
6691 goto illegal_op;
6692 ARCH(6);
6693 rn = (insn >> 16) & 0xf;
6694 addr = load_reg(s, rn);
6695 i = (insn >> 23) & 3;
6696 switch (i) {
6697 case 0: offset = -4; break; /* DA */
6698 case 1: offset = 0; break; /* IA */
6699 case 2: offset = -8; break; /* DB */
6700 case 3: offset = 4; break; /* IB */
6701 default: abort();
6703 if (offset)
6704 tcg_gen_addi_i32(addr, addr, offset);
6705 /* Load PC into tmp and CPSR into tmp2. */
6706 tmp = gen_ld32(addr, 0);
6707 tcg_gen_addi_i32(addr, addr, 4);
6708 tmp2 = gen_ld32(addr, 0);
6709 if (insn & (1 << 21)) {
6710 /* Base writeback. */
6711 switch (i) {
6712 case 0: offset = -8; break;
6713 case 1: offset = 4; break;
6714 case 2: offset = -4; break;
6715 case 3: offset = 0; break;
6716 default: abort();
6718 if (offset)
6719 tcg_gen_addi_i32(addr, addr, offset);
6720 store_reg(s, rn, addr);
6721 } else {
6722 tcg_temp_free_i32(addr);
6724 gen_rfe(s, tmp, tmp2);
6725 return;
6726 } else if ((insn & 0x0e000000) == 0x0a000000) {
6727 /* branch link and change to thumb (blx <offset>) */
6728 int32_t offset;
6730 val = (uint32_t)s->pc;
6731 tmp = tcg_temp_new_i32();
6732 tcg_gen_movi_i32(tmp, val);
6733 store_reg(s, 14, tmp);
6734 /* Sign-extend the 24-bit offset */
6735 offset = (((int32_t)insn) << 8) >> 8;
6736 /* offset * 4 + bit24 * 2 + (thumb bit) */
6737 val += (offset << 2) | ((insn >> 23) & 2) | 1;
6738 /* pipeline offset */
6739 val += 4;
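/* val is now (address of this insn + 8) + offset * 4 + H * 2, with bit 0 set so gen_bx_im switches to Thumb state. */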
6740 /* protected by the ARCH(5) check above, near the start of the uncond block */
6741 gen_bx_im(s, val);
6742 return;
6743 } else if ((insn & 0x0e000f00) == 0x0c000100) {
6744 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6745 /* iWMMXt register transfer. */
6746 if (env->cp15.c15_cpar & (1 << 1))
6747 if (!disas_iwmmxt_insn(env, s, insn))
6748 return;
6750 } else if ((insn & 0x0fe00000) == 0x0c400000) {
6751 /* Coprocessor double register transfer. */
6752 ARCH(5TE);
6753 } else if ((insn & 0x0f000010) == 0x0e000010) {
6754 /* Additional coprocessor register transfer. */
6755 } else if ((insn & 0x0ff10020) == 0x01000000) {
6756 uint32_t mask;
6757 uint32_t val;
6758 /* cps (privileged) */
6759 if (IS_USER(s))
6760 return;
6761 mask = val = 0;
6762 if (insn & (1 << 19)) {
6763 if (insn & (1 << 8))
6764 mask |= CPSR_A;
6765 if (insn & (1 << 7))
6766 mask |= CPSR_I;
6767 if (insn & (1 << 6))
6768 mask |= CPSR_F;
6769 if (insn & (1 << 18))
6770 val |= mask;
6772 if (insn & (1 << 17)) {
6773 mask |= CPSR_M;
6774 val |= (insn & 0x1f);
6776 if (mask) {
6777 gen_set_psr_im(s, mask, 0, val);
6779 return;
6781 goto illegal_op;
6783 if (cond != 0xe) {
6784 /* If the condition is not "always", generate a conditional
6785 jump to the next instruction */
6786 s->condlabel = gen_new_label();
6787 gen_test_cc(cond ^ 1, s->condlabel);
6788 s->condjmp = 1;
6790 if ((insn & 0x0f900000) == 0x03000000) {
6791 if ((insn & (1 << 21)) == 0) {
6792 ARCH(6T2);
6793 rd = (insn >> 12) & 0xf;
6794 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
6795 if ((insn & (1 << 22)) == 0) {
6796 /* MOVW */
6797 tmp = tcg_temp_new_i32();
6798 tcg_gen_movi_i32(tmp, val);
6799 } else {
6800 /* MOVT */
6801 tmp = load_reg(s, rd);
6802 tcg_gen_ext16u_i32(tmp, tmp);
6803 tcg_gen_ori_i32(tmp, tmp, val << 16);
6805 store_reg(s, rd, tmp);
6806 } else {
6807 if (((insn >> 12) & 0xf) != 0xf)
6808 goto illegal_op;
6809 if (((insn >> 16) & 0xf) == 0) {
6810 gen_nop_hint(s, insn & 0xff);
6811 } else {
6812 /* CPSR = immediate */
6813 val = insn & 0xff;
6814 shift = ((insn >> 8) & 0xf) * 2;
6815 if (shift)
6816 val = (val >> shift) | (val << (32 - shift));
6817 i = ((insn & (1 << 22)) != 0);
6818 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
6819 goto illegal_op;
6822 } else if ((insn & 0x0f900000) == 0x01000000
6823 && (insn & 0x00000090) != 0x00000090) {
6824 /* miscellaneous instructions */
6825 op1 = (insn >> 21) & 3;
6826 sh = (insn >> 4) & 0xf;
6827 rm = insn & 0xf;
6828 switch (sh) {
6829 case 0x0: /* move program status register */
6830 if (op1 & 1) {
6831 /* PSR = reg */
6832 tmp = load_reg(s, rm);
6833 i = ((op1 & 2) != 0);
6834 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
6835 goto illegal_op;
6836 } else {
6837 /* reg = PSR */
6838 rd = (insn >> 12) & 0xf;
6839 if (op1 & 2) {
6840 if (IS_USER(s))
6841 goto illegal_op;
6842 tmp = load_cpu_field(spsr);
6843 } else {
6844 tmp = tcg_temp_new_i32();
6845 gen_helper_cpsr_read(tmp, cpu_env);
6847 store_reg(s, rd, tmp);
6849 break;
6850 case 0x1:
6851 if (op1 == 1) {
6852 /* branch/exchange thumb (bx). */
6853 ARCH(4T);
6854 tmp = load_reg(s, rm);
6855 gen_bx(s, tmp);
6856 } else if (op1 == 3) {
6857 /* clz */
6858 ARCH(5);
6859 rd = (insn >> 12) & 0xf;
6860 tmp = load_reg(s, rm);
6861 gen_helper_clz(tmp, tmp);
6862 store_reg(s, rd, tmp);
6863 } else {
6864 goto illegal_op;
6866 break;
6867 case 0x2:
6868 if (op1 == 1) {
6869 ARCH(5J); /* bxj */
6870 /* Trivial implementation equivalent to bx. */
6871 tmp = load_reg(s, rm);
6872 gen_bx(s, tmp);
6873 } else {
6874 goto illegal_op;
6876 break;
6877 case 0x3:
6878 if (op1 != 1)
6879 goto illegal_op;
6881 ARCH(5);
6882 /* branch link/exchange thumb (blx) */
6883 tmp = load_reg(s, rm);
6884 tmp2 = tcg_temp_new_i32();
6885 tcg_gen_movi_i32(tmp2, s->pc);
6886 store_reg(s, 14, tmp2);
6887 gen_bx(s, tmp);
6888 break;
6889 case 0x5: /* saturating add/subtract */
6890 ARCH(5TE);
6891 rd = (insn >> 12) & 0xf;
6892 rn = (insn >> 16) & 0xf;
6893 tmp = load_reg(s, rm);
6894 tmp2 = load_reg(s, rn);
6895 if (op1 & 2)
6896 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
6897 if (op1 & 1)
6898 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
6899 else
6900 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
6901 tcg_temp_free_i32(tmp2);
6902 store_reg(s, rd, tmp);
6903 break;
6904 case 7:
6905 /* SMC instruction (op1 == 3)
6906 and undefined instructions (op1 == 0 || op1 == 2)
6907 will trap */
6908 if (op1 != 1) {
6909 goto illegal_op;
6911 /* bkpt */
6912 ARCH(5);
6913 gen_exception_insn(s, 4, EXCP_BKPT);
6914 break;
6915 case 0x8: /* signed multiply */
6916 case 0xa:
6917 case 0xc:
6918 case 0xe:
6919 ARCH(5TE);
6920 rs = (insn >> 8) & 0xf;
6921 rn = (insn >> 12) & 0xf;
6922 rd = (insn >> 16) & 0xf;
6923 if (op1 == 1) {
6924 /* (32 * 16) >> 16 */
6925 tmp = load_reg(s, rm);
6926 tmp2 = load_reg(s, rs);
6927 if (sh & 4)
6928 tcg_gen_sari_i32(tmp2, tmp2, 16);
6929 else
6930 gen_sxth(tmp2);
6931 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6932 tcg_gen_shri_i64(tmp64, tmp64, 16);
6933 tmp = tcg_temp_new_i32();
6934 tcg_gen_trunc_i64_i32(tmp, tmp64);
6935 tcg_temp_free_i64(tmp64);
6936 if ((sh & 2) == 0) {
6937 tmp2 = load_reg(s, rn);
6938 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
6939 tcg_temp_free_i32(tmp2);
6941 store_reg(s, rd, tmp);
6942 } else {
6943 /* 16 * 16 */
6944 tmp = load_reg(s, rm);
6945 tmp2 = load_reg(s, rs);
6946 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
6947 tcg_temp_free_i32(tmp2);
6948 if (op1 == 2) {
6949 tmp64 = tcg_temp_new_i64();
6950 tcg_gen_ext_i32_i64(tmp64, tmp);
6951 tcg_temp_free_i32(tmp);
6952 gen_addq(s, tmp64, rn, rd);
6953 gen_storeq_reg(s, rn, rd, tmp64);
6954 tcg_temp_free_i64(tmp64);
6955 } else {
6956 if (op1 == 0) {
6957 tmp2 = load_reg(s, rn);
6958 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
6959 tcg_temp_free_i32(tmp2);
6961 store_reg(s, rd, tmp);
6964 break;
6965 default:
6966 goto illegal_op;
6968 } else if (((insn & 0x0e000000) == 0 &&
6969 (insn & 0x00000090) != 0x90) ||
6970 ((insn & 0x0e000000) == (1 << 25))) {
6971 int set_cc, logic_cc, shiftop;
6973 op1 = (insn >> 21) & 0xf;
6974 set_cc = (insn >> 20) & 1;
6975 logic_cc = table_logic_cc[op1] & set_cc;
6977 /* data processing instruction */
6978 if (insn & (1 << 25)) {
6979 /* immediate operand */
6980 val = insn & 0xff;
6981 shift = ((insn >> 8) & 0xf) * 2;
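/* ARM modified immediate: an 8-bit value rotated right by twice the 4-bit rotate field. */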
6982 if (shift) {
6983 val = (val >> shift) | (val << (32 - shift));
6985 tmp2 = tcg_temp_new_i32();
6986 tcg_gen_movi_i32(tmp2, val);
6987 if (logic_cc && shift) {
6988 gen_set_CF_bit31(tmp2);
6990 } else {
6991 /* register */
6992 rm = (insn) & 0xf;
6993 tmp2 = load_reg(s, rm);
6994 shiftop = (insn >> 5) & 3;
6995 if (!(insn & (1 << 4))) {
6996 shift = (insn >> 7) & 0x1f;
6997 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
6998 } else {
6999 rs = (insn >> 8) & 0xf;
7000 tmp = load_reg(s, rs);
7001 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
7004 if (op1 != 0x0f && op1 != 0x0d) {
7005 rn = (insn >> 16) & 0xf;
7006 tmp = load_reg(s, rn);
7007 } else {
7008 TCGV_UNUSED(tmp);
7010 rd = (insn >> 12) & 0xf;
7011 switch(op1) {
7012 case 0x00:
7013 tcg_gen_and_i32(tmp, tmp, tmp2);
7014 if (logic_cc) {
7015 gen_logic_CC(tmp);
7017 store_reg_bx(env, s, rd, tmp);
7018 break;
7019 case 0x01:
7020 tcg_gen_xor_i32(tmp, tmp, tmp2);
7021 if (logic_cc) {
7022 gen_logic_CC(tmp);
7024 store_reg_bx(env, s, rd, tmp);
7025 break;
7026 case 0x02:
7027 if (set_cc && rd == 15) {
7028 /* SUBS r15, ... is used for exception return. */
7029 if (IS_USER(s)) {
7030 goto illegal_op;
7032 gen_sub_CC(tmp, tmp, tmp2);
7033 gen_exception_return(s, tmp);
7034 } else {
7035 if (set_cc) {
7036 gen_sub_CC(tmp, tmp, tmp2);
7037 } else {
7038 tcg_gen_sub_i32(tmp, tmp, tmp2);
7040 store_reg_bx(env, s, rd, tmp);
7042 break;
7043 case 0x03:
7044 if (set_cc) {
7045 gen_sub_CC(tmp, tmp2, tmp);
7046 } else {
7047 tcg_gen_sub_i32(tmp, tmp2, tmp);
7049 store_reg_bx(env, s, rd, tmp);
7050 break;
7051 case 0x04:
7052 if (set_cc) {
7053 gen_add_CC(tmp, tmp, tmp2);
7054 } else {
7055 tcg_gen_add_i32(tmp, tmp, tmp2);
7057 store_reg_bx(env, s, rd, tmp);
7058 break;
7059 case 0x05:
7060 if (set_cc) {
7061 gen_helper_adc_cc(tmp, cpu_env, tmp, tmp2);
7062 } else {
7063 gen_add_carry(tmp, tmp, tmp2);
7065 store_reg_bx(env, s, rd, tmp);
7066 break;
7067 case 0x06:
7068 if (set_cc) {
7069 gen_helper_sbc_cc(tmp, cpu_env, tmp, tmp2);
7070 } else {
7071 gen_sub_carry(tmp, tmp, tmp2);
7073 store_reg_bx(env, s, rd, tmp);
7074 break;
7075 case 0x07:
7076 if (set_cc) {
7077 gen_helper_sbc_cc(tmp, cpu_env, tmp2, tmp);
7078 } else {
7079 gen_sub_carry(tmp, tmp2, tmp);
7081 store_reg_bx(env, s, rd, tmp);
7082 break;
7083 case 0x08:
7084 if (set_cc) {
7085 tcg_gen_and_i32(tmp, tmp, tmp2);
7086 gen_logic_CC(tmp);
7088 tcg_temp_free_i32(tmp);
7089 break;
7090 case 0x09:
7091 if (set_cc) {
7092 tcg_gen_xor_i32(tmp, tmp, tmp2);
7093 gen_logic_CC(tmp);
7095 tcg_temp_free_i32(tmp);
7096 break;
7097 case 0x0a:
7098 if (set_cc) {
7099 gen_sub_CC(tmp, tmp, tmp2);
7101 tcg_temp_free_i32(tmp);
7102 break;
7103 case 0x0b:
7104 if (set_cc) {
7105 gen_add_CC(tmp, tmp, tmp2);
7107 tcg_temp_free_i32(tmp);
7108 break;
7109 case 0x0c:
7110 tcg_gen_or_i32(tmp, tmp, tmp2);
7111 if (logic_cc) {
7112 gen_logic_CC(tmp);
7114 store_reg_bx(env, s, rd, tmp);
7115 break;
7116 case 0x0d:
7117 if (logic_cc && rd == 15) {
7118 /* MOVS r15, ... is used for exception return. */
7119 if (IS_USER(s)) {
7120 goto illegal_op;
7122 gen_exception_return(s, tmp2);
7123 } else {
7124 if (logic_cc) {
7125 gen_logic_CC(tmp2);
7127 store_reg_bx(env, s, rd, tmp2);
7129 break;
7130 case 0x0e:
7131 tcg_gen_andc_i32(tmp, tmp, tmp2);
7132 if (logic_cc) {
7133 gen_logic_CC(tmp);
7135 store_reg_bx(env, s, rd, tmp);
7136 break;
7137 default:
7138 case 0x0f:
7139 tcg_gen_not_i32(tmp2, tmp2);
7140 if (logic_cc) {
7141 gen_logic_CC(tmp2);
7143 store_reg_bx(env, s, rd, tmp2);
7144 break;
7146 if (op1 != 0x0f && op1 != 0x0d) {
7147 tcg_temp_free_i32(tmp2);
7149 } else {
7150 /* other instructions */
7151 op1 = (insn >> 24) & 0xf;
7152 switch(op1) {
7153 case 0x0:
7154 case 0x1:
7155 /* multiplies, extra load/stores */
7156 sh = (insn >> 5) & 3;
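/* sh == 0: multiplies, SWP and load/store exclusive; otherwise halfword/doubleword loads and stores. */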
7157 if (sh == 0) {
7158 if (op1 == 0x0) {
7159 rd = (insn >> 16) & 0xf;
7160 rn = (insn >> 12) & 0xf;
7161 rs = (insn >> 8) & 0xf;
7162 rm = (insn) & 0xf;
7163 op1 = (insn >> 20) & 0xf;
7164 switch (op1) {
7165 case 0: case 1: case 2: case 3: case 6:
7166 /* 32 bit mul */
7167 tmp = load_reg(s, rs);
7168 tmp2 = load_reg(s, rm);
7169 tcg_gen_mul_i32(tmp, tmp, tmp2);
7170 tcg_temp_free_i32(tmp2);
7171 if (insn & (1 << 22)) {
7172 /* Subtract (mls) */
7173 ARCH(6T2);
7174 tmp2 = load_reg(s, rn);
7175 tcg_gen_sub_i32(tmp, tmp2, tmp);
7176 tcg_temp_free_i32(tmp2);
7177 } else if (insn & (1 << 21)) {
7178 /* Add */
7179 tmp2 = load_reg(s, rn);
7180 tcg_gen_add_i32(tmp, tmp, tmp2);
7181 tcg_temp_free_i32(tmp2);
7183 if (insn & (1 << 20))
7184 gen_logic_CC(tmp);
7185 store_reg(s, rd, tmp);
7186 break;
7187 case 4:
7188 /* 64 bit mul double accumulate (UMAAL) */
7189 ARCH(6);
7190 tmp = load_reg(s, rs);
7191 tmp2 = load_reg(s, rm);
7192 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7193 gen_addq_lo(s, tmp64, rn);
7194 gen_addq_lo(s, tmp64, rd);
7195 gen_storeq_reg(s, rn, rd, tmp64);
7196 tcg_temp_free_i64(tmp64);
7197 break;
7198 case 8: case 9: case 10: case 11:
7199 case 12: case 13: case 14: case 15:
7200 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
7201 tmp = load_reg(s, rs);
7202 tmp2 = load_reg(s, rm);
7203 if (insn & (1 << 22)) {
7204 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7205 } else {
7206 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7208 if (insn & (1 << 21)) { /* mult accumulate */
7209 gen_addq(s, tmp64, rn, rd);
7211 if (insn & (1 << 20)) {
7212 gen_logicq_cc(tmp64);
7214 gen_storeq_reg(s, rn, rd, tmp64);
7215 tcg_temp_free_i64(tmp64);
7216 break;
7217 default:
7218 goto illegal_op;
7220 } else {
7221 rn = (insn >> 16) & 0xf;
7222 rd = (insn >> 12) & 0xf;
7223 if (insn & (1 << 23)) {
7224 /* load/store exclusive */
7225 op1 = (insn >> 21) & 0x3;
7226 if (op1)
7227 ARCH(6K);
7228 else
7229 ARCH(6);
7230 addr = tcg_temp_local_new_i32();
7231 load_reg_var(s, addr, rn);
7232 if (insn & (1 << 20)) {
7233 switch (op1) {
7234 case 0: /* ldrex */
7235 gen_load_exclusive(s, rd, 15, addr, 2);
7236 break;
7237 case 1: /* ldrexd */
7238 gen_load_exclusive(s, rd, rd + 1, addr, 3);
7239 break;
7240 case 2: /* ldrexb */
7241 gen_load_exclusive(s, rd, 15, addr, 0);
7242 break;
7243 case 3: /* ldrexh */
7244 gen_load_exclusive(s, rd, 15, addr, 1);
7245 break;
7246 default:
7247 abort();
7249 } else {
7250 rm = insn & 0xf;
7251 switch (op1) {
7252 case 0: /* strex */
7253 gen_store_exclusive(s, rd, rm, 15, addr, 2);
7254 break;
7255 case 1: /* strexd */
7256 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
7257 break;
7258 case 2: /* strexb */
7259 gen_store_exclusive(s, rd, rm, 15, addr, 0);
7260 break;
7261 case 3: /* strexh */
7262 gen_store_exclusive(s, rd, rm, 15, addr, 1);
7263 break;
7264 default:
7265 abort();
7268 tcg_temp_free(addr);
7269 } else {
7270 /* SWP instruction */
7271 rm = (insn) & 0xf;
7273 /* ??? This is not really atomic. However we know
7274 we never have multiple CPUs running in parallel,
7275 so it is good enough. */
7276 addr = load_reg(s, rn);
7277 tmp = load_reg(s, rm);
7278 if (insn & (1 << 22)) {
7279 tmp2 = gen_ld8u(addr, IS_USER(s));
7280 gen_st8(tmp, addr, IS_USER(s));
7281 } else {
7282 tmp2 = gen_ld32(addr, IS_USER(s));
7283 gen_st32(tmp, addr, IS_USER(s));
7285 tcg_temp_free_i32(addr);
7286 store_reg(s, rd, tmp2);
7289 } else {
7290 int address_offset;
7291 int load;
7292 /* Misc load/store */
7293 rn = (insn >> 16) & 0xf;
7294 rd = (insn >> 12) & 0xf;
7295 addr = load_reg(s, rn);
7296 if (insn & (1 << 24))
7297 gen_add_datah_offset(s, insn, 0, addr);
7298 address_offset = 0;
7299 if (insn & (1 << 20)) {
7300 /* load */
7301 switch(sh) {
7302 case 1:
7303 tmp = gen_ld16u(addr, IS_USER(s));
7304 break;
7305 case 2:
7306 tmp = gen_ld8s(addr, IS_USER(s));
7307 break;
7308 default:
7309 case 3:
7310 tmp = gen_ld16s(addr, IS_USER(s));
7311 break;
7313 load = 1;
7314 } else if (sh & 2) {
7315 ARCH(5TE);
7316 /* doubleword */
7317 if (sh & 1) {
7318 /* store */
7319 tmp = load_reg(s, rd);
7320 gen_st32(tmp, addr, IS_USER(s));
7321 tcg_gen_addi_i32(addr, addr, 4);
7322 tmp = load_reg(s, rd + 1);
7323 gen_st32(tmp, addr, IS_USER(s));
7324 load = 0;
7325 } else {
7326 /* load */
7327 tmp = gen_ld32(addr, IS_USER(s));
7328 store_reg(s, rd, tmp);
7329 tcg_gen_addi_i32(addr, addr, 4);
7330 tmp = gen_ld32(addr, IS_USER(s));
7331 rd++;
7332 load = 1;
7334 address_offset = -4;
7335 } else {
7336 /* store */
7337 tmp = load_reg(s, rd);
7338 gen_st16(tmp, addr, IS_USER(s));
7339 load = 0;
7341 /* Perform base writeback before the loaded value to
7342 ensure correct behavior with overlapping index registers.
7343 ldrd with base writeback is undefined if the
7344 destination and index registers overlap. */
7345 if (!(insn & (1 << 24))) {
7346 gen_add_datah_offset(s, insn, address_offset, addr);
7347 store_reg(s, rn, addr);
7348 } else if (insn & (1 << 21)) {
7349 if (address_offset)
7350 tcg_gen_addi_i32(addr, addr, address_offset);
7351 store_reg(s, rn, addr);
7352 } else {
7353 tcg_temp_free_i32(addr);
7355 if (load) {
7356 /* Complete the load. */
7357 store_reg(s, rd, tmp);
7360 break;
7361 case 0x4:
7362 case 0x5:
7363 goto do_ldst;
7364 case 0x6:
7365 case 0x7:
7366 if (insn & (1 << 4)) {
7367 ARCH(6);
7368 /* ARMv6 media instructions. */
7369 rm = insn & 0xf;
7370 rn = (insn >> 16) & 0xf;
7371 rd = (insn >> 12) & 0xf;
7372 rs = (insn >> 8) & 0xf;
7373 switch ((insn >> 23) & 3) {
7374 case 0: /* Parallel add/subtract. */
7375 op1 = (insn >> 20) & 7;
7376 tmp = load_reg(s, rn);
7377 tmp2 = load_reg(s, rm);
7378 sh = (insn >> 5) & 7;
7379 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
7380 goto illegal_op;
7381 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7382 tcg_temp_free_i32(tmp2);
7383 store_reg(s, rd, tmp);
7384 break;
7385 case 1:
7386 if ((insn & 0x00700020) == 0) {
7387 /* Halfword pack. */
7388 tmp = load_reg(s, rn);
7389 tmp2 = load_reg(s, rm);
7390 shift = (insn >> 7) & 0x1f;
7391 if (insn & (1 << 6)) {
7392 /* pkhtb */
7393 if (shift == 0)
7394 shift = 31;
7395 tcg_gen_sari_i32(tmp2, tmp2, shift);
7396 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
7397 tcg_gen_ext16u_i32(tmp2, tmp2);
7398 } else {
7399 /* pkhbt */
7400 if (shift)
7401 tcg_gen_shli_i32(tmp2, tmp2, shift);
7402 tcg_gen_ext16u_i32(tmp, tmp);
7403 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
7405 tcg_gen_or_i32(tmp, tmp, tmp2);
7406 tcg_temp_free_i32(tmp2);
7407 store_reg(s, rd, tmp);
7408 } else if ((insn & 0x00200020) == 0x00200000) {
7409 /* [us]sat */
7410 tmp = load_reg(s, rm);
7411 shift = (insn >> 7) & 0x1f;
7412 if (insn & (1 << 6)) {
7413 if (shift == 0)
7414 shift = 31;
7415 tcg_gen_sari_i32(tmp, tmp, shift);
7416 } else {
7417 tcg_gen_shli_i32(tmp, tmp, shift);
7419 sh = (insn >> 16) & 0x1f;
7420 tmp2 = tcg_const_i32(sh);
7421 if (insn & (1 << 22))
7422 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
7423 else
7424 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
7425 tcg_temp_free_i32(tmp2);
7426 store_reg(s, rd, tmp);
7427 } else if ((insn & 0x00300fe0) == 0x00200f20) {
7428 /* [us]sat16 */
7429 tmp = load_reg(s, rm);
7430 sh = (insn >> 16) & 0x1f;
7431 tmp2 = tcg_const_i32(sh);
7432 if (insn & (1 << 22))
7433 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
7434 else
7435 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
7436 tcg_temp_free_i32(tmp2);
7437 store_reg(s, rd, tmp);
7438 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
7439 /* Select bytes. */
7440 tmp = load_reg(s, rn);
7441 tmp2 = load_reg(s, rm);
7442 tmp3 = tcg_temp_new_i32();
7443 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
7444 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7445 tcg_temp_free_i32(tmp3);
7446 tcg_temp_free_i32(tmp2);
7447 store_reg(s, rd, tmp);
7448 } else if ((insn & 0x000003e0) == 0x00000060) {
7449 tmp = load_reg(s, rm);
7450 shift = (insn >> 10) & 3;
7451 /* ??? In many cases it's not necessary to do a
7452 rotate; a shift is sufficient. */
7453 if (shift != 0)
7454 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
7455 op1 = (insn >> 20) & 7;
7456 switch (op1) {
7457 case 0: gen_sxtb16(tmp); break;
7458 case 2: gen_sxtb(tmp); break;
7459 case 3: gen_sxth(tmp); break;
7460 case 4: gen_uxtb16(tmp); break;
7461 case 6: gen_uxtb(tmp); break;
7462 case 7: gen_uxth(tmp); break;
7463 default: goto illegal_op;
7465 if (rn != 15) {
7466 tmp2 = load_reg(s, rn);
7467 if ((op1 & 3) == 0) {
7468 gen_add16(tmp, tmp2);
7469 } else {
7470 tcg_gen_add_i32(tmp, tmp, tmp2);
7471 tcg_temp_free_i32(tmp2);
7474 store_reg(s, rd, tmp);
7475 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
7476 /* rev */
7477 tmp = load_reg(s, rm);
7478 if (insn & (1 << 22)) {
7479 if (insn & (1 << 7)) {
7480 gen_revsh(tmp);
7481 } else {
7482 ARCH(6T2);
7483 gen_helper_rbit(tmp, tmp);
7485 } else {
7486 if (insn & (1 << 7))
7487 gen_rev16(tmp);
7488 else
7489 tcg_gen_bswap32_i32(tmp, tmp);
7491 store_reg(s, rd, tmp);
7492 } else {
7493 goto illegal_op;
7495 break;
7496 case 2: /* Multiplies (Type 3). */
7497 switch ((insn >> 20) & 0x7) {
7498 case 5:
7499 if (((insn >> 6) ^ (insn >> 7)) & 1) {
7500 /* op2 not 00x or 11x (i.e. insn bits [7:6] differ): UNDEF */
7501 goto illegal_op;
7503 /* Signed multiply most significant [accumulate].
7504 (SMMUL, SMMLA, SMMLS) */
7505 tmp = load_reg(s, rm);
7506 tmp2 = load_reg(s, rs);
7507 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7509 if (rd != 15) {
7510 tmp = load_reg(s, rd);
7511 if (insn & (1 << 6)) {
7512 tmp64 = gen_subq_msw(tmp64, tmp);
7513 } else {
7514 tmp64 = gen_addq_msw(tmp64, tmp);
7517 if (insn & (1 << 5)) {
7518 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
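/* R bit (SMMULR/SMMLAR/SMMLSR): bias by 0x80000000 so the following shift right by 32 rounds instead of truncating. */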
7520 tcg_gen_shri_i64(tmp64, tmp64, 32);
7521 tmp = tcg_temp_new_i32();
7522 tcg_gen_trunc_i64_i32(tmp, tmp64);
7523 tcg_temp_free_i64(tmp64);
7524 store_reg(s, rn, tmp);
7525 break;
7526 case 0:
7527 case 4:
7528 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
7529 if (insn & (1 << 7)) {
7530 goto illegal_op;
7532 tmp = load_reg(s, rm);
7533 tmp2 = load_reg(s, rs);
7534 if (insn & (1 << 5))
7535 gen_swap_half(tmp2);
7536 gen_smul_dual(tmp, tmp2);
7537 if (insn & (1 << 6)) {
7538 /* This subtraction cannot overflow. */
7539 tcg_gen_sub_i32(tmp, tmp, tmp2);
7540 } else {
7541 /* This addition cannot overflow 32 bits;
7542 * however it may overflow when considered as a signed
7543 * operation, in which case we must set the Q flag. */
7545 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7547 tcg_temp_free_i32(tmp2);
7548 if (insn & (1 << 22)) {
7549 /* smlald, smlsld */
7550 tmp64 = tcg_temp_new_i64();
7551 tcg_gen_ext_i32_i64(tmp64, tmp);
7552 tcg_temp_free_i32(tmp);
7553 gen_addq(s, tmp64, rd, rn);
7554 gen_storeq_reg(s, rd, rn, tmp64);
7555 tcg_temp_free_i64(tmp64);
7556 } else {
7557 /* smuad, smusd, smlad, smlsd */
7558 if (rd != 15)
7560 tmp2 = load_reg(s, rd);
7561 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7562 tcg_temp_free_i32(tmp2);
7564 store_reg(s, rn, tmp);
7566 break;
7567 case 1:
7568 case 3:
7569 /* SDIV, UDIV */
7570 if (!arm_feature(env, ARM_FEATURE_ARM_DIV)) {
7571 goto illegal_op;
7573 if (((insn >> 5) & 7) || (rd != 15)) {
7574 goto illegal_op;
7576 tmp = load_reg(s, rm);
7577 tmp2 = load_reg(s, rs);
7578 if (insn & (1 << 21)) {
7579 gen_helper_udiv(tmp, tmp, tmp2);
7580 } else {
7581 gen_helper_sdiv(tmp, tmp, tmp2);
7583 tcg_temp_free_i32(tmp2);
7584 store_reg(s, rn, tmp);
7585 break;
7586 default:
7587 goto illegal_op;
7589 break;
7590 case 3:
7591 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
7592 switch (op1) {
7593 case 0: /* Unsigned sum of absolute differences. */
7594 ARCH(6);
7595 tmp = load_reg(s, rm);
7596 tmp2 = load_reg(s, rs);
7597 gen_helper_usad8(tmp, tmp, tmp2);
7598 tcg_temp_free_i32(tmp2);
7599 if (rd != 15) {
7600 tmp2 = load_reg(s, rd);
7601 tcg_gen_add_i32(tmp, tmp, tmp2);
7602 tcg_temp_free_i32(tmp2);
7604 store_reg(s, rn, tmp);
7605 break;
7606 case 0x20: case 0x24: case 0x28: case 0x2c:
7607 /* Bitfield insert/clear. */
7608 ARCH(6T2);
7609 shift = (insn >> 7) & 0x1f;
7610 i = (insn >> 16) & 0x1f;
7611 i = i + 1 - shift;
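/* BFI/BFC: shift is the lsb, (insn >> 16) & 0x1f the msb, so i is the field width. */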
7612 if (rm == 15) {
7613 tmp = tcg_temp_new_i32();
7614 tcg_gen_movi_i32(tmp, 0);
7615 } else {
7616 tmp = load_reg(s, rm);
7618 if (i != 32) {
7619 tmp2 = load_reg(s, rd);
7620 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
7621 tcg_temp_free_i32(tmp2);
7623 store_reg(s, rd, tmp);
7624 break;
7625 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7626 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
7627 ARCH(6T2);
7628 tmp = load_reg(s, rm);
7629 shift = (insn >> 7) & 0x1f;
7630 i = ((insn >> 16) & 0x1f) + 1;
7631 if (shift + i > 32)
7632 goto illegal_op;
7633 if (i < 32) {
7634 if (op1 & 0x20) {
7635 gen_ubfx(tmp, shift, (1u << i) - 1);
7636 } else {
7637 gen_sbfx(tmp, shift, i);
7640 store_reg(s, rd, tmp);
7641 break;
7642 default:
7643 goto illegal_op;
7645 break;
7647 break;
7649 do_ldst:
7650 /* Check for undefined extension instructions
7651 * per the ARM Bible, i.e.:
7652 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx */
7654 sh = (0xf << 20) | (0xf << 4);
7655 if (op1 == 0x7 && ((insn & sh) == sh))
7657 goto illegal_op;
7659 /* load/store byte/word */
7660 rn = (insn >> 16) & 0xf;
7661 rd = (insn >> 12) & 0xf;
7662 tmp2 = load_reg(s, rn);
7663 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
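/* Force a user-mode access for LDRT/STRT/LDRBT/STRBT, encoded as post-indexed (bit 24 clear) with the W bit set. */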
7664 if (insn & (1 << 24))
7665 gen_add_data_offset(s, insn, tmp2);
7666 if (insn & (1 << 20)) {
7667 /* load */
7668 if (insn & (1 << 22)) {
7669 tmp = gen_ld8u(tmp2, i);
7670 } else {
7671 tmp = gen_ld32(tmp2, i);
7673 } else {
7674 /* store */
7675 tmp = load_reg(s, rd);
7676 if (insn & (1 << 22))
7677 gen_st8(tmp, tmp2, i);
7678 else
7679 gen_st32(tmp, tmp2, i);
7681 if (!(insn & (1 << 24))) {
7682 gen_add_data_offset(s, insn, tmp2);
7683 store_reg(s, rn, tmp2);
7684 } else if (insn & (1 << 21)) {
7685 store_reg(s, rn, tmp2);
7686 } else {
7687 tcg_temp_free_i32(tmp2);
7689 if (insn & (1 << 20)) {
7690 /* Complete the load. */
7691 store_reg_from_load(env, s, rd, tmp);
7693 break;
7694 case 0x08:
7695 case 0x09:
7697 int j, n, user, loaded_base;
7698 TCGv loaded_var;
7699 /* load/store multiple words */
7700 /* XXX: store correct base if write back */
7701 user = 0;
7702 if (insn & (1 << 22)) {
7703 if (IS_USER(s))
7704 goto illegal_op; /* only usable in supervisor mode */
7706 if ((insn & (1 << 15)) == 0)
7707 user = 1;
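/* S bit without PC in the list: transfer the user-mode banked registers. With PC in the list, CPSR is restored from SPSR after the loads (see below). */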
7709 rn = (insn >> 16) & 0xf;
7710 addr = load_reg(s, rn);
7712 /* compute total size */
7713 loaded_base = 0;
7714 TCGV_UNUSED(loaded_var);
7715 n = 0;
7716 for(i=0;i<16;i++) {
7717 if (insn & (1 << i))
7718 n++;
7720 /* XXX: test invalid n == 0 case ? */
7721 if (insn & (1 << 23)) {
7722 if (insn & (1 << 24)) {
7723 /* pre increment */
7724 tcg_gen_addi_i32(addr, addr, 4);
7725 } else {
7726 /* post increment */
7728 } else {
7729 if (insn & (1 << 24)) {
7730 /* pre decrement */
7731 tcg_gen_addi_i32(addr, addr, -(n * 4));
7732 } else {
7733 /* post decrement */
7734 if (n != 1)
7735 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
7738 j = 0;
7739 for(i=0;i<16;i++) {
7740 if (insn & (1 << i)) {
7741 if (insn & (1 << 20)) {
7742 /* load */
7743 tmp = gen_ld32(addr, IS_USER(s));
7744 if (user) {
7745 tmp2 = tcg_const_i32(i);
7746 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
7747 tcg_temp_free_i32(tmp2);
7748 tcg_temp_free_i32(tmp);
7749 } else if (i == rn) {
7750 loaded_var = tmp;
7751 loaded_base = 1;
7752 } else {
7753 store_reg_from_load(env, s, i, tmp);
7755 } else {
7756 /* store */
7757 if (i == 15) {
7758 /* special case: r15 = PC + 8 */
7759 val = (long)s->pc + 4;
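/* s->pc is already 4 bytes past this instruction, so +4 yields the architectural PC + 8. */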
7760 tmp = tcg_temp_new_i32();
7761 tcg_gen_movi_i32(tmp, val);
7762 } else if (user) {
7763 tmp = tcg_temp_new_i32();
7764 tmp2 = tcg_const_i32(i);
7765 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
7766 tcg_temp_free_i32(tmp2);
7767 } else {
7768 tmp = load_reg(s, i);
7770 gen_st32(tmp, addr, IS_USER(s));
7772 j++;
7773 /* no need to add after the last transfer */
7774 if (j != n)
7775 tcg_gen_addi_i32(addr, addr, 4);
7778 if (insn & (1 << 21)) {
7779 /* write back */
7780 if (insn & (1 << 23)) {
7781 if (insn & (1 << 24)) {
7782 /* pre increment */
7783 } else {
7784 /* post increment */
7785 tcg_gen_addi_i32(addr, addr, 4);
7787 } else {
7788 if (insn & (1 << 24)) {
7789 /* pre decrement */
7790 if (n != 1)
7791 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
7792 } else {
7793 /* post decrement */
7794 tcg_gen_addi_i32(addr, addr, -(n * 4));
7797 store_reg(s, rn, addr);
7798 } else {
7799 tcg_temp_free_i32(addr);
7801 if (loaded_base) {
7802 store_reg(s, rn, loaded_var);
7804 if ((insn & (1 << 22)) && !user) {
7805 /* Restore CPSR from SPSR. */
7806 tmp = load_cpu_field(spsr);
7807 gen_set_cpsr(tmp, 0xffffffff);
7808 tcg_temp_free_i32(tmp);
7809 s->is_jmp = DISAS_UPDATE;
7812 break;
7813 case 0xa:
7814 case 0xb:
7816 int32_t offset;
7818 /* branch (and link) */
7819 val = (int32_t)s->pc;
7820 if (insn & (1 << 24)) {
7821 tmp = tcg_temp_new_i32();
7822 tcg_gen_movi_i32(tmp, val);
7823 store_reg(s, 14, tmp);
7825 offset = (((int32_t)insn << 8) >> 8);
7826 val += (offset << 2) + 4;
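/* Target = (address of this insn + 8) + sign_extend(imm24) * 4; s->pc already holds insn + 4. */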
7827 gen_jmp(s, val);
7829 break;
7830 case 0xc:
7831 case 0xd:
7832 case 0xe:
7833 /* Coprocessor. */
7834 if (disas_coproc_insn(env, s, insn))
7835 goto illegal_op;
7836 break;
7837 case 0xf:
7838 /* swi */
7839 gen_set_pc_im(s->pc);
7840 s->is_jmp = DISAS_SWI;
7841 break;
7842 default:
7843 illegal_op:
7844 gen_exception_insn(s, 4, EXCP_UDEF);
7845 break;
7850 /* Return true if this is a Thumb-2 logical op. */
7851 static int
7852 thumb2_logic_op(int op)
7854 return (op < 8);
7857 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7858 then set condition code flags based on the result of the operation.
7859 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7860 to the high bit of T1.
7861 Returns zero if the opcode is valid. */
7863 static int
7864 gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
7866 int logic_cc;
7868 logic_cc = 0;
7869 switch (op) {
7870 case 0: /* and */
7871 tcg_gen_and_i32(t0, t0, t1);
7872 logic_cc = conds;
7873 break;
7874 case 1: /* bic */
7875 tcg_gen_andc_i32(t0, t0, t1);
7876 logic_cc = conds;
7877 break;
7878 case 2: /* orr */
7879 tcg_gen_or_i32(t0, t0, t1);
7880 logic_cc = conds;
7881 break;
7882 case 3: /* orn */
7883 tcg_gen_orc_i32(t0, t0, t1);
7884 logic_cc = conds;
7885 break;
7886 case 4: /* eor */
7887 tcg_gen_xor_i32(t0, t0, t1);
7888 logic_cc = conds;
7889 break;
7890 case 8: /* add */
7891 if (conds)
7892 gen_add_CC(t0, t0, t1);
7893 else
7894 tcg_gen_add_i32(t0, t0, t1);
7895 break;
7896 case 10: /* adc */
7897 if (conds)
7898 gen_helper_adc_cc(t0, cpu_env, t0, t1);
7899 else
7900 gen_adc(t0, t1);
7901 break;
7902 case 11: /* sbc */
7903 if (conds)
7904 gen_helper_sbc_cc(t0, cpu_env, t0, t1);
7905 else
7906 gen_sub_carry(t0, t0, t1);
7907 break;
7908 case 13: /* sub */
7909 if (conds)
7910 gen_sub_CC(t0, t0, t1);
7911 else
7912 tcg_gen_sub_i32(t0, t0, t1);
7913 break;
7914 case 14: /* rsb */
7915 if (conds)
7916 gen_sub_CC(t0, t1, t0);
7917 else
7918 tcg_gen_sub_i32(t0, t1, t0);
7919 break;
7920 default: /* 5, 6, 7, 9, 12, 15. */
7921 return 1;
7923 if (logic_cc) {
7924 gen_logic_CC(t0);
7925 if (shifter_out)
7926 gen_set_CF_bit31(t1);
7928 return 0;
7931 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7932 is not legal. */
7933 static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
7935 uint32_t insn, imm, shift, offset;
7936 uint32_t rd, rn, rm, rs;
7937 TCGv tmp;
7938 TCGv tmp2;
7939 TCGv tmp3;
7940 TCGv addr;
7941 TCGv_i64 tmp64;
7942 int op;
7943 int shiftop;
7944 int conds;
7945 int logic_cc;
7947 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
7948 || arm_feature (env, ARM_FEATURE_M))) {
7949 /* Thumb-1 cores may need to treat bl and blx as a pair of
7950 16-bit instructions to get correct prefetch abort behavior. */
7951 insn = insn_hw1;
7952 if ((insn & (1 << 12)) == 0) {
7953 ARCH(5);
7954 /* Second half of blx. */
7955 offset = ((insn & 0x7ff) << 1);
7956 tmp = load_reg(s, 14);
7957 tcg_gen_addi_i32(tmp, tmp, offset);
7958 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
7960 tmp2 = tcg_temp_new_i32();
7961 tcg_gen_movi_i32(tmp2, s->pc | 1);
7962 store_reg(s, 14, tmp2);
7963 gen_bx(s, tmp);
7964 return 0;
7966 if (insn & (1 << 11)) {
7967 /* Second half of bl. */
7968 offset = ((insn & 0x7ff) << 1) | 1;
7969 tmp = load_reg(s, 14);
7970 tcg_gen_addi_i32(tmp, tmp, offset);
7972 tmp2 = tcg_temp_new_i32();
7973 tcg_gen_movi_i32(tmp2, s->pc | 1);
7974 store_reg(s, 14, tmp2);
7975 gen_bx(s, tmp);
7976 return 0;
7978 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
7979 /* Instruction spans a page boundary. Implement it as two
7980 16-bit instructions in case the second half causes a
7981 prefetch abort. */
7982 offset = ((int32_t)insn << 21) >> 9;
7983 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
7984 return 0;
7986 /* Fall through to 32-bit decode. */
7989 insn = arm_lduw_code(env, s->pc, s->bswap_code);
7990 s->pc += 2;
7991 insn |= (uint32_t)insn_hw1 << 16;
7993 if ((insn & 0xf800e800) != 0xf000e800) {
7994 ARCH(6T2);
7997 rn = (insn >> 16) & 0xf;
7998 rs = (insn >> 12) & 0xf;
7999 rd = (insn >> 8) & 0xf;
8000 rm = insn & 0xf;
8001 switch ((insn >> 25) & 0xf) {
8002 case 0: case 1: case 2: case 3:
8003 /* 16-bit instructions. Should never happen. */
8004 abort();
8005 case 4:
8006 if (insn & (1 << 22)) {
8007 /* Other load/store, table branch. */
8008 if (insn & 0x01200000) {
8009 /* Load/store doubleword. */
8010 if (rn == 15) {
8011 addr = tcg_temp_new_i32();
8012 tcg_gen_movi_i32(addr, s->pc & ~3);
8013 } else {
8014 addr = load_reg(s, rn);
8016 offset = (insn & 0xff) * 4;
8017 if ((insn & (1 << 23)) == 0)
8018 offset = -offset;
8019 if (insn & (1 << 24)) {
8020 tcg_gen_addi_i32(addr, addr, offset);
8021 offset = 0;
8023 if (insn & (1 << 20)) {
8024 /* ldrd */
8025 tmp = gen_ld32(addr, IS_USER(s));
8026 store_reg(s, rs, tmp);
8027 tcg_gen_addi_i32(addr, addr, 4);
8028 tmp = gen_ld32(addr, IS_USER(s));
8029 store_reg(s, rd, tmp);
8030 } else {
8031 /* strd */
8032 tmp = load_reg(s, rs);
8033 gen_st32(tmp, addr, IS_USER(s));
8034 tcg_gen_addi_i32(addr, addr, 4);
8035 tmp = load_reg(s, rd);
8036 gen_st32(tmp, addr, IS_USER(s));
8038 if (insn & (1 << 21)) {
8039 /* Base writeback. */
8040 if (rn == 15)
8041 goto illegal_op;
8042 tcg_gen_addi_i32(addr, addr, offset - 4);
8043 store_reg(s, rn, addr);
8044 } else {
8045 tcg_temp_free_i32(addr);
8047 } else if ((insn & (1 << 23)) == 0) {
8048 /* Load/store exclusive word. */
8049 addr = tcg_temp_local_new();
8050 load_reg_var(s, addr, rn);
8051 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
8052 if (insn & (1 << 20)) {
8053 gen_load_exclusive(s, rs, 15, addr, 2);
8054 } else {
8055 gen_store_exclusive(s, rd, rs, 15, addr, 2);
8057 tcg_temp_free(addr);
8058 } else if ((insn & (1 << 6)) == 0) {
8059 /* Table Branch. */
8060 if (rn == 15) {
8061 addr = tcg_temp_new_i32();
8062 tcg_gen_movi_i32(addr, s->pc);
8063 } else {
8064 addr = load_reg(s, rn);
8066 tmp = load_reg(s, rm);
8067 tcg_gen_add_i32(addr, addr, tmp);
8068 if (insn & (1 << 4)) {
8069 /* tbh */
8070 tcg_gen_add_i32(addr, addr, tmp);
8071 tcg_temp_free_i32(tmp);
8072 tmp = gen_ld16u(addr, IS_USER(s));
8073 } else { /* tbb */
8074 tcg_temp_free_i32(tmp);
8075 tmp = gen_ld8u(addr, IS_USER(s));
8077 tcg_temp_free_i32(addr);
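/* Table entries hold half the branch offset: double the loaded value and add the Thumb PC (s->pc is already insn + 4). */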
8078 tcg_gen_shli_i32(tmp, tmp, 1);
8079 tcg_gen_addi_i32(tmp, tmp, s->pc);
8080 store_reg(s, 15, tmp);
8081 } else {
8082 /* Load/store exclusive byte/halfword/doubleword. */
8083 ARCH(7);
8084 op = (insn >> 4) & 0x3;
8085 if (op == 2) {
8086 goto illegal_op;
8088 addr = tcg_temp_local_new();
8089 load_reg_var(s, addr, rn);
8090 if (insn & (1 << 20)) {
8091 gen_load_exclusive(s, rs, rd, addr, op);
8092 } else {
8093 gen_store_exclusive(s, rm, rs, rd, addr, op);
8095 tcg_temp_free(addr);
8097 } else {
8098 /* Load/store multiple, RFE, SRS. */
8099 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
8100 /* Not available in user mode. */
8101 if (IS_USER(s))
8102 goto illegal_op;
8103 if (insn & (1 << 20)) {
8104 /* rfe */
8105 addr = load_reg(s, rn);
8106 if ((insn & (1 << 24)) == 0)
8107 tcg_gen_addi_i32(addr, addr, -8);
8108 /* Load PC into tmp and CPSR into tmp2. */
8109 tmp = gen_ld32(addr, 0);
8110 tcg_gen_addi_i32(addr, addr, 4);
8111 tmp2 = gen_ld32(addr, 0);
8112 if (insn & (1 << 21)) {
8113 /* Base writeback. */
8114 if (insn & (1 << 24)) {
8115 tcg_gen_addi_i32(addr, addr, 4);
8116 } else {
8117 tcg_gen_addi_i32(addr, addr, -4);
8119 store_reg(s, rn, addr);
8120 } else {
8121 tcg_temp_free_i32(addr);
8123 gen_rfe(s, tmp, tmp2);
8124 } else {
8125 /* srs */
8126 op = (insn & 0x1f);
8127 addr = tcg_temp_new_i32();
8128 tmp = tcg_const_i32(op);
8129 gen_helper_get_r13_banked(addr, cpu_env, tmp);
8130 tcg_temp_free_i32(tmp);
8131 if ((insn & (1 << 24)) == 0) {
8132 tcg_gen_addi_i32(addr, addr, -8);
8134 tmp = load_reg(s, 14);
8135 gen_st32(tmp, addr, 0);
8136 tcg_gen_addi_i32(addr, addr, 4);
8137 tmp = tcg_temp_new_i32();
8138 gen_helper_cpsr_read(tmp, cpu_env);
8139 gen_st32(tmp, addr, 0);
8140 if (insn & (1 << 21)) {
8141 if ((insn & (1 << 24)) == 0) {
8142 tcg_gen_addi_i32(addr, addr, -4);
8143 } else {
8144 tcg_gen_addi_i32(addr, addr, 4);
8146 tmp = tcg_const_i32(op);
8147 gen_helper_set_r13_banked(cpu_env, tmp, addr);
8148 tcg_temp_free_i32(tmp);
8149 } else {
8150 tcg_temp_free_i32(addr);
8153 } else {
8154 int i, loaded_base = 0;
8155 TCGv loaded_var;
8156 /* Load/store multiple. */
8157 addr = load_reg(s, rn);
8158 offset = 0;
8159 for (i = 0; i < 16; i++) {
8160 if (insn & (1 << i))
8161 offset += 4;
8163 if (insn & (1 << 24)) {
8164 tcg_gen_addi_i32(addr, addr, -offset);
8167 TCGV_UNUSED(loaded_var);
8168 for (i = 0; i < 16; i++) {
8169 if ((insn & (1 << i)) == 0)
8170 continue;
8171 if (insn & (1 << 20)) {
8172 /* Load. */
8173 tmp = gen_ld32(addr, IS_USER(s));
8174 if (i == 15) {
8175 gen_bx(s, tmp);
8176 } else if (i == rn) {
8177 loaded_var = tmp;
8178 loaded_base = 1;
8179 } else {
8180 store_reg(s, i, tmp);
8182 } else {
8183 /* Store. */
8184 tmp = load_reg(s, i);
8185 gen_st32(tmp, addr, IS_USER(s));
8187 tcg_gen_addi_i32(addr, addr, 4);
8189 if (loaded_base) {
8190 store_reg(s, rn, loaded_var);
8192 if (insn & (1 << 21)) {
8193 /* Base register writeback. */
8194 if (insn & (1 << 24)) {
8195 tcg_gen_addi_i32(addr, addr, -offset);
8197 /* Fault if writeback register is in register list. */
8198 if (insn & (1 << rn))
8199 goto illegal_op;
8200 store_reg(s, rn, addr);
8201 } else {
8202 tcg_temp_free_i32(addr);
8206 break;
8207 case 5:
8209 op = (insn >> 21) & 0xf;
8210 if (op == 6) {
8211 /* Halfword pack. */
8212 tmp = load_reg(s, rn);
8213 tmp2 = load_reg(s, rm);
8214 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
8215 if (insn & (1 << 5)) {
8216 /* pkhtb */
8217 if (shift == 0)
8218 shift = 31;
8219 tcg_gen_sari_i32(tmp2, tmp2, shift);
8220 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
8221 tcg_gen_ext16u_i32(tmp2, tmp2);
8222 } else {
8223 /* pkhbt */
8224 if (shift)
8225 tcg_gen_shli_i32(tmp2, tmp2, shift);
8226 tcg_gen_ext16u_i32(tmp, tmp);
8227 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8229 tcg_gen_or_i32(tmp, tmp, tmp2);
8230 tcg_temp_free_i32(tmp2);
8231 store_reg(s, rd, tmp);
8232 } else {
8233 /* Data processing register constant shift. */
8234 if (rn == 15) {
8235 tmp = tcg_temp_new_i32();
8236 tcg_gen_movi_i32(tmp, 0);
8237 } else {
8238 tmp = load_reg(s, rn);
8240 tmp2 = load_reg(s, rm);
8242 shiftop = (insn >> 4) & 3;
8243 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8244 conds = (insn & (1 << 20)) != 0;
8245 logic_cc = (conds && thumb2_logic_op(op));
8246 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
8247 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
8248 goto illegal_op;
8249 tcg_temp_free_i32(tmp2);
8250 if (rd != 15) {
8251 store_reg(s, rd, tmp);
8252 } else {
8253 tcg_temp_free_i32(tmp);
8256 break;
8257 case 13: /* Misc data processing. */
8258 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
8259 if (op < 4 && (insn & 0xf000) != 0xf000)
8260 goto illegal_op;
8261 switch (op) {
8262 case 0: /* Register controlled shift. */
8263 tmp = load_reg(s, rn);
8264 tmp2 = load_reg(s, rm);
8265 if ((insn & 0x70) != 0)
8266 goto illegal_op;
8267 op = (insn >> 21) & 3;
8268 logic_cc = (insn & (1 << 20)) != 0;
8269 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
8270 if (logic_cc)
8271 gen_logic_CC(tmp);
8272 store_reg_bx(env, s, rd, tmp);
8273 break;
8274 case 1: /* Sign/zero extend. */
8275 tmp = load_reg(s, rm);
8276 shift = (insn >> 4) & 3;
8277 /* ??? In many cases it's not necessary to do a
8278 rotate; a shift is sufficient. */
8279 if (shift != 0)
8280 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
8281 op = (insn >> 20) & 7;
8282 switch (op) {
8283 case 0: gen_sxth(tmp); break;
8284 case 1: gen_uxth(tmp); break;
8285 case 2: gen_sxtb16(tmp); break;
8286 case 3: gen_uxtb16(tmp); break;
8287 case 4: gen_sxtb(tmp); break;
8288 case 5: gen_uxtb(tmp); break;
8289 default: goto illegal_op;
8291 if (rn != 15) {
8292 tmp2 = load_reg(s, rn);
8293 if ((op >> 1) == 1) {
8294 gen_add16(tmp, tmp2);
8295 } else {
8296 tcg_gen_add_i32(tmp, tmp, tmp2);
8297 tcg_temp_free_i32(tmp2);
8300 store_reg(s, rd, tmp);
8301 break;
8302 case 2: /* SIMD add/subtract. */
8303 op = (insn >> 20) & 7;
8304 shift = (insn >> 4) & 7;
8305 if ((op & 3) == 3 || (shift & 3) == 3)
8306 goto illegal_op;
8307 tmp = load_reg(s, rn);
8308 tmp2 = load_reg(s, rm);
8309 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
8310 tcg_temp_free_i32(tmp2);
8311 store_reg(s, rd, tmp);
8312 break;
8313 case 3: /* Other data processing. */
8314 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
8315 if (op < 4) {
8316 /* Saturating add/subtract. */
8317 tmp = load_reg(s, rn);
8318 tmp2 = load_reg(s, rm);
8319 if (op & 1)
8320 gen_helper_double_saturate(tmp, cpu_env, tmp);
8321 if (op & 2)
8322 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
8323 else
8324 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
8325 tcg_temp_free_i32(tmp2);
8326 } else {
8327 tmp = load_reg(s, rn);
8328 switch (op) {
8329 case 0x0a: /* rbit */
8330 gen_helper_rbit(tmp, tmp);
8331 break;
8332 case 0x08: /* rev */
8333 tcg_gen_bswap32_i32(tmp, tmp);
8334 break;
8335 case 0x09: /* rev16 */
8336 gen_rev16(tmp);
8337 break;
8338 case 0x0b: /* revsh */
8339 gen_revsh(tmp);
8340 break;
8341 case 0x10: /* sel */
8342 tmp2 = load_reg(s, rm);
8343 tmp3 = tcg_temp_new_i32();
8344 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
8345 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
8346 tcg_temp_free_i32(tmp3);
8347 tcg_temp_free_i32(tmp2);
8348 break;
8349 case 0x18: /* clz */
8350 gen_helper_clz(tmp, tmp);
8351 break;
8352 default:
8353 goto illegal_op;
8356 store_reg(s, rd, tmp);
8357 break;
8358 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
8359 op = (insn >> 4) & 0xf;
8360 tmp = load_reg(s, rn);
8361 tmp2 = load_reg(s, rm);
8362 switch ((insn >> 20) & 7) {
8363 case 0: /* 32 x 32 -> 32 */
8364 tcg_gen_mul_i32(tmp, tmp, tmp2);
8365 tcg_temp_free_i32(tmp2);
8366 if (rs != 15) {
8367 tmp2 = load_reg(s, rs);
8368 if (op)
8369 tcg_gen_sub_i32(tmp, tmp2, tmp);
8370 else
8371 tcg_gen_add_i32(tmp, tmp, tmp2);
8372 tcg_temp_free_i32(tmp2);
8374 break;
8375 case 1: /* 16 x 16 -> 32 */
8376 gen_mulxy(tmp, tmp2, op & 2, op & 1);
8377 tcg_temp_free_i32(tmp2);
8378 if (rs != 15) {
8379 tmp2 = load_reg(s, rs);
8380 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8381 tcg_temp_free_i32(tmp2);
8383 break;
8384 case 2: /* Dual multiply add. */
8385 case 4: /* Dual multiply subtract. */
8386 if (op)
8387 gen_swap_half(tmp2);
8388 gen_smul_dual(tmp, tmp2);
8389 if (insn & (1 << 22)) {
8390 /* This subtraction cannot overflow. */
8391 tcg_gen_sub_i32(tmp, tmp, tmp2);
8392 } else {
8393 /* This addition cannot overflow 32 bits;
8394 * however it may overflow when considered as a signed
8395 * operation, in which case we must set the Q flag. */
8397 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8399 tcg_temp_free_i32(tmp2);
8400 if (rs != 15)
8402 tmp2 = load_reg(s, rs);
8403 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8404 tcg_temp_free_i32(tmp2);
8406 break;
8407 case 3: /* 32 * 16 -> 32msb */
8408 if (op)
8409 tcg_gen_sari_i32(tmp2, tmp2, 16);
8410 else
8411 gen_sxth(tmp2);
8412 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8413 tcg_gen_shri_i64(tmp64, tmp64, 16);
8414 tmp = tcg_temp_new_i32();
8415 tcg_gen_trunc_i64_i32(tmp, tmp64);
8416 tcg_temp_free_i64(tmp64);
8417 if (rs != 15)
8419 tmp2 = load_reg(s, rs);
8420 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8421 tcg_temp_free_i32(tmp2);
8423 break;
8424 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
8425 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8426 if (rs != 15) {
8427 tmp = load_reg(s, rs);
8428 if (insn & (1 << 20)) {
8429 tmp64 = gen_addq_msw(tmp64, tmp);
8430 } else {
8431 tmp64 = gen_subq_msw(tmp64, tmp);
8434 if (insn & (1 << 4)) {
8435 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
8437 tcg_gen_shri_i64(tmp64, tmp64, 32);
8438 tmp = tcg_temp_new_i32();
8439 tcg_gen_trunc_i64_i32(tmp, tmp64);
8440 tcg_temp_free_i64(tmp64);
8441 break;
8442 case 7: /* Unsigned sum of absolute differences. */
8443 gen_helper_usad8(tmp, tmp, tmp2);
8444 tcg_temp_free_i32(tmp2);
8445 if (rs != 15) {
8446 tmp2 = load_reg(s, rs);
8447 tcg_gen_add_i32(tmp, tmp, tmp2);
8448 tcg_temp_free_i32(tmp2);
8450 break;
8452 store_reg(s, rd, tmp);
8453 break;
8454 case 6: case 7: /* 64-bit multiply, Divide. */
8455 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
8456 tmp = load_reg(s, rn);
8457 tmp2 = load_reg(s, rm);
8458 if ((op & 0x50) == 0x10) {
8459 /* sdiv, udiv */
8460 if (!arm_feature(env, ARM_FEATURE_THUMB_DIV)) {
8461 goto illegal_op;
8463 if (op & 0x20)
8464 gen_helper_udiv(tmp, tmp, tmp2);
8465 else
8466 gen_helper_sdiv(tmp, tmp, tmp2);
8467 tcg_temp_free_i32(tmp2);
8468 store_reg(s, rd, tmp);
8469 } else if ((op & 0xe) == 0xc) {
8470 /* Dual multiply accumulate long. */
8471 if (op & 1)
8472 gen_swap_half(tmp2);
8473 gen_smul_dual(tmp, tmp2);
8474 if (op & 0x10) {
8475 tcg_gen_sub_i32(tmp, tmp, tmp2);
8476 } else {
8477 tcg_gen_add_i32(tmp, tmp, tmp2);
8479 tcg_temp_free_i32(tmp2);
8480 /* BUGFIX */
8481 tmp64 = tcg_temp_new_i64();
8482 tcg_gen_ext_i32_i64(tmp64, tmp);
8483 tcg_temp_free_i32(tmp);
8484 gen_addq(s, tmp64, rs, rd);
8485 gen_storeq_reg(s, rs, rd, tmp64);
8486 tcg_temp_free_i64(tmp64);
8487 } else {
8488 if (op & 0x20) {
8489 /* Unsigned 64-bit multiply */
8490 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8491 } else {
8492 if (op & 8) {
8493 /* smlalxy */
8494 gen_mulxy(tmp, tmp2, op & 2, op & 1);
8495 tcg_temp_free_i32(tmp2);
8496 tmp64 = tcg_temp_new_i64();
8497 tcg_gen_ext_i32_i64(tmp64, tmp);
8498 tcg_temp_free_i32(tmp);
8499 } else {
8500 /* Signed 64-bit multiply */
8501 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8504 if (op & 4) {
8505 /* umaal */
8506 gen_addq_lo(s, tmp64, rs);
8507 gen_addq_lo(s, tmp64, rd);
8508 } else if (op & 0x40) {
8509 /* 64-bit accumulate. */
8510 gen_addq(s, tmp64, rs, rd);
8512 gen_storeq_reg(s, rs, rd, tmp64);
8513 tcg_temp_free_i64(tmp64);
8515 break;
8517 break;
8518 case 6: case 7: case 14: case 15:
8519 /* Coprocessor. */
8520 if (((insn >> 24) & 3) == 3) {
8521 /* Translate into the equivalent ARM encoding. */
8522 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
8523 if (disas_neon_data_insn(env, s, insn))
8524 goto illegal_op;
8525 } else {
8526 if (insn & (1 << 28))
8527 goto illegal_op;
8528 if (disas_coproc_insn (env, s, insn))
8529 goto illegal_op;
8531 break;
8532 case 8: case 9: case 10: case 11:
8533 if (insn & (1 << 15)) {
8534 /* Branches, misc control. */
8535 if (insn & 0x5000) {
8536 /* Unconditional branch. */
8537 /* signextend(hw1[10:0]) -> offset[:12]. */
8538 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
8539 /* hw1[10:0] -> offset[11:1]. */
8540 offset |= (insn & 0x7ff) << 1;
8541 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
8542 offset[24:22] already have the same value because of the
8543 sign extension above. */
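/* hw2 bit 13 is J1 and bit 11 is J2: I1 = NOT(J1 EOR S) forms offset[23], I2 = NOT(J2 EOR S) forms offset[22]. */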
8544 offset ^= ((~insn) & (1 << 13)) << 10;
8545 offset ^= ((~insn) & (1 << 11)) << 11;
8547 if (insn & (1 << 14)) {
8548 /* Branch and link. */
8549 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
8552 offset += s->pc;
8553 if (insn & (1 << 12)) {
8554 /* b/bl */
8555 gen_jmp(s, offset);
8556 } else {
8557 /* blx */
8558 offset &= ~(uint32_t)2;
8559 /* thumb2 bx, no need to check */
8560 gen_bx_im(s, offset);
8562 } else if (((insn >> 23) & 7) == 7) {
8563 /* Misc control */
8564 if (insn & (1 << 13))
8565 goto illegal_op;
8567 if (insn & (1 << 26)) {
8568 /* Secure monitor call (v6Z) */
8569 goto illegal_op; /* not implemented. */
8570 } else {
8571 op = (insn >> 20) & 7;
8572 switch (op) {
8573 case 0: /* msr cpsr. */
8574 if (IS_M(env)) {
8575 tmp = load_reg(s, rn);
8576 addr = tcg_const_i32(insn & 0xff);
8577 gen_helper_v7m_msr(cpu_env, addr, tmp);
8578 tcg_temp_free_i32(addr);
8579 tcg_temp_free_i32(tmp);
8580 gen_lookup_tb(s);
8581 break;
8583 /* fall through */
8584 case 1: /* msr spsr. */
8585 if (IS_M(env))
8586 goto illegal_op;
8587 tmp = load_reg(s, rn);
8588 if (gen_set_psr(s,
8589 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
8590 op == 1, tmp))
8591 goto illegal_op;
8592 break;
8593 case 2: /* cps, nop-hint. */
8594 if (((insn >> 8) & 7) == 0) {
8595 gen_nop_hint(s, insn & 0xff);
8597 /* Implemented as NOP in user mode. */
8598 if (IS_USER(s))
8599 break;
8600 offset = 0;
8601 imm = 0;
8602 if (insn & (1 << 10)) {
8603 if (insn & (1 << 7))
8604 offset |= CPSR_A;
8605 if (insn & (1 << 6))
8606 offset |= CPSR_I;
8607 if (insn & (1 << 5))
8608 offset |= CPSR_F;
8609 if (insn & (1 << 9))
8610 imm = CPSR_A | CPSR_I | CPSR_F;
8612 if (insn & (1 << 8)) {
8613 offset |= 0x1f;
8614 imm |= (insn & 0x1f);
8616 if (offset) {
8617 gen_set_psr_im(s, offset, 0, imm);
8619 break;
8620 case 3: /* Special control operations. */
8621 ARCH(7);
8622 op = (insn >> 4) & 0xf;
8623 switch (op) {
8624 case 2: /* clrex */
8625 gen_clrex(s);
8626 break;
8627 case 4: /* dsb */
8628 case 5: /* dmb */
8629 case 6: /* isb */
8630 /* These execute as NOPs. */
8631 break;
8632 default:
8633 goto illegal_op;
8635 break;
8636 case 4: /* bxj */
8637 /* Trivial implementation equivalent to bx. */
8638 tmp = load_reg(s, rn);
8639 gen_bx(s, tmp);
8640 break;
8641 case 5: /* Exception return. */
8642 if (IS_USER(s)) {
8643 goto illegal_op;
8645 if (rn != 14 || rd != 15) {
8646 goto illegal_op;
8648 tmp = load_reg(s, rn);
8649 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
8650 gen_exception_return(s, tmp);
8651 break;
8652 case 6: /* mrs cpsr. */
8653 tmp = tcg_temp_new_i32();
8654 if (IS_M(env)) {
8655 addr = tcg_const_i32(insn & 0xff);
8656 gen_helper_v7m_mrs(tmp, cpu_env, addr);
8657 tcg_temp_free_i32(addr);
8658 } else {
8659 gen_helper_cpsr_read(tmp, cpu_env);
8661 store_reg(s, rd, tmp);
8662 break;
8663 case 7: /* mrs spsr. */
8664 /* Not accessible in user mode. */
8665 if (IS_USER(s) || IS_M(env))
8666 goto illegal_op;
8667 tmp = load_cpu_field(spsr);
8668 store_reg(s, rd, tmp);
8669 break;
8672 } else {
8673 /* Conditional branch. */
8674 op = (insn >> 22) & 0xf;
8675 /* Generate a conditional jump to next instruction. */
8676 s->condlabel = gen_new_label();
8677 gen_test_cc(op ^ 1, s->condlabel);
8678 s->condjmp = 1;
8680 /* offset[11:1] = insn[10:0] */
8681 offset = (insn & 0x7ff) << 1;
8682 /* offset[17:12] = insn[21:16]. */
8683 offset |= (insn & 0x003f0000) >> 4;
8684 /* offset[31:20] = insn[26]. */
8685 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
8686 /* offset[18] = insn[13]. */
8687 offset |= (insn & (1 << 13)) << 5;
8688 /* offset[19] = insn[11]. */
8689 offset |= (insn & (1 << 11)) << 8;
8691 /* jump to the offset */
8692 gen_jmp(s, s->pc + offset);
8694 } else {
8695 /* Data processing immediate. */
8696 if (insn & (1 << 25)) {
8697 if (insn & (1 << 24)) {
8698 if (insn & (1 << 20))
8699 goto illegal_op;
8700 /* Bitfield/Saturate. */
8701 op = (insn >> 21) & 7;
8702 imm = insn & 0x1f;
8703 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8704 if (rn == 15) {
8705 tmp = tcg_temp_new_i32();
8706 tcg_gen_movi_i32(tmp, 0);
8707 } else {
8708 tmp = load_reg(s, rn);
8710 switch (op) {
8711 case 2: /* Signed bitfield extract. */
8712 imm++;
8713 if (shift + imm > 32)
8714 goto illegal_op;
8715 if (imm < 32)
8716 gen_sbfx(tmp, shift, imm);
8717 break;
8718 case 6: /* Unsigned bitfield extract. */
8719 imm++;
8720 if (shift + imm > 32)
8721 goto illegal_op;
8722 if (imm < 32)
8723 gen_ubfx(tmp, shift, (1u << imm) - 1);
8724 break;
8725 case 3: /* Bitfield insert/clear. */
8726 if (imm < shift)
8727 goto illegal_op;
8728 imm = imm + 1 - shift;
8729 if (imm != 32) {
8730 tmp2 = load_reg(s, rd);
8731 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
8732 tcg_temp_free_i32(tmp2);
8734 break;
8735 case 7:
8736 goto illegal_op;
8737 default: /* Saturate. */
8738 if (shift) {
8739 if (op & 1)
8740 tcg_gen_sari_i32(tmp, tmp, shift);
8741 else
8742 tcg_gen_shli_i32(tmp, tmp, shift);
8744 tmp2 = tcg_const_i32(imm);
8745 if (op & 4) {
8746 /* Unsigned. */
8747 if ((op & 1) && shift == 0)
8748 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
8749 else
8750 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
8751 } else {
8752 /* Signed. */
8753 if ((op & 1) && shift == 0)
8754 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
8755 else
8756 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
8758 tcg_temp_free_i32(tmp2);
8759 break;
8761 store_reg(s, rd, tmp);
8762 } else {
8763 imm = ((insn & 0x04000000) >> 15)
8764 | ((insn & 0x7000) >> 4) | (insn & 0xff);
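/* imm is the plain 12-bit i:imm3:imm8 immediate
   (insn[26], insn[14:12], insn[7:0]).  */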
8765 if (insn & (1 << 22)) {
8766 /* 16-bit immediate. */
8767 imm |= (insn >> 4) & 0xf000;
8768 if (insn & (1 << 23)) {
8769 /* movt */
8770 tmp = load_reg(s, rd);
8771 tcg_gen_ext16u_i32(tmp, tmp);
8772 tcg_gen_ori_i32(tmp, tmp, imm << 16);
8773 } else {
8774 /* movw */
8775 tmp = tcg_temp_new_i32();
8776 tcg_gen_movi_i32(tmp, imm);
8778 } else {
8779 /* Add/sub 12-bit immediate. */
8780 if (rn == 15) {
8781 offset = s->pc & ~(uint32_t)3;
8782 if (insn & (1 << 23))
8783 offset -= imm;
8784 else
8785 offset += imm;
8786 tmp = tcg_temp_new_i32();
8787 tcg_gen_movi_i32(tmp, offset);
8788 } else {
8789 tmp = load_reg(s, rn);
8790 if (insn & (1 << 23))
8791 tcg_gen_subi_i32(tmp, tmp, imm);
8792 else
8793 tcg_gen_addi_i32(tmp, tmp, imm);
8796 store_reg(s, rd, tmp);
8798 } else {
8799 int shifter_out = 0;
8800 /* modified 12-bit immediate. */
8801 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
8802 imm = (insn & 0xff);
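/* Expand the modified immediate: shift values 0-3 replicate the byte
   across the word as the case comments below show; larger values rotate
   an 8-bit constant with bit 7 forced set right by 'shift'
   (imm << (32 - shift) is that rotation, since only the low byte is set),
   and shifter_out marks it so the shifter carry-out can be derived.  */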
8803 switch (shift) {
8804 case 0: /* XY */
8805 /* Nothing to do. */
8806 break;
8807 case 1: /* 00XY00XY */
8808 imm |= imm << 16;
8809 break;
8810 case 2: /* XY00XY00 */
8811 imm |= imm << 16;
8812 imm <<= 8;
8813 break;
8814 case 3: /* XYXYXYXY */
8815 imm |= imm << 16;
8816 imm |= imm << 8;
8817 break;
8818 default: /* Rotated constant. */
8819 shift = (shift << 1) | (imm >> 7);
8820 imm |= 0x80;
8821 imm = imm << (32 - shift);
8822 shifter_out = 1;
8823 break;
8825 tmp2 = tcg_temp_new_i32();
8826 tcg_gen_movi_i32(tmp2, imm);
8827 rn = (insn >> 16) & 0xf;
8828 if (rn == 15) {
8829 tmp = tcg_temp_new_i32();
8830 tcg_gen_movi_i32(tmp, 0);
8831 } else {
8832 tmp = load_reg(s, rn);
8834 op = (insn >> 21) & 0xf;
8835 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
8836 shifter_out, tmp, tmp2))
8837 goto illegal_op;
8838 tcg_temp_free_i32(tmp2);
8839 rd = (insn >> 8) & 0xf;
8840 if (rd != 15) {
8841 store_reg(s, rd, tmp);
8842 } else {
8843 tcg_temp_free_i32(tmp);
8847 break;
8848 case 12: /* Load/store single data item. */
8850 int postinc = 0;
8851 int writeback = 0;
8852 int user;
8853 if ((insn & 0x01100000) == 0x01000000) {
8854 if (disas_neon_ls_insn(env, s, insn))
8855 goto illegal_op;
8856 break;
8858 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
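/* op[1:0] is the access size (0 = byte, 1 = halfword, 2 = word);
   op[2] requests sign extension on loads.  */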
8859 if (rs == 15) {
8860 if (!(insn & (1 << 20))) {
8861 goto illegal_op;
8863 if (op != 2) {
8864 /* Byte or halfword load space with dest == r15 : memory hints.
8865 * Catch them early so we don't emit pointless addressing code.
8866 * This space is a mix of:
8867 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
8868 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
8869 * cores)
8870 * unallocated hints, which must be treated as NOPs
8871 * UNPREDICTABLE space, which we NOP or UNDEF depending on
8872 * which is easiest for the decoding logic
8873 * Some space which must UNDEF
8875 int op1 = (insn >> 23) & 3;
8876 int op2 = (insn >> 6) & 0x3f;
8877 if (op & 2) {
8878 goto illegal_op;
8880 if (rn == 15) {
8881 /* UNPREDICTABLE, unallocated hint or
8882 * PLD/PLDW/PLI (literal)
8884 return 0;
8886 if (op1 & 1) {
8887 return 0; /* PLD/PLDW/PLI or unallocated hint */
8889 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
8890 return 0; /* PLD/PLDW/PLI or unallocated hint */
8892 /* UNDEF space, or an UNPREDICTABLE */
8893 return 1;
8896 user = IS_USER(s);
8897 if (rn == 15) {
8898 addr = tcg_temp_new_i32();
8899 /* PC relative. */
8900 /* s->pc has already been incremented by 4. */
8901 imm = s->pc & 0xfffffffc;
8902 if (insn & (1 << 23))
8903 imm += insn & 0xfff;
8904 else
8905 imm -= insn & 0xfff;
8906 tcg_gen_movi_i32(addr, imm);
8907 } else {
8908 addr = load_reg(s, rn);
8909 if (insn & (1 << 23)) {
8910 /* Positive offset. */
8911 imm = insn & 0xfff;
8912 tcg_gen_addi_i32(addr, addr, imm);
8913 } else {
8914 imm = insn & 0xff;
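/* insn[11:8] selects between a shifted register offset and the 8-bit
   immediate forms: negative offset, unprivileged access, or
   pre/post-indexed addressing.  */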
8915 switch ((insn >> 8) & 0xf) {
8916 case 0x0: /* Shifted Register. */
8917 shift = (insn >> 4) & 0xf;
8918 if (shift > 3) {
8919 tcg_temp_free_i32(addr);
8920 goto illegal_op;
8922 tmp = load_reg(s, rm);
8923 if (shift)
8924 tcg_gen_shli_i32(tmp, tmp, shift);
8925 tcg_gen_add_i32(addr, addr, tmp);
8926 tcg_temp_free_i32(tmp);
8927 break;
8928 case 0xc: /* Negative offset. */
8929 tcg_gen_addi_i32(addr, addr, -imm);
8930 break;
8931 case 0xe: /* User privilege. */
8932 tcg_gen_addi_i32(addr, addr, imm);
8933 user = 1;
8934 break;
8935 case 0x9: /* Post-decrement. */
8936 imm = -imm;
8937 /* Fall through. */
8938 case 0xb: /* Post-increment. */
8939 postinc = 1;
8940 writeback = 1;
8941 break;
8942 case 0xd: /* Pre-decrement. */
8943 imm = -imm;
8944 /* Fall through. */
8945 case 0xf: /* Pre-increment. */
8946 tcg_gen_addi_i32(addr, addr, imm);
8947 writeback = 1;
8948 break;
8949 default:
8950 tcg_temp_free_i32(addr);
8951 goto illegal_op;
8955 if (insn & (1 << 20)) {
8956 /* Load. */
8957 switch (op) {
8958 case 0: tmp = gen_ld8u(addr, user); break;
8959 case 4: tmp = gen_ld8s(addr, user); break;
8960 case 1: tmp = gen_ld16u(addr, user); break;
8961 case 5: tmp = gen_ld16s(addr, user); break;
8962 case 2: tmp = gen_ld32(addr, user); break;
8963 default:
8964 tcg_temp_free_i32(addr);
8965 goto illegal_op;
8967 if (rs == 15) {
8968 gen_bx(s, tmp);
8969 } else {
8970 store_reg(s, rs, tmp);
8972 } else {
8973 /* Store. */
8974 tmp = load_reg(s, rs);
8975 switch (op) {
8976 case 0: gen_st8(tmp, addr, user); break;
8977 case 1: gen_st16(tmp, addr, user); break;
8978 case 2: gen_st32(tmp, addr, user); break;
8979 default:
8980 tcg_temp_free_i32(addr);
8981 goto illegal_op;
8984 if (postinc)
8985 tcg_gen_addi_i32(addr, addr, imm);
8986 if (writeback) {
8987 store_reg(s, rn, addr);
8988 } else {
8989 tcg_temp_free_i32(addr);
8992 break;
8993 default:
8994 goto illegal_op;
8996 return 0;
8997 illegal_op:
8998 return 1;
9001 static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
9003 uint32_t val, insn, op, rm, rn, rd, shift, cond;
9004 int32_t offset;
9005 int i;
9006 TCGv tmp;
9007 TCGv tmp2;
9008 TCGv addr;
9010 if (s->condexec_mask) {
9011 cond = s->condexec_cond;
9012 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
9013 s->condlabel = gen_new_label();
9014 gen_test_cc(cond ^ 1, s->condlabel);
9015 s->condjmp = 1;
9019 insn = arm_lduw_code(env, s->pc, s->bswap_code);
9020 s->pc += 2;
9022 switch (insn >> 12) {
9023 case 0: case 1:
9025 rd = insn & 7;
9026 op = (insn >> 11) & 3;
9027 if (op == 3) {
9028 /* add/subtract */
9029 rn = (insn >> 3) & 7;
9030 tmp = load_reg(s, rn);
9031 if (insn & (1 << 10)) {
9032 /* immediate */
9033 tmp2 = tcg_temp_new_i32();
9034 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
9035 } else {
9036 /* reg */
9037 rm = (insn >> 6) & 7;
9038 tmp2 = load_reg(s, rm);
9040 if (insn & (1 << 9)) {
9041 if (s->condexec_mask)
9042 tcg_gen_sub_i32(tmp, tmp, tmp2);
9043 else
9044 gen_sub_CC(tmp, tmp, tmp2);
9045 } else {
9046 if (s->condexec_mask)
9047 tcg_gen_add_i32(tmp, tmp, tmp2);
9048 else
9049 gen_add_CC(tmp, tmp, tmp2);
9051 tcg_temp_free_i32(tmp2);
9052 store_reg(s, rd, tmp);
9053 } else {
9054 /* shift immediate */
9055 rm = (insn >> 3) & 7;
9056 shift = (insn >> 6) & 0x1f;
9057 tmp = load_reg(s, rm);
9058 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
9059 if (!s->condexec_mask)
9060 gen_logic_CC(tmp);
9061 store_reg(s, rd, tmp);
9063 break;
9064 case 2: case 3:
9065 /* arithmetic large immediate */
9066 op = (insn >> 11) & 3;
9067 rd = (insn >> 8) & 0x7;
9068 if (op == 0) { /* mov */
9069 tmp = tcg_temp_new_i32();
9070 tcg_gen_movi_i32(tmp, insn & 0xff);
9071 if (!s->condexec_mask)
9072 gen_logic_CC(tmp);
9073 store_reg(s, rd, tmp);
9074 } else {
9075 tmp = load_reg(s, rd);
9076 tmp2 = tcg_temp_new_i32();
9077 tcg_gen_movi_i32(tmp2, insn & 0xff);
9078 switch (op) {
9079 case 1: /* cmp */
9080 gen_sub_CC(tmp, tmp, tmp2);
9081 tcg_temp_free_i32(tmp);
9082 tcg_temp_free_i32(tmp2);
9083 break;
9084 case 2: /* add */
9085 if (s->condexec_mask)
9086 tcg_gen_add_i32(tmp, tmp, tmp2);
9087 else
9088 gen_add_CC(tmp, tmp, tmp2);
9089 tcg_temp_free_i32(tmp2);
9090 store_reg(s, rd, tmp);
9091 break;
9092 case 3: /* sub */
9093 if (s->condexec_mask)
9094 tcg_gen_sub_i32(tmp, tmp, tmp2);
9095 else
9096 gen_sub_CC(tmp, tmp, tmp2);
9097 tcg_temp_free_i32(tmp2);
9098 store_reg(s, rd, tmp);
9099 break;
9102 break;
9103 case 4:
9104 if (insn & (1 << 11)) {
9105 rd = (insn >> 8) & 7;
9106 /* load pc-relative. Bit 1 of PC is ignored. */
9107 val = s->pc + 2 + ((insn & 0xff) * 4);
9108 val &= ~(uint32_t)2;
9109 addr = tcg_temp_new_i32();
9110 tcg_gen_movi_i32(addr, val);
9111 tmp = gen_ld32(addr, IS_USER(s));
9112 tcg_temp_free_i32(addr);
9113 store_reg(s, rd, tmp);
9114 break;
9116 if (insn & (1 << 10)) {
9117 /* data processing extended or blx */
9118 rd = (insn & 7) | ((insn >> 4) & 8);
9119 rm = (insn >> 3) & 0xf;
9120 op = (insn >> 8) & 3;
9121 switch (op) {
9122 case 0: /* add */
9123 tmp = load_reg(s, rd);
9124 tmp2 = load_reg(s, rm);
9125 tcg_gen_add_i32(tmp, tmp, tmp2);
9126 tcg_temp_free_i32(tmp2);
9127 store_reg(s, rd, tmp);
9128 break;
9129 case 1: /* cmp */
9130 tmp = load_reg(s, rd);
9131 tmp2 = load_reg(s, rm);
9132 gen_sub_CC(tmp, tmp, tmp2);
9133 tcg_temp_free_i32(tmp2);
9134 tcg_temp_free_i32(tmp);
9135 break;
9136 case 2: /* mov/cpy */
9137 tmp = load_reg(s, rm);
9138 store_reg(s, rd, tmp);
9139 break;
9140 case 3: /* branch [and link] exchange thumb register */
9141 tmp = load_reg(s, rm);
9142 if (insn & (1 << 7)) {
9143 ARCH(5);
9144 val = (uint32_t)s->pc | 1;
9145 tmp2 = tcg_temp_new_i32();
9146 tcg_gen_movi_i32(tmp2, val);
9147 store_reg(s, 14, tmp2);
9149 /* already thumb, no need to check */
9150 gen_bx(s, tmp);
9151 break;
9153 break;
9156 /* data processing register */
9157 rd = insn & 7;
9158 rm = (insn >> 3) & 7;
9159 op = (insn >> 6) & 0xf;
9160 if (op == 2 || op == 3 || op == 4 || op == 7) {
9161 /* the shift/rotate ops want the operands backwards */
9162 val = rm;
9163 rm = rd;
9164 rd = val;
9165 val = 1;
9166 } else {
9167 val = 0;
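/* val flags that the result is produced in tmp2 and should be written
   back to rm (rd and rm were swapped above for the shift/rotate ops;
   the mvn case sets it too).  */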
9170 if (op == 9) { /* neg */
9171 tmp = tcg_temp_new_i32();
9172 tcg_gen_movi_i32(tmp, 0);
9173 } else if (op != 0xf) { /* mvn doesn't read its first operand */
9174 tmp = load_reg(s, rd);
9175 } else {
9176 TCGV_UNUSED(tmp);
9179 tmp2 = load_reg(s, rm);
9180 switch (op) {
9181 case 0x0: /* and */
9182 tcg_gen_and_i32(tmp, tmp, tmp2);
9183 if (!s->condexec_mask)
9184 gen_logic_CC(tmp);
9185 break;
9186 case 0x1: /* eor */
9187 tcg_gen_xor_i32(tmp, tmp, tmp2);
9188 if (!s->condexec_mask)
9189 gen_logic_CC(tmp);
9190 break;
9191 case 0x2: /* lsl */
9192 if (s->condexec_mask) {
9193 gen_shl(tmp2, tmp2, tmp);
9194 } else {
9195 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
9196 gen_logic_CC(tmp2);
9198 break;
9199 case 0x3: /* lsr */
9200 if (s->condexec_mask) {
9201 gen_shr(tmp2, tmp2, tmp);
9202 } else {
9203 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
9204 gen_logic_CC(tmp2);
9206 break;
9207 case 0x4: /* asr */
9208 if (s->condexec_mask) {
9209 gen_sar(tmp2, tmp2, tmp);
9210 } else {
9211 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
9212 gen_logic_CC(tmp2);
9214 break;
9215 case 0x5: /* adc */
9216 if (s->condexec_mask)
9217 gen_adc(tmp, tmp2);
9218 else
9219 gen_helper_adc_cc(tmp, cpu_env, tmp, tmp2);
9220 break;
9221 case 0x6: /* sbc */
9222 if (s->condexec_mask)
9223 gen_sub_carry(tmp, tmp, tmp2);
9224 else
9225 gen_helper_sbc_cc(tmp, cpu_env, tmp, tmp2);
9226 break;
9227 case 0x7: /* ror */
9228 if (s->condexec_mask) {
9229 tcg_gen_andi_i32(tmp, tmp, 0x1f);
9230 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9231 } else {
9232 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
9233 gen_logic_CC(tmp2);
9235 break;
9236 case 0x8: /* tst */
9237 tcg_gen_and_i32(tmp, tmp, tmp2);
9238 gen_logic_CC(tmp);
9239 rd = 16;
9240 break;
9241 case 0x9: /* neg */
9242 if (s->condexec_mask)
9243 tcg_gen_neg_i32(tmp, tmp2);
9244 else
9245 gen_sub_CC(tmp, tmp, tmp2);
9246 break;
9247 case 0xa: /* cmp */
9248 gen_sub_CC(tmp, tmp, tmp2);
9249 rd = 16;
9250 break;
9251 case 0xb: /* cmn */
9252 gen_add_CC(tmp, tmp, tmp2);
9253 rd = 16;
9254 break;
9255 case 0xc: /* orr */
9256 tcg_gen_or_i32(tmp, tmp, tmp2);
9257 if (!s->condexec_mask)
9258 gen_logic_CC(tmp);
9259 break;
9260 case 0xd: /* mul */
9261 tcg_gen_mul_i32(tmp, tmp, tmp2);
9262 if (!s->condexec_mask)
9263 gen_logic_CC(tmp);
9264 break;
9265 case 0xe: /* bic */
9266 tcg_gen_andc_i32(tmp, tmp, tmp2);
9267 if (!s->condexec_mask)
9268 gen_logic_CC(tmp);
9269 break;
9270 case 0xf: /* mvn */
9271 tcg_gen_not_i32(tmp2, tmp2);
9272 if (!s->condexec_mask)
9273 gen_logic_CC(tmp2);
9274 val = 1;
9275 rm = rd;
9276 break;
9278 if (rd != 16) {
9279 if (val) {
9280 store_reg(s, rm, tmp2);
9281 if (op != 0xf)
9282 tcg_temp_free_i32(tmp);
9283 } else {
9284 store_reg(s, rd, tmp);
9285 tcg_temp_free_i32(tmp2);
9287 } else {
9288 tcg_temp_free_i32(tmp);
9289 tcg_temp_free_i32(tmp2);
9291 break;
9293 case 5:
9294 /* load/store register offset. */
9295 rd = insn & 7;
9296 rn = (insn >> 3) & 7;
9297 rm = (insn >> 6) & 7;
9298 op = (insn >> 9) & 7;
9299 addr = load_reg(s, rn);
9300 tmp = load_reg(s, rm);
9301 tcg_gen_add_i32(addr, addr, tmp);
9302 tcg_temp_free_i32(tmp);
9304 if (op < 3) /* store */
9305 tmp = load_reg(s, rd);
9307 switch (op) {
9308 case 0: /* str */
9309 gen_st32(tmp, addr, IS_USER(s));
9310 break;
9311 case 1: /* strh */
9312 gen_st16(tmp, addr, IS_USER(s));
9313 break;
9314 case 2: /* strb */
9315 gen_st8(tmp, addr, IS_USER(s));
9316 break;
9317 case 3: /* ldrsb */
9318 tmp = gen_ld8s(addr, IS_USER(s));
9319 break;
9320 case 4: /* ldr */
9321 tmp = gen_ld32(addr, IS_USER(s));
9322 break;
9323 case 5: /* ldrh */
9324 tmp = gen_ld16u(addr, IS_USER(s));
9325 break;
9326 case 6: /* ldrb */
9327 tmp = gen_ld8u(addr, IS_USER(s));
9328 break;
9329 case 7: /* ldrsh */
9330 tmp = gen_ld16s(addr, IS_USER(s));
9331 break;
9333 if (op >= 3) /* load */
9334 store_reg(s, rd, tmp);
9335 tcg_temp_free_i32(addr);
9336 break;
9338 case 6:
9339 /* load/store word immediate offset */
9340 rd = insn & 7;
9341 rn = (insn >> 3) & 7;
9342 addr = load_reg(s, rn);
9343 val = (insn >> 4) & 0x7c;
9344 tcg_gen_addi_i32(addr, addr, val);
9346 if (insn & (1 << 11)) {
9347 /* load */
9348 tmp = gen_ld32(addr, IS_USER(s));
9349 store_reg(s, rd, tmp);
9350 } else {
9351 /* store */
9352 tmp = load_reg(s, rd);
9353 gen_st32(tmp, addr, IS_USER(s));
9355 tcg_temp_free_i32(addr);
9356 break;
9358 case 7:
9359 /* load/store byte immediate offset */
9360 rd = insn & 7;
9361 rn = (insn >> 3) & 7;
9362 addr = load_reg(s, rn);
9363 val = (insn >> 6) & 0x1f;
9364 tcg_gen_addi_i32(addr, addr, val);
9366 if (insn & (1 << 11)) {
9367 /* load */
9368 tmp = gen_ld8u(addr, IS_USER(s));
9369 store_reg(s, rd, tmp);
9370 } else {
9371 /* store */
9372 tmp = load_reg(s, rd);
9373 gen_st8(tmp, addr, IS_USER(s));
9375 tcg_temp_free_i32(addr);
9376 break;
9378 case 8:
9379 /* load/store halfword immediate offset */
9380 rd = insn & 7;
9381 rn = (insn >> 3) & 7;
9382 addr = load_reg(s, rn);
9383 val = (insn >> 5) & 0x3e;
9384 tcg_gen_addi_i32(addr, addr, val);
9386 if (insn & (1 << 11)) {
9387 /* load */
9388 tmp = gen_ld16u(addr, IS_USER(s));
9389 store_reg(s, rd, tmp);
9390 } else {
9391 /* store */
9392 tmp = load_reg(s, rd);
9393 gen_st16(tmp, addr, IS_USER(s));
9395 tcg_temp_free_i32(addr);
9396 break;
9398 case 9:
9399 /* load/store from stack */
9400 rd = (insn >> 8) & 7;
9401 addr = load_reg(s, 13);
9402 val = (insn & 0xff) * 4;
9403 tcg_gen_addi_i32(addr, addr, val);
9405 if (insn & (1 << 11)) {
9406 /* load */
9407 tmp = gen_ld32(addr, IS_USER(s));
9408 store_reg(s, rd, tmp);
9409 } else {
9410 /* store */
9411 tmp = load_reg(s, rd);
9412 gen_st32(tmp, addr, IS_USER(s));
9414 tcg_temp_free_i32(addr);
9415 break;
9417 case 10:
9418 /* add to high reg */
9419 rd = (insn >> 8) & 7;
9420 if (insn & (1 << 11)) {
9421 /* SP */
9422 tmp = load_reg(s, 13);
9423 } else {
9424 /* PC. bit 1 is ignored. */
9425 tmp = tcg_temp_new_i32();
9426 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
9428 val = (insn & 0xff) * 4;
9429 tcg_gen_addi_i32(tmp, tmp, val);
9430 store_reg(s, rd, tmp);
9431 break;
9433 case 11:
9434 /* misc */
9435 op = (insn >> 8) & 0xf;
9436 switch (op) {
9437 case 0:
9438 /* adjust stack pointer */
9439 tmp = load_reg(s, 13);
9440 val = (insn & 0x7f) * 4;
9441 if (insn & (1 << 7))
9442 val = -(int32_t)val;
9443 tcg_gen_addi_i32(tmp, tmp, val);
9444 store_reg(s, 13, tmp);
9445 break;
9447 case 2: /* sign/zero extend. */
9448 ARCH(6);
9449 rd = insn & 7;
9450 rm = (insn >> 3) & 7;
9451 tmp = load_reg(s, rm);
9452 switch ((insn >> 6) & 3) {
9453 case 0: gen_sxth(tmp); break;
9454 case 1: gen_sxtb(tmp); break;
9455 case 2: gen_uxth(tmp); break;
9456 case 3: gen_uxtb(tmp); break;
9458 store_reg(s, rd, tmp);
9459 break;
9460 case 4: case 5: case 0xc: case 0xd:
9461 /* push/pop */
9462 addr = load_reg(s, 13);
9463 if (insn & (1 << 8))
9464 offset = 4;
9465 else
9466 offset = 0;
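/* offset counts the bytes to transfer: 4 per low register in the list,
   plus 4 for LR (push) or PC (pop) when bit 8 is set; the push form
   pre-decrements the address by this amount before storing.  */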
9467 for (i = 0; i < 8; i++) {
9468 if (insn & (1 << i))
9469 offset += 4;
9471 if ((insn & (1 << 11)) == 0) {
9472 tcg_gen_addi_i32(addr, addr, -offset);
9474 for (i = 0; i < 8; i++) {
9475 if (insn & (1 << i)) {
9476 if (insn & (1 << 11)) {
9477 /* pop */
9478 tmp = gen_ld32(addr, IS_USER(s));
9479 store_reg(s, i, tmp);
9480 } else {
9481 /* push */
9482 tmp = load_reg(s, i);
9483 gen_st32(tmp, addr, IS_USER(s));
9485 /* advance to the next address. */
9486 tcg_gen_addi_i32(addr, addr, 4);
9489 TCGV_UNUSED(tmp);
9490 if (insn & (1 << 8)) {
9491 if (insn & (1 << 11)) {
9492 /* pop pc */
9493 tmp = gen_ld32(addr, IS_USER(s));
9494 /* don't set the pc until the rest of the instruction
9495 has completed */
9496 } else {
9497 /* push lr */
9498 tmp = load_reg(s, 14);
9499 gen_st32(tmp, addr, IS_USER(s));
9501 tcg_gen_addi_i32(addr, addr, 4);
9503 if ((insn & (1 << 11)) == 0) {
9504 tcg_gen_addi_i32(addr, addr, -offset);
9506 /* write back the new stack pointer */
9507 store_reg(s, 13, addr);
9508 /* set the new PC value */
9509 if ((insn & 0x0900) == 0x0900) {
9510 store_reg_from_load(env, s, 15, tmp);
9512 break;
9514 case 1: case 3: case 9: case 11: /* czb (cbz/cbnz: compare and branch on zero/non-zero) */
9515 rm = insn & 7;
9516 tmp = load_reg(s, rm);
9517 s->condlabel = gen_new_label();
9518 s->condjmp = 1;
9519 if (insn & (1 << 11))
9520 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9521 else
9522 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
9523 tcg_temp_free_i32(tmp);
9524 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
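/* offset is i:imm5:'0' (insn[9] and insn[7:3]), a forward-only,
   halfword-scaled displacement.  */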
9525 val = (uint32_t)s->pc + 2;
9526 val += offset;
9527 gen_jmp(s, val);
9528 break;
9530 case 15: /* IT, nop-hint. */
9531 if ((insn & 0xf) == 0) {
9532 gen_nop_hint(s, (insn >> 4) & 0xf);
9533 break;
9535 /* If Then. */
9536 s->condexec_cond = (insn >> 4) & 0xe;
9537 s->condexec_mask = insn & 0x1f;
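/* condexec_cond holds firstcond[3:1]; firstcond[0] and the IT mask sit
   in condexec_mask and are shifted out one bit per instruction by the
   IT-advance code in gen_intermediate_code_internal().  */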
9538 /* No actual code generated for this insn, just setup state. */
9539 break;
9541 case 0xe: /* bkpt */
9542 ARCH(5);
9543 gen_exception_insn(s, 2, EXCP_BKPT);
9544 break;
9546 case 0xa: /* rev */
9547 ARCH(6);
9548 rn = (insn >> 3) & 0x7;
9549 rd = insn & 0x7;
9550 tmp = load_reg(s, rn);
9551 switch ((insn >> 6) & 3) {
9552 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
9553 case 1: gen_rev16(tmp); break;
9554 case 3: gen_revsh(tmp); break;
9555 default: goto illegal_op;
9557 store_reg(s, rd, tmp);
9558 break;
9560 case 6:
9561 switch ((insn >> 5) & 7) {
9562 case 2:
9563 /* setend */
9564 ARCH(6);
9565 if (((insn >> 3) & 1) != s->bswap_code) {
9566 /* Dynamic endianness switching not implemented. */
9567 goto illegal_op;
9569 break;
9570 case 3:
9571 /* cps */
9572 ARCH(6);
9573 if (IS_USER(s)) {
9574 break;
9576 if (IS_M(env)) {
9577 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9578 /* FAULTMASK */
9579 if (insn & 1) {
9580 addr = tcg_const_i32(19);
9581 gen_helper_v7m_msr(cpu_env, addr, tmp);
9582 tcg_temp_free_i32(addr);
9584 /* PRIMASK */
9585 if (insn & 2) {
9586 addr = tcg_const_i32(16);
9587 gen_helper_v7m_msr(cpu_env, addr, tmp);
9588 tcg_temp_free_i32(addr);
9590 tcg_temp_free_i32(tmp);
9591 gen_lookup_tb(s);
9592 } else {
9593 if (insn & (1 << 4)) {
9594 shift = CPSR_A | CPSR_I | CPSR_F;
9595 } else {
9596 shift = 0;
9598 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
9600 break;
9601 default:
9602 goto undef;
9604 break;
9606 default:
9607 goto undef;
9609 break;
9611 case 12:
9613 /* load/store multiple */
9614 TCGv loaded_var;
9615 TCGV_UNUSED(loaded_var);
9616 rn = (insn >> 8) & 0x7;
9617 addr = load_reg(s, rn);
9618 for (i = 0; i < 8; i++) {
9619 if (insn & (1 << i)) {
9620 if (insn & (1 << 11)) {
9621 /* load */
9622 tmp = gen_ld32(addr, IS_USER(s));
9623 if (i == rn) {
9624 loaded_var = tmp;
9625 } else {
9626 store_reg(s, i, tmp);
9628 } else {
9629 /* store */
9630 tmp = load_reg(s, i);
9631 gen_st32(tmp, addr, IS_USER(s));
9633 /* advance to the next address */
9634 tcg_gen_addi_i32(addr, addr, 4);
9637 if ((insn & (1 << rn)) == 0) {
9638 /* base reg not in list: base register writeback */
9639 store_reg(s, rn, addr);
9640 } else {
9641 /* base reg in list: if load, complete it now */
9642 if (insn & (1 << 11)) {
9643 store_reg(s, rn, loaded_var);
9645 tcg_temp_free_i32(addr);
9647 break;
9649 case 13:
9650 /* conditional branch or swi */
9651 cond = (insn >> 8) & 0xf;
9652 if (cond == 0xe)
9653 goto undef;
9655 if (cond == 0xf) {
9656 /* swi */
9657 gen_set_pc_im(s->pc);
9658 s->is_jmp = DISAS_SWI;
9659 break;
9661 /* generate a conditional jump to next instruction */
9662 s->condlabel = gen_new_label();
9663 gen_test_cc(cond ^ 1, s->condlabel);
9664 s->condjmp = 1;
9666 /* jump to the offset */
9667 val = (uint32_t)s->pc + 2;
9668 offset = ((int32_t)insn << 24) >> 24;
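/* Sign-extend the 8-bit immediate; val is the Thumb PC (instruction
   address + 4) and the displacement is in halfwords.  */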
9669 val += offset << 1;
9670 gen_jmp(s, val);
9671 break;
9673 case 14:
9674 if (insn & (1 << 11)) {
9675 if (disas_thumb2_insn(env, s, insn))
9676 goto undef32;
9677 break;
9679 /* unconditional branch */
9680 val = (uint32_t)s->pc;
9681 offset = ((int32_t)insn << 21) >> 21;
9682 val += (offset << 1) + 2;
9683 gen_jmp(s, val);
9684 break;
9686 case 15:
9687 if (disas_thumb2_insn(env, s, insn))
9688 goto undef32;
9689 break;
9691 return;
9692 undef32:
9693 gen_exception_insn(s, 4, EXCP_UDEF);
9694 return;
9695 illegal_op:
9696 undef:
9697 gen_exception_insn(s, 2, EXCP_UDEF);
9700 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
9701 basic block 'tb'. If search_pc is TRUE, also generate PC
9702 information for each intermediate instruction. */
9703 static inline void gen_intermediate_code_internal(CPUARMState *env,
9704 TranslationBlock *tb,
9705 int search_pc)
9707 DisasContext dc1, *dc = &dc1;
9708 CPUBreakpoint *bp;
9709 uint16_t *gen_opc_end;
9710 int j, lj;
9711 target_ulong pc_start;
9712 uint32_t next_page_start;
9713 int num_insns;
9714 int max_insns;
9716 /* generate intermediate code */
9717 pc_start = tb->pc;
9719 dc->tb = tb;
9721 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
9723 dc->is_jmp = DISAS_NEXT;
9724 dc->pc = pc_start;
9725 dc->singlestep_enabled = env->singlestep_enabled;
9726 dc->condjmp = 0;
9727 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
9728 dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
9729 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
9730 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
9731 #if !defined(CONFIG_USER_ONLY)
9732 dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
9733 #endif
9734 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
9735 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
9736 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
9737 cpu_F0s = tcg_temp_new_i32();
9738 cpu_F1s = tcg_temp_new_i32();
9739 cpu_F0d = tcg_temp_new_i64();
9740 cpu_F1d = tcg_temp_new_i64();
9741 cpu_V0 = cpu_F0d;
9742 cpu_V1 = cpu_F1d;
9743 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
9744 cpu_M0 = tcg_temp_new_i64();
9745 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
9746 lj = -1;
9747 num_insns = 0;
9748 max_insns = tb->cflags & CF_COUNT_MASK;
9749 if (max_insns == 0)
9750 max_insns = CF_COUNT_MASK;
9752 gen_icount_start();
9754 tcg_clear_temp_count();
9756 /* A note on handling of the condexec (IT) bits:
9758 * We want to avoid the overhead of having to write the updated condexec
9759 * bits back to the CPUARMState for every instruction in an IT block. So:
9760 * (1) if the condexec bits are not already zero then we write
9761 * zero back into the CPUARMState now. This avoids complications trying
9762 * to do it at the end of the block. (For example if we don't do this
9763 * it's hard to identify whether we can safely skip writing condexec
9764 * at the end of the TB, which we definitely want to do for the case
9765 * where a TB doesn't do anything with the IT state at all.)
9766 * (2) if we are going to leave the TB then we call gen_set_condexec()
9767 * which will write the correct value into CPUARMState if zero is wrong.
9768 * This is done both for leaving the TB at the end, and for leaving
9769 * it because of an exception we know will happen, which is done in
9770 * gen_exception_insn(). The latter is necessary because we need to
9771 * leave the TB with the PC/IT state just prior to execution of the
9772 * instruction which caused the exception.
9773 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
9774 * then the CPUARMState will be wrong and we need to reset it.
9775 * This is handled in the same way as restoration of the
9776 * PC in these situations: we will be called again with search_pc=1
9777 * and generate a mapping of the condexec bits for each PC in
9778 * gen_opc_condexec_bits[]. restore_state_to_opc() then uses
9779 * this to restore the condexec bits.
9781 * Note that there are no instructions which can read the condexec
9782 * bits, and none which can write non-static values to them, so
9783 * we don't need to care about whether CPUARMState is correct in the
9784 * middle of a TB.
9787 /* Reset the conditional execution bits immediately. This avoids
9788 complications trying to do it at the end of the block. */
9789 if (dc->condexec_mask || dc->condexec_cond)
9791 TCGv tmp = tcg_temp_new_i32();
9792 tcg_gen_movi_i32(tmp, 0);
9793 store_cpu_field(tmp, condexec_bits);
9795 do {
9796 #ifdef CONFIG_USER_ONLY
9797 /* Intercept jump to the magic kernel page. */
9798 if (dc->pc >= 0xffff0000) {
9799 /* We always get here via a jump, so know we are not in a
9800 conditional execution block. */
9801 gen_exception(EXCP_KERNEL_TRAP);
9802 dc->is_jmp = DISAS_UPDATE;
9803 break;
9805 #else
9806 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
9807 /* We always get here via a jump, so know we are not in a
9808 conditional execution block. */
9809 gen_exception(EXCP_EXCEPTION_EXIT);
9810 dc->is_jmp = DISAS_UPDATE;
9811 break;
9813 #endif
9815 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
9816 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
9817 if (bp->pc == dc->pc) {
9818 gen_exception_insn(dc, 0, EXCP_DEBUG);
9819 /* Advance PC so that clearing the breakpoint will
9820 invalidate this TB. */
9821 dc->pc += 2;
9822 goto done_generating;
9823 break;
9827 if (search_pc) {
9828 j = gen_opc_ptr - gen_opc_buf;
9829 if (lj < j) {
9830 lj++;
9831 while (lj < j)
9832 gen_opc_instr_start[lj++] = 0;
9834 gen_opc_pc[lj] = dc->pc;
9835 gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
9836 gen_opc_instr_start[lj] = 1;
9837 gen_opc_icount[lj] = num_insns;
9840 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
9841 gen_io_start();
9843 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
9844 tcg_gen_debug_insn_start(dc->pc);
9847 if (dc->thumb) {
9848 disas_thumb_insn(env, dc);
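/* Advance the IT state after each Thumb insn: the next condition low
   bit comes from the top of the mask, the mask shifts up by one, and an
   empty mask ends the IT block.  */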
9849 if (dc->condexec_mask) {
9850 dc->condexec_cond = (dc->condexec_cond & 0xe)
9851 | ((dc->condexec_mask >> 4) & 1);
9852 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
9853 if (dc->condexec_mask == 0) {
9854 dc->condexec_cond = 0;
9857 } else {
9858 disas_arm_insn(env, dc);
9861 if (dc->condjmp && !dc->is_jmp) {
9862 gen_set_label(dc->condlabel);
9863 dc->condjmp = 0;
9866 if (tcg_check_temp_count()) {
9867 fprintf(stderr, "TCG temporary leak before %08x\n", dc->pc);
9870 /* Translation stops when a conditional branch is encountered.
9871 * Otherwise the subsequent code could get translated several times.
9872 * Also stop translation when a page boundary is reached. This
9873 * ensures prefetch aborts occur at the right place. */
9874 num_insns++;
9875 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
9876 !env->singlestep_enabled &&
9877 !singlestep &&
9878 dc->pc < next_page_start &&
9879 num_insns < max_insns);
9881 if (tb->cflags & CF_LAST_IO) {
9882 if (dc->condjmp) {
9883 /* FIXME: This can theoretically happen with self-modifying
9884 code. */
9885 cpu_abort(env, "IO on conditional branch instruction");
9887 gen_io_end();
9890 /* At this stage dc->condjmp will only be set when the skipped
9891 instruction was a conditional branch or trap, and the PC has
9892 already been written. */
9893 if (unlikely(env->singlestep_enabled)) {
9894 /* Make sure the pc is updated, and raise a debug exception. */
9895 if (dc->condjmp) {
9896 gen_set_condexec(dc);
9897 if (dc->is_jmp == DISAS_SWI) {
9898 gen_exception(EXCP_SWI);
9899 } else {
9900 gen_exception(EXCP_DEBUG);
9902 gen_set_label(dc->condlabel);
9904 if (dc->condjmp || !dc->is_jmp) {
9905 gen_set_pc_im(dc->pc);
9906 dc->condjmp = 0;
9908 gen_set_condexec(dc);
9909 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
9910 gen_exception(EXCP_SWI);
9911 } else {
9912 /* FIXME: Single stepping a WFI insn will not halt
9913 the CPU. */
9914 gen_exception(EXCP_DEBUG);
9916 } else {
9917 /* While branches must always occur at the end of an IT block,
9918 there are a few other things that can cause us to terminate
9919 the TB in the middle of an IT block:
9920 - Exception generating instructions (bkpt, swi, undefined).
9921 - Page boundaries.
9922 - Hardware watchpoints.
9923 Hardware breakpoints have already been handled and skip this code.
9925 gen_set_condexec(dc);
9926 switch(dc->is_jmp) {
9927 case DISAS_NEXT:
9928 gen_goto_tb(dc, 1, dc->pc);
9929 break;
9930 default:
9931 case DISAS_JUMP:
9932 case DISAS_UPDATE:
9933 /* indicate that the hash table must be used to find the next TB */
9934 tcg_gen_exit_tb(0);
9935 break;
9936 case DISAS_TB_JUMP:
9937 /* nothing more to generate */
9938 break;
9939 case DISAS_WFI:
9940 gen_helper_wfi(cpu_env);
9941 break;
9942 case DISAS_SWI:
9943 gen_exception(EXCP_SWI);
9944 break;
9946 if (dc->condjmp) {
9947 gen_set_label(dc->condlabel);
9948 gen_set_condexec(dc);
9949 gen_goto_tb(dc, 1, dc->pc);
9950 dc->condjmp = 0;
9954 done_generating:
9955 gen_icount_end(tb, num_insns);
9956 *gen_opc_ptr = INDEX_op_end;
9958 #ifdef DEBUG_DISAS
9959 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
9960 qemu_log("----------------\n");
9961 qemu_log("IN: %s\n", lookup_symbol(pc_start));
9962 log_target_disas(pc_start, dc->pc - pc_start,
9963 dc->thumb | (dc->bswap_code << 1));
9964 qemu_log("\n");
9966 #endif
9967 if (search_pc) {
9968 j = gen_opc_ptr - gen_opc_buf;
9969 lj++;
9970 while (lj <= j)
9971 gen_opc_instr_start[lj++] = 0;
9972 } else {
9973 tb->size = dc->pc - pc_start;
9974 tb->icount = num_insns;
9978 void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
9980 gen_intermediate_code_internal(env, tb, 0);
9983 void gen_intermediate_code_pc(CPUARMState *env, TranslationBlock *tb)
9985 gen_intermediate_code_internal(env, tb, 1);
9988 static const char *cpu_mode_names[16] = {
9989 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
9990 "???", "???", "???", "und", "???", "???", "???", "sys"
9993 void cpu_dump_state(CPUARMState *env, FILE *f, fprintf_function cpu_fprintf,
9994 int flags)
9996 int i;
9997 uint32_t psr;
9999 for (i = 0; i < 16; i++) {
10000 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
10001 if ((i % 4) == 3)
10002 cpu_fprintf(f, "\n");
10003 else
10004 cpu_fprintf(f, " ");
10006 psr = cpsr_read(env);
10007 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
10008 psr,
10009 psr & (1 << 31) ? 'N' : '-',
10010 psr & (1 << 30) ? 'Z' : '-',
10011 psr & (1 << 29) ? 'C' : '-',
10012 psr & (1 << 28) ? 'V' : '-',
10013 psr & CPSR_T ? 'T' : 'A',
10014 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
10016 if (flags & CPU_DUMP_FPU) {
10017 int numvfpregs = 0;
10018 if (arm_feature(env, ARM_FEATURE_VFP)) {
10019 numvfpregs += 16;
10021 if (arm_feature(env, ARM_FEATURE_VFP3)) {
10022 numvfpregs += 16;
10024 for (i = 0; i < numvfpregs; i++) {
10025 uint64_t v = float64_val(env->vfp.regs[i]);
10026 cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
10027 i * 2, (uint32_t)v,
10028 i * 2 + 1, (uint32_t)(v >> 32),
10029 i, v);
10031 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
10035 void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, int pc_pos)
10037 env->regs[15] = gen_opc_pc[pc_pos];
10038 env->condexec_bits = gen_opc_condexec_bits[pc_pos];