/*
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2005-2007 CodeSourcery
 *  Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#define ENABLE_ARCH_4T    arm_feature(env, ARM_FEATURE_V4T)
#define ENABLE_ARCH_5     arm_feature(env, ARM_FEATURE_V5)
/* currently all emulated v5 cores are also v5TE, so don't bother */
#define ENABLE_ARCH_5TE   arm_feature(env, ARM_FEATURE_V5)
#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_feature(env, ARM_FEATURE_V7)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
/* internal defines */
typedef struct DisasContext {
    target_ulong pc;
    int is_jmp;
    /* Nonzero if this instruction has been conditionally skipped. */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped. */
    int condlabel;
    /* Thumb-2 conditional execution bits. */
    uint32_t condexec_mask;
    uint32_t condexec_cond;
    struct TranslationBlock *tb;
    int singlestep_enabled;
    int thumb;
    int bswap_code;
#if !defined(CONFIG_USER_ONLY)
    int user;
#endif
    int vfp_enabled;
    int vec_len;
    int vec_stride;
} DisasContext;

static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif
/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated.  */
#define DISAS_WFI 4
#define DISAS_SWI 5

static TCGv_ptr cpu_env;
/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
static TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
static TCGv_i32 cpu_exclusive_addr;
static TCGv_i32 cpu_exclusive_val;
static TCGv_i32 cpu_exclusive_high;
#ifdef CONFIG_USER_ONLY
static TCGv_i32 cpu_exclusive_test;
static TCGv_i32 cpu_exclusive_info;
#endif

/* FIXME:  These should be removed.  */
static TCGv cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "gen-icount.h"

static const char *regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };

/* initialize TCG globals.  */
void arm_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                          offsetof(CPUARMState, regs[i]),
                                          regnames[i]);
    }
    cpu_CF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, CF), "CF");
    cpu_NF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, NF), "NF");
    cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
    cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");

    cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_val), "exclusive_val");
    cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_high), "exclusive_high");
#ifdef CONFIG_USER_ONLY
    cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_test), "exclusive_test");
    cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_info), "exclusive_info");
#endif

#define GEN_HELPER 1
#include "helper.h"
}

static inline TCGv load_cpu_offset(int offset)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))

static inline void store_cpu_offset(TCGv var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUARMState, name))

/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv load_reg(DisasContext *s, int reg)
{
    TCGv tmp = tcg_temp_new_i32();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 15) {
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}

/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)

static inline void gen_set_cpsr(TCGv var, uint32_t mask)
{
    TCGv tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(cpu_env, var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}

/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)

static void gen_exception(int excp)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

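/* Dual 16x16->32 signed multiply: leaves the product of the low halfwords
   of a and b in a, and the product of the high halfwords in b.  */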
static void gen_smul_dual(TCGv a, TCGv b)
{
    TCGv tmp1 = tcg_temp_new_i32();
    TCGv tmp2 = tcg_temp_new_i32();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}

/* Byteswap each halfword.  */
static void gen_rev16(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(var, var);
}

/* Unsigned bitfield extract.  */
static void gen_ubfx(TCGv var, int shift, uint32_t mask)
{
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_andi_i32(var, var, mask);
}

/* Signed bitfield extract.  */
static void gen_sbfx(TCGv var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}

/* Return (b << 32) + a. Mark inputs as dead */
static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_add_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* Return (b << 32) - a. Mark inputs as dead. */
static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_sub_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* FIXME: Most targets have native widening multiplication.
   It would be good to use that instead of a full wide multiply.  */
/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp1, a);
    tcg_temp_free_i32(a);
    tcg_gen_extu_i32_i64(tmp2, b);
    tcg_temp_free_i32(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}

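/* Signed 32x32->64 multiply.  Marks inputs as dead.  */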
static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    tcg_temp_free_i32(a);
    tcg_gen_ext_i32_i64(tmp2, b);
    tcg_temp_free_i32(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}

/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */
static void gen_add16(TCGv t0, TCGv t1)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(t1);
}

/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv var)
{
    tcg_gen_shri_i32(cpu_CF, var, 31);
}

/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv var)
{
    tcg_gen_mov_i32(cpu_NF, var);
    tcg_gen_mov_i32(cpu_ZF, var);
}

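/* t0 += t1 + CF.  */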
static void gen_adc(TCGv t0, TCGv t1)
{
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_add_i32(t0, t0, cpu_CF);
}

/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
{
    tcg_gen_add_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
}

/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
{
    tcg_gen_sub_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
    tcg_gen_subi_i32(dest, dest, 1);
}

/* dest = T0 + T1. Compute C, N, V and Z flags */
static void gen_add_CC(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(cpu_NF, t0, t1);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_setcond_i32(TCG_COND_LTU, cpu_CF, cpu_NF, t0);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 - T1. Compute C, N, V and Z flags */
static void gen_sub_CC(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_sub_i32(cpu_NF, t0, t1);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

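/* Variable shift by register.  ARM shifts by 32 or more yield zero, so when
   the shift count in t1 (masked to 8 bits) exceeds 31 the shifted operand is
   replaced with zero before the 5-bit TCG shift is applied.  */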
#define GEN_SHIFT(name) \
static void gen_##name(TCGv dest, TCGv t0, TCGv t1) \
{ \
    TCGv tmp1, tmp2, tmp3; \
    tmp1 = tcg_temp_new_i32(); \
    tcg_gen_andi_i32(tmp1, t1, 0xff); \
    tmp2 = tcg_const_i32(0); \
    tmp3 = tcg_const_i32(0x1f); \
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
    tcg_temp_free_i32(tmp3); \
    tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
    tcg_gen_##name##_i32(dest, tmp2, tmp1); \
    tcg_temp_free_i32(tmp2); \
    tcg_temp_free_i32(tmp1); \
}
GEN_SHIFT(shl)
GEN_SHIFT(shr)
#undef GEN_SHIFT

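/* Arithmetic shift right by register: shift counts above 31 are clamped
   to 31, matching the ARM ASR semantics.  */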
static void gen_sar(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp1, tmp2;
    tmp1 = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp1, t1, 0xff);
    tmp2 = tcg_const_i32(0x1f);
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sar_i32(dest, t0, tmp1);
    tcg_temp_free_i32(tmp1);
}

/* FIXME:  Implement this natively.  */
#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)

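/* Set CF to bit 'shift' of var, i.e. the shifter carry out.  */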
static void shifter_out_im(TCGv var, int shift)
{
    if (shift == 0) {
        tcg_gen_andi_i32(cpu_CF, var, 1);
    } else {
        tcg_gen_shri_i32(cpu_CF, var, shift);
        if (shift != 31) {
            tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
        }
    }
}

/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(cpu_CF, var, 31);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift); break;
        } else {
            TCGv tmp = tcg_temp_new_i32();
            tcg_gen_shli_i32(tmp, cpu_CF, 31);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_or_i32(var, var, tmp);
            tcg_temp_free_i32(tmp);
        }
    }
}

static inline void gen_arm_shift_reg(TCGv var, int shiftop,
                                     TCGv shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
        case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
        case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
        case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0:
            gen_shl(var, var, shift);
            break;
        case 1:
            gen_shr(var, var, shift);
            break;
        case 2:
            gen_sar(var, var, shift);
            break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
            tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    tcg_temp_free_i32(shift);
}

#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.  */
#define PAS_OP(pfx) \
    switch (op1) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op2) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

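/* Branch to 'label' if ARM condition code 'cc' holds, using the cached
   NZCV flag variables.  */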
static void gen_test_cc(int cc, int label)
{
    TCGv tmp;
    int inv;

    switch (cc) {
    case 0: /* eq: Z */
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
        break;
    case 1: /* ne: !Z */
        tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
        break;
    case 2: /* cs: C */
        tcg_gen_brcondi_i32(TCG_COND_NE, cpu_CF, 0, label);
        break;
    case 3: /* cc: !C */
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
        break;
    case 4: /* mi: N */
        tcg_gen_brcondi_i32(TCG_COND_LT, cpu_NF, 0, label);
        break;
    case 5: /* pl: !N */
        tcg_gen_brcondi_i32(TCG_COND_GE, cpu_NF, 0, label);
        break;
    case 6: /* vs: V */
        tcg_gen_brcondi_i32(TCG_COND_LT, cpu_VF, 0, label);
        break;
    case 7: /* vc: !V */
        tcg_gen_brcondi_i32(TCG_COND_GE, cpu_VF, 0, label);
        break;
    case 8: /* hi: C && !Z */
        inv = gen_new_label();
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, inv);
        tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
        gen_set_label(inv);
        break;
    case 9: /* ls: !C || Z */
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
        break;
    case 10: /* ge: N == V -> N ^ V == 0 */
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        break;
    case 11: /* lt: N != V -> N ^ V != 0 */
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        break;
    case 12: /* gt: !Z && N == V */
        inv = gen_new_label();
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, inv);
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        gen_set_label(inv);
        break;
    case 13: /* le: Z || N != V */
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        break;
    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }
}

static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};

/* Set PC and Thumb state from an immediate address.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv tmp;

    s->is_jmp = DISAS_UPDATE;
    if (s->thumb != (addr & 1)) {
        tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}

/* Set PC and Thumb state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv var)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}

/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above. The source must be a temporary
   and will be marked as dead. */
static inline void store_reg_bx(CPUARMState *env, DisasContext *s,
                                int reg, TCGv var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

/* Variant of store_reg which uses branch&exchange logic when storing
 * to r15 in ARM architecture v5T and above. This is used for storing
 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
static inline void store_reg_from_load(CPUARMState *env, DisasContext *s,
                                       int reg, TCGv var)
{
    if (reg == 15 && ENABLE_ARCH_5) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

static inline TCGv gen_ld8s(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld8s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld8u(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld8u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16s(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld16s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16u(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld16u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld32(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, addr, index);
    return tmp;
}
static inline TCGv_i64 gen_ld64(TCGv addr, int index)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(tmp, addr, index);
    return tmp;
}
static inline void gen_st8(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st8(val, addr, index);
    tcg_temp_free_i32(val);
}
static inline void gen_st16(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st16(val, addr, index);
    tcg_temp_free_i32(val);
}
static inline void gen_st32(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st32(val, addr, index);
    tcg_temp_free_i32(val);
}
static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
{
    tcg_gen_qemu_st64(val, addr, index);
    tcg_temp_free_i64(val);
}

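/* Set the PC to an immediate value.  */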
static inline void gen_set_pc_im(uint32_t val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}

/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->is_jmp = DISAS_UPDATE;
}

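/* Add the register- or immediate-offset part of an ARM "addressing mode 2"
   load/store instruction to 'var'.  */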
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv var)
{
    int val, rm, shift, shiftop;
    TCGv offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = insn & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}

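/* Add the "addressing mode 3" (halfword/doubleword) offset of a load/store
   instruction to 'var'; 'extra' is an additional immediate added to the
   offset in both the immediate and register forms.  */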
static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv var)
{
    int val, rm;
    TCGv offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = insn & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}

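/* Return a TCG pointer to the float_status to use: the Neon "standard FP
   status" when 'neon' is nonzero, otherwise the global VFP fp_status.  */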
static TCGv_ptr get_fpstatus_ptr(int neon)
{
    TCGv_ptr statusptr = tcg_temp_new_ptr();
    int offset;
    if (neon) {
        offset = offsetof(CPUARMState, vfp.standard_fp_status);
    } else {
        offset = offsetof(CPUARMState, vfp.fp_status);
    }
    tcg_gen_addi_ptr(statusptr, cpu_env, offset);
    return statusptr;
}

#define VFP_OP2(name) \
static inline void gen_vfp_##name(int dp) \
{ \
    TCGv_ptr fpst = get_fpstatus_ptr(0); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
    } \
    tcg_temp_free_ptr(fpst); \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2

static inline void gen_vfp_F1_mul(int dp)
{
    /* Like gen_vfp_mul() but put result in F1 */
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    if (dp) {
        gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
    } else {
        gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
    }
    tcg_temp_free_ptr(fpst);
}

static inline void gen_vfp_F1_neg(int dp)
{
    /* Like gen_vfp_neg() but put result in F1 */
    if (dp) {
        gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
    } else {
        gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
    }
}

static inline void gen_vfp_abs(int dp)
{
    if (dp)
        gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_neg(int dp)
{
    if (dp)
        gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_sqrt(int dp)
{
    if (dp)
        gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_cmp(int dp)
{
    if (dp)
        gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_cmpe(int dp)
{
    if (dp)
        gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_F1_ld0(int dp)
{
    if (dp)
        tcg_gen_movi_i64(cpu_F1d, 0);
    else
        tcg_gen_movi_i32(cpu_F1s, 0);
}

#define VFP_GEN_ITOF(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_ITOF(uito)
VFP_GEN_ITOF(sito)
#undef VFP_GEN_ITOF

#define VFP_GEN_FTOI(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_FTOI(toui)
VFP_GEN_FTOI(touiz)
VFP_GEN_FTOI(tosi)
VFP_GEN_FTOI(tosiz)
#undef VFP_GEN_FTOI

#define VFP_GEN_FIX(name) \
static inline void gen_vfp_##name(int dp, int shift, int neon) \
{ \
    TCGv tmp_shift = tcg_const_i32(shift); \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, statusptr); \
    } \
    tcg_temp_free_i32(tmp_shift); \
    tcg_temp_free_ptr(statusptr); \
}
VFP_GEN_FIX(tosh)
VFP_GEN_FIX(tosl)
VFP_GEN_FIX(touh)
VFP_GEN_FIX(toul)
VFP_GEN_FIX(shto)
VFP_GEN_FIX(slto)
VFP_GEN_FIX(uhto)
VFP_GEN_FIX(ulto)
#undef VFP_GEN_FIX

static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
{
    if (dp)
        tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
}

static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
{
    if (dp)
        tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
}

static inline long
vfp_reg_offset (int dp, int reg)
{
    if (dp)
        return offsetof(CPUARMState, vfp.regs[reg]);
    else if (reg & 1) {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.upper);
    } else {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.lower);
    }
}

/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}

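/* Load one 32-bit pass of a Neon register into a new temporary.  */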
static TCGv neon_load_reg(int reg, int pass)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}

static void neon_store_reg(int reg, int pass, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}

static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64

static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

#define ARM_CP_RW_BIT   (1 << 20)

static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline TCGv iwmmxt_load_creg(int reg)
{
    TCGv var = tcg_temp_new_i32();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    return var;
}

static inline void iwmmxt_store_creg(int reg, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}

static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}

#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}

IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)

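/* Set bit 1 of wCon (the "wR register file updated" flag).  */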
static void gen_op_iwmmxt_set_mup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

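/* Set bit 0 of wCon (the "control register file updated" flag).  */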
static void gen_op_iwmmxt_set_cup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv tmp = tcg_temp_new_i32();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}

static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}

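/* Compute the effective address of an iwMMXt load/store, leaving it in
   'dest' and handling pre/post-indexed writeback.  Returns nonzero for an
   unpredictable encoding.  */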
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
{
    int rd;
    uint32_t offset;
    TCGv tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}

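/* Fetch the shift/rotate amount for an iwMMXt shift instruction, either from
   a wCGR control register or from the low word of a wR register, masked with
   'mask'.  Returns nonzero for an invalid encoding.  */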
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        }
        tmp = iwmmxt_load_creg(rd);
    } else {
        tmp = tcg_temp_new_i32();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_trunc_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    tcg_temp_free_i32(tmp);
    return 0;
}

/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction).  */
static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
{
    int rd, wrd;
    int rdhi, rdlo, rd0, rd1, i;
    TCGv addr;
    TCGv tmp, tmp2, tmp3;

    if ((insn & 0x0e000e00) == 0x0c000000) {
        if ((insn & 0x0fe00ff0) == 0x0c400000) {
            wrd = insn & 0xf;
            rdlo = (insn >> 12) & 0xf;
            rdhi = (insn >> 16) & 0xf;
            if (insn & ARM_CP_RW_BIT) {  /* TMRRC */
                iwmmxt_load_reg(cpu_V0, wrd);
                tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
            } else {  /* TMCRR */
                tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
                iwmmxt_store_reg(cpu_V0, wrd);
                gen_op_iwmmxt_set_mup();
            }
            return 0;
        }

        wrd = (insn >> 12) & 0xf;
        addr = tcg_temp_new_i32();
        if (gen_iwmmxt_address(s, insn, addr)) {
            tcg_temp_free_i32(addr);
            return 1;
        }
        if (insn & ARM_CP_RW_BIT) {
            if ((insn >> 28) == 0xf) {  /* WLDRW wCx */
                tmp = tcg_temp_new_i32();
                tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
                iwmmxt_store_creg(wrd, tmp);
            } else {
                i = 1;
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {  /* WLDRD */
                        tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
                        i = 0;
                    } else {  /* WLDRW wRd */
                        tmp = gen_ld32(addr, IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) {  /* WLDRH */
                        tmp = gen_ld16u(addr, IS_USER(s));
                    } else {  /* WLDRB */
                        tmp = gen_ld8u(addr, IS_USER(s));
                    }
                }
                if (i) {
                    tcg_gen_extu_i32_i64(cpu_M0, tmp);
                    tcg_temp_free_i32(tmp);
                }
                gen_op_iwmmxt_movq_wRn_M0(wrd);
            }
        } else {
            if ((insn >> 28) == 0xf) {  /* WSTRW wCx */
                tmp = iwmmxt_load_creg(wrd);
                gen_st32(tmp, addr, IS_USER(s));
            } else {
                gen_op_iwmmxt_movq_M0_wRn(wrd);
                tmp = tcg_temp_new_i32();
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {  /* WSTRD */
                        tcg_temp_free_i32(tmp);
                        tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
                    } else {  /* WSTRW wRd */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st32(tmp, addr, IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) {  /* WSTRH */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st16(tmp, addr, IS_USER(s));
                    } else {  /* WSTRB */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st8(tmp, addr, IS_USER(s));
                    }
                }
            }
        }
        tcg_temp_free_i32(addr);
        return 0;
    }
    if ((insn & 0x0f000000) != 0x0e000000)
        return 1;

    switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
    case 0x000:  /* WOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_orq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x011:  /* TMCR */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        switch (wrd) {
        case ARM_IWMMXT_wCID:
        case ARM_IWMMXT_wCASF:
            break;
        case ARM_IWMMXT_wCon:
            gen_op_iwmmxt_set_cup();
            /* Fall through.  */
        case ARM_IWMMXT_wCSSF:
            tmp = iwmmxt_load_creg(wrd);
            tmp2 = load_reg(s, rd);
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            iwmmxt_store_creg(wrd, tmp);
            break;
        case ARM_IWMMXT_wCGR0:
        case ARM_IWMMXT_wCGR1:
        case ARM_IWMMXT_wCGR2:
        case ARM_IWMMXT_wCGR3:
            gen_op_iwmmxt_set_cup();
            tmp = load_reg(s, rd);
            iwmmxt_store_creg(wrd, tmp);
            break;
        default:
            return 1;
        }
        break;
    case 0x100:  /* WXOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_xorq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x111:  /* TMRC */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = iwmmxt_load_creg(wrd);
        store_reg(s, rd, tmp);
        break;
    case 0x300:  /* WANDN */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tcg_gen_neg_i64(cpu_M0, cpu_M0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x200:  /* WAND */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x810: case 0xa10:  /* WMADD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_maddsq_M0_wRn(rd1);
        else
            gen_op_iwmmxt_madduq_M0_wRn(rd1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x10e: case 0x50e: case 0x90e: case 0xd0e:  /* WUNPCKIL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackll_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x10c: case 0x50c: case 0x90c: case 0xd0c:  /* WUNPCKIH */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x012: case 0x112: case 0x412: case 0x512:  /* WSAD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22))
            gen_op_iwmmxt_sadw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_sadb_M0_wRn(rd1);
        if (!(insn & (1 << 20)))
            gen_op_iwmmxt_addl_M0_wRn(wrd);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x010: case 0x110: case 0x210: case 0x310:  /* WMUL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_mulshw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mulslw_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_muluhw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mululw_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x410: case 0x510: case 0x610: case 0x710:  /* WMAC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_macsw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_macuw_M0_wRn(rd1);
        if (!(insn & (1 << 20))) {
            iwmmxt_load_reg(cpu_V1, wrd);
            tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x006: case 0x406: case 0x806: case 0xc06:  /* WCMPEQ */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x800: case 0x900: case 0xc00: case 0xd00:  /* WAVG2 */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgw1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgw0_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgb1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgb0_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x802: case 0x902: case 0xa02: case 0xb02:  /* WALIGNR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
        tcg_gen_andi_i32(tmp, tmp, 7);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x601: case 0x605: case 0x609: case 0x60d:  /* TINSR */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        switch ((insn >> 6) & 3) {
        case 0:
            tmp2 = tcg_const_i32(0xff);
            tmp3 = tcg_const_i32((insn & 7) << 3);
            break;
        case 1:
            tmp2 = tcg_const_i32(0xffff);
            tmp3 = tcg_const_i32((insn & 3) << 4);
            break;
        case 2:
            tmp2 = tcg_const_i32(0xffffffff);
            tmp3 = tcg_const_i32((insn & 1) << 5);
            break;
        default:
            TCGV_UNUSED(tmp2);
            TCGV_UNUSED(tmp3);
        }
        gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
        tcg_temp_free(tmp3);
        tcg_temp_free(tmp2);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x107: case 0x507: case 0x907: case 0xd07:  /* TEXTRM */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        if (rd == 15 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext8s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xff);
            }
            break;
        case 1:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext16s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xffff);
            }
            break;
        case 2:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x117: case 0x517: case 0x917: case 0xd17:  /* TEXTRC */
        if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
            break;
        case 1:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
            break;
        case 2:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
            break;
        }
        tcg_gen_shli_i32(tmp, tmp, 28);
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp);
        break;
    case 0x401: case 0x405: case 0x409: case 0x40d:  /* TBCST */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        switch ((insn >> 6) & 3) {
        case 0:
            gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
            break;
        case 1:
            gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x113: case 0x513: case 0x913: case 0xd13:  /* TANDC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = tcg_temp_new_i32();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_and_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        break;
    case 0x01c: case 0x41c: case 0x81c: case 0xc1c:  /* WACC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x115: case 0x515: case 0x915: case 0xd15:  /* TORC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = tcg_temp_new_i32();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_or_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        break;
    case 0x103: case 0x503: case 0x903: case 0xd03:  /* TMOVMSK */
        rd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_msbb(tmp, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_msbw(tmp, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_msbl(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x106: case 0x306: case 0x506: case 0x706:  /* WCMPGT */
    case 0x906: case 0xb06: case 0xd06: case 0xf06:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00e: case 0x20e: case 0x40e: case 0x60e:  /* WUNPCKEL */
    case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsb_M0();
            else
                gen_op_iwmmxt_unpacklub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsw_M0();
            else
                gen_op_iwmmxt_unpackluw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsl_M0();
            else
                gen_op_iwmmxt_unpacklul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00c: case 0x20c: case 0x40c: case 0x60c:  /* WUNPCKEH */
    case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsb_M0();
            else
                gen_op_iwmmxt_unpackhub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsw_M0();
            else
                gen_op_iwmmxt_unpackhuw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsl_M0();
            else
                gen_op_iwmmxt_unpackhul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x204: case 0x604: case 0xa04: case 0xe04:  /* WSRL */
    case 0x214: case 0x614: case 0xa14: case 0xe14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x004: case 0x404: case 0x804: case 0xc04:  /* WSRA */
    case 0x014: case 0x414: case 0x814: case 0xc14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x104: case 0x504: case 0x904: case 0xd04:  /* WSLL */
    case 0x114: case 0x514: case 0x914: case 0xd14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x304: case 0x704: case 0xb04: case 0xf04:  /* WROR */
    case 0x314: case 0x714: case 0xb14: case 0xf14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 1:
            if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x116: case 0x316: case 0x516: case 0x716:  /* WMIN */
    case 0x916: case 0xb16: case 0xd16: case 0xf16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x016: case 0x216: case 0x416: case 0x616:  /* WMAX */
    case 0x816: case 0xa16: case 0xc16: case 0xe16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x002: case 0x102: case 0x202: case 0x302:  /* WALIGNI */
    case 0x402: case 0x502: case 0x602: case 0x702:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_const_i32((insn >> 20) & 3);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x01a: case 0x11a: case 0x21a: case 0x31a:  /* WSUB */
    case 0x41a: case 0x51a: case 0x61a: case 0x71a:
    case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
    case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 20) & 0xf) {
        case 0x0:
            gen_op_iwmmxt_subnb_M0_wRn(rd1);
            break;
        case 0x1:
            gen_op_iwmmxt_subub_M0_wRn(rd1);
            break;
        case 0x3:
            gen_op_iwmmxt_subsb_M0_wRn(rd1);
            break;
        case 0x4:
            gen_op_iwmmxt_subnw_M0_wRn(rd1);
            break;
        case 0x5:
            gen_op_iwmmxt_subuw_M0_wRn(rd1);
            break;
        case 0x7:
            gen_op_iwmmxt_subsw_M0_wRn(rd1);
            break;
        case 0x8:
            gen_op_iwmmxt_subnl_M0_wRn(rd1);
            break;
        case 0x9:
            gen_op_iwmmxt_subul_M0_wRn(rd1);
            break;
        case 0xb:
            gen_op_iwmmxt_subsl_M0_wRn(rd1);
            break;
        default:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x01e: case 0x11e: case 0x21e: case 0x31e:  /* WSHUFH */
    case 0x41e: case 0x51e: case 0x61e: case 0x71e:
    case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
    case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
        gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x018: case 0x118: case 0x218: case 0x318:  /* WADD */
    case 0x418: case 0x518: case 0x618: case 0x718:
    case 0x818: case 0x918: case 0xa18: case 0xb18:
    case 0xc18: case 0xd18: case 0xe18: case 0xf18:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 20) & 0xf) {
        case 0x0:
            gen_op_iwmmxt_addnb_M0_wRn(rd1);
            break;
        case 0x1:
            gen_op_iwmmxt_addub_M0_wRn(rd1);
            break;
        case 0x3:
            gen_op_iwmmxt_addsb_M0_wRn(rd1);
            break;
        case 0x4:
            gen_op_iwmmxt_addnw_M0_wRn(rd1);
            break;
        case 0x5:
            gen_op_iwmmxt_adduw_M0_wRn(rd1);
            break;
        case 0x7:
            gen_op_iwmmxt_addsw_M0_wRn(rd1);
            break;
        case 0x8:
            gen_op_iwmmxt_addnl_M0_wRn(rd1);
            break;
        case 0x9:
            gen_op_iwmmxt_addul_M0_wRn(rd1);
            break;
        case 0xb:
            gen_op_iwmmxt_addsl_M0_wRn(rd1);
            break;
        default:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x008: case 0x108: case 0x208: case 0x308:  /* WPACK */
    case 0x408: case 0x508: case 0x608: case 0x708:
    case 0x808: case 0x908: case 0xa08: case 0xb08:
    case 0xc08: case 0xd08: case 0xe08: case 0xf08:
        if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packul_M0_wRn(rd1);
            break;
        case 3:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsq_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packuq_M0_wRn(rd1);
            break;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x201: case 0x203: case 0x205: case 0x207:
    case 0x209: case 0x20b: case 0x20d: case 0x20f:
    case 0x211: case 0x213: case 0x215: case 0x217:
    case 0x219: case 0x21b: case 0x21d: case 0x21f:
        wrd = (insn >> 5) & 0xf;
        rd0 = (insn >> 12) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        if (rd0 == 0xf || rd1 == 0xf)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = load_reg(s, rd0);
        tmp2 = load_reg(s, rd1);
        switch ((insn >> 16) & 0xf) {
        case 0x0:  /* TMIA */
            gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0x8:  /* TMIAPH */
            gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0xc: case 0xd: case 0xe: case 0xf:  /* TMIAxy */
            if (insn & (1 << 16))
                tcg_gen_shri_i32(tmp, tmp, 16);
            if (insn & (1 << 17))
                tcg_gen_shri_i32(tmp2, tmp2, 16);
            gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        default:
            tcg_temp_free_i32(tmp2);
            tcg_temp_free_i32(tmp);
            return 1;
        }
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    default:
        return 1;
    }

    return 0;
}

/* Disassemble an XScale DSP instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction).  */
static int disas_dsp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
{
    int acc, rd0, rd1, rdhi, rdlo;
    TCGv tmp, tmp2;

    if ((insn & 0x0ff00f10) == 0x0e200010) {
        /* Multiply with Internal Accumulate Format */
        rd0 = (insn >> 12) & 0xf;
        rd1 = insn & 0xf;
        acc = (insn >> 5) & 7;

        if (acc != 0)
            return 1;

        tmp = load_reg(s, rd0);
        tmp2 = load_reg(s, rd1);
        switch ((insn >> 16) & 0xf) {
        case 0x0:  /* MIA */
            gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0x8:  /* MIAPH */
            gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0xc:  /* MIABB */
        case 0xd:  /* MIABT */
        case 0xe:  /* MIATB */
        case 0xf:  /* MIATT */
            if (insn & (1 << 16))
                tcg_gen_shri_i32(tmp, tmp, 16);
            if (insn & (1 << 17))
                tcg_gen_shri_i32(tmp2, tmp2, 16);
            gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        default:
            return 1;
        }
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);

        gen_op_iwmmxt_movq_wRn_M0(acc);
        return 0;
    }

    if ((insn & 0x0fe00ff8) == 0x0c400000) {
        /* Internal Accumulator Access Format */
        rdhi = (insn >> 16) & 0xf;
        rdlo = (insn >> 12) & 0xf;
        acc = insn & 7;

        if (acc != 0)
            return 1;

        if (insn & ARM_CP_RW_BIT) {  /* MRA */
            iwmmxt_load_reg(cpu_V0, acc);
            tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
            tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
            tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
            tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
        } else {  /* MAR */
            tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
            iwmmxt_store_reg(cpu_V0, acc);
        }
        return 0;
    }

    return 1;
}

#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_feature(env, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    } \
} while (0)
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
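/* Worked example of the split encodings above (illustrative values only):
 * for an encoding with Vd = 0x3 in bits [15:12] and the D bit set at bit 22,
 * VFP_SREG_D() yields (0x3 << 1) | 1 = 7, i.e. s7, while on a VFP3 core
 * VFP_DREG_D() yields 0x3 | 0x10 = 0x13, i.e. d19.  Pre-VFP3 cores only have
 * 16 double registers, so the same encoding is rejected as undefined.
 */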
/* Move between integer and VFP cores.  */
static TCGv gen_vfp_mrs(void)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_mov_i32(tmp, cpu_F0s);
    return tmp;
}

static void gen_vfp_msr(TCGv tmp)
{
    tcg_gen_mov_i32(cpu_F0s, tmp);
    tcg_temp_free_i32(tmp);
}
static void gen_neon_dup_u8(TCGv var, int shift)
{
    TCGv tmp = tcg_temp_new_i32();
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_ext8u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 8);
    tcg_gen_or_i32(var, var, tmp);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
static void gen_neon_dup_low16(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
static void gen_neon_dup_high16(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(var, var, 0xffff0000);
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
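/* For reference (illustrative values): gen_neon_dup_u8(var, 0) turns
 * 0x000000ab into 0xabababab, gen_neon_dup_low16() turns 0x0000beef into
 * 0xbeefbeef, and gen_neon_dup_high16() turns 0xdead0000 into 0xdeaddead.
 */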
static TCGv gen_load_and_replicate(DisasContext *s, TCGv addr, int size)
{
    /* Load a single Neon element and replicate into a 32 bit TCG reg */
    TCGv tmp;
    switch (size) {
    case 0:
        tmp = gen_ld8u(addr, IS_USER(s));
        gen_neon_dup_u8(tmp, 0);
        break;
    case 1:
        tmp = gen_ld16u(addr, IS_USER(s));
        gen_neon_dup_low16(tmp);
        break;
    case 2:
        tmp = gen_ld32(addr, IS_USER(s));
        break;
    default: /* Avoid compiler warnings.  */
        abort();
    }
    return tmp;
}
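/* gen_load_and_replicate() is used by the VLD "to all lanes" forms further
 * down: one element is loaded and broadcast so it can then be written to
 * every lane of the destination D registers with plain 32-bit stores.
 */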
/* Disassemble a VFP instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction).  */
static int disas_vfp_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
{
    uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2562 if (!arm_feature(env
, ARM_FEATURE_VFP
))
2565 if (!s
->vfp_enabled
) {
2566 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
2567 if ((insn
& 0x0fe00fff) != 0x0ee00a10)
2569 rn
= (insn
>> 16) & 0xf;
2570 if (rn
!= ARM_VFP_FPSID
&& rn
!= ARM_VFP_FPEXC
2571 && rn
!= ARM_VFP_MVFR1
&& rn
!= ARM_VFP_MVFR0
)
2574 dp
= ((insn
& 0xf00) == 0xb00);
2575 switch ((insn
>> 24) & 0xf) {
2577 if (insn
& (1 << 4)) {
2578 /* single register transfer */
2579 rd
= (insn
>> 12) & 0xf;
2584 VFP_DREG_N(rn
, insn
);
2587 if (insn
& 0x00c00060
2588 && !arm_feature(env
, ARM_FEATURE_NEON
))
2591 pass
= (insn
>> 21) & 1;
2592 if (insn
& (1 << 22)) {
2594 offset
= ((insn
>> 5) & 3) * 8;
2595 } else if (insn
& (1 << 5)) {
2597 offset
= (insn
& (1 << 6)) ? 16 : 0;
2602 if (insn
& ARM_CP_RW_BIT
) {
2604 tmp
= neon_load_reg(rn
, pass
);
2608 tcg_gen_shri_i32(tmp
, tmp
, offset
);
2609 if (insn
& (1 << 23))
2615 if (insn
& (1 << 23)) {
2617 tcg_gen_shri_i32(tmp
, tmp
, 16);
2623 tcg_gen_sari_i32(tmp
, tmp
, 16);
2632 store_reg(s
, rd
, tmp
);
2635 tmp
= load_reg(s
, rd
);
2636 if (insn
& (1 << 23)) {
2639 gen_neon_dup_u8(tmp
, 0);
2640 } else if (size
== 1) {
2641 gen_neon_dup_low16(tmp
);
2643 for (n
= 0; n
<= pass
* 2; n
++) {
2644 tmp2
= tcg_temp_new_i32();
2645 tcg_gen_mov_i32(tmp2
, tmp
);
2646 neon_store_reg(rn
, n
, tmp2
);
2648 neon_store_reg(rn
, n
, tmp
);
2653 tmp2
= neon_load_reg(rn
, pass
);
2654 tcg_gen_deposit_i32(tmp
, tmp2
, tmp
, offset
, 8);
2655 tcg_temp_free_i32(tmp2
);
2658 tmp2
= neon_load_reg(rn
, pass
);
2659 tcg_gen_deposit_i32(tmp
, tmp2
, tmp
, offset
, 16);
2660 tcg_temp_free_i32(tmp2
);
2665 neon_store_reg(rn
, pass
, tmp
);
2669 if ((insn
& 0x6f) != 0x00)
2671 rn
= VFP_SREG_N(insn
);
2672 if (insn
& ARM_CP_RW_BIT
) {
2674 if (insn
& (1 << 21)) {
2675 /* system register */
2680 /* VFP2 allows access to FSID from userspace.
2681 VFP3 restricts all id registers to privileged
2684 && arm_feature(env
, ARM_FEATURE_VFP3
))
2686 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
2691 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
2693 case ARM_VFP_FPINST
:
2694 case ARM_VFP_FPINST2
:
2695 /* Not present in VFP3. */
2697 || arm_feature(env
, ARM_FEATURE_VFP3
))
2699 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
2703 tmp
= load_cpu_field(vfp
.xregs
[ARM_VFP_FPSCR
]);
2704 tcg_gen_andi_i32(tmp
, tmp
, 0xf0000000);
2706 tmp
= tcg_temp_new_i32();
2707 gen_helper_vfp_get_fpscr(tmp
, cpu_env
);
2713 || !arm_feature(env
, ARM_FEATURE_MVFR
))
2715 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
2721 gen_mov_F0_vreg(0, rn
);
2722 tmp
= gen_vfp_mrs();
2725 /* Set the 4 flag bits in the CPSR. */
2727 tcg_temp_free_i32(tmp
);
2729 store_reg(s
, rd
, tmp
);
2733 tmp
= load_reg(s
, rd
);
2734 if (insn
& (1 << 21)) {
2736 /* system register */
2741 /* Writes are ignored. */
2744 gen_helper_vfp_set_fpscr(cpu_env
, tmp
);
2745 tcg_temp_free_i32(tmp
);
2751 /* TODO: VFP subarchitecture support.
2752 * For now, keep the EN bit only */
2753 tcg_gen_andi_i32(tmp
, tmp
, 1 << 30);
2754 store_cpu_field(tmp
, vfp
.xregs
[rn
]);
2757 case ARM_VFP_FPINST
:
2758 case ARM_VFP_FPINST2
:
2759 store_cpu_field(tmp
, vfp
.xregs
[rn
]);
2766 gen_mov_vreg_F0(0, rn
);
2771 /* data processing */
2772 /* The opcode is in bits 23, 21, 20 and 6. */
2773 op
= ((insn
>> 20) & 8) | ((insn
>> 19) & 6) | ((insn
>> 6) & 1);
2777 rn
= ((insn
>> 15) & 0x1e) | ((insn
>> 7) & 1);
2779 /* rn is register number */
2780 VFP_DREG_N(rn
, insn
);
2783 if (op
== 15 && (rn
== 15 || ((rn
& 0x1c) == 0x18))) {
2784 /* Integer or single precision destination. */
2785 rd
= VFP_SREG_D(insn
);
2787 VFP_DREG_D(rd
, insn
);
2790 (((rn
& 0x1c) == 0x10) || ((rn
& 0x14) == 0x14))) {
2791 /* VCVT from int is always from S reg regardless of dp bit.
2792 * VCVT with immediate frac_bits has same format as SREG_M
2794 rm
= VFP_SREG_M(insn
);
2796 VFP_DREG_M(rm
, insn
);
2799 rn
= VFP_SREG_N(insn
);
2800 if (op
== 15 && rn
== 15) {
2801 /* Double precision destination. */
2802 VFP_DREG_D(rd
, insn
);
2804 rd
= VFP_SREG_D(insn
);
2806 /* NB that we implicitly rely on the encoding for the frac_bits
2807 * in VCVT of fixed to float being the same as that of an SREG_M
2809 rm
= VFP_SREG_M(insn
);
2812 veclen
= s
->vec_len
;
2813 if (op
== 15 && rn
> 3)
2816 /* Shut up compiler warnings. */
2827 /* Figure out what type of vector operation this is. */
2828 if ((rd
& bank_mask
) == 0) {
2833 delta_d
= (s
->vec_stride
>> 1) + 1;
2835 delta_d
= s
->vec_stride
+ 1;
2837 if ((rm
& bank_mask
) == 0) {
2838 /* mixed scalar/vector */
2847 /* Load the initial operands. */
2852 /* Integer source */
2853 gen_mov_F0_vreg(0, rm
);
2858 gen_mov_F0_vreg(dp
, rd
);
2859 gen_mov_F1_vreg(dp
, rm
);
2863 /* Compare with zero */
2864 gen_mov_F0_vreg(dp
, rd
);
2875 /* Source and destination the same. */
2876 gen_mov_F0_vreg(dp
, rd
);
2882 /* VCVTB, VCVTT: only present with the halfprec extension,
2883 * UNPREDICTABLE if bit 8 is set (we choose to UNDEF)
2885 if (dp
|| !arm_feature(env
, ARM_FEATURE_VFP_FP16
)) {
2888 /* Otherwise fall through */
2890 /* One source operand. */
2891 gen_mov_F0_vreg(dp
, rm
);
2895 /* Two source operands. */
2896 gen_mov_F0_vreg(dp
, rn
);
2897 gen_mov_F1_vreg(dp
, rm
);
2901 /* Perform the calculation. */
2903 case 0: /* VMLA: fd + (fn * fm) */
2904 /* Note that order of inputs to the add matters for NaNs */
2906 gen_mov_F0_vreg(dp
, rd
);
2909 case 1: /* VMLS: fd + -(fn * fm) */
2912 gen_mov_F0_vreg(dp
, rd
);
2915 case 2: /* VNMLS: -fd + (fn * fm) */
2916 /* Note that it isn't valid to replace (-A + B) with (B - A)
2917 * or similar plausible looking simplifications
2918 * because this will give wrong results for NaNs.
2921 gen_mov_F0_vreg(dp
, rd
);
2925 case 3: /* VNMLA: -fd + -(fn * fm) */
2928 gen_mov_F0_vreg(dp
, rd
);
2932 case 4: /* mul: fn * fm */
2935 case 5: /* nmul: -(fn * fm) */
2939 case 6: /* add: fn + fm */
2942 case 7: /* sub: fn - fm */
2945 case 8: /* div: fn / fm */
2948 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
2949 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
2950 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
2951 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
2952 /* These are fused multiply-add, and must be done as one
2953 * floating point operation with no rounding between the
2954 * multiplication and addition steps.
2955 * NB that doing the negations here as separate steps is
2956 * correct : an input NaN should come out with its sign bit
2957 * flipped if it is a negated-input.
2959 if (!arm_feature(env
, ARM_FEATURE_VFP4
)) {
2967 gen_helper_vfp_negd(cpu_F0d
, cpu_F0d
);
2969 frd
= tcg_temp_new_i64();
2970 tcg_gen_ld_f64(frd
, cpu_env
, vfp_reg_offset(dp
, rd
));
2973 gen_helper_vfp_negd(frd
, frd
);
2975 fpst
= get_fpstatus_ptr(0);
2976 gen_helper_vfp_muladdd(cpu_F0d
, cpu_F0d
,
2977 cpu_F1d
, frd
, fpst
);
2978 tcg_temp_free_ptr(fpst
);
2979 tcg_temp_free_i64(frd
);
2985 gen_helper_vfp_negs(cpu_F0s
, cpu_F0s
);
2987 frd
= tcg_temp_new_i32();
2988 tcg_gen_ld_f32(frd
, cpu_env
, vfp_reg_offset(dp
, rd
));
2990 gen_helper_vfp_negs(frd
, frd
);
2992 fpst
= get_fpstatus_ptr(0);
2993 gen_helper_vfp_muladds(cpu_F0s
, cpu_F0s
,
2994 cpu_F1s
, frd
, fpst
);
2995 tcg_temp_free_ptr(fpst
);
2996 tcg_temp_free_i32(frd
);
2999 case 14: /* fconst */
3000 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3003 n
= (insn
<< 12) & 0x80000000;
3004 i
= ((insn
>> 12) & 0x70) | (insn
& 0xf);
3011 tcg_gen_movi_i64(cpu_F0d
, ((uint64_t)n
) << 32);
3018 tcg_gen_movi_i32(cpu_F0s
, n
);
3021 case 15: /* extension space */
3035 case 4: /* vcvtb.f32.f16 */
3036 tmp
= gen_vfp_mrs();
3037 tcg_gen_ext16u_i32(tmp
, tmp
);
3038 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s
, tmp
, cpu_env
);
3039 tcg_temp_free_i32(tmp
);
3041 case 5: /* vcvtt.f32.f16 */
3042 tmp
= gen_vfp_mrs();
3043 tcg_gen_shri_i32(tmp
, tmp
, 16);
3044 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s
, tmp
, cpu_env
);
3045 tcg_temp_free_i32(tmp
);
3047 case 6: /* vcvtb.f16.f32 */
3048 tmp
= tcg_temp_new_i32();
3049 gen_helper_vfp_fcvt_f32_to_f16(tmp
, cpu_F0s
, cpu_env
);
3050 gen_mov_F0_vreg(0, rd
);
3051 tmp2
= gen_vfp_mrs();
3052 tcg_gen_andi_i32(tmp2
, tmp2
, 0xffff0000);
3053 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
3054 tcg_temp_free_i32(tmp2
);
3057 case 7: /* vcvtt.f16.f32 */
3058 tmp
= tcg_temp_new_i32();
3059 gen_helper_vfp_fcvt_f32_to_f16(tmp
, cpu_F0s
, cpu_env
);
3060 tcg_gen_shli_i32(tmp
, tmp
, 16);
3061 gen_mov_F0_vreg(0, rd
);
3062 tmp2
= gen_vfp_mrs();
3063 tcg_gen_ext16u_i32(tmp2
, tmp2
);
3064 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
3065 tcg_temp_free_i32(tmp2
);
3077 case 11: /* cmpez */
3081 case 15: /* single<->double conversion */
3083 gen_helper_vfp_fcvtsd(cpu_F0s
, cpu_F0d
, cpu_env
);
3085 gen_helper_vfp_fcvtds(cpu_F0d
, cpu_F0s
, cpu_env
);
3087 case 16: /* fuito */
3088 gen_vfp_uito(dp
, 0);
3090 case 17: /* fsito */
3091 gen_vfp_sito(dp
, 0);
3093 case 20: /* fshto */
3094 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3096 gen_vfp_shto(dp
, 16 - rm
, 0);
3098 case 21: /* fslto */
3099 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3101 gen_vfp_slto(dp
, 32 - rm
, 0);
3103 case 22: /* fuhto */
3104 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3106 gen_vfp_uhto(dp
, 16 - rm
, 0);
3108 case 23: /* fulto */
3109 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3111 gen_vfp_ulto(dp
, 32 - rm
, 0);
3113 case 24: /* ftoui */
3114 gen_vfp_toui(dp
, 0);
3116 case 25: /* ftouiz */
3117 gen_vfp_touiz(dp
, 0);
3119 case 26: /* ftosi */
3120 gen_vfp_tosi(dp
, 0);
3122 case 27: /* ftosiz */
3123 gen_vfp_tosiz(dp
, 0);
3125 case 28: /* ftosh */
3126 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3128 gen_vfp_tosh(dp
, 16 - rm
, 0);
3130 case 29: /* ftosl */
3131 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3133 gen_vfp_tosl(dp
, 32 - rm
, 0);
3135 case 30: /* ftouh */
3136 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3138 gen_vfp_touh(dp
, 16 - rm
, 0);
3140 case 31: /* ftoul */
3141 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3143 gen_vfp_toul(dp
, 32 - rm
, 0);
3145 default: /* undefined */
3149 default: /* undefined */
3153 /* Write back the result. */
3154 if (op
== 15 && (rn
>= 8 && rn
<= 11))
3155 ; /* Comparison, do nothing. */
3156 else if (op
== 15 && dp
&& ((rn
& 0x1c) == 0x18))
3157 /* VCVT double to int: always integer result. */
3158 gen_mov_vreg_F0(0, rd
);
3159 else if (op
== 15 && rn
== 15)
3161 gen_mov_vreg_F0(!dp
, rd
);
3163 gen_mov_vreg_F0(dp
, rd
);
3165 /* break out of the loop if we have finished */
3169 if (op
== 15 && delta_m
== 0) {
3170 /* single source one-many */
3172 rd
= ((rd
+ delta_d
) & (bank_mask
- 1))
3174 gen_mov_vreg_F0(dp
, rd
);
3178 /* Setup the next operands. */
3180 rd
= ((rd
+ delta_d
) & (bank_mask
- 1))
3184 /* One source operand. */
3185 rm
= ((rm
+ delta_m
) & (bank_mask
- 1))
3187 gen_mov_F0_vreg(dp
, rm
);
3189 /* Two source operands. */
3190 rn
= ((rn
+ delta_d
) & (bank_mask
- 1))
3192 gen_mov_F0_vreg(dp
, rn
);
3194 rm
= ((rm
+ delta_m
) & (bank_mask
- 1))
3196 gen_mov_F1_vreg(dp
, rm
);
3204 if ((insn
& 0x03e00000) == 0x00400000) {
3205 /* two-register transfer */
3206 rn
= (insn
>> 16) & 0xf;
3207 rd
= (insn
>> 12) & 0xf;
3209 VFP_DREG_M(rm
, insn
);
3211 rm
= VFP_SREG_M(insn
);
3214 if (insn
& ARM_CP_RW_BIT
) {
3217 gen_mov_F0_vreg(0, rm
* 2);
3218 tmp
= gen_vfp_mrs();
3219 store_reg(s
, rd
, tmp
);
3220 gen_mov_F0_vreg(0, rm
* 2 + 1);
3221 tmp
= gen_vfp_mrs();
3222 store_reg(s
, rn
, tmp
);
3224 gen_mov_F0_vreg(0, rm
);
3225 tmp
= gen_vfp_mrs();
3226 store_reg(s
, rd
, tmp
);
3227 gen_mov_F0_vreg(0, rm
+ 1);
3228 tmp
= gen_vfp_mrs();
3229 store_reg(s
, rn
, tmp
);
3234 tmp
= load_reg(s
, rd
);
3236 gen_mov_vreg_F0(0, rm
* 2);
3237 tmp
= load_reg(s
, rn
);
3239 gen_mov_vreg_F0(0, rm
* 2 + 1);
3241 tmp
= load_reg(s
, rd
);
3243 gen_mov_vreg_F0(0, rm
);
3244 tmp
= load_reg(s
, rn
);
3246 gen_mov_vreg_F0(0, rm
+ 1);
3251 rn
= (insn
>> 16) & 0xf;
3253 VFP_DREG_D(rd
, insn
);
3255 rd
= VFP_SREG_D(insn
);
3256 if ((insn
& 0x01200000) == 0x01000000) {
3257 /* Single load/store */
3258 offset
= (insn
& 0xff) << 2;
3259 if ((insn
& (1 << 23)) == 0)
3261 if (s
->thumb
&& rn
== 15) {
3262 /* This is actually UNPREDICTABLE */
3263 addr
= tcg_temp_new_i32();
3264 tcg_gen_movi_i32(addr
, s
->pc
& ~2);
3266 addr
= load_reg(s
, rn
);
3268 tcg_gen_addi_i32(addr
, addr
, offset
);
3269 if (insn
& (1 << 20)) {
3270 gen_vfp_ld(s
, dp
, addr
);
3271 gen_mov_vreg_F0(dp
, rd
);
3273 gen_mov_F0_vreg(dp
, rd
);
3274 gen_vfp_st(s
, dp
, addr
);
3276 tcg_temp_free_i32(addr
);
3278 /* load/store multiple */
3279 int w
= insn
& (1 << 21);
3281 n
= (insn
>> 1) & 0x7f;
3285 if (w
&& !(((insn
>> 23) ^ (insn
>> 24)) & 1)) {
3286 /* P == U , W == 1 => UNDEF */
3289 if (n
== 0 || (rd
+ n
) > 32 || (dp
&& n
> 16)) {
3290 /* UNPREDICTABLE cases for bad immediates: we choose to
3291 * UNDEF to avoid generating huge numbers of TCG ops
3295 if (rn
== 15 && w
) {
3296 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
3300 if (s
->thumb
&& rn
== 15) {
3301 /* This is actually UNPREDICTABLE */
3302 addr
= tcg_temp_new_i32();
3303 tcg_gen_movi_i32(addr
, s
->pc
& ~2);
3305 addr
= load_reg(s
, rn
);
3307 if (insn
& (1 << 24)) /* pre-decrement */
3308 tcg_gen_addi_i32(addr
, addr
, -((insn
& 0xff) << 2));
3314 for (i
= 0; i
< n
; i
++) {
3315 if (insn
& ARM_CP_RW_BIT
) {
3317 gen_vfp_ld(s
, dp
, addr
);
3318 gen_mov_vreg_F0(dp
, rd
+ i
);
3321 gen_mov_F0_vreg(dp
, rd
+ i
);
3322 gen_vfp_st(s
, dp
, addr
);
3324 tcg_gen_addi_i32(addr
, addr
, offset
);
3328 if (insn
& (1 << 24))
3329 offset
= -offset
* n
;
3330 else if (dp
&& (insn
& 1))
3336 tcg_gen_addi_i32(addr
, addr
, offset
);
3337 store_reg(s
, rn
, addr
);
3339 tcg_temp_free_i32(addr
);
3345 /* Should never happen. */
static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
{
    TranslationBlock *tb;

    tb = s->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        tcg_gen_goto_tb(n);
        gen_set_pc_im(dest);
        tcg_gen_exit_tb((tcg_target_long)tb + n);
    } else {
        gen_set_pc_im(dest);
        tcg_gen_exit_tb(0);
    }
}
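/* Note: the direct tcg_gen_goto_tb() chaining above is only taken when the
 * destination lies in the same guest page as the TB being translated, so a
 * chained jump never outlives a change to that page's mapping.
 */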
static inline void gen_jmp (DisasContext *s, uint32_t dest)
{
    if (unlikely(s->singlestep_enabled)) {
        /* An indirect jump so that we still trigger the debug exception.  */
        if (s->thumb)
            dest |= 1;
        gen_bx_im(s, dest);
    } else {
        gen_goto_tb(s, 0, dest);
        s->is_jmp = DISAS_TB_JUMP;
    }
}
static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
{
    if (x)
        tcg_gen_sari_i32(t0, t0, 16);
    else
        gen_sxth(t0);
    if (y)
        tcg_gen_sari_i32(t1, t1, 16);
    else
        gen_sxth(t1);
    tcg_gen_mul_i32(t0, t0, t1);
}
/* Return the mask of PSR bits set by a MSR instruction.  */
static uint32_t msr_mask(CPUARMState *env, DisasContext *s, int flags, int spsr) {
    uint32_t mask;

    mask = 0;
    if (flags & (1 << 0))
        mask |= 0xff;
    if (flags & (1 << 1))
        mask |= 0xff00;
    if (flags & (1 << 2))
        mask |= 0xff0000;
    if (flags & (1 << 3))
        mask |= 0xff000000;

    /* Mask out undefined bits.  */
    mask &= ~CPSR_RESERVED;
    if (!arm_feature(env, ARM_FEATURE_V4T))
        mask &= ~CPSR_T;
    if (!arm_feature(env, ARM_FEATURE_V5))
        mask &= ~CPSR_Q; /* V5TE in reality*/
    if (!arm_feature(env, ARM_FEATURE_V6))
        mask &= ~(CPSR_E | CPSR_GE);
    if (!arm_feature(env, ARM_FEATURE_THUMB2))
        mask &= ~CPSR_IT;
    /* Mask out execution state bits.  */
    if (!spsr)
        mask &= ~CPSR_EXEC;
    /* Mask out privileged bits.  */
    if (IS_USER(s))
        mask &= CPSR_USER;
    return mask;
}
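/* Example (illustrative): an MSR targeting the 'fc' fields has flags = 0b1001,
 * giving an initial mask of 0xff0000ff (flags byte | control byte) before the
 * feature and privilege filtering above removes bits this core or the current
 * mode may not write.
 */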
/* Returns nonzero if access to the PSR is not permitted.  Marks t0 as dead. */
static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
{
    TCGv tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;

        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, spsr);
    } else {
        gen_set_cpsr(t0, mask);
    }
    tcg_temp_free_i32(t0);
    gen_lookup_tb(s);
    return 0;
}
/* Returns nonzero if access to the PSR is not permitted.  */
static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
{
    TCGv tmp;
    tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, val);
    return gen_set_psr(s, mask, spsr, tmp);
}
/* Generate an old-style exception return. Marks pc as dead. */
static void gen_exception_return(DisasContext *s, TCGv pc)
{
    TCGv tmp;
    store_reg(s, 15, pc);
    tmp = load_cpu_field(spsr);
    gen_set_cpsr(tmp, 0xffffffff);
    tcg_temp_free_i32(tmp);
    s->is_jmp = DISAS_UPDATE;
}
/* Generate a v6 exception return.  Marks both values as dead.  */
static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
{
    gen_set_cpsr(cpsr, 0xffffffff);
    tcg_temp_free_i32(cpsr);
    store_reg(s, 15, pc);
    s->is_jmp = DISAS_UPDATE;
}
static inline void
gen_set_condexec (DisasContext *s)
{
    if (s->condexec_mask) {
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}
static void gen_exception_insn(DisasContext *s, int offset, int excp)
{
    gen_set_condexec(s);
    gen_set_pc_im(s->pc - offset);
    gen_exception(excp);
    s->is_jmp = DISAS_JUMP;
}
static void gen_nop_hint(DisasContext *s, int val)
{
    switch (val) {
    case 3: /* wfi */
        gen_set_pc_im(s->pc);
        s->is_jmp = DISAS_WFI;
        break;
    case 2: /* wfe */
    case 4: /* sev */
        /* TODO: Implement SEV and WFE.  May help SMP performance.  */
    default: /* nop */
        break;
    }
}
#define CPU_V001 cpu_V0, cpu_V0, cpu_V1

static inline void gen_neon_add(int size, TCGv t0, TCGv t1)
{
    switch (size) {
    case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
    case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
    case 2: tcg_gen_add_i32(t0, t0, t1); break;
    default: abort();
    }
}
static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
{
    switch (size) {
    case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
    case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
    case 2: tcg_gen_sub_i32(t0, t1, t0); break;
    default: abort();
    }
}
/* 32-bit pairwise ops end up the same as the elementwise versions.  */
#define gen_helper_neon_pmax_s32  gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32  gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32  gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32  gen_helper_neon_min_u32
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
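/* For example, GEN_NEON_INTEGER_OP(hadd) expands to a switch on
 * ((size << 1) | u) dispatching to gen_helper_neon_hadd_{s8,u8,s16,u16,
 * s32,u32}(tmp, tmp, tmp2); encodings with no matching helper fall into
 * the default case and return 1 (UNDEF).
 */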
static TCGv neon_load_scratch(int scratch)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    return tmp;
}
static void neon_store_scratch(int scratch, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    tcg_temp_free_i32(var);
}
static inline TCGv neon_get_scalar(int size, int reg)
{
    TCGv tmp;
    if (size == 1) {
        tmp = neon_load_reg(reg & 7, reg >> 4);
        if (reg & 8) {
            gen_neon_dup_high16(tmp);
        } else {
            gen_neon_dup_low16(tmp);
        }
    } else {
        tmp = neon_load_reg(reg & 15, reg >> 4);
    }
    return tmp;
}
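/* The 'reg' argument packs the scalar position the way the instruction
 * encodes it, e.g. (illustrative) for a 16-bit scalar reg = 0x0b selects
 * d3 (reg & 7), its low 32-bit word (reg >> 4 == 0) and the upper half-word
 * of that word (reg & 8), which is then replicated across the result.
 */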
static int gen_neon_unzip(int rd, int rm, int size, int q)
{
    TCGv tmp, tmp2;
    if (!q && size == 2) {
        return 1;
    }
    tmp = tcg_const_i32(rd);
    tmp2 = tcg_const_i32(rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
            break;
        case 2:
            gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(tmp2);
    return 0;
}
static int gen_neon_zip(int rd, int rm, int size, int q)
{
    TCGv tmp, tmp2;
    if (!q && size == 2) {
        return 1;
    }
    tmp = tcg_const_i32(rd);
    tmp2 = tcg_const_i32(rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
            break;
        case 2:
            gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_zip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_zip16(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(tmp2);
    return 0;
}
static void gen_neon_trn_u8(TCGv t0, TCGv t1)
{
    TCGv rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 8);
    tcg_gen_andi_i32(rd, rd, 0xff00ff00);
    tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
    tcg_gen_or_i32(rd, rd, tmp);

    tcg_gen_shri_i32(t1, t1, 8);
    tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
    tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
static void gen_neon_trn_u16(TCGv t0, TCGv t1)
{
    TCGv rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 16);
    tcg_gen_andi_i32(tmp, t1, 0xffff);
    tcg_gen_or_i32(rd, rd, tmp);
    tcg_gen_shri_i32(t1, t1, 16);
    tcg_gen_andi_i32(tmp, t0, 0xffff0000);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
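/* Worked example (illustrative): with t0 = 0xAAAABBBB and t1 = 0xCCCCDDDD,
 * gen_neon_trn_u16() leaves t0 = 0xBBBBDDDD and t1 = 0xAAAACCCC, i.e. the
 * half-word lanes of the two inputs are redistributed across the two
 * outputs; this is the per-32-bit-chunk building block used by the VTRN
 * handling further down.
 */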
static struct {
    int nregs;
    int interleave;
    int spacing;
} neon_ls_element_type[11] = {
/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid.  */
static int disas_neon_ls_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
{
3772 if (!s
->vfp_enabled
)
3774 VFP_DREG_D(rd
, insn
);
3775 rn
= (insn
>> 16) & 0xf;
3777 load
= (insn
& (1 << 21)) != 0;
3778 if ((insn
& (1 << 23)) == 0) {
3779 /* Load store all elements. */
3780 op
= (insn
>> 8) & 0xf;
3781 size
= (insn
>> 6) & 3;
3784 /* Catch UNDEF cases for bad values of align field */
3787 if (((insn
>> 5) & 1) == 1) {
3792 if (((insn
>> 4) & 3) == 3) {
3799 nregs
= neon_ls_element_type
[op
].nregs
;
3800 interleave
= neon_ls_element_type
[op
].interleave
;
3801 spacing
= neon_ls_element_type
[op
].spacing
;
3802 if (size
== 3 && (interleave
| spacing
) != 1)
3804 addr
= tcg_temp_new_i32();
3805 load_reg_var(s
, addr
, rn
);
3806 stride
= (1 << size
) * interleave
;
3807 for (reg
= 0; reg
< nregs
; reg
++) {
3808 if (interleave
> 2 || (interleave
== 2 && nregs
== 2)) {
3809 load_reg_var(s
, addr
, rn
);
3810 tcg_gen_addi_i32(addr
, addr
, (1 << size
) * reg
);
3811 } else if (interleave
== 2 && nregs
== 4 && reg
== 2) {
3812 load_reg_var(s
, addr
, rn
);
3813 tcg_gen_addi_i32(addr
, addr
, 1 << size
);
3817 tmp64
= gen_ld64(addr
, IS_USER(s
));
3818 neon_store_reg64(tmp64
, rd
);
3819 tcg_temp_free_i64(tmp64
);
3821 tmp64
= tcg_temp_new_i64();
3822 neon_load_reg64(tmp64
, rd
);
3823 gen_st64(tmp64
, addr
, IS_USER(s
));
3825 tcg_gen_addi_i32(addr
, addr
, stride
);
3827 for (pass
= 0; pass
< 2; pass
++) {
3830 tmp
= gen_ld32(addr
, IS_USER(s
));
3831 neon_store_reg(rd
, pass
, tmp
);
3833 tmp
= neon_load_reg(rd
, pass
);
3834 gen_st32(tmp
, addr
, IS_USER(s
));
3836 tcg_gen_addi_i32(addr
, addr
, stride
);
3837 } else if (size
== 1) {
3839 tmp
= gen_ld16u(addr
, IS_USER(s
));
3840 tcg_gen_addi_i32(addr
, addr
, stride
);
3841 tmp2
= gen_ld16u(addr
, IS_USER(s
));
3842 tcg_gen_addi_i32(addr
, addr
, stride
);
3843 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
3844 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
3845 tcg_temp_free_i32(tmp2
);
3846 neon_store_reg(rd
, pass
, tmp
);
3848 tmp
= neon_load_reg(rd
, pass
);
3849 tmp2
= tcg_temp_new_i32();
3850 tcg_gen_shri_i32(tmp2
, tmp
, 16);
3851 gen_st16(tmp
, addr
, IS_USER(s
));
3852 tcg_gen_addi_i32(addr
, addr
, stride
);
3853 gen_st16(tmp2
, addr
, IS_USER(s
));
3854 tcg_gen_addi_i32(addr
, addr
, stride
);
3856 } else /* size == 0 */ {
3859 for (n
= 0; n
< 4; n
++) {
3860 tmp
= gen_ld8u(addr
, IS_USER(s
));
3861 tcg_gen_addi_i32(addr
, addr
, stride
);
3865 tcg_gen_shli_i32(tmp
, tmp
, n
* 8);
3866 tcg_gen_or_i32(tmp2
, tmp2
, tmp
);
3867 tcg_temp_free_i32(tmp
);
3870 neon_store_reg(rd
, pass
, tmp2
);
3872 tmp2
= neon_load_reg(rd
, pass
);
3873 for (n
= 0; n
< 4; n
++) {
3874 tmp
= tcg_temp_new_i32();
3876 tcg_gen_mov_i32(tmp
, tmp2
);
3878 tcg_gen_shri_i32(tmp
, tmp2
, n
* 8);
3880 gen_st8(tmp
, addr
, IS_USER(s
));
3881 tcg_gen_addi_i32(addr
, addr
, stride
);
3883 tcg_temp_free_i32(tmp2
);
3890 tcg_temp_free_i32(addr
);
3893 size
= (insn
>> 10) & 3;
3895 /* Load single element to all lanes. */
3896 int a
= (insn
>> 4) & 1;
3900 size
= (insn
>> 6) & 3;
3901 nregs
= ((insn
>> 8) & 3) + 1;
3904 if (nregs
!= 4 || a
== 0) {
3907 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
3910 if (nregs
== 1 && a
== 1 && size
== 0) {
3913 if (nregs
== 3 && a
== 1) {
3916 addr
= tcg_temp_new_i32();
3917 load_reg_var(s
, addr
, rn
);
3919 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
3920 tmp
= gen_load_and_replicate(s
, addr
, size
);
3921 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
, 0));
3922 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
, 1));
3923 if (insn
& (1 << 5)) {
3924 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
+ 1, 0));
3925 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
+ 1, 1));
3927 tcg_temp_free_i32(tmp
);
3929 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
3930 stride
= (insn
& (1 << 5)) ? 2 : 1;
3931 for (reg
= 0; reg
< nregs
; reg
++) {
3932 tmp
= gen_load_and_replicate(s
, addr
, size
);
3933 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
, 0));
3934 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
, 1));
3935 tcg_temp_free_i32(tmp
);
3936 tcg_gen_addi_i32(addr
, addr
, 1 << size
);
3940 tcg_temp_free_i32(addr
);
3941 stride
= (1 << size
) * nregs
;
3943 /* Single element. */
3944 int idx
= (insn
>> 4) & 0xf;
3945 pass
= (insn
>> 7) & 1;
3948 shift
= ((insn
>> 5) & 3) * 8;
3952 shift
= ((insn
>> 6) & 1) * 16;
3953 stride
= (insn
& (1 << 5)) ? 2 : 1;
3957 stride
= (insn
& (1 << 6)) ? 2 : 1;
3962 nregs
= ((insn
>> 8) & 3) + 1;
3963 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
3966 if (((idx
& (1 << size
)) != 0) ||
3967 (size
== 2 && ((idx
& 3) == 1 || (idx
& 3) == 2))) {
3972 if ((idx
& 1) != 0) {
3977 if (size
== 2 && (idx
& 2) != 0) {
3982 if ((size
== 2) && ((idx
& 3) == 3)) {
3989 if ((rd
+ stride
* (nregs
- 1)) > 31) {
3990 /* Attempts to write off the end of the register file
3991 * are UNPREDICTABLE; we choose to UNDEF because otherwise
3992 * the neon_load_reg() would write off the end of the array.
3996 addr
= tcg_temp_new_i32();
3997 load_reg_var(s
, addr
, rn
);
3998 for (reg
= 0; reg
< nregs
; reg
++) {
4002 tmp
= gen_ld8u(addr
, IS_USER(s
));
4005 tmp
= gen_ld16u(addr
, IS_USER(s
));
4008 tmp
= gen_ld32(addr
, IS_USER(s
));
4010 default: /* Avoid compiler warnings. */
4014 tmp2
= neon_load_reg(rd
, pass
);
4015 tcg_gen_deposit_i32(tmp
, tmp2
, tmp
,
4016 shift
, size
? 16 : 8);
4017 tcg_temp_free_i32(tmp2
);
4019 neon_store_reg(rd
, pass
, tmp
);
4020 } else { /* Store */
4021 tmp
= neon_load_reg(rd
, pass
);
4023 tcg_gen_shri_i32(tmp
, tmp
, shift
);
4026 gen_st8(tmp
, addr
, IS_USER(s
));
4029 gen_st16(tmp
, addr
, IS_USER(s
));
4032 gen_st32(tmp
, addr
, IS_USER(s
));
4037 tcg_gen_addi_i32(addr
, addr
, 1 << size
);
4039 tcg_temp_free_i32(addr
);
4040 stride
= nregs
* (1 << size
);
4046 base
= load_reg(s
, rn
);
4048 tcg_gen_addi_i32(base
, base
, stride
);
4051 index
= load_reg(s
, rm
);
4052 tcg_gen_add_i32(base
, base
, index
);
4053 tcg_temp_free_i32(index
);
4055 store_reg(s
, rn
, base
);
/* Bitwise select.  dest = c ? t : f.  Clobbers T and F.  */
static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
{
    tcg_gen_and_i32(t, t, c);
    tcg_gen_andc_i32(f, f, c);
    tcg_gen_or_i32(dest, t, f);
}
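/* e.g. (illustrative) with c = 0x0000ffff, t = 0x11112222 and f = 0x33334444
 * the select above produces dest = 0x33332222: bit positions set in c take
 * their value from t, clear positions take theirs from f.
 */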
static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_u8(dest, src); break;
    case 1: gen_helper_neon_narrow_u16(dest, src); break;
    case 2: tcg_gen_trunc_i64_i32(dest, src); break;
    default: abort();
    }
}

static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
    default: abort();
    }
}

static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
    default: abort();
    }
}

static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
    default: abort();
    }
}
static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
                                         int q, int u)
{
    if (q) {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
            default: abort();
            }
        }
    } else {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_shl_u16(var, var, shift); break;
            case 2: gen_helper_neon_shl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_shl_s16(var, var, shift); break;
            case 2: gen_helper_neon_shl_s32(var, var, shift); break;
            default: abort();
            }
        }
    }
}
static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
{
    if (u) {
        switch (size) {
        case 0: gen_helper_neon_widen_u8(dest, src); break;
        case 1: gen_helper_neon_widen_u16(dest, src); break;
        case 2: tcg_gen_extu_i32_i64(dest, src); break;
        default: abort();
        }
    } else {
        switch (size) {
        case 0: gen_helper_neon_widen_s8(dest, src); break;
        case 1: gen_helper_neon_widen_s16(dest, src); break;
        case 2: tcg_gen_ext_i32_i64(dest, src); break;
        default: abort();
        }
    }
    tcg_temp_free_i32(src);
}
static inline void gen_neon_addl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_addl_u16(CPU_V001); break;
    case 1: gen_helper_neon_addl_u32(CPU_V001); break;
    case 2: tcg_gen_add_i64(CPU_V001); break;
    default: abort();
    }
}

static inline void gen_neon_subl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_subl_u16(CPU_V001); break;
    case 1: gen_helper_neon_subl_u32(CPU_V001); break;
    case 2: tcg_gen_sub_i64(CPU_V001); break;
    default: abort();
    }
}
static inline void gen_neon_negl(TCGv_i64 var, int size)
{
    switch (size) {
    case 0: gen_helper_neon_negl_u16(var, var); break;
    case 1: gen_helper_neon_negl_u32(var, var); break;
    case 2: gen_helper_neon_negl_u64(var, var); break;
    default: abort();
    }
}
static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
{
    switch (size) {
    case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
    case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
    default: abort();
    }
}
static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
{
    TCGv_i64 tmp;

    switch ((size << 1) | u) {
    case 0: gen_helper_neon_mull_s8(dest, a, b); break;
    case 1: gen_helper_neon_mull_u8(dest, a, b); break;
    case 2: gen_helper_neon_mull_s16(dest, a, b); break;
    case 3: gen_helper_neon_mull_u16(dest, a, b); break;
    case 4:
        tmp = gen_muls_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 5:
        tmp = gen_mulu_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    default: abort();
    }

    /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
       Don't forget to clean them now.  */
    if (size < 2) {
        tcg_temp_free_i32(a);
        tcg_temp_free_i32(b);
    }
}
static void gen_neon_narrow_op(int op, int u, int size, TCGv dest, TCGv_i64 src)
{
    if (op) {
        if (u) {
            gen_neon_unarrow_sats(size, dest, src);
        } else {
            gen_neon_narrow(size, dest, src);
        }
    } else {
        if (u) {
            gen_neon_narrow_satu(size, dest, src);
        } else {
            gen_neon_narrow_sats(size, dest, src);
        }
    }
}
/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD 23
#define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_VRECPS_VRSQRTS 31 /* float VRECPS, VRSQRTS */
static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD] = 0x7,
    [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_VRECPS_VRSQRTS] = 0x5, /* size bit 1 encodes op */
};
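/* The decoder tests (neon_3r_sizes[op] & (1 << size)) before emitting any
 * code, e.g. the 0xf entries accept every size including the 64-bit element
 * forms, the 0x7 entries UNDEF for size == 3, and the 0x5 entries accept
 * only size 0 and 2 because bit 1 of the size field selects between the two
 * operations sharing that op value.
 */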
/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63
static int neon_2rm_is_float_op(int op)
{
    /* Return true if this neon 2reg-misc op is float-to-float */
    return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
            op >= NEON_2RM_VRECPE_F);
}

/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF). Since unallocated
 * op values will have no bits set they always UNDEF.
 */
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};
/* Translate a NEON data processing instruction.  Return nonzero if the
   instruction is invalid.
   We process data in a mixture of 32-bit and 64-bit chunks.
   Mostly we use 32-bit chunks so we can use normal scalar instructions.  */

static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
{
    TCGv tmp, tmp2, tmp3, tmp4, tmp5;
4445 if (!s
->vfp_enabled
)
4447 q
= (insn
& (1 << 6)) != 0;
4448 u
= (insn
>> 24) & 1;
4449 VFP_DREG_D(rd
, insn
);
4450 VFP_DREG_N(rn
, insn
);
4451 VFP_DREG_M(rm
, insn
);
4452 size
= (insn
>> 20) & 3;
4453 if ((insn
& (1 << 23)) == 0) {
4454 /* Three register same length. */
4455 op
= ((insn
>> 7) & 0x1e) | ((insn
>> 4) & 1);
4456 /* Catch invalid op and bad size combinations: UNDEF */
4457 if ((neon_3r_sizes
[op
] & (1 << size
)) == 0) {
4460 /* All insns of this form UNDEF for either this condition or the
4461 * superset of cases "Q==1"; we catch the latter later.
4463 if (q
&& ((rd
| rn
| rm
) & 1)) {
4466 if (size
== 3 && op
!= NEON_3R_LOGIC
) {
4467 /* 64-bit element instructions. */
4468 for (pass
= 0; pass
< (q
? 2 : 1); pass
++) {
4469 neon_load_reg64(cpu_V0
, rn
+ pass
);
4470 neon_load_reg64(cpu_V1
, rm
+ pass
);
4474 gen_helper_neon_qadd_u64(cpu_V0
, cpu_env
,
4477 gen_helper_neon_qadd_s64(cpu_V0
, cpu_env
,
4483 gen_helper_neon_qsub_u64(cpu_V0
, cpu_env
,
4486 gen_helper_neon_qsub_s64(cpu_V0
, cpu_env
,
4492 gen_helper_neon_shl_u64(cpu_V0
, cpu_V1
, cpu_V0
);
4494 gen_helper_neon_shl_s64(cpu_V0
, cpu_V1
, cpu_V0
);
4499 gen_helper_neon_qshl_u64(cpu_V0
, cpu_env
,
4502 gen_helper_neon_qshl_s64(cpu_V0
, cpu_env
,
4508 gen_helper_neon_rshl_u64(cpu_V0
, cpu_V1
, cpu_V0
);
4510 gen_helper_neon_rshl_s64(cpu_V0
, cpu_V1
, cpu_V0
);
4513 case NEON_3R_VQRSHL
:
4515 gen_helper_neon_qrshl_u64(cpu_V0
, cpu_env
,
4518 gen_helper_neon_qrshl_s64(cpu_V0
, cpu_env
,
4522 case NEON_3R_VADD_VSUB
:
4524 tcg_gen_sub_i64(CPU_V001
);
4526 tcg_gen_add_i64(CPU_V001
);
4532 neon_store_reg64(cpu_V0
, rd
+ pass
);
4541 case NEON_3R_VQRSHL
:
4544 /* Shift instruction operands are reversed. */
4559 case NEON_3R_FLOAT_ARITH
:
4560 pairwise
= (u
&& size
< 2); /* if VPADD (float) */
4562 case NEON_3R_FLOAT_MINMAX
:
4563 pairwise
= u
; /* if VPMIN/VPMAX (float) */
4565 case NEON_3R_FLOAT_CMP
:
4567 /* no encoding for U=0 C=1x */
4571 case NEON_3R_FLOAT_ACMP
:
4576 case NEON_3R_VRECPS_VRSQRTS
:
4582 if (u
&& (size
!= 0)) {
4583 /* UNDEF on invalid size for polynomial subcase */
4588 if (!arm_feature(env
, ARM_FEATURE_VFP4
) || u
) {
4596 if (pairwise
&& q
) {
4597 /* All the pairwise insns UNDEF if Q is set */
4601 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
4606 tmp
= neon_load_reg(rn
, 0);
4607 tmp2
= neon_load_reg(rn
, 1);
4609 tmp
= neon_load_reg(rm
, 0);
4610 tmp2
= neon_load_reg(rm
, 1);
4614 tmp
= neon_load_reg(rn
, pass
);
4615 tmp2
= neon_load_reg(rm
, pass
);
4619 GEN_NEON_INTEGER_OP(hadd
);
4622 GEN_NEON_INTEGER_OP_ENV(qadd
);
4624 case NEON_3R_VRHADD
:
4625 GEN_NEON_INTEGER_OP(rhadd
);
4627 case NEON_3R_LOGIC
: /* Logic ops. */
4628 switch ((u
<< 2) | size
) {
4630 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
4633 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
4636 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
4639 tcg_gen_orc_i32(tmp
, tmp
, tmp2
);
4642 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
4645 tmp3
= neon_load_reg(rd
, pass
);
4646 gen_neon_bsl(tmp
, tmp
, tmp2
, tmp3
);
4647 tcg_temp_free_i32(tmp3
);
4650 tmp3
= neon_load_reg(rd
, pass
);
4651 gen_neon_bsl(tmp
, tmp
, tmp3
, tmp2
);
4652 tcg_temp_free_i32(tmp3
);
4655 tmp3
= neon_load_reg(rd
, pass
);
4656 gen_neon_bsl(tmp
, tmp3
, tmp
, tmp2
);
4657 tcg_temp_free_i32(tmp3
);
4662 GEN_NEON_INTEGER_OP(hsub
);
4665 GEN_NEON_INTEGER_OP_ENV(qsub
);
4668 GEN_NEON_INTEGER_OP(cgt
);
4671 GEN_NEON_INTEGER_OP(cge
);
4674 GEN_NEON_INTEGER_OP(shl
);
4677 GEN_NEON_INTEGER_OP_ENV(qshl
);
4680 GEN_NEON_INTEGER_OP(rshl
);
4682 case NEON_3R_VQRSHL
:
4683 GEN_NEON_INTEGER_OP_ENV(qrshl
);
4686 GEN_NEON_INTEGER_OP(max
);
4689 GEN_NEON_INTEGER_OP(min
);
4692 GEN_NEON_INTEGER_OP(abd
);
4695 GEN_NEON_INTEGER_OP(abd
);
4696 tcg_temp_free_i32(tmp2
);
4697 tmp2
= neon_load_reg(rd
, pass
);
4698 gen_neon_add(size
, tmp
, tmp2
);
4700 case NEON_3R_VADD_VSUB
:
4701 if (!u
) { /* VADD */
4702 gen_neon_add(size
, tmp
, tmp2
);
4705 case 0: gen_helper_neon_sub_u8(tmp
, tmp
, tmp2
); break;
4706 case 1: gen_helper_neon_sub_u16(tmp
, tmp
, tmp2
); break;
4707 case 2: tcg_gen_sub_i32(tmp
, tmp
, tmp2
); break;
4712 case NEON_3R_VTST_VCEQ
:
4713 if (!u
) { /* VTST */
4715 case 0: gen_helper_neon_tst_u8(tmp
, tmp
, tmp2
); break;
4716 case 1: gen_helper_neon_tst_u16(tmp
, tmp
, tmp2
); break;
4717 case 2: gen_helper_neon_tst_u32(tmp
, tmp
, tmp2
); break;
4722 case 0: gen_helper_neon_ceq_u8(tmp
, tmp
, tmp2
); break;
4723 case 1: gen_helper_neon_ceq_u16(tmp
, tmp
, tmp2
); break;
4724 case 2: gen_helper_neon_ceq_u32(tmp
, tmp
, tmp2
); break;
4729 case NEON_3R_VML
: /* VMLA, VMLAL, VMLS,VMLSL */
4731 case 0: gen_helper_neon_mul_u8(tmp
, tmp
, tmp2
); break;
4732 case 1: gen_helper_neon_mul_u16(tmp
, tmp
, tmp2
); break;
4733 case 2: tcg_gen_mul_i32(tmp
, tmp
, tmp2
); break;
4736 tcg_temp_free_i32(tmp2
);
4737 tmp2
= neon_load_reg(rd
, pass
);
4739 gen_neon_rsb(size
, tmp
, tmp2
);
4741 gen_neon_add(size
, tmp
, tmp2
);
4745 if (u
) { /* polynomial */
4746 gen_helper_neon_mul_p8(tmp
, tmp
, tmp2
);
4747 } else { /* Integer */
4749 case 0: gen_helper_neon_mul_u8(tmp
, tmp
, tmp2
); break;
4750 case 1: gen_helper_neon_mul_u16(tmp
, tmp
, tmp2
); break;
4751 case 2: tcg_gen_mul_i32(tmp
, tmp
, tmp2
); break;
4757 GEN_NEON_INTEGER_OP(pmax
);
4760 GEN_NEON_INTEGER_OP(pmin
);
4762 case NEON_3R_VQDMULH_VQRDMULH
: /* Multiply high. */
4763 if (!u
) { /* VQDMULH */
4766 gen_helper_neon_qdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
4769 gen_helper_neon_qdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
4773 } else { /* VQRDMULH */
4776 gen_helper_neon_qrdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
4779 gen_helper_neon_qrdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
4787 case 0: gen_helper_neon_padd_u8(tmp
, tmp
, tmp2
); break;
4788 case 1: gen_helper_neon_padd_u16(tmp
, tmp
, tmp2
); break;
4789 case 2: tcg_gen_add_i32(tmp
, tmp
, tmp2
); break;
4793 case NEON_3R_FLOAT_ARITH
: /* Floating point arithmetic. */
4795 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
4796 switch ((u
<< 2) | size
) {
4799 gen_helper_vfp_adds(tmp
, tmp
, tmp2
, fpstatus
);
4802 gen_helper_vfp_subs(tmp
, tmp
, tmp2
, fpstatus
);
4805 gen_helper_neon_abd_f32(tmp
, tmp
, tmp2
, fpstatus
);
4810 tcg_temp_free_ptr(fpstatus
);
4813 case NEON_3R_FLOAT_MULTIPLY
:
4815 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
4816 gen_helper_vfp_muls(tmp
, tmp
, tmp2
, fpstatus
);
4818 tcg_temp_free_i32(tmp2
);
4819 tmp2
= neon_load_reg(rd
, pass
);
4821 gen_helper_vfp_adds(tmp
, tmp
, tmp2
, fpstatus
);
4823 gen_helper_vfp_subs(tmp
, tmp2
, tmp
, fpstatus
);
4826 tcg_temp_free_ptr(fpstatus
);
4829 case NEON_3R_FLOAT_CMP
:
4831 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
4833 gen_helper_neon_ceq_f32(tmp
, tmp
, tmp2
, fpstatus
);
4836 gen_helper_neon_cge_f32(tmp
, tmp
, tmp2
, fpstatus
);
4838 gen_helper_neon_cgt_f32(tmp
, tmp
, tmp2
, fpstatus
);
4841 tcg_temp_free_ptr(fpstatus
);
4844 case NEON_3R_FLOAT_ACMP
:
4846 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
4848 gen_helper_neon_acge_f32(tmp
, tmp
, tmp2
, fpstatus
);
4850 gen_helper_neon_acgt_f32(tmp
, tmp
, tmp2
, fpstatus
);
4852 tcg_temp_free_ptr(fpstatus
);
4855 case NEON_3R_FLOAT_MINMAX
:
4857 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
4859 gen_helper_neon_max_f32(tmp
, tmp
, tmp2
, fpstatus
);
4861 gen_helper_neon_min_f32(tmp
, tmp
, tmp2
, fpstatus
);
4863 tcg_temp_free_ptr(fpstatus
);
4866 case NEON_3R_VRECPS_VRSQRTS
:
4868 gen_helper_recps_f32(tmp
, tmp
, tmp2
, cpu_env
);
4870 gen_helper_rsqrts_f32(tmp
, tmp
, tmp2
, cpu_env
);
4874 /* VFMA, VFMS: fused multiply-add */
4875 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
4876 TCGv_i32 tmp3
= neon_load_reg(rd
, pass
);
4879 gen_helper_vfp_negs(tmp
, tmp
);
4881 gen_helper_vfp_muladds(tmp
, tmp
, tmp2
, tmp3
, fpstatus
);
4882 tcg_temp_free_i32(tmp3
);
4883 tcg_temp_free_ptr(fpstatus
);
4889 tcg_temp_free_i32(tmp2
);
4891 /* Save the result. For elementwise operations we can put it
4892 straight into the destination register. For pairwise operations
4893 we have to be careful to avoid clobbering the source operands. */
4894 if (pairwise
&& rd
== rm
) {
4895 neon_store_scratch(pass
, tmp
);
4897 neon_store_reg(rd
, pass
, tmp
);
4901 if (pairwise
&& rd
== rm
) {
4902 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
4903 tmp
= neon_load_scratch(pass
);
4904 neon_store_reg(rd
, pass
, tmp
);
4907 /* End of 3 register same size operations. */
4908 } else if (insn
& (1 << 4)) {
4909 if ((insn
& 0x00380080) != 0) {
4910 /* Two registers and shift. */
4911 op
= (insn
>> 8) & 0xf;
4912 if (insn
& (1 << 7)) {
4920 while ((insn
& (1 << (size
+ 19))) == 0)
4923 shift
= (insn
>> 16) & ((1 << (3 + size
)) - 1);
4924 /* To avoid excessive duplication of ops we implement shift
4925 by immediate using the variable shift operations. */
4927 /* Shift by immediate:
4928 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4929 if (q
&& ((rd
| rm
) & 1)) {
4932 if (!u
&& (op
== 4 || op
== 6)) {
4935 /* Right shifts are encoded as N - shift, where N is the
4936 element size in bits. */
4938 shift
= shift
- (1 << (size
+ 3));
4946 imm
= (uint8_t) shift
;
4951 imm
= (uint16_t) shift
;
4962 for (pass
= 0; pass
< count
; pass
++) {
4964 neon_load_reg64(cpu_V0
, rm
+ pass
);
4965 tcg_gen_movi_i64(cpu_V1
, imm
);
4970 gen_helper_neon_shl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
4972 gen_helper_neon_shl_s64(cpu_V0
, cpu_V0
, cpu_V1
);
4977 gen_helper_neon_rshl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
4979 gen_helper_neon_rshl_s64(cpu_V0
, cpu_V0
, cpu_V1
);
4982 case 5: /* VSHL, VSLI */
4983 gen_helper_neon_shl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
4985 case 6: /* VQSHLU */
4986 gen_helper_neon_qshlu_s64(cpu_V0
, cpu_env
,
4991 gen_helper_neon_qshl_u64(cpu_V0
, cpu_env
,
4994 gen_helper_neon_qshl_s64(cpu_V0
, cpu_env
,
4999 if (op
== 1 || op
== 3) {
5001 neon_load_reg64(cpu_V1
, rd
+ pass
);
5002 tcg_gen_add_i64(cpu_V0
, cpu_V0
, cpu_V1
);
5003 } else if (op
== 4 || (op
== 5 && u
)) {
5005 neon_load_reg64(cpu_V1
, rd
+ pass
);
5007 if (shift
< -63 || shift
> 63) {
5011 mask
= 0xffffffffffffffffull
>> -shift
;
5013 mask
= 0xffffffffffffffffull
<< shift
;
5016 tcg_gen_andi_i64(cpu_V1
, cpu_V1
, ~mask
);
5017 tcg_gen_or_i64(cpu_V0
, cpu_V0
, cpu_V1
);
5019 neon_store_reg64(cpu_V0
, rd
+ pass
);
5020 } else { /* size < 3 */
5021 /* Operands in T0 and T1. */
5022 tmp
= neon_load_reg(rm
, pass
);
5023 tmp2
= tcg_temp_new_i32();
5024 tcg_gen_movi_i32(tmp2
, imm
);
5028 GEN_NEON_INTEGER_OP(shl
);
5032 GEN_NEON_INTEGER_OP(rshl
);
5035 case 5: /* VSHL, VSLI */
5037 case 0: gen_helper_neon_shl_u8(tmp
, tmp
, tmp2
); break;
5038 case 1: gen_helper_neon_shl_u16(tmp
, tmp
, tmp2
); break;
5039 case 2: gen_helper_neon_shl_u32(tmp
, tmp
, tmp2
); break;
5043 case 6: /* VQSHLU */
5046 gen_helper_neon_qshlu_s8(tmp
, cpu_env
,
5050 gen_helper_neon_qshlu_s16(tmp
, cpu_env
,
5054 gen_helper_neon_qshlu_s32(tmp
, cpu_env
,
5062 GEN_NEON_INTEGER_OP_ENV(qshl
);
5065 tcg_temp_free_i32(tmp2
);
5067 if (op
== 1 || op
== 3) {
5069 tmp2
= neon_load_reg(rd
, pass
);
5070 gen_neon_add(size
, tmp
, tmp2
);
5071 tcg_temp_free_i32(tmp2
);
5072 } else if (op
== 4 || (op
== 5 && u
)) {
5077 mask
= 0xff >> -shift
;
5079 mask
= (uint8_t)(0xff << shift
);
5085 mask
= 0xffff >> -shift
;
5087 mask
= (uint16_t)(0xffff << shift
);
5091 if (shift
< -31 || shift
> 31) {
5095 mask
= 0xffffffffu
>> -shift
;
5097 mask
= 0xffffffffu
<< shift
;
5103 tmp2
= neon_load_reg(rd
, pass
);
5104 tcg_gen_andi_i32(tmp
, tmp
, mask
);
5105 tcg_gen_andi_i32(tmp2
, tmp2
, ~mask
);
5106 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
5107 tcg_temp_free_i32(tmp2
);
5109 neon_store_reg(rd
, pass
, tmp
);
5112 } else if (op
< 10) {
5113 /* Shift by immediate and narrow:
5114 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
5115 int input_unsigned
= (op
== 8) ? !u
: u
;
5119 shift
= shift
- (1 << (size
+ 3));
5122 tmp64
= tcg_const_i64(shift
);
5123 neon_load_reg64(cpu_V0
, rm
);
5124 neon_load_reg64(cpu_V1
, rm
+ 1);
5125 for (pass
= 0; pass
< 2; pass
++) {
5133 if (input_unsigned
) {
5134 gen_helper_neon_rshl_u64(cpu_V0
, in
, tmp64
);
5136 gen_helper_neon_rshl_s64(cpu_V0
, in
, tmp64
);
5139 if (input_unsigned
) {
5140 gen_helper_neon_shl_u64(cpu_V0
, in
, tmp64
);
5142 gen_helper_neon_shl_s64(cpu_V0
, in
, tmp64
);
5145 tmp
= tcg_temp_new_i32();
5146 gen_neon_narrow_op(op
== 8, u
, size
- 1, tmp
, cpu_V0
);
5147 neon_store_reg(rd
, pass
, tmp
);
5149 tcg_temp_free_i64(tmp64
);
5152 imm
= (uint16_t)shift
;
5156 imm
= (uint32_t)shift
;
5158 tmp2
= tcg_const_i32(imm
);
5159 tmp4
= neon_load_reg(rm
+ 1, 0);
5160 tmp5
= neon_load_reg(rm
+ 1, 1);
5161 for (pass
= 0; pass
< 2; pass
++) {
5163 tmp
= neon_load_reg(rm
, 0);
5167 gen_neon_shift_narrow(size
, tmp
, tmp2
, q
,
5170 tmp3
= neon_load_reg(rm
, 1);
5174 gen_neon_shift_narrow(size
, tmp3
, tmp2
, q
,
5176 tcg_gen_concat_i32_i64(cpu_V0
, tmp
, tmp3
);
5177 tcg_temp_free_i32(tmp
);
5178 tcg_temp_free_i32(tmp3
);
5179 tmp
= tcg_temp_new_i32();
5180 gen_neon_narrow_op(op
== 8, u
, size
- 1, tmp
, cpu_V0
);
5181 neon_store_reg(rd
, pass
, tmp
);
5183 tcg_temp_free_i32(tmp2
);
5185 } else if (op
== 10) {
5187 if (q
|| (rd
& 1)) {
5190 tmp
= neon_load_reg(rm
, 0);
5191 tmp2
= neon_load_reg(rm
, 1);
5192 for (pass
= 0; pass
< 2; pass
++) {
5196 gen_neon_widen(cpu_V0
, tmp
, size
, u
);
5199 /* The shift is less than the width of the source
5200 type, so we can just shift the whole register. */
5201 tcg_gen_shli_i64(cpu_V0
, cpu_V0
, shift
);
5202 /* Widen the result of shift: we need to clear
5203 * the potential overflow bits resulting from
5204 * left bits of the narrow input appearing as
5205 * right bits of left the neighbour narrow
5207 if (size
< 2 || !u
) {
5210 imm
= (0xffu
>> (8 - shift
));
5212 } else if (size
== 1) {
5213 imm
= 0xffff >> (16 - shift
);
5216 imm
= 0xffffffff >> (32 - shift
);
5219 imm64
= imm
| (((uint64_t)imm
) << 32);
5223 tcg_gen_andi_i64(cpu_V0
, cpu_V0
, ~imm64
);
5226 neon_store_reg64(cpu_V0
, rd
+ pass
);
5228 } else if (op
>= 14) {
5229 /* VCVT fixed-point. */
5230 if (!(insn
& (1 << 21)) || (q
&& ((rd
| rm
) & 1))) {
5233 /* We have already masked out the must-be-1 top bit of imm6,
5234 * hence this 32-shift where the ARM ARM has 64-imm6.
5237 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
5238 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, pass
));
5241 gen_vfp_ulto(0, shift
, 1);
5243 gen_vfp_slto(0, shift
, 1);
5246 gen_vfp_toul(0, shift
, 1);
5248 gen_vfp_tosl(0, shift
, 1);
5250 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, pass
));
5255 } else { /* (insn & 0x00380080) == 0 */
5257 if (q
&& (rd
& 1)) {
5261 op
= (insn
>> 8) & 0xf;
5262 /* One register and immediate. */
5263 imm
= (u
<< 7) | ((insn
>> 12) & 0x70) | (insn
& 0xf);
5264 invert
= (insn
& (1 << 5)) != 0;
5265 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5266 * We choose to not special-case this and will behave as if a
5267 * valid constant encoding of 0 had been given.
5286 imm
= (imm
<< 8) | (imm
<< 24);
5289 imm
= (imm
<< 8) | 0xff;
5292 imm
= (imm
<< 16) | 0xffff;
5295 imm
|= (imm
<< 8) | (imm
<< 16) | (imm
<< 24);
5303 imm
= ((imm
& 0x80) << 24) | ((imm
& 0x3f) << 19)
5304 | ((imm
& 0x40) ? (0x1f << 25) : (1 << 30));
5310 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
5311 if (op
& 1 && op
< 12) {
5312 tmp
= neon_load_reg(rd
, pass
);
5314 /* The immediate value has already been inverted, so
5316 tcg_gen_andi_i32(tmp
, tmp
, imm
);
5318 tcg_gen_ori_i32(tmp
, tmp
, imm
);
5322 tmp
= tcg_temp_new_i32();
5323 if (op
== 14 && invert
) {
5327 for (n
= 0; n
< 4; n
++) {
5328 if (imm
& (1 << (n
+ (pass
& 1) * 4)))
5329 val
|= 0xff << (n
* 8);
5331 tcg_gen_movi_i32(tmp
, val
);
5333 tcg_gen_movi_i32(tmp
, imm
);
5336 neon_store_reg(rd
, pass
, tmp
);
5339 } else { /* (insn & 0x00800010 == 0x00800000) */
5341 op
= (insn
>> 8) & 0xf;
5342 if ((insn
& (1 << 6)) == 0) {
5343 /* Three registers of different lengths. */
            /* undefreq: bit 0 : UNDEF if size != 0
             *           bit 1 : UNDEF if size == 0
             *           bit 2 : UNDEF if U == 1
             * Note that [1:0] set implies 'always UNDEF'
             */
            /* prewiden, src1_wide, src2_wide, undefreq */
            static const int neon_3reg_wide[16][4] = {
                {1, 0, 0, 0}, /* VADDL */
                {1, 1, 0, 0}, /* VADDW */
                {1, 0, 0, 0}, /* VSUBL */
                {1, 1, 0, 0}, /* VSUBW */
                {0, 1, 1, 0}, /* VADDHN */
                {0, 0, 0, 0}, /* VABAL */
                {0, 1, 1, 0}, /* VSUBHN */
                {0, 0, 0, 0}, /* VABDL */
                {0, 0, 0, 0}, /* VMLAL */
                {0, 0, 0, 6}, /* VQDMLAL */
                {0, 0, 0, 0}, /* VMLSL */
                {0, 0, 0, 6}, /* VQDMLSL */
                {0, 0, 0, 0}, /* Integer VMULL */
                {0, 0, 0, 2}, /* VQDMULL */
                {0, 0, 0, 5}, /* Polynomial VMULL */
                {0, 0, 0, 3}, /* Reserved: always UNDEF */
            };

            prewiden = neon_3reg_wide[op][0];
            src1_wide = neon_3reg_wide[op][1];
            src2_wide = neon_3reg_wide[op][2];
            undefreq = neon_3reg_wide[op][3];
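            /* Each row describes one op value: whether the narrow source
             * operands need widening (prewiden), whether operand 1 and/or 2
             * are already wide Q-register halves (src1_wide/src2_wide), and
             * the undefreq bits checked below.  For example VADDW (op == 1)
             * has src1_wide set, so its first operand is loaded as a 64-bit
             * value per pass instead of being widened from a D register. */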
5378 if (((undefreq
& 1) && (size
!= 0)) ||
5379 ((undefreq
& 2) && (size
== 0)) ||
5380 ((undefreq
& 4) && u
)) {
5383 if ((src1_wide
&& (rn
& 1)) ||
5384 (src2_wide
&& (rm
& 1)) ||
5385 (!src2_wide
&& (rd
& 1))) {
            /* Avoid overlapping operands.  Wide source operands are
               always aligned so will never overlap with wide
               destinations in problematic ways.  */
            if (rd == rm && !src2_wide) {
                tmp = neon_load_reg(rm, 1);
                neon_store_scratch(2, tmp);
            } else if (rd == rn && !src1_wide) {
                tmp = neon_load_reg(rn, 1);
                neon_store_scratch(2, tmp);
            }
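            /* Pass 1 reads the saved copy back with neon_load_scratch(2)
             * below, because pass 0 may already have overwritten the second
             * half of the shared source register when it stored to rd. */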
5400 for (pass
= 0; pass
< 2; pass
++) {
5402 neon_load_reg64(cpu_V0
, rn
+ pass
);
5405 if (pass
== 1 && rd
== rn
) {
5406 tmp
= neon_load_scratch(2);
5408 tmp
= neon_load_reg(rn
, pass
);
5411 gen_neon_widen(cpu_V0
, tmp
, size
, u
);
5415 neon_load_reg64(cpu_V1
, rm
+ pass
);
5418 if (pass
== 1 && rd
== rm
) {
5419 tmp2
= neon_load_scratch(2);
5421 tmp2
= neon_load_reg(rm
, pass
);
5424 gen_neon_widen(cpu_V1
, tmp2
, size
, u
);
5428 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
5429 gen_neon_addl(size
);
5431 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
5432 gen_neon_subl(size
);
5434 case 5: case 7: /* VABAL, VABDL */
5435 switch ((size
<< 1) | u
) {
5437 gen_helper_neon_abdl_s16(cpu_V0
, tmp
, tmp2
);
5440 gen_helper_neon_abdl_u16(cpu_V0
, tmp
, tmp2
);
5443 gen_helper_neon_abdl_s32(cpu_V0
, tmp
, tmp2
);
5446 gen_helper_neon_abdl_u32(cpu_V0
, tmp
, tmp2
);
5449 gen_helper_neon_abdl_s64(cpu_V0
, tmp
, tmp2
);
5452 gen_helper_neon_abdl_u64(cpu_V0
, tmp
, tmp2
);
5456 tcg_temp_free_i32(tmp2
);
5457 tcg_temp_free_i32(tmp
);
5459 case 8: case 9: case 10: case 11: case 12: case 13:
5460 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
5461 gen_neon_mull(cpu_V0
, tmp
, tmp2
, size
, u
);
5463 case 14: /* Polynomial VMULL */
5464 gen_helper_neon_mull_p8(cpu_V0
, tmp
, tmp2
);
5465 tcg_temp_free_i32(tmp2
);
5466 tcg_temp_free_i32(tmp
);
5468 default: /* 15 is RESERVED: caught earlier */
5473 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
5474 neon_store_reg64(cpu_V0
, rd
+ pass
);
5475 } else if (op
== 5 || (op
>= 8 && op
<= 11)) {
5477 neon_load_reg64(cpu_V1
, rd
+ pass
);
5479 case 10: /* VMLSL */
5480 gen_neon_negl(cpu_V0
, size
);
5482 case 5: case 8: /* VABAL, VMLAL */
5483 gen_neon_addl(size
);
5485 case 9: case 11: /* VQDMLAL, VQDMLSL */
5486 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
5488 gen_neon_negl(cpu_V0
, size
);
5490 gen_neon_addl_saturate(cpu_V0
, cpu_V1
, size
);
5495 neon_store_reg64(cpu_V0
, rd
+ pass
);
5496 } else if (op
== 4 || op
== 6) {
5497 /* Narrowing operation. */
5498 tmp
= tcg_temp_new_i32();
5502 gen_helper_neon_narrow_high_u8(tmp
, cpu_V0
);
5505 gen_helper_neon_narrow_high_u16(tmp
, cpu_V0
);
5508 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
5509 tcg_gen_trunc_i64_i32(tmp
, cpu_V0
);
5516 gen_helper_neon_narrow_round_high_u8(tmp
, cpu_V0
);
5519 gen_helper_neon_narrow_round_high_u16(tmp
, cpu_V0
);
5522 tcg_gen_addi_i64(cpu_V0
, cpu_V0
, 1u << 31);
5523 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
5524 tcg_gen_trunc_i64_i32(tmp
, cpu_V0
);
5532 neon_store_reg(rd
, 0, tmp3
);
5533 neon_store_reg(rd
, 1, tmp
);
5536 /* Write back the result. */
5537 neon_store_reg64(cpu_V0
, rd
+ pass
);
            /* Two registers and a scalar. NB that for ops of this form
             * the ARM ARM labels bit 24 as Q, but it is in our variable
             * 'u', not 'q'.
             */
5549 case 1: /* Float VMLA scalar */
5550 case 5: /* Floating point VMLS scalar */
5551 case 9: /* Floating point VMUL scalar */
5556 case 0: /* Integer VMLA scalar */
5557 case 4: /* Integer VMLS scalar */
5558 case 8: /* Integer VMUL scalar */
5559 case 12: /* VQDMULH scalar */
5560 case 13: /* VQRDMULH scalar */
5561 if (u
&& ((rd
| rn
) & 1)) {
5564 tmp
= neon_get_scalar(size
, rm
);
5565 neon_store_scratch(0, tmp
);
5566 for (pass
= 0; pass
< (u
? 4 : 2); pass
++) {
5567 tmp
= neon_load_scratch(0);
5568 tmp2
= neon_load_reg(rn
, pass
);
5571 gen_helper_neon_qdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
5573 gen_helper_neon_qdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
5575 } else if (op
== 13) {
5577 gen_helper_neon_qrdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
5579 gen_helper_neon_qrdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
5581 } else if (op
& 1) {
5582 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5583 gen_helper_vfp_muls(tmp
, tmp
, tmp2
, fpstatus
);
5584 tcg_temp_free_ptr(fpstatus
);
5587 case 0: gen_helper_neon_mul_u8(tmp
, tmp
, tmp2
); break;
5588 case 1: gen_helper_neon_mul_u16(tmp
, tmp
, tmp2
); break;
5589 case 2: tcg_gen_mul_i32(tmp
, tmp
, tmp2
); break;
5593 tcg_temp_free_i32(tmp2
);
5596 tmp2
= neon_load_reg(rd
, pass
);
5599 gen_neon_add(size
, tmp
, tmp2
);
5603 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5604 gen_helper_vfp_adds(tmp
, tmp
, tmp2
, fpstatus
);
5605 tcg_temp_free_ptr(fpstatus
);
5609 gen_neon_rsb(size
, tmp
, tmp2
);
5613 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5614 gen_helper_vfp_subs(tmp
, tmp2
, tmp
, fpstatus
);
5615 tcg_temp_free_ptr(fpstatus
);
5621 tcg_temp_free_i32(tmp2
);
5623 neon_store_reg(rd
, pass
, tmp
);
5626 case 3: /* VQDMLAL scalar */
5627 case 7: /* VQDMLSL scalar */
5628 case 11: /* VQDMULL scalar */
            case 2: /* VMLAL scalar */
5634 case 6: /* VMLSL scalar */
5635 case 10: /* VMULL scalar */
5639 tmp2
= neon_get_scalar(size
, rm
);
5640 /* We need a copy of tmp2 because gen_neon_mull
5641 * deletes it during pass 0. */
5642 tmp4
= tcg_temp_new_i32();
5643 tcg_gen_mov_i32(tmp4
, tmp2
);
5644 tmp3
= neon_load_reg(rn
, 1);
5646 for (pass
= 0; pass
< 2; pass
++) {
5648 tmp
= neon_load_reg(rn
, 0);
5653 gen_neon_mull(cpu_V0
, tmp
, tmp2
, size
, u
);
5655 neon_load_reg64(cpu_V1
, rd
+ pass
);
5659 gen_neon_negl(cpu_V0
, size
);
5662 gen_neon_addl(size
);
5665 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
5667 gen_neon_negl(cpu_V0
, size
);
5669 gen_neon_addl_saturate(cpu_V0
, cpu_V1
, size
);
5675 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
5680 neon_store_reg64(cpu_V0
, rd
+ pass
);
5685 default: /* 14 and 15 are RESERVED */
5689 } else { /* size == 3 */
5692 imm
= (insn
>> 8) & 0xf;
5697 if (q
&& ((rd
| rn
| rm
) & 1)) {
5702 neon_load_reg64(cpu_V0
, rn
);
5704 neon_load_reg64(cpu_V1
, rn
+ 1);
5706 } else if (imm
== 8) {
5707 neon_load_reg64(cpu_V0
, rn
+ 1);
5709 neon_load_reg64(cpu_V1
, rm
);
5712 tmp64
= tcg_temp_new_i64();
5714 neon_load_reg64(cpu_V0
, rn
);
5715 neon_load_reg64(tmp64
, rn
+ 1);
5717 neon_load_reg64(cpu_V0
, rn
+ 1);
5718 neon_load_reg64(tmp64
, rm
);
5720 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, (imm
& 7) * 8);
5721 tcg_gen_shli_i64(cpu_V1
, tmp64
, 64 - ((imm
& 7) * 8));
5722 tcg_gen_or_i64(cpu_V0
, cpu_V0
, cpu_V1
);
5724 neon_load_reg64(cpu_V1
, rm
);
5726 neon_load_reg64(cpu_V1
, rm
+ 1);
5729 tcg_gen_shli_i64(cpu_V1
, cpu_V1
, 64 - (imm
* 8));
5730 tcg_gen_shri_i64(tmp64
, tmp64
, imm
* 8);
5731 tcg_gen_or_i64(cpu_V1
, cpu_V1
, tmp64
);
5732 tcg_temp_free_i64(tmp64
);
5735 neon_load_reg64(cpu_V0
, rn
);
5736 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, imm
* 8);
5737 neon_load_reg64(cpu_V1
, rm
);
5738 tcg_gen_shli_i64(cpu_V1
, cpu_V1
, 64 - (imm
* 8));
5739 tcg_gen_or_i64(cpu_V0
, cpu_V0
, cpu_V1
);
5741 neon_store_reg64(cpu_V0
, rd
);
5743 neon_store_reg64(cpu_V1
, rd
+ 1);
5745 } else if ((insn
& (1 << 11)) == 0) {
5746 /* Two register misc. */
5747 op
= ((insn
>> 12) & 0x30) | ((insn
>> 7) & 0xf);
5748 size
= (insn
>> 18) & 3;
5749 /* UNDEF for unknown op values and bad op-size combinations */
5750 if ((neon_2rm_sizes
[op
] & (1 << size
)) == 0) {
5753 if ((op
!= NEON_2RM_VMOVN
&& op
!= NEON_2RM_VQMOVN
) &&
5754 q
&& ((rm
| rd
) & 1)) {
5758 case NEON_2RM_VREV64
:
5759 for (pass
= 0; pass
< (q
? 2 : 1); pass
++) {
5760 tmp
= neon_load_reg(rm
, pass
* 2);
5761 tmp2
= neon_load_reg(rm
, pass
* 2 + 1);
5763 case 0: tcg_gen_bswap32_i32(tmp
, tmp
); break;
5764 case 1: gen_swap_half(tmp
); break;
5765 case 2: /* no-op */ break;
5768 neon_store_reg(rd
, pass
* 2 + 1, tmp
);
5770 neon_store_reg(rd
, pass
* 2, tmp2
);
5773 case 0: tcg_gen_bswap32_i32(tmp2
, tmp2
); break;
5774 case 1: gen_swap_half(tmp2
); break;
5777 neon_store_reg(rd
, pass
* 2, tmp2
);
5781 case NEON_2RM_VPADDL
: case NEON_2RM_VPADDL_U
:
5782 case NEON_2RM_VPADAL
: case NEON_2RM_VPADAL_U
:
5783 for (pass
= 0; pass
< q
+ 1; pass
++) {
5784 tmp
= neon_load_reg(rm
, pass
* 2);
5785 gen_neon_widen(cpu_V0
, tmp
, size
, op
& 1);
5786 tmp
= neon_load_reg(rm
, pass
* 2 + 1);
5787 gen_neon_widen(cpu_V1
, tmp
, size
, op
& 1);
5789 case 0: gen_helper_neon_paddl_u16(CPU_V001
); break;
5790 case 1: gen_helper_neon_paddl_u32(CPU_V001
); break;
5791 case 2: tcg_gen_add_i64(CPU_V001
); break;
5794 if (op
>= NEON_2RM_VPADAL
) {
5796 neon_load_reg64(cpu_V1
, rd
+ pass
);
5797 gen_neon_addl(size
);
5799 neon_store_reg64(cpu_V0
, rd
+ pass
);
5805 for (n
= 0; n
< (q
? 4 : 2); n
+= 2) {
5806 tmp
= neon_load_reg(rm
, n
);
5807 tmp2
= neon_load_reg(rd
, n
+ 1);
5808 neon_store_reg(rm
, n
, tmp2
);
5809 neon_store_reg(rd
, n
+ 1, tmp
);
5816 if (gen_neon_unzip(rd
, rm
, size
, q
)) {
5821 if (gen_neon_zip(rd
, rm
, size
, q
)) {
5825 case NEON_2RM_VMOVN
: case NEON_2RM_VQMOVN
:
5826 /* also VQMOVUN; op field and mnemonics don't line up */
5831 for (pass
= 0; pass
< 2; pass
++) {
5832 neon_load_reg64(cpu_V0
, rm
+ pass
);
5833 tmp
= tcg_temp_new_i32();
5834 gen_neon_narrow_op(op
== NEON_2RM_VMOVN
, q
, size
,
5839 neon_store_reg(rd
, 0, tmp2
);
5840 neon_store_reg(rd
, 1, tmp
);
5844 case NEON_2RM_VSHLL
:
5845 if (q
|| (rd
& 1)) {
5848 tmp
= neon_load_reg(rm
, 0);
5849 tmp2
= neon_load_reg(rm
, 1);
5850 for (pass
= 0; pass
< 2; pass
++) {
5853 gen_neon_widen(cpu_V0
, tmp
, size
, 1);
5854 tcg_gen_shli_i64(cpu_V0
, cpu_V0
, 8 << size
);
5855 neon_store_reg64(cpu_V0
, rd
+ pass
);
5858 case NEON_2RM_VCVT_F16_F32
:
5859 if (!arm_feature(env
, ARM_FEATURE_VFP_FP16
) ||
5863 tmp
= tcg_temp_new_i32();
5864 tmp2
= tcg_temp_new_i32();
5865 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 0));
5866 gen_helper_neon_fcvt_f32_to_f16(tmp
, cpu_F0s
, cpu_env
);
5867 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 1));
5868 gen_helper_neon_fcvt_f32_to_f16(tmp2
, cpu_F0s
, cpu_env
);
5869 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
5870 tcg_gen_or_i32(tmp2
, tmp2
, tmp
);
5871 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 2));
5872 gen_helper_neon_fcvt_f32_to_f16(tmp
, cpu_F0s
, cpu_env
);
5873 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 3));
5874 neon_store_reg(rd
, 0, tmp2
);
5875 tmp2
= tcg_temp_new_i32();
5876 gen_helper_neon_fcvt_f32_to_f16(tmp2
, cpu_F0s
, cpu_env
);
5877 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
5878 tcg_gen_or_i32(tmp2
, tmp2
, tmp
);
5879 neon_store_reg(rd
, 1, tmp2
);
5880 tcg_temp_free_i32(tmp
);
5882 case NEON_2RM_VCVT_F32_F16
:
5883 if (!arm_feature(env
, ARM_FEATURE_VFP_FP16
) ||
5887 tmp3
= tcg_temp_new_i32();
5888 tmp
= neon_load_reg(rm
, 0);
5889 tmp2
= neon_load_reg(rm
, 1);
5890 tcg_gen_ext16u_i32(tmp3
, tmp
);
5891 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s
, tmp3
, cpu_env
);
5892 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 0));
5893 tcg_gen_shri_i32(tmp3
, tmp
, 16);
5894 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s
, tmp3
, cpu_env
);
5895 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 1));
5896 tcg_temp_free_i32(tmp
);
5897 tcg_gen_ext16u_i32(tmp3
, tmp2
);
5898 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s
, tmp3
, cpu_env
);
5899 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 2));
5900 tcg_gen_shri_i32(tmp3
, tmp2
, 16);
5901 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s
, tmp3
, cpu_env
);
5902 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 3));
5903 tcg_temp_free_i32(tmp2
);
5904 tcg_temp_free_i32(tmp3
);
5908 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
5909 if (neon_2rm_is_float_op(op
)) {
5910 tcg_gen_ld_f32(cpu_F0s
, cpu_env
,
5911 neon_reg_offset(rm
, pass
));
5914 tmp
= neon_load_reg(rm
, pass
);
5917 case NEON_2RM_VREV32
:
5919 case 0: tcg_gen_bswap32_i32(tmp
, tmp
); break;
5920 case 1: gen_swap_half(tmp
); break;
5924 case NEON_2RM_VREV16
:
5929 case 0: gen_helper_neon_cls_s8(tmp
, tmp
); break;
5930 case 1: gen_helper_neon_cls_s16(tmp
, tmp
); break;
5931 case 2: gen_helper_neon_cls_s32(tmp
, tmp
); break;
5937 case 0: gen_helper_neon_clz_u8(tmp
, tmp
); break;
5938 case 1: gen_helper_neon_clz_u16(tmp
, tmp
); break;
5939 case 2: gen_helper_clz(tmp
, tmp
); break;
5944 gen_helper_neon_cnt_u8(tmp
, tmp
);
5947 tcg_gen_not_i32(tmp
, tmp
);
5949 case NEON_2RM_VQABS
:
5952 gen_helper_neon_qabs_s8(tmp
, cpu_env
, tmp
);
5955 gen_helper_neon_qabs_s16(tmp
, cpu_env
, tmp
);
5958 gen_helper_neon_qabs_s32(tmp
, cpu_env
, tmp
);
5963 case NEON_2RM_VQNEG
:
5966 gen_helper_neon_qneg_s8(tmp
, cpu_env
, tmp
);
5969 gen_helper_neon_qneg_s16(tmp
, cpu_env
, tmp
);
5972 gen_helper_neon_qneg_s32(tmp
, cpu_env
, tmp
);
5977 case NEON_2RM_VCGT0
: case NEON_2RM_VCLE0
:
5978 tmp2
= tcg_const_i32(0);
5980 case 0: gen_helper_neon_cgt_s8(tmp
, tmp
, tmp2
); break;
5981 case 1: gen_helper_neon_cgt_s16(tmp
, tmp
, tmp2
); break;
5982 case 2: gen_helper_neon_cgt_s32(tmp
, tmp
, tmp2
); break;
5985 tcg_temp_free(tmp2
);
5986 if (op
== NEON_2RM_VCLE0
) {
5987 tcg_gen_not_i32(tmp
, tmp
);
5990 case NEON_2RM_VCGE0
: case NEON_2RM_VCLT0
:
5991 tmp2
= tcg_const_i32(0);
5993 case 0: gen_helper_neon_cge_s8(tmp
, tmp
, tmp2
); break;
5994 case 1: gen_helper_neon_cge_s16(tmp
, tmp
, tmp2
); break;
5995 case 2: gen_helper_neon_cge_s32(tmp
, tmp
, tmp2
); break;
5998 tcg_temp_free(tmp2
);
5999 if (op
== NEON_2RM_VCLT0
) {
6000 tcg_gen_not_i32(tmp
, tmp
);
6003 case NEON_2RM_VCEQ0
:
6004 tmp2
= tcg_const_i32(0);
6006 case 0: gen_helper_neon_ceq_u8(tmp
, tmp
, tmp2
); break;
6007 case 1: gen_helper_neon_ceq_u16(tmp
, tmp
, tmp2
); break;
6008 case 2: gen_helper_neon_ceq_u32(tmp
, tmp
, tmp2
); break;
6011 tcg_temp_free(tmp2
);
6015 case 0: gen_helper_neon_abs_s8(tmp
, tmp
); break;
6016 case 1: gen_helper_neon_abs_s16(tmp
, tmp
); break;
6017 case 2: tcg_gen_abs_i32(tmp
, tmp
); break;
6022 tmp2
= tcg_const_i32(0);
6023 gen_neon_rsb(size
, tmp
, tmp2
);
6024 tcg_temp_free(tmp2
);
6026 case NEON_2RM_VCGT0_F
:
6028 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6029 tmp2
= tcg_const_i32(0);
6030 gen_helper_neon_cgt_f32(tmp
, tmp
, tmp2
, fpstatus
);
6031 tcg_temp_free(tmp2
);
6032 tcg_temp_free_ptr(fpstatus
);
6035 case NEON_2RM_VCGE0_F
:
6037 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6038 tmp2
= tcg_const_i32(0);
6039 gen_helper_neon_cge_f32(tmp
, tmp
, tmp2
, fpstatus
);
6040 tcg_temp_free(tmp2
);
6041 tcg_temp_free_ptr(fpstatus
);
6044 case NEON_2RM_VCEQ0_F
:
6046 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6047 tmp2
= tcg_const_i32(0);
6048 gen_helper_neon_ceq_f32(tmp
, tmp
, tmp2
, fpstatus
);
6049 tcg_temp_free(tmp2
);
6050 tcg_temp_free_ptr(fpstatus
);
6053 case NEON_2RM_VCLE0_F
:
6055 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6056 tmp2
= tcg_const_i32(0);
6057 gen_helper_neon_cge_f32(tmp
, tmp2
, tmp
, fpstatus
);
6058 tcg_temp_free(tmp2
);
6059 tcg_temp_free_ptr(fpstatus
);
6062 case NEON_2RM_VCLT0_F
:
6064 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6065 tmp2
= tcg_const_i32(0);
6066 gen_helper_neon_cgt_f32(tmp
, tmp2
, tmp
, fpstatus
);
6067 tcg_temp_free(tmp2
);
6068 tcg_temp_free_ptr(fpstatus
);
6071 case NEON_2RM_VABS_F
:
6074 case NEON_2RM_VNEG_F
:
6078 tmp2
= neon_load_reg(rd
, pass
);
6079 neon_store_reg(rm
, pass
, tmp2
);
6082 tmp2
= neon_load_reg(rd
, pass
);
6084 case 0: gen_neon_trn_u8(tmp
, tmp2
); break;
6085 case 1: gen_neon_trn_u16(tmp
, tmp2
); break;
6088 neon_store_reg(rm
, pass
, tmp2
);
6090 case NEON_2RM_VRECPE
:
6091 gen_helper_recpe_u32(tmp
, tmp
, cpu_env
);
6093 case NEON_2RM_VRSQRTE
:
6094 gen_helper_rsqrte_u32(tmp
, tmp
, cpu_env
);
6096 case NEON_2RM_VRECPE_F
:
6097 gen_helper_recpe_f32(cpu_F0s
, cpu_F0s
, cpu_env
);
6099 case NEON_2RM_VRSQRTE_F
:
6100 gen_helper_rsqrte_f32(cpu_F0s
, cpu_F0s
, cpu_env
);
6102 case NEON_2RM_VCVT_FS
: /* VCVT.F32.S32 */
6105 case NEON_2RM_VCVT_FU
: /* VCVT.F32.U32 */
6108 case NEON_2RM_VCVT_SF
: /* VCVT.S32.F32 */
6109 gen_vfp_tosiz(0, 1);
6111 case NEON_2RM_VCVT_UF
: /* VCVT.U32.F32 */
6112 gen_vfp_touiz(0, 1);
6115 /* Reserved op values were caught by the
6116 * neon_2rm_sizes[] check earlier.
6120 if (neon_2rm_is_float_op(op
)) {
6121 tcg_gen_st_f32(cpu_F0s
, cpu_env
,
6122 neon_reg_offset(rd
, pass
));
6124 neon_store_reg(rd
, pass
, tmp
);
6129 } else if ((insn
& (1 << 10)) == 0) {
6131 int n
= ((insn
>> 8) & 3) + 1;
6132 if ((rn
+ n
) > 32) {
6133 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6134 * helper function running off the end of the register file.
6139 if (insn
& (1 << 6)) {
6140 tmp
= neon_load_reg(rd
, 0);
6142 tmp
= tcg_temp_new_i32();
6143 tcg_gen_movi_i32(tmp
, 0);
6145 tmp2
= neon_load_reg(rm
, 0);
6146 tmp4
= tcg_const_i32(rn
);
6147 tmp5
= tcg_const_i32(n
);
6148 gen_helper_neon_tbl(tmp2
, cpu_env
, tmp2
, tmp
, tmp4
, tmp5
);
6149 tcg_temp_free_i32(tmp
);
6150 if (insn
& (1 << 6)) {
6151 tmp
= neon_load_reg(rd
, 1);
6153 tmp
= tcg_temp_new_i32();
6154 tcg_gen_movi_i32(tmp
, 0);
6156 tmp3
= neon_load_reg(rm
, 1);
6157 gen_helper_neon_tbl(tmp3
, cpu_env
, tmp3
, tmp
, tmp4
, tmp5
);
6158 tcg_temp_free_i32(tmp5
);
6159 tcg_temp_free_i32(tmp4
);
6160 neon_store_reg(rd
, 0, tmp2
);
6161 neon_store_reg(rd
, 1, tmp3
);
6162 tcg_temp_free_i32(tmp
);
6163 } else if ((insn
& 0x380) == 0) {
6165 if ((insn
& (7 << 16)) == 0 || (q
&& (rd
& 1))) {
6168 if (insn
& (1 << 19)) {
6169 tmp
= neon_load_reg(rm
, 1);
6171 tmp
= neon_load_reg(rm
, 0);
6173 if (insn
& (1 << 16)) {
6174 gen_neon_dup_u8(tmp
, ((insn
>> 17) & 3) * 8);
6175 } else if (insn
& (1 << 17)) {
6176 if ((insn
>> 18) & 1)
6177 gen_neon_dup_high16(tmp
);
6179 gen_neon_dup_low16(tmp
);
6181 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
6182 tmp2
= tcg_temp_new_i32();
6183 tcg_gen_mov_i32(tmp2
, tmp
);
6184 neon_store_reg(rd
, pass
, tmp2
);
6186 tcg_temp_free_i32(tmp
);
static int disas_coproc_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
{
    int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
    const ARMCPRegInfo *ri;
    ARMCPU *cpu = arm_env_get_cpu(env);

    cpnum = (insn >> 8) & 0xf;
    if (arm_feature(env, ARM_FEATURE_XSCALE)
        && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
        return 1;
6206 /* First check for coprocessor space used for actual instructions */
6210 if (arm_feature(env
, ARM_FEATURE_IWMMXT
)) {
6211 return disas_iwmmxt_insn(env
, s
, insn
);
6212 } else if (arm_feature(env
, ARM_FEATURE_XSCALE
)) {
6213 return disas_dsp_insn(env
, s
, insn
);
6218 return disas_vfp_insn (env
, s
, insn
);
6223 /* Otherwise treat as a generic register access */
6224 is64
= (insn
& (1 << 25)) == 0;
6225 if (!is64
&& ((insn
& (1 << 4)) == 0)) {
6233 opc1
= (insn
>> 4) & 0xf;
6235 rt2
= (insn
>> 16) & 0xf;
6237 crn
= (insn
>> 16) & 0xf;
6238 opc1
= (insn
>> 21) & 7;
6239 opc2
= (insn
>> 5) & 7;
6242 isread
= (insn
>> 20) & 1;
6243 rt
= (insn
>> 12) & 0xf;
6245 ri
= get_arm_cp_reginfo(cpu
,
6246 ENCODE_CP_REG(cpnum
, is64
, crn
, crm
, opc1
, opc2
));
6248 /* Check access permissions */
6249 if (!cp_access_ok(env
, ri
, isread
)) {
6253 /* Handle special cases first */
6254 switch (ri
->type
& ~(ARM_CP_FLAG_MASK
& ~ARM_CP_SPECIAL
)) {
6261 gen_set_pc_im(s
->pc
);
6262 s
->is_jmp
= DISAS_WFI
;
6273 if (ri
->type
& ARM_CP_CONST
) {
6274 tmp64
= tcg_const_i64(ri
->resetvalue
);
6275 } else if (ri
->readfn
) {
6277 gen_set_pc_im(s
->pc
);
6278 tmp64
= tcg_temp_new_i64();
6279 tmpptr
= tcg_const_ptr(ri
);
6280 gen_helper_get_cp_reg64(tmp64
, cpu_env
, tmpptr
);
6281 tcg_temp_free_ptr(tmpptr
);
6283 tmp64
= tcg_temp_new_i64();
6284 tcg_gen_ld_i64(tmp64
, cpu_env
, ri
->fieldoffset
);
6286 tmp
= tcg_temp_new_i32();
6287 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
6288 store_reg(s
, rt
, tmp
);
6289 tcg_gen_shri_i64(tmp64
, tmp64
, 32);
6290 tmp
= tcg_temp_new_i32();
6291 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
6292 tcg_temp_free_i64(tmp64
);
6293 store_reg(s
, rt2
, tmp
);
6296 if (ri
->type
& ARM_CP_CONST
) {
6297 tmp
= tcg_const_i32(ri
->resetvalue
);
6298 } else if (ri
->readfn
) {
6300 gen_set_pc_im(s
->pc
);
6301 tmp
= tcg_temp_new_i32();
6302 tmpptr
= tcg_const_ptr(ri
);
6303 gen_helper_get_cp_reg(tmp
, cpu_env
, tmpptr
);
6304 tcg_temp_free_ptr(tmpptr
);
6306 tmp
= load_cpu_offset(ri
->fieldoffset
);
6309 /* Destination register of r15 for 32 bit loads sets
6310 * the condition codes from the high 4 bits of the value
6313 tcg_temp_free_i32(tmp
);
6315 store_reg(s
, rt
, tmp
);
6320 if (ri
->type
& ARM_CP_CONST
) {
6321 /* If not forbidden by access permissions, treat as WI */
6327 TCGv_i64 tmp64
= tcg_temp_new_i64();
6328 tmplo
= load_reg(s
, rt
);
6329 tmphi
= load_reg(s
, rt2
);
6330 tcg_gen_concat_i32_i64(tmp64
, tmplo
, tmphi
);
6331 tcg_temp_free_i32(tmplo
);
6332 tcg_temp_free_i32(tmphi
);
6334 TCGv_ptr tmpptr
= tcg_const_ptr(ri
);
6335 gen_set_pc_im(s
->pc
);
6336 gen_helper_set_cp_reg64(cpu_env
, tmpptr
, tmp64
);
6337 tcg_temp_free_ptr(tmpptr
);
6339 tcg_gen_st_i64(tmp64
, cpu_env
, ri
->fieldoffset
);
6341 tcg_temp_free_i64(tmp64
);
6346 gen_set_pc_im(s
->pc
);
6347 tmp
= load_reg(s
, rt
);
6348 tmpptr
= tcg_const_ptr(ri
);
6349 gen_helper_set_cp_reg(cpu_env
, tmpptr
, tmp
);
6350 tcg_temp_free_ptr(tmpptr
);
6351 tcg_temp_free_i32(tmp
);
6353 TCGv tmp
= load_reg(s
, rt
);
6354 store_cpu_offset(tmp
, ri
->fieldoffset
);
    /* We default to ending the TB on a coprocessor register write,
     * but allow this to be suppressed by the register definition
     * (usually only necessary to work around guest bugs).
     */
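    /* Ending the TB means the new value is visible to the translator before
     * any following instruction is decoded, which matters for writes that,
     * for example, enable the MMU or change interrupt masking. */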
6361 if (!(ri
->type
& ARM_CP_SUPPRESS_TB_END
)) {
/* Store a 64-bit value to a register pair.  Clobbers val.  */
static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
{
    TCGv tmp;
    tmp = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(tmp, val);
    store_reg(s, rlow, tmp);
    tmp = tcg_temp_new_i32();
    tcg_gen_shri_i64(val, val, 32);
    tcg_gen_trunc_i64_i32(tmp, val);
    store_reg(s, rhigh, tmp);
}
/* load a 32-bit value from a register and perform a 64-bit accumulate.  */
static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
{
    TCGv_i64 tmp;
    TCGv tmp2;

    /* Load value and extend to 64 bits.  */
    tmp = tcg_temp_new_i64();
    tmp2 = load_reg(s, rlow);
    tcg_gen_extu_i32_i64(tmp, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_add_i64(val, val, tmp);
    tcg_temp_free_i64(tmp);
}
/* load and add a 64-bit value from a register pair.  */
static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
{
    TCGv_i64 tmp;
    TCGv tmpl;
    TCGv tmph;

    /* Load 64-bit value rd:rn.  */
    tmpl = load_reg(s, rlow);
    tmph = load_reg(s, rhigh);
    tmp = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
    tcg_temp_free_i32(tmpl);
    tcg_temp_free_i32(tmph);
    tcg_gen_add_i64(val, val, tmp);
    tcg_temp_free_i64(tmp);
}
/* Set N and Z flags from a 64-bit value.  */
static void gen_logicq_cc(TCGv_i64 val)
{
    TCGv tmp = tcg_temp_new_i32();
    gen_helper_logicq_cc(tmp, val);
    gen_logic_CC(tmp);
    tcg_temp_free_i32(tmp);
}
/* Load/Store exclusive instructions are implemented by remembering
   the value/address loaded, and seeing if these are the same
   when the store is performed. This should be sufficient to implement
   the architecturally mandated semantics, and avoids having to monitor
   regular stores.

   In system emulation mode only one CPU will be running at once, so
   this sequence is effectively atomic.  In user emulation mode we
   throw an exception and handle the atomic operation elsewhere.  */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv addr, int size)
{
    TCGv tmp;

    switch (size) {
    case 0:
        tmp = gen_ld8u(addr, IS_USER(s));
        break;
    case 1:
        tmp = gen_ld16u(addr, IS_USER(s));
        break;
    case 2:
    case 3:
        tmp = gen_ld32(addr, IS_USER(s));
        break;
    default:
        abort();
    }
    tcg_gen_mov_i32(cpu_exclusive_val, tmp);
    store_reg(s, rt, tmp);
    if (size == 3) {
        TCGv tmp2 = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp2, addr, 4);
        tmp = gen_ld32(tmp2, IS_USER(s));
        tcg_temp_free_i32(tmp2);
        tcg_gen_mov_i32(cpu_exclusive_high, tmp);
        store_reg(s, rt2, tmp);
    }
    tcg_gen_mov_i32(cpu_exclusive_addr, addr);
}
static void gen_clrex(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_exclusive_addr, -1);
}
#ifdef CONFIG_USER_ONLY
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv addr, int size)
{
    tcg_gen_mov_i32(cpu_exclusive_test, addr);
    tcg_gen_movi_i32(cpu_exclusive_info,
                     size | (rd << 4) | (rt << 8) | (rt2 << 12));
    gen_exception_insn(s, 4, EXCP_STREX);
}
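/* On this path the compare-and-store itself is carried out by the user-mode
 * EXCP_STREX handler, which unpacks the fields stored in cpu_exclusive_info
 * above (size in the low nibble, then rd, rt and rt2). */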
#else
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv addr, int size)
{
    TCGv tmp;
    int done_label;
    int fail_label;

    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
         [addr] = {Rt};
         {Rd} = 0;
       } else {
         {Rd} = 1;
       } */
    fail_label = gen_new_label();
    done_label = gen_new_label();
    tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
    switch (size) {
    case 0:
        tmp = gen_ld8u(addr, IS_USER(s));
        break;
    case 1:
        tmp = gen_ld16u(addr, IS_USER(s));
        break;
    case 2:
    case 3:
        tmp = gen_ld32(addr, IS_USER(s));
        break;
    default:
        abort();
    }
    tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
    tcg_temp_free_i32(tmp);
    if (size == 3) {
        TCGv tmp2 = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp2, addr, 4);
        tmp = gen_ld32(tmp2, IS_USER(s));
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
        tcg_temp_free_i32(tmp);
    }
    tmp = load_reg(s, rt);
    switch (size) {
    case 0:
        gen_st8(tmp, addr, IS_USER(s));
        break;
    case 1:
        gen_st16(tmp, addr, IS_USER(s));
        break;
    case 2:
    case 3:
        gen_st32(tmp, addr, IS_USER(s));
        break;
    default:
        abort();
    }
    if (size == 3) {
        tcg_gen_addi_i32(addr, addr, 4);
        tmp = load_reg(s, rt2);
        gen_st32(tmp, addr, IS_USER(s));
    }
    tcg_gen_movi_i32(cpu_R[rd], 0);
    tcg_gen_br(done_label);
    gen_set_label(fail_label);
    tcg_gen_movi_i32(cpu_R[rd], 1);
    gen_set_label(done_label);
    tcg_gen_movi_i32(cpu_exclusive_addr, -1);
}
#endif
6552 static void disas_arm_insn(CPUARMState
* env
, DisasContext
*s
)
6554 unsigned int cond
, insn
, val
, op1
, i
, shift
, rm
, rs
, rn
, rd
, sh
;
6561 insn
= arm_ldl_code(env
, s
->pc
, s
->bswap_code
);
6564 /* M variants do not implement ARM mode. */
6569 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
6570 * choose to UNDEF. In ARMv5 and above the space is used
6571 * for miscellaneous unconditional instructions.
6575 /* Unconditional instructions. */
6576 if (((insn
>> 25) & 7) == 1) {
6577 /* NEON Data processing. */
6578 if (!arm_feature(env
, ARM_FEATURE_NEON
))
6581 if (disas_neon_data_insn(env
, s
, insn
))
6585 if ((insn
& 0x0f100000) == 0x04000000) {
6586 /* NEON load/store. */
6587 if (!arm_feature(env
, ARM_FEATURE_NEON
))
6590 if (disas_neon_ls_insn(env
, s
, insn
))
6594 if (((insn
& 0x0f30f000) == 0x0510f000) ||
6595 ((insn
& 0x0f30f010) == 0x0710f000)) {
6596 if ((insn
& (1 << 22)) == 0) {
6598 if (!arm_feature(env
, ARM_FEATURE_V7MP
)) {
6602 /* Otherwise PLD; v5TE+ */
6606 if (((insn
& 0x0f70f000) == 0x0450f000) ||
6607 ((insn
& 0x0f70f010) == 0x0650f000)) {
6609 return; /* PLI; V7 */
6611 if (((insn
& 0x0f700000) == 0x04100000) ||
6612 ((insn
& 0x0f700010) == 0x06100000)) {
6613 if (!arm_feature(env
, ARM_FEATURE_V7MP
)) {
6616 return; /* v7MP: Unallocated memory hint: must NOP */
6619 if ((insn
& 0x0ffffdff) == 0x01010000) {
6622 if (((insn
>> 9) & 1) != s
->bswap_code
) {
6623 /* Dynamic endianness switching not implemented. */
6627 } else if ((insn
& 0x0fffff00) == 0x057ff000) {
6628 switch ((insn
>> 4) & 0xf) {
6637 /* We don't emulate caches so these are a no-op. */
6642 } else if ((insn
& 0x0e5fffe0) == 0x084d0500) {
6648 op1
= (insn
& 0x1f);
6649 addr
= tcg_temp_new_i32();
6650 tmp
= tcg_const_i32(op1
);
6651 gen_helper_get_r13_banked(addr
, cpu_env
, tmp
);
6652 tcg_temp_free_i32(tmp
);
6653 i
= (insn
>> 23) & 3;
6655 case 0: offset
= -4; break; /* DA */
6656 case 1: offset
= 0; break; /* IA */
6657 case 2: offset
= -8; break; /* DB */
6658 case 3: offset
= 4; break; /* IB */
6662 tcg_gen_addi_i32(addr
, addr
, offset
);
6663 tmp
= load_reg(s
, 14);
6664 gen_st32(tmp
, addr
, 0);
6665 tmp
= load_cpu_field(spsr
);
6666 tcg_gen_addi_i32(addr
, addr
, 4);
6667 gen_st32(tmp
, addr
, 0);
6668 if (insn
& (1 << 21)) {
6669 /* Base writeback. */
6671 case 0: offset
= -8; break;
6672 case 1: offset
= 4; break;
6673 case 2: offset
= -4; break;
6674 case 3: offset
= 0; break;
6678 tcg_gen_addi_i32(addr
, addr
, offset
);
6679 tmp
= tcg_const_i32(op1
);
6680 gen_helper_set_r13_banked(cpu_env
, tmp
, addr
);
6681 tcg_temp_free_i32(tmp
);
6682 tcg_temp_free_i32(addr
);
6684 tcg_temp_free_i32(addr
);
6687 } else if ((insn
& 0x0e50ffe0) == 0x08100a00) {
6693 rn
= (insn
>> 16) & 0xf;
6694 addr
= load_reg(s
, rn
);
6695 i
= (insn
>> 23) & 3;
6697 case 0: offset
= -4; break; /* DA */
6698 case 1: offset
= 0; break; /* IA */
6699 case 2: offset
= -8; break; /* DB */
6700 case 3: offset
= 4; break; /* IB */
6704 tcg_gen_addi_i32(addr
, addr
, offset
);
6705 /* Load PC into tmp and CPSR into tmp2. */
6706 tmp
= gen_ld32(addr
, 0);
6707 tcg_gen_addi_i32(addr
, addr
, 4);
6708 tmp2
= gen_ld32(addr
, 0);
6709 if (insn
& (1 << 21)) {
6710 /* Base writeback. */
6712 case 0: offset
= -8; break;
6713 case 1: offset
= 4; break;
6714 case 2: offset
= -4; break;
6715 case 3: offset
= 0; break;
6719 tcg_gen_addi_i32(addr
, addr
, offset
);
6720 store_reg(s
, rn
, addr
);
6722 tcg_temp_free_i32(addr
);
6724 gen_rfe(s
, tmp
, tmp2
);
6726 } else if ((insn
& 0x0e000000) == 0x0a000000) {
6727 /* branch link and change to thumb (blx <offset>) */
6730 val
= (uint32_t)s
->pc
;
6731 tmp
= tcg_temp_new_i32();
6732 tcg_gen_movi_i32(tmp
, val
);
6733 store_reg(s
, 14, tmp
);
6734 /* Sign-extend the 24-bit offset */
6735 offset
= (((int32_t)insn
) << 8) >> 8;
6736 /* offset * 4 + bit24 * 2 + (thumb bit) */
6737 val
+= (offset
<< 2) | ((insn
>> 23) & 2) | 1;
6738 /* pipeline offset */
6740 /* protected by ARCH(5); above, near the start of uncond block */
6743 } else if ((insn
& 0x0e000f00) == 0x0c000100) {
6744 if (arm_feature(env
, ARM_FEATURE_IWMMXT
)) {
6745 /* iWMMXt register transfer. */
6746 if (env
->cp15
.c15_cpar
& (1 << 1))
6747 if (!disas_iwmmxt_insn(env
, s
, insn
))
6750 } else if ((insn
& 0x0fe00000) == 0x0c400000) {
6751 /* Coprocessor double register transfer. */
6753 } else if ((insn
& 0x0f000010) == 0x0e000010) {
6754 /* Additional coprocessor register transfer. */
6755 } else if ((insn
& 0x0ff10020) == 0x01000000) {
6758 /* cps (privileged) */
6762 if (insn
& (1 << 19)) {
6763 if (insn
& (1 << 8))
6765 if (insn
& (1 << 7))
6767 if (insn
& (1 << 6))
6769 if (insn
& (1 << 18))
6772 if (insn
& (1 << 17)) {
6774 val
|= (insn
& 0x1f);
6777 gen_set_psr_im(s
, mask
, 0, val
);
6784 /* if not always execute, we generate a conditional jump to
6786 s
->condlabel
= gen_new_label();
6787 gen_test_cc(cond
^ 1, s
->condlabel
);
6790 if ((insn
& 0x0f900000) == 0x03000000) {
6791 if ((insn
& (1 << 21)) == 0) {
6793 rd
= (insn
>> 12) & 0xf;
6794 val
= ((insn
>> 4) & 0xf000) | (insn
& 0xfff);
6795 if ((insn
& (1 << 22)) == 0) {
6797 tmp
= tcg_temp_new_i32();
6798 tcg_gen_movi_i32(tmp
, val
);
6801 tmp
= load_reg(s
, rd
);
6802 tcg_gen_ext16u_i32(tmp
, tmp
);
6803 tcg_gen_ori_i32(tmp
, tmp
, val
<< 16);
6805 store_reg(s
, rd
, tmp
);
6807 if (((insn
>> 12) & 0xf) != 0xf)
6809 if (((insn
>> 16) & 0xf) == 0) {
6810 gen_nop_hint(s
, insn
& 0xff);
6812 /* CPSR = immediate */
6814 shift
= ((insn
>> 8) & 0xf) * 2;
6816 val
= (val
>> shift
) | (val
<< (32 - shift
));
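            /* Standard ARM rotated immediate: an 8-bit value rotated right
             * by twice the 4-bit rotate field.  E.g. imm8 = 0x02 with
             * rotate = 0xf gives shift = 30 and val = 0x00000008. */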
6817 i
= ((insn
& (1 << 22)) != 0);
6818 if (gen_set_psr_im(s
, msr_mask(env
, s
, (insn
>> 16) & 0xf, i
), i
, val
))
6822 } else if ((insn
& 0x0f900000) == 0x01000000
6823 && (insn
& 0x00000090) != 0x00000090) {
6824 /* miscellaneous instructions */
6825 op1
= (insn
>> 21) & 3;
6826 sh
= (insn
>> 4) & 0xf;
6829 case 0x0: /* move program status register */
6832 tmp
= load_reg(s
, rm
);
6833 i
= ((op1
& 2) != 0);
6834 if (gen_set_psr(s
, msr_mask(env
, s
, (insn
>> 16) & 0xf, i
), i
, tmp
))
6838 rd
= (insn
>> 12) & 0xf;
6842 tmp
= load_cpu_field(spsr
);
6844 tmp
= tcg_temp_new_i32();
6845 gen_helper_cpsr_read(tmp
, cpu_env
);
6847 store_reg(s
, rd
, tmp
);
6852 /* branch/exchange thumb (bx). */
6854 tmp
= load_reg(s
, rm
);
6856 } else if (op1
== 3) {
6859 rd
= (insn
>> 12) & 0xf;
6860 tmp
= load_reg(s
, rm
);
6861 gen_helper_clz(tmp
, tmp
);
6862 store_reg(s
, rd
, tmp
);
6870 /* Trivial implementation equivalent to bx. */
6871 tmp
= load_reg(s
, rm
);
6882 /* branch link/exchange thumb (blx) */
6883 tmp
= load_reg(s
, rm
);
6884 tmp2
= tcg_temp_new_i32();
6885 tcg_gen_movi_i32(tmp2
, s
->pc
);
6886 store_reg(s
, 14, tmp2
);
6889 case 0x5: /* saturating add/subtract */
6891 rd
= (insn
>> 12) & 0xf;
6892 rn
= (insn
>> 16) & 0xf;
6893 tmp
= load_reg(s
, rm
);
6894 tmp2
= load_reg(s
, rn
);
6896 gen_helper_double_saturate(tmp2
, cpu_env
, tmp2
);
6898 gen_helper_sub_saturate(tmp
, cpu_env
, tmp
, tmp2
);
6900 gen_helper_add_saturate(tmp
, cpu_env
, tmp
, tmp2
);
6901 tcg_temp_free_i32(tmp2
);
6902 store_reg(s
, rd
, tmp
);
6905 /* SMC instruction (op1 == 3)
6906 and undefined instructions (op1 == 0 || op1 == 2)
6913 gen_exception_insn(s
, 4, EXCP_BKPT
);
6915 case 0x8: /* signed multiply */
6920 rs
= (insn
>> 8) & 0xf;
6921 rn
= (insn
>> 12) & 0xf;
6922 rd
= (insn
>> 16) & 0xf;
6924 /* (32 * 16) >> 16 */
6925 tmp
= load_reg(s
, rm
);
6926 tmp2
= load_reg(s
, rs
);
6928 tcg_gen_sari_i32(tmp2
, tmp2
, 16);
6931 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
6932 tcg_gen_shri_i64(tmp64
, tmp64
, 16);
6933 tmp
= tcg_temp_new_i32();
6934 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
6935 tcg_temp_free_i64(tmp64
);
6936 if ((sh
& 2) == 0) {
6937 tmp2
= load_reg(s
, rn
);
6938 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
6939 tcg_temp_free_i32(tmp2
);
6941 store_reg(s
, rd
, tmp
);
6944 tmp
= load_reg(s
, rm
);
6945 tmp2
= load_reg(s
, rs
);
6946 gen_mulxy(tmp
, tmp2
, sh
& 2, sh
& 4);
6947 tcg_temp_free_i32(tmp2
);
6949 tmp64
= tcg_temp_new_i64();
6950 tcg_gen_ext_i32_i64(tmp64
, tmp
);
6951 tcg_temp_free_i32(tmp
);
6952 gen_addq(s
, tmp64
, rn
, rd
);
6953 gen_storeq_reg(s
, rn
, rd
, tmp64
);
6954 tcg_temp_free_i64(tmp64
);
6957 tmp2
= load_reg(s
, rn
);
6958 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
6959 tcg_temp_free_i32(tmp2
);
6961 store_reg(s
, rd
, tmp
);
6968 } else if (((insn
& 0x0e000000) == 0 &&
6969 (insn
& 0x00000090) != 0x90) ||
6970 ((insn
& 0x0e000000) == (1 << 25))) {
6971 int set_cc
, logic_cc
, shiftop
;
6973 op1
= (insn
>> 21) & 0xf;
6974 set_cc
= (insn
>> 20) & 1;
6975 logic_cc
= table_logic_cc
[op1
] & set_cc
;
6977 /* data processing instruction */
6978 if (insn
& (1 << 25)) {
6979 /* immediate operand */
6981 shift
= ((insn
>> 8) & 0xf) * 2;
6983 val
= (val
>> shift
) | (val
<< (32 - shift
));
6985 tmp2
= tcg_temp_new_i32();
6986 tcg_gen_movi_i32(tmp2
, val
);
6987 if (logic_cc
&& shift
) {
6988 gen_set_CF_bit31(tmp2
);
6993 tmp2
= load_reg(s
, rm
);
6994 shiftop
= (insn
>> 5) & 3;
6995 if (!(insn
& (1 << 4))) {
6996 shift
= (insn
>> 7) & 0x1f;
6997 gen_arm_shift_im(tmp2
, shiftop
, shift
, logic_cc
);
6999 rs
= (insn
>> 8) & 0xf;
7000 tmp
= load_reg(s
, rs
);
7001 gen_arm_shift_reg(tmp2
, shiftop
, tmp
, logic_cc
);
7004 if (op1
!= 0x0f && op1
!= 0x0d) {
7005 rn
= (insn
>> 16) & 0xf;
7006 tmp
= load_reg(s
, rn
);
7010 rd
= (insn
>> 12) & 0xf;
7013 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
7017 store_reg_bx(env
, s
, rd
, tmp
);
7020 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
7024 store_reg_bx(env
, s
, rd
, tmp
);
7027 if (set_cc
&& rd
== 15) {
7028 /* SUBS r15, ... is used for exception return. */
7032 gen_sub_CC(tmp
, tmp
, tmp2
);
7033 gen_exception_return(s
, tmp
);
7036 gen_sub_CC(tmp
, tmp
, tmp2
);
7038 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
7040 store_reg_bx(env
, s
, rd
, tmp
);
7045 gen_sub_CC(tmp
, tmp2
, tmp
);
7047 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
7049 store_reg_bx(env
, s
, rd
, tmp
);
7053 gen_add_CC(tmp
, tmp
, tmp2
);
7055 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7057 store_reg_bx(env
, s
, rd
, tmp
);
7061 gen_helper_adc_cc(tmp
, cpu_env
, tmp
, tmp2
);
7063 gen_add_carry(tmp
, tmp
, tmp2
);
7065 store_reg_bx(env
, s
, rd
, tmp
);
7069 gen_helper_sbc_cc(tmp
, cpu_env
, tmp
, tmp2
);
7071 gen_sub_carry(tmp
, tmp
, tmp2
);
7073 store_reg_bx(env
, s
, rd
, tmp
);
7077 gen_helper_sbc_cc(tmp
, cpu_env
, tmp2
, tmp
);
7079 gen_sub_carry(tmp
, tmp2
, tmp
);
7081 store_reg_bx(env
, s
, rd
, tmp
);
7085 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
7088 tcg_temp_free_i32(tmp
);
7092 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
7095 tcg_temp_free_i32(tmp
);
7099 gen_sub_CC(tmp
, tmp
, tmp2
);
7101 tcg_temp_free_i32(tmp
);
7105 gen_add_CC(tmp
, tmp
, tmp2
);
7107 tcg_temp_free_i32(tmp
);
7110 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
7114 store_reg_bx(env
, s
, rd
, tmp
);
7117 if (logic_cc
&& rd
== 15) {
7118 /* MOVS r15, ... is used for exception return. */
7122 gen_exception_return(s
, tmp2
);
7127 store_reg_bx(env
, s
, rd
, tmp2
);
7131 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
7135 store_reg_bx(env
, s
, rd
, tmp
);
7139 tcg_gen_not_i32(tmp2
, tmp2
);
7143 store_reg_bx(env
, s
, rd
, tmp2
);
7146 if (op1
!= 0x0f && op1
!= 0x0d) {
7147 tcg_temp_free_i32(tmp2
);
7150 /* other instructions */
7151 op1
= (insn
>> 24) & 0xf;
7155 /* multiplies, extra load/stores */
7156 sh
= (insn
>> 5) & 3;
7159 rd
= (insn
>> 16) & 0xf;
7160 rn
= (insn
>> 12) & 0xf;
7161 rs
= (insn
>> 8) & 0xf;
7163 op1
= (insn
>> 20) & 0xf;
7165 case 0: case 1: case 2: case 3: case 6:
7167 tmp
= load_reg(s
, rs
);
7168 tmp2
= load_reg(s
, rm
);
7169 tcg_gen_mul_i32(tmp
, tmp
, tmp2
);
7170 tcg_temp_free_i32(tmp2
);
7171 if (insn
& (1 << 22)) {
7172 /* Subtract (mls) */
7174 tmp2
= load_reg(s
, rn
);
7175 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
7176 tcg_temp_free_i32(tmp2
);
7177 } else if (insn
& (1 << 21)) {
7179 tmp2
= load_reg(s
, rn
);
7180 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7181 tcg_temp_free_i32(tmp2
);
7183 if (insn
& (1 << 20))
7185 store_reg(s
, rd
, tmp
);
7188 /* 64 bit mul double accumulate (UMAAL) */
7190 tmp
= load_reg(s
, rs
);
7191 tmp2
= load_reg(s
, rm
);
7192 tmp64
= gen_mulu_i64_i32(tmp
, tmp2
);
7193 gen_addq_lo(s
, tmp64
, rn
);
7194 gen_addq_lo(s
, tmp64
, rd
);
7195 gen_storeq_reg(s
, rn
, rd
, tmp64
);
7196 tcg_temp_free_i64(tmp64
);
7198 case 8: case 9: case 10: case 11:
7199 case 12: case 13: case 14: case 15:
7200 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
7201 tmp
= load_reg(s
, rs
);
7202 tmp2
= load_reg(s
, rm
);
7203 if (insn
& (1 << 22)) {
7204 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
7206 tmp64
= gen_mulu_i64_i32(tmp
, tmp2
);
7208 if (insn
& (1 << 21)) { /* mult accumulate */
7209 gen_addq(s
, tmp64
, rn
, rd
);
7211 if (insn
& (1 << 20)) {
7212 gen_logicq_cc(tmp64
);
7214 gen_storeq_reg(s
, rn
, rd
, tmp64
);
7215 tcg_temp_free_i64(tmp64
);
7221 rn
= (insn
>> 16) & 0xf;
7222 rd
= (insn
>> 12) & 0xf;
7223 if (insn
& (1 << 23)) {
7224 /* load/store exclusive */
7225 op1
= (insn
>> 21) & 0x3;
7230 addr
= tcg_temp_local_new_i32();
7231 load_reg_var(s
, addr
, rn
);
7232 if (insn
& (1 << 20)) {
7235 gen_load_exclusive(s
, rd
, 15, addr
, 2);
7237 case 1: /* ldrexd */
7238 gen_load_exclusive(s
, rd
, rd
+ 1, addr
, 3);
7240 case 2: /* ldrexb */
7241 gen_load_exclusive(s
, rd
, 15, addr
, 0);
7243 case 3: /* ldrexh */
7244 gen_load_exclusive(s
, rd
, 15, addr
, 1);
7253 gen_store_exclusive(s
, rd
, rm
, 15, addr
, 2);
7255 case 1: /* strexd */
7256 gen_store_exclusive(s
, rd
, rm
, rm
+ 1, addr
, 3);
7258 case 2: /* strexb */
7259 gen_store_exclusive(s
, rd
, rm
, 15, addr
, 0);
7261 case 3: /* strexh */
7262 gen_store_exclusive(s
, rd
, rm
, 15, addr
, 1);
7268 tcg_temp_free(addr
);
7270 /* SWP instruction */
7273 /* ??? This is not really atomic. However we know
7274 we never have multiple CPUs running in parallel,
7275 so it is good enough. */
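                    /* SWP/SWPB semantics: read the old value from [Rn],
                     * store Rm to [Rn], then write the old value to Rd,
                     * which is what the code below does via tmp2. */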
7276 addr
= load_reg(s
, rn
);
7277 tmp
= load_reg(s
, rm
);
7278 if (insn
& (1 << 22)) {
7279 tmp2
= gen_ld8u(addr
, IS_USER(s
));
7280 gen_st8(tmp
, addr
, IS_USER(s
));
7282 tmp2
= gen_ld32(addr
, IS_USER(s
));
7283 gen_st32(tmp
, addr
, IS_USER(s
));
7285 tcg_temp_free_i32(addr
);
7286 store_reg(s
, rd
, tmp2
);
7292 /* Misc load/store */
7293 rn
= (insn
>> 16) & 0xf;
7294 rd
= (insn
>> 12) & 0xf;
7295 addr
= load_reg(s
, rn
);
7296 if (insn
& (1 << 24))
7297 gen_add_datah_offset(s
, insn
, 0, addr
);
7299 if (insn
& (1 << 20)) {
7303 tmp
= gen_ld16u(addr
, IS_USER(s
));
7306 tmp
= gen_ld8s(addr
, IS_USER(s
));
7310 tmp
= gen_ld16s(addr
, IS_USER(s
));
7314 } else if (sh
& 2) {
7319 tmp
= load_reg(s
, rd
);
7320 gen_st32(tmp
, addr
, IS_USER(s
));
7321 tcg_gen_addi_i32(addr
, addr
, 4);
7322 tmp
= load_reg(s
, rd
+ 1);
7323 gen_st32(tmp
, addr
, IS_USER(s
));
7327 tmp
= gen_ld32(addr
, IS_USER(s
));
7328 store_reg(s
, rd
, tmp
);
7329 tcg_gen_addi_i32(addr
, addr
, 4);
7330 tmp
= gen_ld32(addr
, IS_USER(s
));
7334 address_offset
= -4;
7337 tmp
= load_reg(s
, rd
);
7338 gen_st16(tmp
, addr
, IS_USER(s
));
                /* Perform base writeback before the loaded value to
                   ensure correct behavior with overlapping index registers.
                   ldrd with base writeback is undefined if the
                   destination and index registers overlap.  */
7345 if (!(insn
& (1 << 24))) {
7346 gen_add_datah_offset(s
, insn
, address_offset
, addr
);
7347 store_reg(s
, rn
, addr
);
7348 } else if (insn
& (1 << 21)) {
7350 tcg_gen_addi_i32(addr
, addr
, address_offset
);
7351 store_reg(s
, rn
, addr
);
7353 tcg_temp_free_i32(addr
);
7356 /* Complete the load. */
7357 store_reg(s
, rd
, tmp
);
7366 if (insn
& (1 << 4)) {
            /* ARMv6 media instructions.  */
7370 rn
= (insn
>> 16) & 0xf;
7371 rd
= (insn
>> 12) & 0xf;
7372 rs
= (insn
>> 8) & 0xf;
7373 switch ((insn
>> 23) & 3) {
7374 case 0: /* Parallel add/subtract. */
7375 op1
= (insn
>> 20) & 7;
7376 tmp
= load_reg(s
, rn
);
7377 tmp2
= load_reg(s
, rm
);
7378 sh
= (insn
>> 5) & 7;
7379 if ((op1
& 3) == 0 || sh
== 5 || sh
== 6)
7381 gen_arm_parallel_addsub(op1
, sh
, tmp
, tmp2
);
7382 tcg_temp_free_i32(tmp2
);
7383 store_reg(s
, rd
, tmp
);
7386 if ((insn
& 0x00700020) == 0) {
7387 /* Halfword pack. */
7388 tmp
= load_reg(s
, rn
);
7389 tmp2
= load_reg(s
, rm
);
7390 shift
= (insn
>> 7) & 0x1f;
7391 if (insn
& (1 << 6)) {
7395 tcg_gen_sari_i32(tmp2
, tmp2
, shift
);
7396 tcg_gen_andi_i32(tmp
, tmp
, 0xffff0000);
7397 tcg_gen_ext16u_i32(tmp2
, tmp2
);
7401 tcg_gen_shli_i32(tmp2
, tmp2
, shift
);
7402 tcg_gen_ext16u_i32(tmp
, tmp
);
7403 tcg_gen_andi_i32(tmp2
, tmp2
, 0xffff0000);
7405 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
7406 tcg_temp_free_i32(tmp2
);
7407 store_reg(s
, rd
, tmp
);
7408 } else if ((insn
& 0x00200020) == 0x00200000) {
7410 tmp
= load_reg(s
, rm
);
7411 shift
= (insn
>> 7) & 0x1f;
7412 if (insn
& (1 << 6)) {
7415 tcg_gen_sari_i32(tmp
, tmp
, shift
);
7417 tcg_gen_shli_i32(tmp
, tmp
, shift
);
7419 sh
= (insn
>> 16) & 0x1f;
7420 tmp2
= tcg_const_i32(sh
);
7421 if (insn
& (1 << 22))
7422 gen_helper_usat(tmp
, cpu_env
, tmp
, tmp2
);
7424 gen_helper_ssat(tmp
, cpu_env
, tmp
, tmp2
);
7425 tcg_temp_free_i32(tmp2
);
7426 store_reg(s
, rd
, tmp
);
7427 } else if ((insn
& 0x00300fe0) == 0x00200f20) {
7429 tmp
= load_reg(s
, rm
);
7430 sh
= (insn
>> 16) & 0x1f;
7431 tmp2
= tcg_const_i32(sh
);
7432 if (insn
& (1 << 22))
7433 gen_helper_usat16(tmp
, cpu_env
, tmp
, tmp2
);
7435 gen_helper_ssat16(tmp
, cpu_env
, tmp
, tmp2
);
7436 tcg_temp_free_i32(tmp2
);
7437 store_reg(s
, rd
, tmp
);
7438 } else if ((insn
& 0x00700fe0) == 0x00000fa0) {
7440 tmp
= load_reg(s
, rn
);
7441 tmp2
= load_reg(s
, rm
);
7442 tmp3
= tcg_temp_new_i32();
7443 tcg_gen_ld_i32(tmp3
, cpu_env
, offsetof(CPUARMState
, GE
));
7444 gen_helper_sel_flags(tmp
, tmp3
, tmp
, tmp2
);
7445 tcg_temp_free_i32(tmp3
);
7446 tcg_temp_free_i32(tmp2
);
7447 store_reg(s
, rd
, tmp
);
7448 } else if ((insn
& 0x000003e0) == 0x00000060) {
7449 tmp
= load_reg(s
, rm
);
7450 shift
= (insn
>> 10) & 3;
7451 /* ??? In many cases it's not necessary to do a
7452 rotate, a shift is sufficient. */
7454 tcg_gen_rotri_i32(tmp
, tmp
, shift
* 8);
7455 op1
= (insn
>> 20) & 7;
7457 case 0: gen_sxtb16(tmp
); break;
7458 case 2: gen_sxtb(tmp
); break;
7459 case 3: gen_sxth(tmp
); break;
7460 case 4: gen_uxtb16(tmp
); break;
7461 case 6: gen_uxtb(tmp
); break;
7462 case 7: gen_uxth(tmp
); break;
7463 default: goto illegal_op
;
7466 tmp2
= load_reg(s
, rn
);
7467 if ((op1
& 3) == 0) {
7468 gen_add16(tmp
, tmp2
);
7470 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7471 tcg_temp_free_i32(tmp2
);
7474 store_reg(s
, rd
, tmp
);
7475 } else if ((insn
& 0x003f0f60) == 0x003f0f20) {
7477 tmp
= load_reg(s
, rm
);
7478 if (insn
& (1 << 22)) {
7479 if (insn
& (1 << 7)) {
7483 gen_helper_rbit(tmp
, tmp
);
7486 if (insn
& (1 << 7))
7489 tcg_gen_bswap32_i32(tmp
, tmp
);
7491 store_reg(s
, rd
, tmp
);
7496 case 2: /* Multiplies (Type 3). */
7497 switch ((insn
>> 20) & 0x7) {
7499 if (((insn
>> 6) ^ (insn
>> 7)) & 1) {
7500 /* op2 not 00x or 11x : UNDEF */
7503 /* Signed multiply most significant [accumulate].
7504 (SMMUL, SMMLA, SMMLS) */
7505 tmp
= load_reg(s
, rm
);
7506 tmp2
= load_reg(s
, rs
);
7507 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
7510 tmp
= load_reg(s
, rd
);
7511 if (insn
& (1 << 6)) {
7512 tmp64
= gen_subq_msw(tmp64
, tmp
);
7514 tmp64
= gen_addq_msw(tmp64
, tmp
);
7517 if (insn
& (1 << 5)) {
7518 tcg_gen_addi_i64(tmp64
, tmp64
, 0x80000000u
);
7520 tcg_gen_shri_i64(tmp64
, tmp64
, 32);
7521 tmp
= tcg_temp_new_i32();
7522 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
7523 tcg_temp_free_i64(tmp64
);
7524 store_reg(s
, rn
, tmp
);
7528 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
7529 if (insn
& (1 << 7)) {
7532 tmp
= load_reg(s
, rm
);
7533 tmp2
= load_reg(s
, rs
);
7534 if (insn
& (1 << 5))
7535 gen_swap_half(tmp2
);
7536 gen_smul_dual(tmp
, tmp2
);
7537 if (insn
& (1 << 6)) {
7538 /* This subtraction cannot overflow. */
7539 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
                        /* This addition cannot overflow 32 bits;
                         * however it may overflow considered as a signed
                         * operation, in which case we must set the Q flag.
                         */
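                        /* E.g. both halfword products equal to
                         * (-32768) * (-32768) = 0x40000000: their sum
                         * 0x80000000 fits in 32 bits but is not a valid
                         * positive signed result, so the helper sets Q. */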
7545 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
7547 tcg_temp_free_i32(tmp2
);
7548 if (insn
& (1 << 22)) {
7549 /* smlald, smlsld */
7550 tmp64
= tcg_temp_new_i64();
7551 tcg_gen_ext_i32_i64(tmp64
, tmp
);
7552 tcg_temp_free_i32(tmp
);
7553 gen_addq(s
, tmp64
, rd
, rn
);
7554 gen_storeq_reg(s
, rd
, rn
, tmp64
);
7555 tcg_temp_free_i64(tmp64
);
7557 /* smuad, smusd, smlad, smlsd */
7560 tmp2
= load_reg(s
, rd
);
7561 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
7562 tcg_temp_free_i32(tmp2
);
7564 store_reg(s
, rn
, tmp
);
7570 if (!arm_feature(env
, ARM_FEATURE_ARM_DIV
)) {
7573 if (((insn
>> 5) & 7) || (rd
!= 15)) {
7576 tmp
= load_reg(s
, rm
);
7577 tmp2
= load_reg(s
, rs
);
7578 if (insn
& (1 << 21)) {
7579 gen_helper_udiv(tmp
, tmp
, tmp2
);
7581 gen_helper_sdiv(tmp
, tmp
, tmp2
);
7583 tcg_temp_free_i32(tmp2
);
7584 store_reg(s
, rn
, tmp
);
7591 op1
= ((insn
>> 17) & 0x38) | ((insn
>> 5) & 7);
7593 case 0: /* Unsigned sum of absolute differences. */
7595 tmp
= load_reg(s
, rm
);
7596 tmp2
= load_reg(s
, rs
);
7597 gen_helper_usad8(tmp
, tmp
, tmp2
);
7598 tcg_temp_free_i32(tmp2
);
7600 tmp2
= load_reg(s
, rd
);
7601 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7602 tcg_temp_free_i32(tmp2
);
7604 store_reg(s
, rn
, tmp
);
7606 case 0x20: case 0x24: case 0x28: case 0x2c:
7607 /* Bitfield insert/clear. */
7609 shift
= (insn
>> 7) & 0x1f;
7610 i
= (insn
>> 16) & 0x1f;
7613 tmp
= tcg_temp_new_i32();
7614 tcg_gen_movi_i32(tmp
, 0);
7616 tmp
= load_reg(s
, rm
);
7619 tmp2
= load_reg(s
, rd
);
7620 tcg_gen_deposit_i32(tmp
, tmp2
, tmp
, shift
, i
);
7621 tcg_temp_free_i32(tmp2
);
7623 store_reg(s
, rd
, tmp
);
7625 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7626 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
7628 tmp
= load_reg(s
, rm
);
7629 shift
= (insn
>> 7) & 0x1f;
7630 i
= ((insn
>> 16) & 0x1f) + 1;
7635 gen_ubfx(tmp
, shift
, (1u << i
) - 1);
7637 gen_sbfx(tmp
, shift
, i
);
7640 store_reg(s
, rd
, tmp
);
7650 /* Check for undefined extension instructions
7651 * per the ARM Bible IE:
7652 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7654 sh
= (0xf << 20) | (0xf << 4);
7655 if (op1
== 0x7 && ((insn
& sh
) == sh
))
7659 /* load/store byte/word */
7660 rn
= (insn
>> 16) & 0xf;
7661 rd
= (insn
>> 12) & 0xf;
7662 tmp2
= load_reg(s
, rn
);
7663 i
= (IS_USER(s
) || (insn
& 0x01200000) == 0x00200000);
7664 if (insn
& (1 << 24))
7665 gen_add_data_offset(s
, insn
, tmp2
);
7666 if (insn
& (1 << 20)) {
7668 if (insn
& (1 << 22)) {
7669 tmp
= gen_ld8u(tmp2
, i
);
7671 tmp
= gen_ld32(tmp2
, i
);
7675 tmp
= load_reg(s
, rd
);
7676 if (insn
& (1 << 22))
7677 gen_st8(tmp
, tmp2
, i
);
7679 gen_st32(tmp
, tmp2
, i
);
7681 if (!(insn
& (1 << 24))) {
7682 gen_add_data_offset(s
, insn
, tmp2
);
7683 store_reg(s
, rn
, tmp2
);
7684 } else if (insn
& (1 << 21)) {
7685 store_reg(s
, rn
, tmp2
);
7687 tcg_temp_free_i32(tmp2
);
7689 if (insn
& (1 << 20)) {
7690 /* Complete the load. */
7691 store_reg_from_load(env
, s
, rd
, tmp
);
7697 int j
, n
, user
, loaded_base
;
7699 /* load/store multiple words */
7700 /* XXX: store correct base if write back */
7702 if (insn
& (1 << 22)) {
7704 goto illegal_op
; /* only usable in supervisor mode */
7706 if ((insn
& (1 << 15)) == 0)
7709 rn
= (insn
>> 16) & 0xf;
7710 addr
= load_reg(s
, rn
);
7712 /* compute total size */
7714 TCGV_UNUSED(loaded_var
);
7717 if (insn
& (1 << i
))
7720 /* XXX: test invalid n == 0 case ? */
7721 if (insn
& (1 << 23)) {
7722 if (insn
& (1 << 24)) {
7724 tcg_gen_addi_i32(addr
, addr
, 4);
7726 /* post increment */
7729 if (insn
& (1 << 24)) {
7731 tcg_gen_addi_i32(addr
, addr
, -(n
* 4));
7733 /* post decrement */
7735 tcg_gen_addi_i32(addr
, addr
, -((n
- 1) * 4));
7740 if (insn
& (1 << i
)) {
7741 if (insn
& (1 << 20)) {
7743 tmp
= gen_ld32(addr
, IS_USER(s
));
7745 tmp2
= tcg_const_i32(i
);
7746 gen_helper_set_user_reg(cpu_env
, tmp2
, tmp
);
7747 tcg_temp_free_i32(tmp2
);
7748 tcg_temp_free_i32(tmp
);
7749 } else if (i
== rn
) {
7753 store_reg_from_load(env
, s
, i
, tmp
);
7758 /* special case: r15 = PC + 8 */
7759 val
= (long)s
->pc
+ 4;
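                            /* s->pc already points 4 bytes past this
                             * instruction, so pc + 4 gives the architectural
                             * PC + 8 value that is stored for r15. */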
7760 tmp
= tcg_temp_new_i32();
7761 tcg_gen_movi_i32(tmp
, val
);
7763 tmp
= tcg_temp_new_i32();
7764 tmp2
= tcg_const_i32(i
);
7765 gen_helper_get_user_reg(tmp
, cpu_env
, tmp2
);
7766 tcg_temp_free_i32(tmp2
);
7768 tmp
= load_reg(s
, i
);
7770 gen_st32(tmp
, addr
, IS_USER(s
));
7773 /* no need to add after the last transfer */
7775 tcg_gen_addi_i32(addr
, addr
, 4);
7778 if (insn
& (1 << 21)) {
7780 if (insn
& (1 << 23)) {
7781 if (insn
& (1 << 24)) {
7784 /* post increment */
7785 tcg_gen_addi_i32(addr
, addr
, 4);
7788 if (insn
& (1 << 24)) {
7791 tcg_gen_addi_i32(addr
, addr
, -((n
- 1) * 4));
7793 /* post decrement */
7794 tcg_gen_addi_i32(addr
, addr
, -(n
* 4));
7797 store_reg(s
, rn
, addr
);
7799 tcg_temp_free_i32(addr
);
7802 store_reg(s
, rn
, loaded_var
);
7804 if ((insn
& (1 << 22)) && !user
) {
7805 /* Restore CPSR from SPSR. */
7806 tmp
= load_cpu_field(spsr
);
7807 gen_set_cpsr(tmp
, 0xffffffff);
7808 tcg_temp_free_i32(tmp
);
7809 s
->is_jmp
= DISAS_UPDATE
;
7818 /* branch (and link) */
7819 val
= (int32_t)s
->pc
;
7820 if (insn
& (1 << 24)) {
7821 tmp
= tcg_temp_new_i32();
7822 tcg_gen_movi_i32(tmp
, val
);
7823 store_reg(s
, 14, tmp
);
7825 offset
= (((int32_t)insn
<< 8) >> 8);
7826 val
+= (offset
<< 2) + 4;
7834 if (disas_coproc_insn(env
, s
, insn
))
7839 gen_set_pc_im(s
->pc
);
7840 s
->is_jmp
= DISAS_SWI
;
7844 gen_exception_insn(s
, 4, EXCP_UDEF
);
7850 /* Return true if this is a Thumb-2 logical op. */
7852 thumb2_logic_op(int op
)
7857 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7858 then set condition code flags based on the result of the operation.
7859 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7860 to the high bit of T1.
7861 Returns zero if the opcode is valid. */
7864 gen_thumb2_data_op(DisasContext
*s
, int op
, int conds
, uint32_t shifter_out
, TCGv t0
, TCGv t1
)
7871 tcg_gen_and_i32(t0
, t0
, t1
);
7875 tcg_gen_andc_i32(t0
, t0
, t1
);
7879 tcg_gen_or_i32(t0
, t0
, t1
);
7883 tcg_gen_orc_i32(t0
, t0
, t1
);
7887 tcg_gen_xor_i32(t0
, t0
, t1
);
7892 gen_add_CC(t0
, t0
, t1
);
7894 tcg_gen_add_i32(t0
, t0
, t1
);
7898 gen_helper_adc_cc(t0
, cpu_env
, t0
, t1
);
7904 gen_helper_sbc_cc(t0
, cpu_env
, t0
, t1
);
7906 gen_sub_carry(t0
, t0
, t1
);
7910 gen_sub_CC(t0
, t0
, t1
);
7912 tcg_gen_sub_i32(t0
, t0
, t1
);
7916 gen_sub_CC(t0
, t1
, t0
);
7918 tcg_gen_sub_i32(t0
, t1
, t0
);
7920 default: /* 5, 6, 7, 9, 12, 15. */
7926 gen_set_CF_bit31(t1
);
7931 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7933 static int disas_thumb2_insn(CPUARMState
*env
, DisasContext
*s
, uint16_t insn_hw1
)
7935 uint32_t insn
, imm
, shift
, offset
;
7936 uint32_t rd
, rn
, rm
, rs
;
7947 if (!(arm_feature(env
, ARM_FEATURE_THUMB2
)
7948 || arm_feature (env
, ARM_FEATURE_M
))) {
7949 /* Thumb-1 cores may need to treat bl and blx as a pair of
7950 16-bit instructions to get correct prefetch abort behavior. */
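        /* The first half of the pair has already left the upper, PC-relative
         * part of the branch offset in r14; the code below adds the low
         * offset bits from this second half, branches there, and leaves the
         * return address (with bit 0 set) in r14. */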
7952 if ((insn
& (1 << 12)) == 0) {
7954 /* Second half of blx. */
7955 offset
= ((insn
& 0x7ff) << 1);
7956 tmp
= load_reg(s
, 14);
7957 tcg_gen_addi_i32(tmp
, tmp
, offset
);
7958 tcg_gen_andi_i32(tmp
, tmp
, 0xfffffffc);
7960 tmp2
= tcg_temp_new_i32();
7961 tcg_gen_movi_i32(tmp2
, s
->pc
| 1);
7962 store_reg(s
, 14, tmp2
);
7966 if (insn
& (1 << 11)) {
7967 /* Second half of bl. */
7968 offset
= ((insn
& 0x7ff) << 1) | 1;
7969 tmp
= load_reg(s
, 14);
7970 tcg_gen_addi_i32(tmp
, tmp
, offset
);
7972 tmp2
= tcg_temp_new_i32();
7973 tcg_gen_movi_i32(tmp2
, s
->pc
| 1);
7974 store_reg(s
, 14, tmp2
);
7978 if ((s
->pc
& ~TARGET_PAGE_MASK
) == 0) {
7979 /* Instruction spans a page boundary. Implement it as two
7980 16-bit instructions in case the second half causes an
7982 offset
= ((int32_t)insn
<< 21) >> 9;
7983 tcg_gen_movi_i32(cpu_R
[14], s
->pc
+ 2 + offset
);
7986 /* Fall through to 32-bit decode. */
7989 insn = arm_lduw_code(env, s->pc, s->bswap_code);
7991 insn |= (uint32_t)insn_hw1 << 16;
7993 if ((insn & 0xf800e800) != 0xf000e800) {
7997 rn = (insn >> 16) & 0xf;
7998 rs = (insn >> 12) & 0xf;
7999 rd = (insn >> 8) & 0xf;
8001 switch ((insn >> 25) & 0xf) {
8002 case 0: case 1: case 2: case 3:
8003 /* 16-bit instructions.  Should never happen. */
8006 if (insn & (1 << 22)) {
8007 /* Other load/store, table branch. */
8008 if (insn & 0x01200000) {
8009 /* Load/store doubleword. */
8011 addr = tcg_temp_new_i32();
8012 tcg_gen_movi_i32(addr, s->pc & ~3);
8014 addr = load_reg(s, rn);
8016 offset = (insn & 0xff) * 4;
8017 if ((insn & (1 << 23)) == 0)
8019 if (insn & (1 << 24)) {
8020 tcg_gen_addi_i32(addr, addr, offset);
8023 if (insn & (1 << 20)) {
8025 tmp = gen_ld32(addr, IS_USER(s));
8026 store_reg(s, rs, tmp);
8027 tcg_gen_addi_i32(addr, addr, 4);
8028 tmp = gen_ld32(addr, IS_USER(s));
8029 store_reg(s, rd, tmp);
8032 tmp = load_reg(s, rs);
8033 gen_st32(tmp, addr, IS_USER(s));
8034 tcg_gen_addi_i32(addr, addr, 4);
8035 tmp = load_reg(s, rd);
8036 gen_st32(tmp, addr, IS_USER(s));
8038 if (insn & (1 << 21)) {
8039 /* Base writeback. */
8042 tcg_gen_addi_i32(addr, addr, offset - 4);
8043 store_reg(s, rn, addr);
8045 tcg_temp_free_i32(addr);
8047 } else if ((insn & (1 << 23)) == 0) {
8048 /* Load/store exclusive word. */
8049 addr = tcg_temp_local_new();
8050 load_reg_var(s, addr, rn);
8051 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
8052 if (insn & (1 << 20)) {
8053 gen_load_exclusive(s, rs, 15, addr, 2);
8055 gen_store_exclusive(s, rd, rs, 15, addr, 2);
8057 tcg_temp_free(addr);
8058 } else if ((insn & (1 << 6)) == 0) {
8061 addr = tcg_temp_new_i32();
8062 tcg_gen_movi_i32(addr, s->pc);
8064 addr = load_reg(s, rn);
8066 tmp = load_reg(s, rm);
8067 tcg_gen_add_i32(addr, addr, tmp);
8068 if (insn & (1 << 4)) {
8070 tcg_gen_add_i32(addr, addr, tmp);
8071 tcg_temp_free_i32(tmp);
8072 tmp = gen_ld16u(addr, IS_USER(s));
8074 tcg_temp_free_i32(tmp);
8075 tmp = gen_ld8u(addr, IS_USER(s));
8077 tcg_temp_free_i32(addr);
8078 tcg_gen_shli_i32(tmp, tmp, 1);
8079 tcg_gen_addi_i32(tmp, tmp, s->pc);
8080 store_reg(s, 15, tmp);
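/* Illustrative note (not from the original source): this is the TBB/TBH table
 * branch path.  The byte (TBB) or halfword (TBH, bit 4 set) entry loaded from
 * [Rn + Rm] (or [Rn + Rm*2] for TBH, hence the extra add above) is a forward
 * branch distance in halfwords, so it is doubled and added to the PC of the
 * following instruction; writing the result to r15 performs the branch.
 */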
8082 /* Load/store exclusive byte/halfword/doubleword. */
8084 op = (insn >> 4) & 0x3;
8088 addr = tcg_temp_local_new();
8089 load_reg_var(s, addr, rn);
8090 if (insn & (1 << 20)) {
8091 gen_load_exclusive(s, rs, rd, addr, op);
8093 gen_store_exclusive(s, rm, rs, rd, addr, op);
8095 tcg_temp_free(addr);
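/* Illustrative note (not from the original source): the two-bit op field above
 * is passed straight through as the access size for the exclusive helpers,
 * i.e. 0 = byte (LDREXB/STREXB), 1 = halfword (LDREXH/STREXH) and 3 =
 * doubleword (LDREXD/STREXD); the word forms were handled earlier with a
 * hard-coded size of 2.
 */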
8098 /* Load/store multiple, RFE, SRS. */
8099 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
8100 /* Not available in user mode. */
8103 if (insn & (1 << 20)) {
8105 addr = load_reg(s, rn);
8106 if ((insn & (1 << 24)) == 0)
8107 tcg_gen_addi_i32(addr, addr, -8);
8108 /* Load PC into tmp and CPSR into tmp2. */
8109 tmp = gen_ld32(addr, 0);
8110 tcg_gen_addi_i32(addr, addr, 4);
8111 tmp2 = gen_ld32(addr, 0);
8112 if (insn & (1 << 21)) {
8113 /* Base writeback. */
8114 if (insn & (1 << 24)) {
8115 tcg_gen_addi_i32(addr, addr, 4);
8117 tcg_gen_addi_i32(addr, addr, -4);
8119 store_reg(s, rn, addr);
8121 tcg_temp_free_i32(addr);
8123 gen_rfe(s, tmp, tmp2);
8127 addr = tcg_temp_new_i32();
8128 tmp = tcg_const_i32(op);
8129 gen_helper_get_r13_banked(addr, cpu_env, tmp);
8130 tcg_temp_free_i32(tmp);
8131 if ((insn & (1 << 24)) == 0) {
8132 tcg_gen_addi_i32(addr, addr, -8);
8134 tmp = load_reg(s, 14);
8135 gen_st32(tmp, addr, 0);
8136 tcg_gen_addi_i32(addr, addr, 4);
8137 tmp = tcg_temp_new_i32();
8138 gen_helper_cpsr_read(tmp, cpu_env);
8139 gen_st32(tmp, addr, 0);
8140 if (insn & (1 << 21)) {
8141 if ((insn & (1 << 24)) == 0) {
8142 tcg_gen_addi_i32(addr, addr, -4);
8144 tcg_gen_addi_i32(addr, addr, 4);
8146 tmp = tcg_const_i32(op);
8147 gen_helper_set_r13_banked(cpu_env, tmp, addr);
8148 tcg_temp_free_i32(tmp);
8150 tcg_temp_free_i32(addr);
8154 int i, loaded_base = 0;
8156 /* Load/store multiple. */
8157 addr = load_reg(s, rn);
8159 for (i = 0; i < 16; i++) {
8160 if (insn & (1 << i))
8163 if (insn & (1 << 24)) {
8164 tcg_gen_addi_i32(addr, addr, -offset);
8167 TCGV_UNUSED(loaded_var);
8168 for (i = 0; i < 16; i++) {
8169 if ((insn & (1 << i)) == 0)
8171 if (insn & (1 << 20)) {
8173 tmp = gen_ld32(addr, IS_USER(s));
8176 } else if (i == rn) {
8180 store_reg(s, i, tmp);
8184 tmp = load_reg(s, i);
8185 gen_st32(tmp, addr, IS_USER(s));
8187 tcg_gen_addi_i32(addr, addr, 4);
8190 store_reg(s, rn, loaded_var);
8192 if (insn & (1 << 21)) {
8193 /* Base register writeback. */
8194 if (insn & (1 << 24)) {
8195 tcg_gen_addi_i32(addr, addr, -offset);
8197 /* Fault if writeback register is in register list. */
8198 if (insn & (1 << rn))
8200 store_reg(s, rn, addr);
8202 tcg_temp_free_i32(addr);
8209 op = (insn >> 21) & 0xf;
8211 /* Halfword pack. */
8212 tmp = load_reg(s, rn);
8213 tmp2 = load_reg(s, rm);
8214 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
8215 if (insn & (1 << 5)) {
8219 tcg_gen_sari_i32(tmp2, tmp2, shift);
8220 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
8221 tcg_gen_ext16u_i32(tmp2, tmp2);
8225 tcg_gen_shli_i32(tmp2, tmp2, shift);
8226 tcg_gen_ext16u_i32(tmp, tmp);
8227 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8229 tcg_gen_or_i32(tmp, tmp, tmp2);
8230 tcg_temp_free_i32(tmp2);
8231 store_reg(s, rd, tmp);
8233 /* Data processing register constant shift. */
8235 tmp = tcg_temp_new_i32();
8236 tcg_gen_movi_i32(tmp, 0);
8238 tmp = load_reg(s, rn);
8240 tmp2 = load_reg(s, rm);
8242 shiftop = (insn >> 4) & 3;
8243 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8244 conds = (insn & (1 << 20)) != 0;
8245 logic_cc = (conds && thumb2_logic_op(op));
8246 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
8247 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
8249 tcg_temp_free_i32(tmp2);
8251 store_reg(s, rd, tmp);
8253 tcg_temp_free_i32(tmp);
8257 case 13: /* Misc data processing. */
8258 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
8259 if (op < 4 && (insn & 0xf000) != 0xf000)
8262 case 0: /* Register controlled shift. */
8263 tmp = load_reg(s, rn);
8264 tmp2 = load_reg(s, rm);
8265 if ((insn & 0x70) != 0)
8267 op = (insn >> 21) & 3;
8268 logic_cc = (insn & (1 << 20)) != 0;
8269 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
8272 store_reg_bx(env, s, rd, tmp);
8274 case 1: /* Sign/zero extend. */
8275 tmp = load_reg(s, rm);
8276 shift = (insn >> 4) & 3;
8277 /* ??? In many cases it's not necessary to do a rotate, a shift is sufficient. */
8280 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
8281 op = (insn >> 20) & 7;
8283 case 0: gen_sxth(tmp); break;
8284 case 1: gen_uxth(tmp); break;
8285 case 2: gen_sxtb16(tmp); break;
8286 case 3: gen_uxtb16(tmp); break;
8287 case 4: gen_sxtb(tmp); break;
8288 case 5: gen_uxtb(tmp); break;
8289 default: goto illegal_op;
8292 tmp2 = load_reg(s, rn);
8293 if ((op >> 1) == 1) {
8294 gen_add16(tmp, tmp2);
8296 tcg_gen_add_i32(tmp, tmp, tmp2);
8297 tcg_temp_free_i32(tmp2);
8300 store_reg(s, rd, tmp);
8302 case 2: /* SIMD add/subtract. */
8303 op = (insn >> 20) & 7;
8304 shift = (insn >> 4) & 7;
8305 if ((op & 3) == 3 || (shift & 3) == 3)
8307 tmp = load_reg(s, rn);
8308 tmp2 = load_reg(s, rm);
8309 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
8310 tcg_temp_free_i32(tmp2);
8311 store_reg(s, rd, tmp);
8313 case 3: /* Other data processing. */
8314 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
8316 /* Saturating add/subtract. */
8317 tmp = load_reg(s, rn);
8318 tmp2 = load_reg(s, rm);
8320 gen_helper_double_saturate(tmp, cpu_env, tmp);
8322 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
8324 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
8325 tcg_temp_free_i32(tmp2);
8327 tmp = load_reg(s, rn);
8329 case 0x0a: /* rbit */
8330 gen_helper_rbit(tmp, tmp);
8332 case 0x08: /* rev */
8333 tcg_gen_bswap32_i32(tmp, tmp);
8335 case 0x09: /* rev16 */
8338 case 0x0b: /* revsh */
8341 case 0x10: /* sel */
8342 tmp2 = load_reg(s, rm);
8343 tmp3 = tcg_temp_new_i32();
8344 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
8345 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
8346 tcg_temp_free_i32(tmp3);
8347 tcg_temp_free_i32(tmp2);
8349 case 0x18: /* clz */
8350 gen_helper_clz(tmp, tmp);
8356 store_reg(s, rd, tmp);
8358 case 4: case 5: /* 32-bit multiply.  Sum of absolute differences. */
8359 op = (insn >> 4) & 0xf;
8360 tmp = load_reg(s, rn);
8361 tmp2 = load_reg(s, rm);
8362 switch ((insn >> 20) & 7) {
8363 case 0: /* 32 x 32 -> 32 */
8364 tcg_gen_mul_i32(tmp, tmp, tmp2);
8365 tcg_temp_free_i32(tmp2);
8367 tmp2 = load_reg(s, rs);
8369 tcg_gen_sub_i32(tmp, tmp2, tmp);
8371 tcg_gen_add_i32(tmp, tmp, tmp2);
8372 tcg_temp_free_i32(tmp2);
8375 case 1: /* 16 x 16 -> 32 */
8376 gen_mulxy(tmp, tmp2, op & 2, op & 1);
8377 tcg_temp_free_i32(tmp2);
8379 tmp2 = load_reg(s, rs);
8380 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8381 tcg_temp_free_i32(tmp2);
8384 case 2: /* Dual multiply add. */
8385 case 4: /* Dual multiply subtract. */
8387 gen_swap_half(tmp2);
8388 gen_smul_dual(tmp, tmp2);
8389 if (insn & (1 << 22)) {
8390 /* This subtraction cannot overflow. */
8391 tcg_gen_sub_i32(tmp, tmp, tmp2);
8393 /* This addition cannot overflow 32 bits; however it may overflow considered as a signed operation, in which case we must set the Q flag. */
8397 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8399 tcg_temp_free_i32(tmp2);
8402 tmp2 = load_reg(s, rs);
8403 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8404 tcg_temp_free_i32(tmp2);
8407 case 3: /* 32 * 16 -> 32msb */
8409 tcg_gen_sari_i32(tmp2, tmp2, 16);
8412 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8413 tcg_gen_shri_i64(tmp64, tmp64, 16);
8414 tmp = tcg_temp_new_i32();
8415 tcg_gen_trunc_i64_i32(tmp, tmp64);
8416 tcg_temp_free_i64(tmp64);
8419 tmp2 = load_reg(s, rs);
8420 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8421 tcg_temp_free_i32(tmp2);
8424 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
8425 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8427 tmp = load_reg(s, rs);
8428 if (insn & (1 << 20)) {
8429 tmp64 = gen_addq_msw(tmp64, tmp);
8431 tmp64 = gen_subq_msw(tmp64, tmp);
8434 if (insn & (1 << 4)) {
8435 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
8437 tcg_gen_shri_i64(tmp64, tmp64, 32);
8438 tmp = tcg_temp_new_i32();
8439 tcg_gen_trunc_i64_i32(tmp, tmp64);
8440 tcg_temp_free_i64(tmp64);
8442 case 7: /* Unsigned sum of absolute differences. */
8443 gen_helper_usad8(tmp, tmp, tmp2);
8444 tcg_temp_free_i32(tmp2);
8446 tmp2 = load_reg(s, rs);
8447 tcg_gen_add_i32(tmp, tmp, tmp2);
8448 tcg_temp_free_i32(tmp2);
8452 store_reg(s, rd, tmp);
8454 case 6: case 7: /* 64-bit multiply, Divide. */
8455 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
8456 tmp = load_reg(s, rn);
8457 tmp2 = load_reg(s, rm);
8458 if ((op & 0x50) == 0x10) {
8460 if (!arm_feature(env, ARM_FEATURE_THUMB_DIV)) {
8464 gen_helper_udiv(tmp, tmp, tmp2);
8466 gen_helper_sdiv(tmp, tmp, tmp2);
8467 tcg_temp_free_i32(tmp2);
8468 store_reg(s, rd, tmp);
8469 } else if ((op & 0xe) == 0xc) {
8470 /* Dual multiply accumulate long. */
8472 gen_swap_half(tmp2);
8473 gen_smul_dual(tmp, tmp2);
8475 tcg_gen_sub_i32(tmp, tmp, tmp2);
8477 tcg_gen_add_i32(tmp, tmp, tmp2);
8479 tcg_temp_free_i32(tmp2);
8481 tmp64 = tcg_temp_new_i64();
8482 tcg_gen_ext_i32_i64(tmp64, tmp);
8483 tcg_temp_free_i32(tmp);
8484 gen_addq(s, tmp64, rs, rd);
8485 gen_storeq_reg(s, rs, rd, tmp64);
8486 tcg_temp_free_i64(tmp64);
8489 /* Unsigned 64-bit multiply */
8490 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8494 gen_mulxy(tmp, tmp2, op & 2, op & 1);
8495 tcg_temp_free_i32(tmp2);
8496 tmp64 = tcg_temp_new_i64();
8497 tcg_gen_ext_i32_i64(tmp64, tmp);
8498 tcg_temp_free_i32(tmp);
8500 /* Signed 64-bit multiply */
8501 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8506 gen_addq_lo(s, tmp64, rs);
8507 gen_addq_lo(s, tmp64, rd);
8508 } else if (op & 0x40) {
8509 /* 64-bit accumulate. */
8510 gen_addq(s, tmp64, rs, rd);
8512 gen_storeq_reg(s, rs, rd, tmp64);
8513 tcg_temp_free_i64(tmp64);
8518 case 6: case 7: case 14: case 15:
8520 if (((insn >> 24) & 3) == 3) {
8521 /* Translate into the equivalent ARM encoding. */
8522 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
8523 if (disas_neon_data_insn(env, s, insn))
8526 if (insn & (1 << 28))
8528 if (disas_coproc_insn(env, s, insn))
8532 case 8: case 9: case 10: case 11:
8533 if (insn & (1 << 15)) {
8534 /* Branches, misc control. */
8535 if (insn & 0x5000) {
8536 /* Unconditional branch. */
8537 /* signextend(hw1[10:0]) -> offset[:12]. */
8538 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
8539 /* hw1[10:0] -> offset[11:1]. */
8540 offset |= (insn & 0x7ff) << 1;
8541 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]; offset[24:22] already have the same value because of the sign extension above. */
8544 offset ^= ((~insn) & (1 << 13)) << 10;
8545 offset ^= ((~insn) & (1 << 11)) << 11;
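/* Illustrative worked example (not from the original source), with hw1 the
 * first halfword (now in insn[31:16]) and hw2 the second (insn[15:0]): the
 * branch offset is sign_extend(S:I1:I2:imm10:imm11:'0'), where S = hw1[10],
 * imm10 = hw1[9:0], imm11 = hw2[10:0], J1 = hw2[13], J2 = hw2[11],
 * I1 = ~(J1 ^ S) and I2 = ~(J2 ^ S).  The sign extension above already set
 * offset[23:22] to S, and the two XORs flip those bits exactly when J1/J2 are
 * zero, which yields I1/I2 as defined; e.g. with S = 0 and J1 = J2 = 1 nothing
 * is flipped and the result is a plain forward branch.
 */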
8547 if (insn & (1 << 14)) {
8548 /* Branch and link. */
8549 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
8553 if (insn & (1 << 12)) {
8558 offset &= ~(uint32_t)2;
8559 /* thumb2 bx, no need to check */
8560 gen_bx_im(s, offset);
8562 } else if (((insn >> 23) & 7) == 7) {
8564 if (insn & (1 << 13))
8567 if (insn & (1 << 26)) {
8568 /* Secure monitor call (v6Z) */
8569 goto illegal_op; /* not implemented. */
8571 op = (insn >> 20) & 7;
8573 case 0: /* msr cpsr. */
8575 tmp = load_reg(s, rn);
8576 addr = tcg_const_i32(insn & 0xff);
8577 gen_helper_v7m_msr(cpu_env, addr, tmp);
8578 tcg_temp_free_i32(addr);
8579 tcg_temp_free_i32(tmp);
8584 case 1: /* msr spsr. */
8587 tmp = load_reg(s, rn);
8589 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
8593 case 2: /* cps, nop-hint. */
8594 if (((insn >> 8) & 7) == 0) {
8595 gen_nop_hint(s, insn & 0xff);
8597 /* Implemented as NOP in user mode. */
8602 if (insn & (1 << 10)) {
8603 if (insn & (1 << 7))
8605 if (insn & (1 << 6))
8607 if (insn & (1 << 5))
8609 if (insn & (1 << 9))
8610 imm = CPSR_A | CPSR_I | CPSR_F;
8612 if (insn & (1 << 8)) {
8614 imm |= (insn & 0x1f);
8617 gen_set_psr_im(s, offset, 0, imm);
8620 case 3: /* Special control operations. */
8622 op = (insn >> 4) & 0xf;
8630 /* These execute as NOPs. */
8637 /* Trivial implementation equivalent to bx. */
8638 tmp = load_reg(s, rn);
8641 case 5: /* Exception return. */
8645 if (rn != 14 || rd != 15) {
8648 tmp = load_reg(s, rn);
8649 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
8650 gen_exception_return(s, tmp);
8652 case 6: /* mrs cpsr. */
8653 tmp = tcg_temp_new_i32();
8655 addr = tcg_const_i32(insn & 0xff);
8656 gen_helper_v7m_mrs(tmp, cpu_env, addr);
8657 tcg_temp_free_i32(addr);
8659 gen_helper_cpsr_read(tmp, cpu_env);
8661 store_reg(s, rd, tmp);
8663 case 7: /* mrs spsr. */
8664 /* Not accessible in user mode. */
8665 if (IS_USER(s) || IS_M(env))
8667 tmp = load_cpu_field(spsr);
8668 store_reg(s, rd, tmp);
8673 /* Conditional branch. */
8674 op = (insn >> 22) & 0xf;
8675 /* Generate a conditional jump to next instruction. */
8676 s->condlabel = gen_new_label();
8677 gen_test_cc(op ^ 1, s->condlabel);
8680 /* offset[11:1] = insn[10:0] */
8681 offset = (insn & 0x7ff) << 1;
8682 /* offset[17:12] = insn[21:16]. */
8683 offset |= (insn & 0x003f0000) >> 4;
8684 /* offset[31:20] = insn[26]. */
8685 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
8686 /* offset[18] = insn[13]. */
8687 offset |= (insn & (1 << 13)) << 5;
8688 /* offset[19] = insn[11]. */
8689 offset |= (insn & (1 << 11)) << 8;
8691 /* jump to the offset */
8692 gen_jmp(s, s->pc + offset);
8695 /* Data processing immediate. */
8696 if (insn & (1 << 25)) {
8697 if (insn & (1 << 24)) {
8698 if (insn & (1 << 20))
8700 /* Bitfield/Saturate. */
8701 op = (insn >> 21) & 7;
8703 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8705 tmp = tcg_temp_new_i32();
8706 tcg_gen_movi_i32(tmp, 0);
8708 tmp = load_reg(s, rn);
8711 case 2: /* Signed bitfield extract. */
8713 if (shift + imm > 32)
8716 gen_sbfx(tmp, shift, imm);
8718 case 6: /* Unsigned bitfield extract. */
8720 if (shift + imm > 32)
8723 gen_ubfx(tmp, shift, (1u << imm) - 1);
8725 case 3: /* Bitfield insert/clear. */
8728 imm = imm + 1 - shift;
8730 tmp2 = load_reg(s, rd);
8731 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
8732 tcg_temp_free_i32(tmp2);
8737 default: /* Saturate. */
8740 tcg_gen_sari_i32(tmp, tmp, shift);
8742 tcg_gen_shli_i32(tmp, tmp, shift);
8744 tmp2 = tcg_const_i32(imm);
8747 if ((op & 1) && shift == 0)
8748 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
8750 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
8753 if ((op & 1) && shift == 0)
8754 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
8756 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
8758 tcg_temp_free_i32(tmp2);
8761 store_reg(s, rd, tmp);
8763 imm = ((insn & 0x04000000) >> 15) | ((insn & 0x7000) >> 4) | (insn & 0xff);
8765 if (insn & (1 << 22)) {
8766 /* 16-bit immediate. */
8767 imm |= (insn >> 4) & 0xf000;
8768 if (insn & (1 << 23)) {
8770 tmp = load_reg(s, rd);
8771 tcg_gen_ext16u_i32(tmp, tmp);
8772 tcg_gen_ori_i32(tmp, tmp, imm << 16);
8775 tmp = tcg_temp_new_i32();
8776 tcg_gen_movi_i32(tmp, imm);
8779 /* Add/sub 12-bit immediate. */
8781 offset = s->pc & ~(uint32_t)3;
8782 if (insn & (1 << 23))
8786 tmp = tcg_temp_new_i32();
8787 tcg_gen_movi_i32(tmp, offset);
8789 tmp = load_reg(s, rn);
8790 if (insn & (1 << 23))
8791 tcg_gen_subi_i32(tmp, tmp, imm);
8793 tcg_gen_addi_i32(tmp, tmp, imm);
8796 store_reg(s, rd, tmp);
8799 int shifter_out = 0;
8800 /* modified 12-bit immediate. */
8801 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
8802 imm = (insn & 0xff);
8805 /* Nothing to do. */
8807 case 1: /* 00XY00XY */
8810 case 2: /* XY00XY00 */
8814 case 3: /* XYXYXYXY */
8818 default: /* Rotated constant. */
8819 shift = (shift << 1) | (imm >> 7);
8821 imm = imm << (32 - shift);
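/* Illustrative note (not from the original source): this is the T32 "modified
 * immediate" scheme.  With XY standing for the 8-bit value in insn[7:0], the
 * selector decoded into 'shift' above picks one of
 *   0: 000000XY   1: 00XY00XY   2: XY00XY00   3: XYXYXYXY
 * and anything else is a rotated constant: an 8-bit value whose top bit is
 * forced set per the encoding, rotated right by a 5-bit amount (equivalently,
 * shifted left by 32 - shift as above).  For example, XY = 0x55 with
 * selector 3 yields 0x55555555.
 */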
8825 tmp2 = tcg_temp_new_i32();
8826 tcg_gen_movi_i32(tmp2, imm);
8827 rn = (insn >> 16) & 0xf;
8829 tmp = tcg_temp_new_i32();
8830 tcg_gen_movi_i32(tmp, 0);
8832 tmp = load_reg(s, rn);
8834 op = (insn >> 21) & 0xf;
8835 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0, shifter_out, tmp, tmp2))
8838 tcg_temp_free_i32(tmp2);
8839 rd = (insn >> 8) & 0xf;
8841 store_reg(s, rd, tmp);
8843 tcg_temp_free_i32(tmp);
8848 case 12: /* Load/store single data item. */
8853 if ((insn & 0x01100000) == 0x01000000) {
8854 if (disas_neon_ls_insn(env, s, insn))
8858 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
8860 if (!(insn & (1 << 20))) {
8864 /* Byte or halfword load space with dest == r15 : memory hints.
8865 * Catch them early so we don't emit pointless addressing code.
8866 * This space is a mix of:
8867 *  PLD/PLDW/PLI,  which we implement as NOPs (note that unlike
8868 *  the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
8870 *  unallocated hints, which must be treated as NOPs
8871 *  UNPREDICTABLE space, which we NOP or UNDEF depending on
8872 *  which is easiest for the decoding logic
8873 *  Some space which must UNDEF */
8875 int op1 = (insn >> 23) & 3;
8876 int op2 = (insn >> 6) & 0x3f;
8881 /* UNPREDICTABLE, unallocated hint or PLD/PLDW/PLI (literal) */
8887 return 0; /* PLD/PLDW/PLI or unallocated hint */
8889 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
8890 return 0; /* PLD/PLDW/PLI or unallocated hint */
8892 /* UNDEF space, or an UNPREDICTABLE */
8898 addr = tcg_temp_new_i32();
8900 /* s->pc has already been incremented by 4. */
8901 imm = s->pc & 0xfffffffc;
8902 if (insn & (1 << 23))
8903 imm += insn & 0xfff;
8905 imm -= insn & 0xfff;
8906 tcg_gen_movi_i32(addr, imm);
8908 addr = load_reg(s, rn);
8909 if (insn & (1 << 23)) {
8910 /* Positive offset. */
8912 tcg_gen_addi_i32(addr, addr, imm);
8915 switch ((insn >> 8) & 0xf) {
8916 case 0x0: /* Shifted Register. */
8917 shift = (insn >> 4) & 0xf;
8919 tcg_temp_free_i32(addr);
8922 tmp = load_reg(s, rm);
8924 tcg_gen_shli_i32(tmp, tmp, shift);
8925 tcg_gen_add_i32(addr, addr, tmp);
8926 tcg_temp_free_i32(tmp);
8928 case 0xc: /* Negative offset. */
8929 tcg_gen_addi_i32(addr, addr, -imm);
8931 case 0xe: /* User privilege. */
8932 tcg_gen_addi_i32(addr, addr, imm);
8935 case 0x9: /* Post-decrement. */
8938 case 0xb: /* Post-increment. */
8942 case 0xd: /* Pre-decrement. */
8945 case 0xf: /* Pre-increment. */
8946 tcg_gen_addi_i32(addr, addr, imm);
8950 tcg_temp_free_i32(addr);
8955 if (insn & (1 << 20)) {
8958 case 0: tmp = gen_ld8u(addr, user); break;
8959 case 4: tmp = gen_ld8s(addr, user); break;
8960 case 1: tmp = gen_ld16u(addr, user); break;
8961 case 5: tmp = gen_ld16s(addr, user); break;
8962 case 2: tmp = gen_ld32(addr, user); break;
8964 tcg_temp_free_i32(addr);
8970 store_reg(s, rs, tmp);
8974 tmp = load_reg(s, rs);
8976 case 0: gen_st8(tmp, addr, user); break;
8977 case 1: gen_st16(tmp, addr, user); break;
8978 case 2: gen_st32(tmp, addr, user); break;
8980 tcg_temp_free_i32(addr);
8985 tcg_gen_addi_i32(addr, addr, imm);
8987 store_reg(s, rn, addr);
8989 tcg_temp_free_i32(addr);
9001 static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
9003 uint32_t val, insn, op, rm, rn, rd, shift, cond;
9010 if (s->condexec_mask) {
9011 cond = s->condexec_cond;
9012 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
9013 s->condlabel = gen_new_label();
9014 gen_test_cc(cond ^ 1, s->condlabel);
9019 insn = arm_lduw_code(env, s->pc, s->bswap_code);
9022 switch (insn >> 12) {
9026 op = (insn >> 11) & 3;
9029 rn = (insn >> 3) & 7;
9030 tmp = load_reg(s, rn);
9031 if (insn & (1 << 10)) {
9033 tmp2 = tcg_temp_new_i32();
9034 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
9037 rm = (insn >> 6) & 7;
9038 tmp2 = load_reg(s, rm);
9040 if (insn & (1 << 9)) {
9041 if (s->condexec_mask)
9042 tcg_gen_sub_i32(tmp, tmp, tmp2);
9044 gen_sub_CC(tmp, tmp, tmp2);
9046 if (s->condexec_mask)
9047 tcg_gen_add_i32(tmp, tmp, tmp2);
9049 gen_add_CC(tmp, tmp, tmp2);
9051 tcg_temp_free_i32(tmp2);
9052 store_reg(s, rd, tmp);
9054 /* shift immediate */
9055 rm = (insn >> 3) & 7;
9056 shift = (insn >> 6) & 0x1f;
9057 tmp = load_reg(s, rm);
9058 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
9059 if (!s->condexec_mask)
9061 store_reg(s, rd, tmp);
9065 /* arithmetic large immediate */
9066 op = (insn >> 11) & 3;
9067 rd = (insn >> 8) & 0x7;
9068 if (op == 0) { /* mov */
9069 tmp = tcg_temp_new_i32();
9070 tcg_gen_movi_i32(tmp, insn & 0xff);
9071 if (!s->condexec_mask)
9073 store_reg(s, rd, tmp);
9075 tmp = load_reg(s, rd);
9076 tmp2 = tcg_temp_new_i32();
9077 tcg_gen_movi_i32(tmp2, insn & 0xff);
9080 gen_sub_CC(tmp, tmp, tmp2);
9081 tcg_temp_free_i32(tmp);
9082 tcg_temp_free_i32(tmp2);
9085 if (s->condexec_mask)
9086 tcg_gen_add_i32(tmp, tmp, tmp2);
9088 gen_add_CC(tmp, tmp, tmp2);
9089 tcg_temp_free_i32(tmp2);
9090 store_reg(s, rd, tmp);
9093 if (s->condexec_mask)
9094 tcg_gen_sub_i32(tmp, tmp, tmp2);
9096 gen_sub_CC(tmp, tmp, tmp2);
9097 tcg_temp_free_i32(tmp2);
9098 store_reg(s, rd, tmp);
9104 if (insn & (1 << 11)) {
9105 rd = (insn >> 8) & 7;
9106 /* load pc-relative. Bit 1 of PC is ignored. */
9107 val = s->pc + 2 + ((insn & 0xff) * 4);
9108 val &= ~(uint32_t)2;
9109 addr = tcg_temp_new_i32();
9110 tcg_gen_movi_i32(addr, val);
9111 tmp = gen_ld32(addr, IS_USER(s));
9112 tcg_temp_free_i32(addr);
9113 store_reg(s, rd, tmp);
9116 if (insn & (1 << 10)) {
9117 /* data processing extended or blx */
9118 rd = (insn & 7) | ((insn >> 4) & 8);
9119 rm = (insn >> 3) & 0xf;
9120 op = (insn >> 8) & 3;
9123 tmp = load_reg(s, rd);
9124 tmp2 = load_reg(s, rm);
9125 tcg_gen_add_i32(tmp, tmp, tmp2);
9126 tcg_temp_free_i32(tmp2);
9127 store_reg(s, rd, tmp);
9130 tmp = load_reg(s, rd);
9131 tmp2 = load_reg(s, rm);
9132 gen_sub_CC(tmp, tmp, tmp2);
9133 tcg_temp_free_i32(tmp2);
9134 tcg_temp_free_i32(tmp);
9136 case 2: /* mov/cpy */
9137 tmp = load_reg(s, rm);
9138 store_reg(s, rd, tmp);
9140 case 3:/* branch [and link] exchange thumb register */
9141 tmp = load_reg(s, rm);
9142 if (insn & (1 << 7)) {
9144 val = (uint32_t)s->pc | 1;
9145 tmp2 = tcg_temp_new_i32();
9146 tcg_gen_movi_i32(tmp2, val);
9147 store_reg(s, 14, tmp2);
9149 /* already thumb, no need to check */
9156 /* data processing register */
9158 rm = (insn >> 3) & 7;
9159 op = (insn >> 6) & 0xf;
9160 if (op == 2 || op == 3 || op == 4 || op == 7) {
9161 /* the shift/rotate ops want the operands backwards */
9170 if (op == 9) { /* neg */
9171 tmp = tcg_temp_new_i32();
9172 tcg_gen_movi_i32(tmp, 0);
9173 } else if (op != 0xf) { /* mvn doesn't read its first operand */
9174 tmp = load_reg(s, rd);
9179 tmp2 = load_reg(s, rm);
9182 tcg_gen_and_i32(tmp, tmp, tmp2);
9183 if (!s->condexec_mask)
9187 tcg_gen_xor_i32(tmp, tmp, tmp2);
9188 if (!s->condexec_mask)
9192 if (s->condexec_mask) {
9193 gen_shl(tmp2, tmp2, tmp);
9195 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
9200 if (s->condexec_mask) {
9201 gen_shr(tmp2, tmp2, tmp);
9203 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
9208 if (s->condexec_mask) {
9209 gen_sar(tmp2, tmp2, tmp);
9211 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
9216 if (s->condexec_mask)
9219 gen_helper_adc_cc(tmp, cpu_env, tmp, tmp2);
9222 if (s->condexec_mask)
9223 gen_sub_carry(tmp, tmp, tmp2);
9225 gen_helper_sbc_cc(tmp, cpu_env, tmp, tmp2);
9228 if (s->condexec_mask) {
9229 tcg_gen_andi_i32(tmp, tmp, 0x1f);
9230 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9232 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
9237 tcg_gen_and_i32(tmp, tmp, tmp2);
9242 if (s->condexec_mask)
9243 tcg_gen_neg_i32(tmp, tmp2);
9245 gen_sub_CC(tmp, tmp, tmp2);
9248 gen_sub_CC(tmp, tmp, tmp2);
9252 gen_add_CC(tmp, tmp, tmp2);
9256 tcg_gen_or_i32(tmp, tmp, tmp2);
9257 if (!s->condexec_mask)
9261 tcg_gen_mul_i32(tmp, tmp, tmp2);
9262 if (!s->condexec_mask)
9266 tcg_gen_andc_i32(tmp, tmp, tmp2);
9267 if (!s->condexec_mask)
9271 tcg_gen_not_i32(tmp2, tmp2);
9272 if (!s->condexec_mask)
9280 store_reg(s, rm, tmp2);
9282 tcg_temp_free_i32(tmp);
9284 store_reg(s, rd, tmp);
9285 tcg_temp_free_i32(tmp2);
9288 tcg_temp_free_i32(tmp);
9289 tcg_temp_free_i32(tmp2);
9294 /* load/store register offset. */
9296 rn = (insn >> 3) & 7;
9297 rm = (insn >> 6) & 7;
9298 op = (insn >> 9) & 7;
9299 addr = load_reg(s, rn);
9300 tmp = load_reg(s, rm);
9301 tcg_gen_add_i32(addr, addr, tmp);
9302 tcg_temp_free_i32(tmp);
9304 if (op < 3) /* store */
9305 tmp = load_reg(s, rd);
9309 gen_st32(tmp, addr, IS_USER(s));
9312 gen_st16(tmp, addr, IS_USER(s));
9315 gen_st8(tmp, addr, IS_USER(s));
9318 tmp = gen_ld8s(addr, IS_USER(s));
9321 tmp = gen_ld32(addr, IS_USER(s));
9324 tmp = gen_ld16u(addr, IS_USER(s));
9327 tmp = gen_ld8u(addr, IS_USER(s));
9330 tmp = gen_ld16s(addr, IS_USER(s));
9333 if (op >= 3) /* load */
9334 store_reg(s, rd, tmp);
9335 tcg_temp_free_i32(addr);
9339 /* load/store word immediate offset */
9341 rn = (insn >> 3) & 7;
9342 addr = load_reg(s, rn);
9343 val = (insn >> 4) & 0x7c;
9344 tcg_gen_addi_i32(addr, addr, val);
9346 if (insn & (1 << 11)) {
9348 tmp = gen_ld32(addr, IS_USER(s));
9349 store_reg(s, rd, tmp);
9352 tmp = load_reg(s, rd);
9353 gen_st32(tmp, addr, IS_USER(s));
9355 tcg_temp_free_i32(addr);
9359 /* load/store byte immediate offset */
9361 rn = (insn >> 3) & 7;
9362 addr = load_reg(s, rn);
9363 val = (insn >> 6) & 0x1f;
9364 tcg_gen_addi_i32(addr, addr, val);
9366 if (insn & (1 << 11)) {
9368 tmp = gen_ld8u(addr, IS_USER(s));
9369 store_reg(s, rd, tmp);
9372 tmp = load_reg(s, rd);
9373 gen_st8(tmp, addr, IS_USER(s));
9375 tcg_temp_free_i32(addr);
9379 /* load/store halfword immediate offset */
9381 rn = (insn >> 3) & 7;
9382 addr = load_reg(s, rn);
9383 val = (insn >> 5) & 0x3e;
9384 tcg_gen_addi_i32(addr, addr, val);
9386 if (insn & (1 << 11)) {
9388 tmp = gen_ld16u(addr, IS_USER(s));
9389 store_reg(s, rd, tmp);
9392 tmp = load_reg(s, rd);
9393 gen_st16(tmp, addr, IS_USER(s));
9395 tcg_temp_free_i32(addr);
9399 /* load/store from stack */
9400 rd = (insn >> 8) & 7;
9401 addr = load_reg(s, 13);
9402 val = (insn & 0xff) * 4;
9403 tcg_gen_addi_i32(addr, addr, val);
9405 if (insn & (1 << 11)) {
9407 tmp = gen_ld32(addr, IS_USER(s));
9408 store_reg(s, rd, tmp);
9411 tmp = load_reg(s, rd);
9412 gen_st32(tmp, addr, IS_USER(s));
9414 tcg_temp_free_i32(addr);
9418 /* add to high reg */
9419 rd = (insn >> 8) & 7;
9420 if (insn & (1 << 11)) {
9422 tmp = load_reg(s, 13);
9424 /* PC. bit 1 is ignored. */
9425 tmp = tcg_temp_new_i32();
9426 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
9428 val = (insn & 0xff) * 4;
9429 tcg_gen_addi_i32(tmp, tmp, val);
9430 store_reg(s, rd, tmp);
9435 op = (insn >> 8) & 0xf;
9438 /* adjust stack pointer */
9439 tmp = load_reg(s, 13);
9440 val = (insn & 0x7f) * 4;
9441 if (insn & (1 << 7))
9442 val = -(int32_t)val;
9443 tcg_gen_addi_i32(tmp, tmp, val);
9444 store_reg(s, 13, tmp);
9447 case 2: /* sign/zero extend. */
9450 rm = (insn >> 3) & 7;
9451 tmp = load_reg(s, rm);
9452 switch ((insn >> 6) & 3) {
9453 case 0: gen_sxth(tmp); break;
9454 case 1: gen_sxtb(tmp); break;
9455 case 2: gen_uxth(tmp); break;
9456 case 3: gen_uxtb(tmp); break;
9458 store_reg(s, rd, tmp);
9460 case 4: case 5: case 0xc: case 0xd:
9462 addr = load_reg(s, 13);
9463 if (insn & (1 << 8))
9467 for (i = 0; i < 8; i++) {
9468 if (insn & (1 << i))
9471 if ((insn & (1 << 11)) == 0) {
9472 tcg_gen_addi_i32(addr, addr, -offset);
9474 for (i = 0; i < 8; i++) {
9475 if (insn & (1 << i)) {
9476 if (insn & (1 << 11)) {
9478 tmp = gen_ld32(addr, IS_USER(s));
9479 store_reg(s, i, tmp);
9482 tmp = load_reg(s, i);
9483 gen_st32(tmp, addr, IS_USER(s));
9485 /* advance to the next address. */
9486 tcg_gen_addi_i32(addr, addr, 4);
9490 if (insn & (1 << 8)) {
9491 if (insn & (1 << 11)) {
9493 tmp = gen_ld32(addr, IS_USER(s));
9494 /* don't set the pc until the rest of the instruction has finished */
9498 tmp = load_reg(s, 14);
9499 gen_st32(tmp, addr, IS_USER(s));
9501 tcg_gen_addi_i32(addr, addr, 4);
9503 if ((insn & (1 << 11)) == 0) {
9504 tcg_gen_addi_i32(addr, addr, -offset);
9506 /* write back the new stack pointer */
9507 store_reg(s, 13, addr);
9508 /* set the new PC value */
9509 if ((insn & 0x0900) == 0x0900) {
9510 store_reg_from_load(env, s, 15, tmp);
9514 case 1: case 3: case 9: case 11: /* czb */
9516 tmp = load_reg(s, rm);
9517 s->condlabel = gen_new_label();
9519 if (insn & (1 << 11))
9520 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9522 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
9523 tcg_temp_free_i32(tmp);
9524 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
9525 val = (uint32_t)s->pc + 2;
9530 case 15: /* IT, nop-hint. */
9531 if ((insn & 0xf) == 0) {
9532 gen_nop_hint(s, (insn >> 4) & 0xf);
9536 s->condexec_cond = (insn >> 4) & 0xe;
9537 s->condexec_mask = insn & 0x1f;
9538 /* No actual code generated for this insn, just setup state. */
9541 case 0xe: /* bkpt */
9543 gen_exception_insn(s, 2, EXCP_BKPT);
9548 rn = (insn >> 3) & 0x7;
9550 tmp = load_reg(s, rn);
9551 switch ((insn >> 6) & 3) {
9552 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
9553 case 1: gen_rev16(tmp); break;
9554 case 3: gen_revsh(tmp); break;
9555 default: goto illegal_op;
9557 store_reg(s, rd, tmp);
9561 switch ((insn >> 5) & 7) {
9565 if (((insn >> 3) & 1) != s->bswap_code) {
9566 /* Dynamic endianness switching not implemented. */
9577 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9580 addr = tcg_const_i32(19);
9581 gen_helper_v7m_msr(cpu_env, addr, tmp);
9582 tcg_temp_free_i32(addr);
9586 addr = tcg_const_i32(16);
9587 gen_helper_v7m_msr(cpu_env, addr, tmp);
9588 tcg_temp_free_i32(addr);
9590 tcg_temp_free_i32(tmp);
9593 if (insn & (1 << 4)) {
9594 shift = CPSR_A | CPSR_I | CPSR_F;
9598 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
9613 /* load/store multiple */
9615 TCGV_UNUSED(loaded_var);
9616 rn = (insn >> 8) & 0x7;
9617 addr = load_reg(s, rn);
9618 for (i = 0; i < 8; i++) {
9619 if (insn & (1 << i)) {
9620 if (insn & (1 << 11)) {
9622 tmp = gen_ld32(addr, IS_USER(s));
9626 store_reg(s, i, tmp);
9630 tmp = load_reg(s, i);
9631 gen_st32(tmp, addr, IS_USER(s));
9633 /* advance to the next address */
9634 tcg_gen_addi_i32(addr, addr, 4);
9637 if ((insn & (1 << rn)) == 0) {
9638 /* base reg not in list: base register writeback */
9639 store_reg(s, rn, addr);
9641 /* base reg in list: if load, complete it now */
9642 if (insn & (1 << 11)) {
9643 store_reg(s, rn, loaded_var);
9645 tcg_temp_free_i32(addr);
9650 /* conditional branch or swi */
9651 cond = (insn >> 8) & 0xf;
9657 gen_set_pc_im(s->pc);
9658 s->is_jmp = DISAS_SWI;
9661 /* generate a conditional jump to next instruction */
9662 s->condlabel = gen_new_label();
9663 gen_test_cc(cond ^ 1, s->condlabel);
9666 /* jump to the offset */
9667 val = (uint32_t)s->pc + 2;
9668 offset = ((int32_t)insn << 24) >> 24;
9674 if (insn & (1 << 11)) {
9675 if (disas_thumb2_insn(env, s, insn))
9679 /* unconditional branch */
9680 val = (uint32_t)s->pc;
9681 offset = ((int32_t)insn << 21) >> 21;
9682 val += (offset << 1) + 2;
9687 if (disas_thumb2_insn(env, s, insn))
9693 gen_exception_insn(s, 4, EXCP_UDEF);
9697 gen_exception_insn(s, 2, EXCP_UDEF);
9700 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
9701 basic block 'tb'.  If search_pc is TRUE, also generate PC
9702 information for each intermediate instruction. */
9703 static inline void gen_intermediate_code_internal(CPUARMState *env,
9704 TranslationBlock *tb,
9707 DisasContext dc1, *dc = &dc1;
9709 uint16_t *gen_opc_end;
9711 target_ulong pc_start;
9712 uint32_t next_page_start;
9716 /* generate intermediate code */
9721 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
9723 dc->is_jmp = DISAS_NEXT;
9725 dc->singlestep_enabled = env->singlestep_enabled;
9727 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
9728 dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
9729 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
9730 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
9731 #if !defined(CONFIG_USER_ONLY)
9732 dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
9734 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
9735 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
9736 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
9737 cpu_F0s = tcg_temp_new_i32();
9738 cpu_F1s = tcg_temp_new_i32();
9739 cpu_F0d = tcg_temp_new_i64();
9740 cpu_F1d = tcg_temp_new_i64();
9743 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
9744 cpu_M0 = tcg_temp_new_i64();
9745 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
9748 max_insns = tb->cflags & CF_COUNT_MASK;
9750 max_insns = CF_COUNT_MASK;
9754 tcg_clear_temp_count();
9756 /* A note on handling of the condexec (IT) bits:
9758 * We want to avoid the overhead of having to write the updated condexec
9759 * bits back to the CPUARMState for every instruction in an IT block. So:
9760 * (1) if the condexec bits are not already zero then we write
9761 * zero back into the CPUARMState now. This avoids complications trying
9762 * to do it at the end of the block. (For example if we don't do this
9763 * it's hard to identify whether we can safely skip writing condexec
9764 * at the end of the TB, which we definitely want to do for the case
9765 * where a TB doesn't do anything with the IT state at all.)
9766 * (2) if we are going to leave the TB then we call gen_set_condexec()
9767 * which will write the correct value into CPUARMState if zero is wrong.
9768 * This is done both for leaving the TB at the end, and for leaving
9769 * it because of an exception we know will happen, which is done in
9770 * gen_exception_insn(). The latter is necessary because we need to
9771 * leave the TB with the PC/IT state just prior to execution of the
9772 * instruction which caused the exception.
9773 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
9774 * then the CPUARMState will be wrong and we need to reset it.
9775 * This is handled in the same way as restoration of the
9776 * PC in these situations: we will be called again with search_pc=1
9777 * and generate a mapping of the condexec bits for each PC in
9778 * gen_opc_condexec_bits[]. restore_state_to_opc() then uses
9779 * this to restore the condexec bits.
9781 * Note that there are no instructions which can read the condexec
9782 * bits, and none which can write non-static values to them, so
9783 * we don't need to care about whether CPUARMState is correct in the middle of a TB.  */
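/* Illustrative example (not from the original source): the translator keeps
 * the IT state as a 4-bit base condition (condexec_cond) plus a 5-bit mask
 * (condexec_mask).  After each instruction in the block the loop below shifts
 * the mask left by one and feeds its top bit into the low bit of the
 * condition, so each instruction in the block picks up its own then/else
 * sense until the mask reaches zero and the state is cleared.
 * gen_opc_condexec_bits[] stores the same information repacked as
 * (cond << 4) | (mask >> 1), which is what restore_state_to_opc() uses to
 * rebuild env->condexec_bits at any instruction boundary.
 */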
9787 /* Reset the conditional execution bits immediately.  This avoids
9788 complications trying to do it at the end of the block. */
9789 if (dc->condexec_mask || dc->condexec_cond)
9791 TCGv tmp = tcg_temp_new_i32();
9792 tcg_gen_movi_i32(tmp, 0);
9793 store_cpu_field(tmp, condexec_bits);
9796 #ifdef CONFIG_USER_ONLY
9797 /* Intercept jump to the magic kernel page. */
9798 if (dc->pc >= 0xffff0000) {
9799 /* We always get here via a jump, so know we are not in a
9800 conditional execution block. */
9801 gen_exception(EXCP_KERNEL_TRAP);
9802 dc->is_jmp = DISAS_UPDATE;
9806 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
9807 /* We always get here via a jump, so know we are not in a
9808 conditional execution block. */
9809 gen_exception(EXCP_EXCEPTION_EXIT);
9810 dc->is_jmp = DISAS_UPDATE;
9815 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
9816 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
9817 if (bp->pc == dc->pc) {
9818 gen_exception_insn(dc, 0, EXCP_DEBUG);
9819 /* Advance PC so that clearing the breakpoint will
9820 invalidate this TB. */
9822 goto done_generating;
9828 j = gen_opc_ptr - gen_opc_buf;
9832 gen_opc_instr_start[lj++] = 0;
9834 gen_opc_pc[lj] = dc->pc;
9835 gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
9836 gen_opc_instr_start[lj] = 1;
9837 gen_opc_icount[lj] = num_insns;
9840 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
9843 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
9844 tcg_gen_debug_insn_start(dc->pc);
9848 disas_thumb_insn(env, dc);
9849 if (dc->condexec_mask) {
9850 dc->condexec_cond = (dc->condexec_cond & 0xe) | ((dc->condexec_mask >> 4) & 1);
9852 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
9853 if (dc->condexec_mask == 0) {
9854 dc->condexec_cond = 0;
9858 disas_arm_insn(env, dc);
9861 if (dc->condjmp && !dc->is_jmp) {
9862 gen_set_label(dc->condlabel);
9866 if (tcg_check_temp_count()) {
9867 fprintf(stderr, "TCG temporary leak before %08x\n", dc->pc);
9870 /* Translation stops when a conditional branch is encountered.
9871 * Otherwise the subsequent code could get translated several times.
9872 * Also stop translation when a page boundary is reached.  This
9873 * ensures prefetch aborts occur at the right place. */
9875 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
9876 !env->singlestep_enabled &&
9878 dc->pc < next_page_start &&
9879 num_insns < max_insns);
9881 if (tb->cflags & CF_LAST_IO) {
9883 /* FIXME: This can theoretically happen with self-modifying code. */
9885 cpu_abort(env, "IO on conditional branch instruction");
9890 /* At this stage dc->condjmp will only be set when the skipped
9891 instruction was a conditional branch or trap, and the PC has
9892 already been written. */
9893 if (unlikely(env->singlestep_enabled)) {
9894 /* Make sure the pc is updated, and raise a debug exception. */
9896 gen_set_condexec(dc);
9897 if (dc->is_jmp == DISAS_SWI) {
9898 gen_exception(EXCP_SWI);
9900 gen_exception(EXCP_DEBUG);
9902 gen_set_label(dc->condlabel);
9904 if (dc->condjmp || !dc->is_jmp) {
9905 gen_set_pc_im(dc->pc);
9908 gen_set_condexec(dc);
9909 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
9910 gen_exception(EXCP_SWI);
9912 /* FIXME: Single stepping a WFI insn will not halt the CPU. */
9914 gen_exception(EXCP_DEBUG);
9917 /* While branches must always occur at the end of an IT block,
9918 there are a few other things that can cause us to terminate
9919 the TB in the middle of an IT block:
9920 - Exception generating instructions (bkpt, swi, undefined).
9922 - Hardware watchpoints.
9923 Hardware breakpoints have already been handled and skip this code. */
9925 gen_set_condexec(dc);
9926 switch(dc->is_jmp) {
9928 gen_goto_tb(dc, 1, dc->pc);
9933 /* indicate that the hash table must be used to find the next TB */
9937 /* nothing more to generate */
9940 gen_helper_wfi(cpu_env);
9943 gen_exception(EXCP_SWI);
9947 gen_set_label(dc->condlabel);
9948 gen_set_condexec(dc);
9949 gen_goto_tb(dc, 1, dc->pc);
9955 gen_icount_end(tb, num_insns);
9956 *gen_opc_ptr = INDEX_op_end;
9959 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
9960 qemu_log("----------------\n");
9961 qemu_log("IN: %s\n", lookup_symbol(pc_start));
9962 log_target_disas(pc_start, dc->pc - pc_start,
9963 dc->thumb | (dc->bswap_code << 1));
9968 j = gen_opc_ptr - gen_opc_buf;
9971 gen_opc_instr_start[lj++] = 0;
9973 tb->size = dc->pc - pc_start;
9974 tb->icount = num_insns;
9978 void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
9980 gen_intermediate_code_internal(env, tb, 0);
9983 void gen_intermediate_code_pc(CPUARMState *env, TranslationBlock *tb)
9985 gen_intermediate_code_internal(env, tb, 1);
9988 static const char *cpu_mode_names[16] = {
9989 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
9990 "???", "???", "???", "und", "???", "???", "???", "sys"
9993 void cpu_dump_state(CPUARMState *env, FILE *f, fprintf_function cpu_fprintf,
10000 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
10002 cpu_fprintf(f, "\n");
10004 cpu_fprintf(f, " ");
10006 psr = cpsr_read(env);
10007 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
10009 psr & (1 << 31) ? 'N' : '-',
10010 psr & (1 << 30) ? 'Z' : '-',
10011 psr & (1 << 29) ? 'C' : '-',
10012 psr & (1 << 28) ? 'V' : '-',
10013 psr & CPSR_T ? 'T' : 'A',
10014 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
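/* Illustrative example (not from the original source): with psr = 0x600001d3
 * the format string above would print
 *   PSR=600001d3 -ZC- A svc32
 * i.e. N clear, Z and C set, V clear, Thumb bit clear ('A' for ARM state),
 * supervisor mode, with the 32-bit mode bit set.
 */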
10016 if (flags & CPU_DUMP_FPU) {
10017 int numvfpregs = 0;
10018 if (arm_feature(env, ARM_FEATURE_VFP)) {
10021 if (arm_feature(env, ARM_FEATURE_VFP3)) {
10024 for (i = 0; i < numvfpregs; i++) {
10025 uint64_t v = float64_val(env->vfp.regs[i]);
10026 cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
10027 i * 2, (uint32_t)v,
10028 i * 2 + 1, (uint32_t)(v >> 32),
10031 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
10035 void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, int pc_pos)
10037 env->regs[15] = gen_opc_pc[pc_pos];
10038 env->condexec_bits = gen_opc_condexec_bits[pc_pos];