 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2005-2007 CodeSourcery
 * Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#define ENABLE_ARCH_4T    arm_feature(env, ARM_FEATURE_V4T)
#define ENABLE_ARCH_5     arm_feature(env, ARM_FEATURE_V5)
/* currently all emulated v5 cores are also v5TE, so don't bother */
#define ENABLE_ARCH_5TE   arm_feature(env, ARM_FEATURE_V5)
#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_feature(env, ARM_FEATURE_V7)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while (0)
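
/*
 * Illustrative usage (not part of the excerpt shown here): a decoder arm
 * that requires a given architecture level is typically gated like
 *
 *     ARCH(6T2);   -- jumps to illegal_op unless the core has Thumb-2
 *
 * The surrounding function is assumed to provide an 'illegal_op' label
 * and to have 'env' in scope.
 */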
/* internal defines */
typedef struct DisasContext {
    target_ulong pc;
    int is_jmp;
    /* Nonzero if this instruction has been conditionally skipped.  */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped.  */
    int condlabel;
    /* Thumb-2 conditional execution bits.  */
    int condexec_mask;
    int condexec_cond;
    struct TranslationBlock *tb;
    int singlestep_enabled;
    int thumb;
#if !defined(CONFIG_USER_ONLY)
    int user;
#endif
} DisasContext;

static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated.  */
#define DISAS_WFI 4
#define DISAS_SWI 5
static TCGv_ptr cpu_env;
/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
static TCGv_i32 cpu_exclusive_addr;
static TCGv_i32 cpu_exclusive_val;
static TCGv_i32 cpu_exclusive_high;
#ifdef CONFIG_USER_ONLY
static TCGv_i32 cpu_exclusive_test;
static TCGv_i32 cpu_exclusive_info;
#endif

/* FIXME:  These should be removed.  */
static TCGv cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "gen-icount.h"

static const char *regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
/* initialize TCG globals.  */
void arm_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                          offsetof(CPUARMState, regs[i]),
                                          regnames[i]);
    }
    cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_val), "exclusive_val");
    cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_high), "exclusive_high");
#ifdef CONFIG_USER_ONLY
    cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_test), "exclusive_test");
    cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_info), "exclusive_info");
#endif
}
static inline TCGv load_cpu_offset(int offset)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))

static inline void store_cpu_offset(TCGv var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUARMState, name))
/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}
/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv load_reg(DisasContext *s, int reg)
{
    TCGv tmp = tcg_temp_new_i32();
    load_reg_var(s, tmp, reg);
    return tmp;
}
/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 15) {
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}
/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
static inline void gen_set_cpsr(TCGv var, uint32_t mask)
{
    TCGv tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}

/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
static void gen_exception(int excp)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(tmp);
    tcg_temp_free_i32(tmp);
}
static void gen_smul_dual(TCGv a, TCGv b)
{
    TCGv tmp1 = tcg_temp_new_i32();
    TCGv tmp2 = tcg_temp_new_i32();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}
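
/*
 * Illustrative sketch (not part of the original file): the dual signed
 * 16x16 multiply that gen_smul_dual() emits as TCG ops, written as plain
 * C on host integers.  On return, *a holds the product of the low
 * halfwords and *b the product of the high halfwords, matching the
 * generated code.
 */
static inline void example_smul_dual(uint32_t *a, uint32_t *b)
{
    int32_t lo = (int32_t)(int16_t)*a * (int32_t)(int16_t)*b;
    int32_t hi = ((int32_t)*a >> 16) * ((int32_t)*b >> 16);
    *a = (uint32_t)lo;
    *b = (uint32_t)hi;
}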
/* Byteswap each halfword.  */
static void gen_rev16(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
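
/*
 * Illustrative sketch (not part of the original file): the same
 * shift-and-mask per-halfword byteswap that gen_rev16() emits, applied
 * to a plain host integer.
 */
static inline uint32_t example_rev16(uint32_t x)
{
    return ((x >> 8) & 0x00ff00ff) | ((x << 8) & 0xff00ff00);
}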
/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(var, var);
}
/* Unsigned bitfield extract.  */
static void gen_ubfx(TCGv var, int shift, uint32_t mask)
{
    tcg_gen_shri_i32(var, var, shift);
    tcg_gen_andi_i32(var, var, mask);
}
/* Signed bitfield extract.  */
static void gen_sbfx(TCGv var, int shift, int width)
{
    uint32_t signbit;

    tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}
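
/*
 * Illustrative sketch (not part of the original file): the xor/subtract
 * sign-extension trick gen_sbfx() emits, in plain C.  Assumes
 * 0 < width < 32, matching the guarded path above.
 */
static inline int32_t example_sign_extend_field(uint32_t val, int width)
{
    uint32_t signbit = 1u << (width - 1);
    val &= (1u << width) - 1;                     /* isolate the field  */
    return (int32_t)((val ^ signbit) - signbit);  /* propagate sign bit */
}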
/* Bitfield insertion.  Insert val into base.  Clobbers base and val.  */
static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
{
    tcg_gen_andi_i32(val, val, mask);
    tcg_gen_shli_i32(val, val, shift);
    tcg_gen_andi_i32(base, base, ~(mask << shift));
    tcg_gen_or_i32(dest, base, val);
}
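
/*
 * Illustrative sketch (not part of the original file): the equivalent
 * bitfield insert on plain host integers, mirroring the TCG sequence
 * emitted by gen_bfi().
 */
static inline uint32_t example_bfi(uint32_t base, uint32_t val,
                                   int shift, uint32_t mask)
{
    return (base & ~(mask << shift)) | ((val & mask) << shift);
}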
/* Return (b << 32) + a.  Mark inputs as dead.  */
static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_add_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}
/* Return (b << 32) - a.  Mark inputs as dead.  */
static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_sub_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}
/* FIXME: Most targets have native widening multiplication.
   It would be good to use that instead of a full wide multiply.  */
/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp1, a);
    tcg_temp_free_i32(a);
    tcg_gen_extu_i32_i64(tmp2, b);
    tcg_temp_free_i32(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}
static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    tcg_temp_free_i32(a);
    tcg_gen_ext_i32_i64(tmp2, b);
    tcg_temp_free_i32(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}
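
/*
 * Illustrative sketch (not part of the original file): the widening
 * multiplies above computed on plain host integers.  gen_mulu_i64_i32()
 * corresponds to the unsigned form, gen_muls_i64_i32() to the signed one.
 */
static inline uint64_t example_mulu_64(uint32_t a, uint32_t b)
{
    return (uint64_t)a * (uint64_t)b;
}

static inline int64_t example_muls_64(uint32_t a, uint32_t b)
{
    return (int64_t)(int32_t)a * (int64_t)(int32_t)b;
}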
/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */
static void gen_add16(TCGv t0, TCGv t1)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(t1);
}
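
/*
 * Illustrative sketch (not part of the original file): the
 * carry-suppression trick above in plain C.  Bit 15 of both operands is
 * cleared before the add so no carry can cross from the low halfword
 * into the high one; the correct bit-15 sum is then restored by XOR.
 */
static inline uint32_t example_packed_add16(uint32_t t0, uint32_t t1)
{
    uint32_t tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000u;
    t1 &= ~0x8000u;
    return (t0 + t1) ^ tmp;
}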
#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, CF))

/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 31);
    gen_set_CF(tmp);
    tcg_temp_free_i32(tmp);
}
/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, NF));
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, ZF));
}
/* T0 += T1 + CF.  */
static void gen_adc(TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(t0, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
}
/* dest = T0 + T1 + CF.  */
static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_temp_free_i32(tmp);
}
/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_sub_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_gen_subi_i32(dest, dest, 1);
    tcg_temp_free_i32(tmp);
}
/* FIXME:  Implement this natively.  */
#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)

static void shifter_out_im(TCGv var, int shift)
{
    TCGv tmp = tcg_temp_new_i32();
    if (shift == 0) {
        tcg_gen_andi_i32(tmp, var, 1);
    } else {
        tcg_gen_shri_i32(tmp, var, shift);
        if (shift != 31)
            tcg_gen_andi_i32(tmp, tmp, 1);
    }
    gen_set_CF(tmp);
    tcg_temp_free_i32(tmp);
}
/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(var, var, 31);
                gen_set_CF(var);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift); break;
        } else {
            TCGv tmp = load_cpu_field(CF);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_shli_i32(tmp, tmp, 31);
            tcg_gen_or_i32(var, var, tmp);
            tcg_temp_free_i32(tmp);
        }
    }
}
static inline void gen_arm_shift_reg(TCGv var, int shiftop,
                                     TCGv shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, var, shift); break;
        case 1: gen_helper_shr_cc(var, var, shift); break;
        case 2: gen_helper_sar_cc(var, var, shift); break;
        case 3: gen_helper_ror_cc(var, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0: gen_helper_shl(var, var, shift); break;
        case 1: gen_helper_shr(var, var, shift); break;
        case 2: gen_helper_sar(var, var, shift); break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
                tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    tcg_temp_free_i32(shift);
}
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.  */
#define PAS_OP(pfx) \
    switch (op1) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op2) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
static void gen_test_cc(int cc, int label)
{
    TCGv tmp;
    TCGv tmp2;
    int inv;

    switch (cc) {
    case 0: /* eq: Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 1: /* ne: !Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 2: /* cs: C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 3: /* cc: !C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 4: /* mi: N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 5: /* pl: !N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 6: /* vs: V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 7: /* vc: !V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 8: /* hi: C && !Z */
        inv = gen_new_label();
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 9: /* ls: !C || Z */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 10: /* ge: N == V -> N ^ V == 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 11: /* lt: N != V -> N ^ V != 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 12: /* gt: !Z && N == V */
        inv = gen_new_label();
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 13: /* le: Z || N != V */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }
    tcg_temp_free_i32(tmp);
}
static const uint8_t table_logic_cc[16] = {
/* Set PC and Thumb state from an immediate address.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv tmp;

    s->is_jmp = DISAS_UPDATE;
    if (s->thumb != (addr & 1)) {
        tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}
/* Set PC and Thumb state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv var)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}
/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above. The source must be a temporary
   and will be marked as dead. */
static inline void store_reg_bx(CPUARMState *env, DisasContext *s,
                                int reg, TCGv var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}
/* Variant of store_reg which uses branch&exchange logic when storing
 * to r15 in ARM architecture v5T and above. This is used for storing
 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
static inline void store_reg_from_load(CPUARMState *env, DisasContext *s,
                                       int reg, TCGv var)
{
    if (reg == 15 && ENABLE_ARCH_5) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}
static inline TCGv gen_ld8s(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld8s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld8u(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld8u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16s(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld16s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16u(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld16u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld32(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, addr, index);
    return tmp;
}
static inline TCGv_i64 gen_ld64(TCGv addr, int index)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(tmp, addr, index);
    return tmp;
}
static inline void gen_st8(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st8(val, addr, index);
    tcg_temp_free_i32(val);
}
static inline void gen_st16(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st16(val, addr, index);
    tcg_temp_free_i32(val);
}
static inline void gen_st32(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st32(val, addr, index);
    tcg_temp_free_i32(val);
}
static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
{
    tcg_gen_qemu_st64(val, addr, index);
    tcg_temp_free_i64(val);
}
static inline void gen_set_pc_im(uint32_t val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}

/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->is_jmp = DISAS_UPDATE;
}
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv var)
{
    int val, rm, shift, shiftop;
    TCGv offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}
static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv var)
{
    int val, rm;
    TCGv offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = (insn) & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}
static TCGv_ptr get_fpstatus_ptr(int neon)
{
    TCGv_ptr statusptr = tcg_temp_new_ptr();
    int offset;
    if (neon) {
        offset = offsetof(CPUARMState, vfp.standard_fp_status);
    } else {
        offset = offsetof(CPUARMState, vfp.fp_status);
    }
    tcg_gen_addi_ptr(statusptr, cpu_env, offset);
    return statusptr;
}
#define VFP_OP2(name) \
static inline void gen_vfp_##name(int dp) \
{ \
    TCGv_ptr fpst = get_fpstatus_ptr(0); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
    } \
    tcg_temp_free_ptr(fpst); \
}
static inline void gen_vfp_F1_mul(int dp)
{
    /* Like gen_vfp_mul() but put result in F1 */
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    if (dp) {
        gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
    } else {
        gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
    }
    tcg_temp_free_ptr(fpst);
}
static inline void gen_vfp_F1_neg(int dp)
{
    /* Like gen_vfp_neg() but put result in F1 */
    if (dp) {
        gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
    } else {
        gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
    }
}
static inline void gen_vfp_abs(int dp)
{
    if (dp)
        gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_neg(int dp)
{
    if (dp)
        gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_sqrt(int dp)
{
    if (dp)
        gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_cmp(int dp)
{
    if (dp)
        gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_cmpe(int dp)
{
    if (dp)
        gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_F1_ld0(int dp)
{
    if (dp)
        tcg_gen_movi_i64(cpu_F1d, 0);
    else
        tcg_gen_movi_i32(cpu_F1s, 0);
}
#define VFP_GEN_ITOF(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

#define VFP_GEN_FTOI(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

#define VFP_GEN_FIX(name) \
static inline void gen_vfp_##name(int dp, int shift, int neon) \
{ \
    TCGv tmp_shift = tcg_const_i32(shift); \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, statusptr); \
    } \
    tcg_temp_free_i32(tmp_shift); \
    tcg_temp_free_ptr(statusptr); \
}
static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
{
    if (dp)
        tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
}

static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
{
    if (dp)
        tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
}
static inline long
vfp_reg_offset (int dp, int reg)
{
    if (dp)
        return offsetof(CPUARMState, vfp.regs[reg]);
    else if (reg & 1) {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.upper);
    } else {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.lower);
    }
}
/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}
static TCGv neon_load_reg(int reg, int pass)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}

static void neon_store_reg(int reg, int pass, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}
static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64
static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}
#define ARM_CP_RW_BIT (1 << 20)
static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline TCGv iwmmxt_load_creg(int reg)
{
    TCGv var = tcg_temp_new_i32();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    return var;
}

static inline void iwmmxt_store_creg(int reg, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}
static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}
#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}
IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)
static void gen_op_iwmmxt_set_mup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_set_cup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}
static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv tmp = tcg_temp_new_i32();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}
static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
{
    int rd;
    uint32_t offset;
    TCGv tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = tcg_temp_new_i32();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_trunc_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    tcg_temp_free_i32(tmp);
    return 0;
}
/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
{
    int rd, wrd;
    int rdhi, rdlo, rd0, rd1, i;
    TCGv addr;
    TCGv tmp, tmp2, tmp3;
1376 if ((insn
& 0x0e000e00) == 0x0c000000) {
1377 if ((insn
& 0x0fe00ff0) == 0x0c400000) {
1379 rdlo
= (insn
>> 12) & 0xf;
1380 rdhi
= (insn
>> 16) & 0xf;
1381 if (insn
& ARM_CP_RW_BIT
) { /* TMRRC */
1382 iwmmxt_load_reg(cpu_V0
, wrd
);
1383 tcg_gen_trunc_i64_i32(cpu_R
[rdlo
], cpu_V0
);
1384 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
1385 tcg_gen_trunc_i64_i32(cpu_R
[rdhi
], cpu_V0
);
1386 } else { /* TMCRR */
1387 tcg_gen_concat_i32_i64(cpu_V0
, cpu_R
[rdlo
], cpu_R
[rdhi
]);
1388 iwmmxt_store_reg(cpu_V0
, wrd
);
1389 gen_op_iwmmxt_set_mup();
1394 wrd
= (insn
>> 12) & 0xf;
1395 addr
= tcg_temp_new_i32();
1396 if (gen_iwmmxt_address(s
, insn
, addr
)) {
1397 tcg_temp_free_i32(addr
);
1400 if (insn
& ARM_CP_RW_BIT
) {
1401 if ((insn
>> 28) == 0xf) { /* WLDRW wCx */
1402 tmp
= tcg_temp_new_i32();
1403 tcg_gen_qemu_ld32u(tmp
, addr
, IS_USER(s
));
1404 iwmmxt_store_creg(wrd
, tmp
);
1407 if (insn
& (1 << 8)) {
1408 if (insn
& (1 << 22)) { /* WLDRD */
1409 tcg_gen_qemu_ld64(cpu_M0
, addr
, IS_USER(s
));
1411 } else { /* WLDRW wRd */
1412 tmp
= gen_ld32(addr
, IS_USER(s
));
1415 if (insn
& (1 << 22)) { /* WLDRH */
1416 tmp
= gen_ld16u(addr
, IS_USER(s
));
1417 } else { /* WLDRB */
1418 tmp
= gen_ld8u(addr
, IS_USER(s
));
1422 tcg_gen_extu_i32_i64(cpu_M0
, tmp
);
1423 tcg_temp_free_i32(tmp
);
1425 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1428 if ((insn
>> 28) == 0xf) { /* WSTRW wCx */
1429 tmp
= iwmmxt_load_creg(wrd
);
1430 gen_st32(tmp
, addr
, IS_USER(s
));
1432 gen_op_iwmmxt_movq_M0_wRn(wrd
);
1433 tmp
= tcg_temp_new_i32();
1434 if (insn
& (1 << 8)) {
1435 if (insn
& (1 << 22)) { /* WSTRD */
1436 tcg_temp_free_i32(tmp
);
1437 tcg_gen_qemu_st64(cpu_M0
, addr
, IS_USER(s
));
1438 } else { /* WSTRW wRd */
1439 tcg_gen_trunc_i64_i32(tmp
, cpu_M0
);
1440 gen_st32(tmp
, addr
, IS_USER(s
));
1443 if (insn
& (1 << 22)) { /* WSTRH */
1444 tcg_gen_trunc_i64_i32(tmp
, cpu_M0
);
1445 gen_st16(tmp
, addr
, IS_USER(s
));
1446 } else { /* WSTRB */
1447 tcg_gen_trunc_i64_i32(tmp
, cpu_M0
);
1448 gen_st8(tmp
, addr
, IS_USER(s
));
1453 tcg_temp_free_i32(addr
);
1457 if ((insn
& 0x0f000000) != 0x0e000000)
1460 switch (((insn
>> 12) & 0xf00) | ((insn
>> 4) & 0xff)) {
1461 case 0x000: /* WOR */
1462 wrd
= (insn
>> 12) & 0xf;
1463 rd0
= (insn
>> 0) & 0xf;
1464 rd1
= (insn
>> 16) & 0xf;
1465 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1466 gen_op_iwmmxt_orq_M0_wRn(rd1
);
1467 gen_op_iwmmxt_setpsr_nz();
1468 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1469 gen_op_iwmmxt_set_mup();
1470 gen_op_iwmmxt_set_cup();
1472 case 0x011: /* TMCR */
1475 rd
= (insn
>> 12) & 0xf;
1476 wrd
= (insn
>> 16) & 0xf;
1478 case ARM_IWMMXT_wCID
:
1479 case ARM_IWMMXT_wCASF
:
1481 case ARM_IWMMXT_wCon
:
1482 gen_op_iwmmxt_set_cup();
1484 case ARM_IWMMXT_wCSSF
:
1485 tmp
= iwmmxt_load_creg(wrd
);
1486 tmp2
= load_reg(s
, rd
);
1487 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
1488 tcg_temp_free_i32(tmp2
);
1489 iwmmxt_store_creg(wrd
, tmp
);
1491 case ARM_IWMMXT_wCGR0
:
1492 case ARM_IWMMXT_wCGR1
:
1493 case ARM_IWMMXT_wCGR2
:
1494 case ARM_IWMMXT_wCGR3
:
1495 gen_op_iwmmxt_set_cup();
1496 tmp
= load_reg(s
, rd
);
1497 iwmmxt_store_creg(wrd
, tmp
);
1503 case 0x100: /* WXOR */
1504 wrd
= (insn
>> 12) & 0xf;
1505 rd0
= (insn
>> 0) & 0xf;
1506 rd1
= (insn
>> 16) & 0xf;
1507 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1508 gen_op_iwmmxt_xorq_M0_wRn(rd1
);
1509 gen_op_iwmmxt_setpsr_nz();
1510 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1511 gen_op_iwmmxt_set_mup();
1512 gen_op_iwmmxt_set_cup();
1514 case 0x111: /* TMRC */
1517 rd
= (insn
>> 12) & 0xf;
1518 wrd
= (insn
>> 16) & 0xf;
1519 tmp
= iwmmxt_load_creg(wrd
);
1520 store_reg(s
, rd
, tmp
);
1522 case 0x300: /* WANDN */
1523 wrd
= (insn
>> 12) & 0xf;
1524 rd0
= (insn
>> 0) & 0xf;
1525 rd1
= (insn
>> 16) & 0xf;
1526 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1527 tcg_gen_neg_i64(cpu_M0
, cpu_M0
);
1528 gen_op_iwmmxt_andq_M0_wRn(rd1
);
1529 gen_op_iwmmxt_setpsr_nz();
1530 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1531 gen_op_iwmmxt_set_mup();
1532 gen_op_iwmmxt_set_cup();
1534 case 0x200: /* WAND */
1535 wrd
= (insn
>> 12) & 0xf;
1536 rd0
= (insn
>> 0) & 0xf;
1537 rd1
= (insn
>> 16) & 0xf;
1538 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1539 gen_op_iwmmxt_andq_M0_wRn(rd1
);
1540 gen_op_iwmmxt_setpsr_nz();
1541 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1542 gen_op_iwmmxt_set_mup();
1543 gen_op_iwmmxt_set_cup();
1545 case 0x810: case 0xa10: /* WMADD */
1546 wrd
= (insn
>> 12) & 0xf;
1547 rd0
= (insn
>> 0) & 0xf;
1548 rd1
= (insn
>> 16) & 0xf;
1549 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1550 if (insn
& (1 << 21))
1551 gen_op_iwmmxt_maddsq_M0_wRn(rd1
);
1553 gen_op_iwmmxt_madduq_M0_wRn(rd1
);
1554 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1555 gen_op_iwmmxt_set_mup();
1557 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1558 wrd
= (insn
>> 12) & 0xf;
1559 rd0
= (insn
>> 16) & 0xf;
1560 rd1
= (insn
>> 0) & 0xf;
1561 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1562 switch ((insn
>> 22) & 3) {
1564 gen_op_iwmmxt_unpacklb_M0_wRn(rd1
);
1567 gen_op_iwmmxt_unpacklw_M0_wRn(rd1
);
1570 gen_op_iwmmxt_unpackll_M0_wRn(rd1
);
1575 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1576 gen_op_iwmmxt_set_mup();
1577 gen_op_iwmmxt_set_cup();
1579 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1580 wrd
= (insn
>> 12) & 0xf;
1581 rd0
= (insn
>> 16) & 0xf;
1582 rd1
= (insn
>> 0) & 0xf;
1583 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1584 switch ((insn
>> 22) & 3) {
1586 gen_op_iwmmxt_unpackhb_M0_wRn(rd1
);
1589 gen_op_iwmmxt_unpackhw_M0_wRn(rd1
);
1592 gen_op_iwmmxt_unpackhl_M0_wRn(rd1
);
1597 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1598 gen_op_iwmmxt_set_mup();
1599 gen_op_iwmmxt_set_cup();
1601 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1602 wrd
= (insn
>> 12) & 0xf;
1603 rd0
= (insn
>> 16) & 0xf;
1604 rd1
= (insn
>> 0) & 0xf;
1605 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1606 if (insn
& (1 << 22))
1607 gen_op_iwmmxt_sadw_M0_wRn(rd1
);
1609 gen_op_iwmmxt_sadb_M0_wRn(rd1
);
1610 if (!(insn
& (1 << 20)))
1611 gen_op_iwmmxt_addl_M0_wRn(wrd
);
1612 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1613 gen_op_iwmmxt_set_mup();
1615 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1616 wrd
= (insn
>> 12) & 0xf;
1617 rd0
= (insn
>> 16) & 0xf;
1618 rd1
= (insn
>> 0) & 0xf;
1619 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1620 if (insn
& (1 << 21)) {
1621 if (insn
& (1 << 20))
1622 gen_op_iwmmxt_mulshw_M0_wRn(rd1
);
1624 gen_op_iwmmxt_mulslw_M0_wRn(rd1
);
1626 if (insn
& (1 << 20))
1627 gen_op_iwmmxt_muluhw_M0_wRn(rd1
);
1629 gen_op_iwmmxt_mululw_M0_wRn(rd1
);
1631 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1632 gen_op_iwmmxt_set_mup();
1634 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1635 wrd
= (insn
>> 12) & 0xf;
1636 rd0
= (insn
>> 16) & 0xf;
1637 rd1
= (insn
>> 0) & 0xf;
1638 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1639 if (insn
& (1 << 21))
1640 gen_op_iwmmxt_macsw_M0_wRn(rd1
);
1642 gen_op_iwmmxt_macuw_M0_wRn(rd1
);
1643 if (!(insn
& (1 << 20))) {
1644 iwmmxt_load_reg(cpu_V1
, wrd
);
1645 tcg_gen_add_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1647 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1648 gen_op_iwmmxt_set_mup();
1650 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1651 wrd
= (insn
>> 12) & 0xf;
1652 rd0
= (insn
>> 16) & 0xf;
1653 rd1
= (insn
>> 0) & 0xf;
1654 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1655 switch ((insn
>> 22) & 3) {
1657 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1
);
1660 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1
);
1663 gen_op_iwmmxt_cmpeql_M0_wRn(rd1
);
1668 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1669 gen_op_iwmmxt_set_mup();
1670 gen_op_iwmmxt_set_cup();
1672 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1673 wrd
= (insn
>> 12) & 0xf;
1674 rd0
= (insn
>> 16) & 0xf;
1675 rd1
= (insn
>> 0) & 0xf;
1676 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1677 if (insn
& (1 << 22)) {
1678 if (insn
& (1 << 20))
1679 gen_op_iwmmxt_avgw1_M0_wRn(rd1
);
1681 gen_op_iwmmxt_avgw0_M0_wRn(rd1
);
1683 if (insn
& (1 << 20))
1684 gen_op_iwmmxt_avgb1_M0_wRn(rd1
);
1686 gen_op_iwmmxt_avgb0_M0_wRn(rd1
);
1688 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1689 gen_op_iwmmxt_set_mup();
1690 gen_op_iwmmxt_set_cup();
1692 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1693 wrd
= (insn
>> 12) & 0xf;
1694 rd0
= (insn
>> 16) & 0xf;
1695 rd1
= (insn
>> 0) & 0xf;
1696 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1697 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCGR0
+ ((insn
>> 20) & 3));
1698 tcg_gen_andi_i32(tmp
, tmp
, 7);
1699 iwmmxt_load_reg(cpu_V1
, rd1
);
1700 gen_helper_iwmmxt_align(cpu_M0
, cpu_M0
, cpu_V1
, tmp
);
1701 tcg_temp_free_i32(tmp
);
1702 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1703 gen_op_iwmmxt_set_mup();
1705 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1706 if (((insn
>> 6) & 3) == 3)
1708 rd
= (insn
>> 12) & 0xf;
1709 wrd
= (insn
>> 16) & 0xf;
1710 tmp
= load_reg(s
, rd
);
1711 gen_op_iwmmxt_movq_M0_wRn(wrd
);
1712 switch ((insn
>> 6) & 3) {
1714 tmp2
= tcg_const_i32(0xff);
1715 tmp3
= tcg_const_i32((insn
& 7) << 3);
1718 tmp2
= tcg_const_i32(0xffff);
1719 tmp3
= tcg_const_i32((insn
& 3) << 4);
1722 tmp2
= tcg_const_i32(0xffffffff);
1723 tmp3
= tcg_const_i32((insn
& 1) << 5);
1729 gen_helper_iwmmxt_insr(cpu_M0
, cpu_M0
, tmp
, tmp2
, tmp3
);
1730 tcg_temp_free(tmp3
);
1731 tcg_temp_free(tmp2
);
1732 tcg_temp_free_i32(tmp
);
1733 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1734 gen_op_iwmmxt_set_mup();
1736 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1737 rd
= (insn
>> 12) & 0xf;
1738 wrd
= (insn
>> 16) & 0xf;
1739 if (rd
== 15 || ((insn
>> 22) & 3) == 3)
1741 gen_op_iwmmxt_movq_M0_wRn(wrd
);
1742 tmp
= tcg_temp_new_i32();
1743 switch ((insn
>> 22) & 3) {
1745 tcg_gen_shri_i64(cpu_M0
, cpu_M0
, (insn
& 7) << 3);
1746 tcg_gen_trunc_i64_i32(tmp
, cpu_M0
);
1748 tcg_gen_ext8s_i32(tmp
, tmp
);
1750 tcg_gen_andi_i32(tmp
, tmp
, 0xff);
1754 tcg_gen_shri_i64(cpu_M0
, cpu_M0
, (insn
& 3) << 4);
1755 tcg_gen_trunc_i64_i32(tmp
, cpu_M0
);
1757 tcg_gen_ext16s_i32(tmp
, tmp
);
1759 tcg_gen_andi_i32(tmp
, tmp
, 0xffff);
1763 tcg_gen_shri_i64(cpu_M0
, cpu_M0
, (insn
& 1) << 5);
1764 tcg_gen_trunc_i64_i32(tmp
, cpu_M0
);
1767 store_reg(s
, rd
, tmp
);
1769 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
1770 if ((insn
& 0x000ff008) != 0x0003f000 || ((insn
>> 22) & 3) == 3)
1772 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCASF
);
1773 switch ((insn
>> 22) & 3) {
1775 tcg_gen_shri_i32(tmp
, tmp
, ((insn
& 7) << 2) + 0);
1778 tcg_gen_shri_i32(tmp
, tmp
, ((insn
& 3) << 3) + 4);
1781 tcg_gen_shri_i32(tmp
, tmp
, ((insn
& 1) << 4) + 12);
1784 tcg_gen_shli_i32(tmp
, tmp
, 28);
1786 tcg_temp_free_i32(tmp
);
1788 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
1789 if (((insn
>> 6) & 3) == 3)
1791 rd
= (insn
>> 12) & 0xf;
1792 wrd
= (insn
>> 16) & 0xf;
1793 tmp
= load_reg(s
, rd
);
1794 switch ((insn
>> 6) & 3) {
1796 gen_helper_iwmmxt_bcstb(cpu_M0
, tmp
);
1799 gen_helper_iwmmxt_bcstw(cpu_M0
, tmp
);
1802 gen_helper_iwmmxt_bcstl(cpu_M0
, tmp
);
1805 tcg_temp_free_i32(tmp
);
1806 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1807 gen_op_iwmmxt_set_mup();
1809 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
1810 if ((insn
& 0x000ff00f) != 0x0003f000 || ((insn
>> 22) & 3) == 3)
1812 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCASF
);
1813 tmp2
= tcg_temp_new_i32();
1814 tcg_gen_mov_i32(tmp2
, tmp
);
1815 switch ((insn
>> 22) & 3) {
1817 for (i
= 0; i
< 7; i
++) {
1818 tcg_gen_shli_i32(tmp2
, tmp2
, 4);
1819 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
1823 for (i
= 0; i
< 3; i
++) {
1824 tcg_gen_shli_i32(tmp2
, tmp2
, 8);
1825 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
1829 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
1830 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
1834 tcg_temp_free_i32(tmp2
);
1835 tcg_temp_free_i32(tmp
);
1837 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1838 wrd
= (insn
>> 12) & 0xf;
1839 rd0
= (insn
>> 16) & 0xf;
1840 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1841 switch ((insn
>> 22) & 3) {
1843 gen_helper_iwmmxt_addcb(cpu_M0
, cpu_M0
);
1846 gen_helper_iwmmxt_addcw(cpu_M0
, cpu_M0
);
1849 gen_helper_iwmmxt_addcl(cpu_M0
, cpu_M0
);
1854 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1855 gen_op_iwmmxt_set_mup();
1857 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
1858 if ((insn
& 0x000ff00f) != 0x0003f000 || ((insn
>> 22) & 3) == 3)
1860 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCASF
);
1861 tmp2
= tcg_temp_new_i32();
1862 tcg_gen_mov_i32(tmp2
, tmp
);
1863 switch ((insn
>> 22) & 3) {
1865 for (i
= 0; i
< 7; i
++) {
1866 tcg_gen_shli_i32(tmp2
, tmp2
, 4);
1867 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
1871 for (i
= 0; i
< 3; i
++) {
1872 tcg_gen_shli_i32(tmp2
, tmp2
, 8);
1873 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
1877 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
1878 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
1882 tcg_temp_free_i32(tmp2
);
1883 tcg_temp_free_i32(tmp
);
1885 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1886 rd
= (insn
>> 12) & 0xf;
1887 rd0
= (insn
>> 16) & 0xf;
1888 if ((insn
& 0xf) != 0 || ((insn
>> 22) & 3) == 3)
1890 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1891 tmp
= tcg_temp_new_i32();
1892 switch ((insn
>> 22) & 3) {
1894 gen_helper_iwmmxt_msbb(tmp
, cpu_M0
);
1897 gen_helper_iwmmxt_msbw(tmp
, cpu_M0
);
1900 gen_helper_iwmmxt_msbl(tmp
, cpu_M0
);
1903 store_reg(s
, rd
, tmp
);
1905 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1906 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1907 wrd
= (insn
>> 12) & 0xf;
1908 rd0
= (insn
>> 16) & 0xf;
1909 rd1
= (insn
>> 0) & 0xf;
1910 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1911 switch ((insn
>> 22) & 3) {
1913 if (insn
& (1 << 21))
1914 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1
);
1916 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1
);
1919 if (insn
& (1 << 21))
1920 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1
);
1922 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1
);
1925 if (insn
& (1 << 21))
1926 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1
);
1928 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1
);
1933 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1934 gen_op_iwmmxt_set_mup();
1935 gen_op_iwmmxt_set_cup();
1937 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
1938 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
1939 wrd
= (insn
>> 12) & 0xf;
1940 rd0
= (insn
>> 16) & 0xf;
1941 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1942 switch ((insn
>> 22) & 3) {
1944 if (insn
& (1 << 21))
1945 gen_op_iwmmxt_unpacklsb_M0();
1947 gen_op_iwmmxt_unpacklub_M0();
1950 if (insn
& (1 << 21))
1951 gen_op_iwmmxt_unpacklsw_M0();
1953 gen_op_iwmmxt_unpackluw_M0();
1956 if (insn
& (1 << 21))
1957 gen_op_iwmmxt_unpacklsl_M0();
1959 gen_op_iwmmxt_unpacklul_M0();
1964 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1965 gen_op_iwmmxt_set_mup();
1966 gen_op_iwmmxt_set_cup();
1968 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
1969 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
1970 wrd
= (insn
>> 12) & 0xf;
1971 rd0
= (insn
>> 16) & 0xf;
1972 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1973 switch ((insn
>> 22) & 3) {
1975 if (insn
& (1 << 21))
1976 gen_op_iwmmxt_unpackhsb_M0();
1978 gen_op_iwmmxt_unpackhub_M0();
1981 if (insn
& (1 << 21))
1982 gen_op_iwmmxt_unpackhsw_M0();
1984 gen_op_iwmmxt_unpackhuw_M0();
1987 if (insn
& (1 << 21))
1988 gen_op_iwmmxt_unpackhsl_M0();
1990 gen_op_iwmmxt_unpackhul_M0();
1995 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1996 gen_op_iwmmxt_set_mup();
1997 gen_op_iwmmxt_set_cup();
1999 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2000 case 0x214: case 0x614: case 0xa14: case 0xe14:
2001 if (((insn
>> 22) & 3) == 0)
2003 wrd
= (insn
>> 12) & 0xf;
2004 rd0
= (insn
>> 16) & 0xf;
2005 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2006 tmp
= tcg_temp_new_i32();
2007 if (gen_iwmmxt_shift(insn
, 0xff, tmp
)) {
2008 tcg_temp_free_i32(tmp
);
2011 switch ((insn
>> 22) & 3) {
2013 gen_helper_iwmmxt_srlw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2016 gen_helper_iwmmxt_srll(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2019 gen_helper_iwmmxt_srlq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2022 tcg_temp_free_i32(tmp
);
2023 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2024 gen_op_iwmmxt_set_mup();
2025 gen_op_iwmmxt_set_cup();
2027 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2028 case 0x014: case 0x414: case 0x814: case 0xc14:
2029 if (((insn
>> 22) & 3) == 0)
2031 wrd
= (insn
>> 12) & 0xf;
2032 rd0
= (insn
>> 16) & 0xf;
2033 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2034 tmp
= tcg_temp_new_i32();
2035 if (gen_iwmmxt_shift(insn
, 0xff, tmp
)) {
2036 tcg_temp_free_i32(tmp
);
2039 switch ((insn
>> 22) & 3) {
2041 gen_helper_iwmmxt_sraw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2044 gen_helper_iwmmxt_sral(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2047 gen_helper_iwmmxt_sraq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2050 tcg_temp_free_i32(tmp
);
2051 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2052 gen_op_iwmmxt_set_mup();
2053 gen_op_iwmmxt_set_cup();
2055 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2056 case 0x114: case 0x514: case 0x914: case 0xd14:
2057 if (((insn
>> 22) & 3) == 0)
2059 wrd
= (insn
>> 12) & 0xf;
2060 rd0
= (insn
>> 16) & 0xf;
2061 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2062 tmp
= tcg_temp_new_i32();
2063 if (gen_iwmmxt_shift(insn
, 0xff, tmp
)) {
2064 tcg_temp_free_i32(tmp
);
2067 switch ((insn
>> 22) & 3) {
2069 gen_helper_iwmmxt_sllw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2072 gen_helper_iwmmxt_slll(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2075 gen_helper_iwmmxt_sllq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2078 tcg_temp_free_i32(tmp
);
2079 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2080 gen_op_iwmmxt_set_mup();
2081 gen_op_iwmmxt_set_cup();
2083 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2084 case 0x314: case 0x714: case 0xb14: case 0xf14:
2085 if (((insn
>> 22) & 3) == 0)
2087 wrd
= (insn
>> 12) & 0xf;
2088 rd0
= (insn
>> 16) & 0xf;
2089 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2090 tmp
= tcg_temp_new_i32();
2091 switch ((insn
>> 22) & 3) {
2093 if (gen_iwmmxt_shift(insn
, 0xf, tmp
)) {
2094 tcg_temp_free_i32(tmp
);
2097 gen_helper_iwmmxt_rorw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2100 if (gen_iwmmxt_shift(insn
, 0x1f, tmp
)) {
2101 tcg_temp_free_i32(tmp
);
2104 gen_helper_iwmmxt_rorl(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2107 if (gen_iwmmxt_shift(insn
, 0x3f, tmp
)) {
2108 tcg_temp_free_i32(tmp
);
2111 gen_helper_iwmmxt_rorq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2114 tcg_temp_free_i32(tmp
);
2115 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2116 gen_op_iwmmxt_set_mup();
2117 gen_op_iwmmxt_set_cup();
2119 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2120 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2121 wrd
= (insn
>> 12) & 0xf;
2122 rd0
= (insn
>> 16) & 0xf;
2123 rd1
= (insn
>> 0) & 0xf;
2124 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2125 switch ((insn
>> 22) & 3) {
2127 if (insn
& (1 << 21))
2128 gen_op_iwmmxt_minsb_M0_wRn(rd1
);
2130 gen_op_iwmmxt_minub_M0_wRn(rd1
);
2133 if (insn
& (1 << 21))
2134 gen_op_iwmmxt_minsw_M0_wRn(rd1
);
2136 gen_op_iwmmxt_minuw_M0_wRn(rd1
);
2139 if (insn
& (1 << 21))
2140 gen_op_iwmmxt_minsl_M0_wRn(rd1
);
2142 gen_op_iwmmxt_minul_M0_wRn(rd1
);
2147 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2148 gen_op_iwmmxt_set_mup();
2150 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2151 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2152 wrd
= (insn
>> 12) & 0xf;
2153 rd0
= (insn
>> 16) & 0xf;
2154 rd1
= (insn
>> 0) & 0xf;
2155 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2156 switch ((insn
>> 22) & 3) {
2158 if (insn
& (1 << 21))
2159 gen_op_iwmmxt_maxsb_M0_wRn(rd1
);
2161 gen_op_iwmmxt_maxub_M0_wRn(rd1
);
2164 if (insn
& (1 << 21))
2165 gen_op_iwmmxt_maxsw_M0_wRn(rd1
);
2167 gen_op_iwmmxt_maxuw_M0_wRn(rd1
);
2170 if (insn
& (1 << 21))
2171 gen_op_iwmmxt_maxsl_M0_wRn(rd1
);
2173 gen_op_iwmmxt_maxul_M0_wRn(rd1
);
2178 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2179 gen_op_iwmmxt_set_mup();
2181 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2182 case 0x402: case 0x502: case 0x602: case 0x702:
2183 wrd
= (insn
>> 12) & 0xf;
2184 rd0
= (insn
>> 16) & 0xf;
2185 rd1
= (insn
>> 0) & 0xf;
2186 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2187 tmp
= tcg_const_i32((insn
>> 20) & 3);
2188 iwmmxt_load_reg(cpu_V1
, rd1
);
2189 gen_helper_iwmmxt_align(cpu_M0
, cpu_M0
, cpu_V1
, tmp
);
2191 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2192 gen_op_iwmmxt_set_mup();
2194 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2195 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2196 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2197 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2198 wrd
= (insn
>> 12) & 0xf;
2199 rd0
= (insn
>> 16) & 0xf;
2200 rd1
= (insn
>> 0) & 0xf;
2201 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2202 switch ((insn
>> 20) & 0xf) {
2204 gen_op_iwmmxt_subnb_M0_wRn(rd1
);
2207 gen_op_iwmmxt_subub_M0_wRn(rd1
);
2210 gen_op_iwmmxt_subsb_M0_wRn(rd1
);
2213 gen_op_iwmmxt_subnw_M0_wRn(rd1
);
2216 gen_op_iwmmxt_subuw_M0_wRn(rd1
);
2219 gen_op_iwmmxt_subsw_M0_wRn(rd1
);
2222 gen_op_iwmmxt_subnl_M0_wRn(rd1
);
2225 gen_op_iwmmxt_subul_M0_wRn(rd1
);
2228 gen_op_iwmmxt_subsl_M0_wRn(rd1
);
2233 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2234 gen_op_iwmmxt_set_mup();
2235 gen_op_iwmmxt_set_cup();
2237 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2238 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2239 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2240 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2241 wrd
= (insn
>> 12) & 0xf;
2242 rd0
= (insn
>> 16) & 0xf;
2243 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2244 tmp
= tcg_const_i32(((insn
>> 16) & 0xf0) | (insn
& 0x0f));
2245 gen_helper_iwmmxt_shufh(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2247 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2248 gen_op_iwmmxt_set_mup();
2249 gen_op_iwmmxt_set_cup();
2251 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2252 case 0x418: case 0x518: case 0x618: case 0x718:
2253 case 0x818: case 0x918: case 0xa18: case 0xb18:
2254 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2255 wrd
= (insn
>> 12) & 0xf;
2256 rd0
= (insn
>> 16) & 0xf;
2257 rd1
= (insn
>> 0) & 0xf;
2258 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2259 switch ((insn
>> 20) & 0xf) {
2261 gen_op_iwmmxt_addnb_M0_wRn(rd1
);
2264 gen_op_iwmmxt_addub_M0_wRn(rd1
);
2267 gen_op_iwmmxt_addsb_M0_wRn(rd1
);
2270 gen_op_iwmmxt_addnw_M0_wRn(rd1
);
2273 gen_op_iwmmxt_adduw_M0_wRn(rd1
);
2276 gen_op_iwmmxt_addsw_M0_wRn(rd1
);
2279 gen_op_iwmmxt_addnl_M0_wRn(rd1
);
2282 gen_op_iwmmxt_addul_M0_wRn(rd1
);
2285 gen_op_iwmmxt_addsl_M0_wRn(rd1
);
2290 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2291 gen_op_iwmmxt_set_mup();
2292 gen_op_iwmmxt_set_cup();
2294 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2295 case 0x408: case 0x508: case 0x608: case 0x708:
2296 case 0x808: case 0x908: case 0xa08: case 0xb08:
2297 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2298 if (!(insn
& (1 << 20)) || ((insn
>> 22) & 3) == 0)
2300 wrd
= (insn
>> 12) & 0xf;
2301 rd0
= (insn
>> 16) & 0xf;
2302 rd1
= (insn
>> 0) & 0xf;
2303 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2304 switch ((insn
>> 22) & 3) {
2306 if (insn
& (1 << 21))
2307 gen_op_iwmmxt_packsw_M0_wRn(rd1
);
2309 gen_op_iwmmxt_packuw_M0_wRn(rd1
);
2312 if (insn
& (1 << 21))
2313 gen_op_iwmmxt_packsl_M0_wRn(rd1
);
2315 gen_op_iwmmxt_packul_M0_wRn(rd1
);
2318 if (insn
& (1 << 21))
2319 gen_op_iwmmxt_packsq_M0_wRn(rd1
);
2321 gen_op_iwmmxt_packuq_M0_wRn(rd1
);
2324 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2325 gen_op_iwmmxt_set_mup();
2326 gen_op_iwmmxt_set_cup();
2328 case 0x201: case 0x203: case 0x205: case 0x207:
2329 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2330 case 0x211: case 0x213: case 0x215: case 0x217:
2331 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2332 wrd
= (insn
>> 5) & 0xf;
2333 rd0
= (insn
>> 12) & 0xf;
2334 rd1
= (insn
>> 0) & 0xf;
2335 if (rd0
== 0xf || rd1
== 0xf)
2337 gen_op_iwmmxt_movq_M0_wRn(wrd
);
2338 tmp
= load_reg(s
, rd0
);
2339 tmp2
= load_reg(s
, rd1
);
2340 switch ((insn
>> 16) & 0xf) {
2341 case 0x0: /* TMIA */
2342 gen_helper_iwmmxt_muladdsl(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2344 case 0x8: /* TMIAPH */
2345 gen_helper_iwmmxt_muladdsw(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2347 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2348 if (insn
& (1 << 16))
2349 tcg_gen_shri_i32(tmp
, tmp
, 16);
2350 if (insn
& (1 << 17))
2351 tcg_gen_shri_i32(tmp2
, tmp2
, 16);
2352 gen_helper_iwmmxt_muladdswl(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2355 tcg_temp_free_i32(tmp2
);
2356 tcg_temp_free_i32(tmp
);
2359 tcg_temp_free_i32(tmp2
);
2360 tcg_temp_free_i32(tmp
);
2361 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2362 gen_op_iwmmxt_set_mup();
/* Disassemble an XScale DSP instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
static int disas_dsp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
{
    int acc, rd0, rd1, rdhi, rdlo;
2378 if ((insn
& 0x0ff00f10) == 0x0e200010) {
2379 /* Multiply with Internal Accumulate Format */
2380 rd0
= (insn
>> 12) & 0xf;
2382 acc
= (insn
>> 5) & 7;
2387 tmp
= load_reg(s
, rd0
);
2388 tmp2
= load_reg(s
, rd1
);
2389 switch ((insn
>> 16) & 0xf) {
2391 gen_helper_iwmmxt_muladdsl(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2393 case 0x8: /* MIAPH */
2394 gen_helper_iwmmxt_muladdsw(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2396 case 0xc: /* MIABB */
2397 case 0xd: /* MIABT */
2398 case 0xe: /* MIATB */
2399 case 0xf: /* MIATT */
2400 if (insn
& (1 << 16))
2401 tcg_gen_shri_i32(tmp
, tmp
, 16);
2402 if (insn
& (1 << 17))
2403 tcg_gen_shri_i32(tmp2
, tmp2
, 16);
2404 gen_helper_iwmmxt_muladdswl(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2409 tcg_temp_free_i32(tmp2
);
2410 tcg_temp_free_i32(tmp
);
2412 gen_op_iwmmxt_movq_wRn_M0(acc
);
2416 if ((insn
& 0x0fe00ff8) == 0x0c400000) {
2417 /* Internal Accumulator Access Format */
2418 rdhi
= (insn
>> 16) & 0xf;
2419 rdlo
= (insn
>> 12) & 0xf;
2425 if (insn
& ARM_CP_RW_BIT
) { /* MRA */
2426 iwmmxt_load_reg(cpu_V0
, acc
);
2427 tcg_gen_trunc_i64_i32(cpu_R
[rdlo
], cpu_V0
);
2428 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
2429 tcg_gen_trunc_i64_i32(cpu_R
[rdhi
], cpu_V0
);
2430 tcg_gen_andi_i32(cpu_R
[rdhi
], cpu_R
[rdhi
], (1 << (40 - 32)) - 1);
2432 tcg_gen_concat_i32_i64(cpu_V0
, cpu_R
[rdlo
], cpu_R
[rdhi
]);
2433 iwmmxt_store_reg(cpu_V0
, acc
);
/* Disassemble system coprocessor instruction.  Return nonzero if
   instruction is not defined.  */
static int disas_cp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
{
    TCGv tmp, tmp2;
    uint32_t rd = (insn >> 12) & 0xf;
    uint32_t cp = (insn >> 8) & 0xf;

    if (IS_USER(s)) {
        return 1;
    }

    if (insn & ARM_CP_RW_BIT) {
        if (!env->cp[cp].cp_read)
            return 1;
        gen_set_pc_im(s->pc);
        tmp = tcg_temp_new_i32();
        tmp2 = tcg_const_i32(insn);
        gen_helper_get_cp(tmp, cpu_env, tmp2);
        tcg_temp_free(tmp2);
        store_reg(s, rd, tmp);
    } else {
        if (!env->cp[cp].cp_write)
            return 1;
        gen_set_pc_im(s->pc);
        tmp = load_reg(s, rd);
        tmp2 = tcg_const_i32(insn);
        gen_helper_set_cp(cpu_env, tmp2, tmp);
        tcg_temp_free(tmp2);
        tcg_temp_free_i32(tmp);
    }
    return 0;
}
static int cp15_user_ok(CPUARMState *env, uint32_t insn)
{
    int cpn = (insn >> 16) & 0xf;
    int cpm = insn & 0xf;
    int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);

    if (arm_feature(env, ARM_FEATURE_V7) && cpn == 9) {
        /* Performance monitor registers fall into three categories:
         *  (a) always UNDEF in usermode
         *  (b) UNDEF only if PMUSERENR.EN is 0
         *  (c) always read OK and UNDEF on write (PMUSERENR only)
         */
        if ((cpm == 12 && (op < 6)) ||
            (cpm == 13 && (op < 3))) {
            return env->cp15.c9_pmuserenr;
        } else if (cpm == 14 && op == 0 && (insn & ARM_CP_RW_BIT)) {
            /* PMUSERENR, read only */
            return 1;
        }
        return 0;
    }

    if (cpn == 13 && cpm == 0) {
        /* TLS register.  */
        if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
            return 1;
    }
    return 0;
}
static int cp15_tls_load_store(CPUARMState *env, DisasContext *s, uint32_t insn, uint32_t rd)
{
    int cpn = (insn >> 16) & 0xf;
    int cpm = insn & 0xf;
    int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
    TCGv tmp;

    if (!arm_feature(env, ARM_FEATURE_V6K))
        return 0;

    if (!(cpn == 13 && cpm == 0))
        return 0;

    if (insn & ARM_CP_RW_BIT) {
        switch (op) {
        case 2:
            tmp = load_cpu_field(cp15.c13_tls1);
            break;
        case 3:
            tmp = load_cpu_field(cp15.c13_tls2);
            break;
        case 4:
            tmp = load_cpu_field(cp15.c13_tls3);
            break;
        default:
            return 0;
        }
        store_reg(s, rd, tmp);
    } else {
        tmp = load_reg(s, rd);
        switch (op) {
        case 2:
            store_cpu_field(tmp, cp15.c13_tls1);
            break;
        case 3:
            store_cpu_field(tmp, cp15.c13_tls2);
            break;
        case 4:
            store_cpu_field(tmp, cp15.c13_tls3);
            break;
        default:
            tcg_temp_free_i32(tmp);
            return 0;
        }
    }
    return 1;
}
/* Disassemble system coprocessor (cp15) instruction.  Return nonzero if
   instruction is not defined.  */
static int disas_cp15_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
{
    uint32_t rd;
    TCGv tmp, tmp2;

    /* M profile cores use memory mapped registers instead of cp15.  */
    if (arm_feature(env, ARM_FEATURE_M))
        return 1;

    if ((insn & (1 << 25)) == 0) {
        if (insn & (1 << 20)) {
            /* mrrc */
            return 1;
        }
        /* mcrr.  Used for block cache operations, so implement as no-op.  */
        return 0;
    }
    if ((insn & (1 << 4)) == 0) {
        /* cdp */
        return 1;
    }
    /* We special case a number of cp15 instructions which were used
     * for things which are real instructions in ARMv7. This allows
     * them to work in linux-user mode which doesn't provide functional
     * get_cp15/set_cp15 helpers, and is more efficient anyway.
     */
    switch ((insn & 0x0fff0fff)) {
    case 0x0e070f90:
        /* 0,c7,c0,4: Standard v6 WFI (also used in some pre-v6 cores).
         * In v7, this must NOP.
         */
        if (IS_USER(s)) {
            return 1;
        }
        if (!arm_feature(env, ARM_FEATURE_V7)) {
            /* Wait for interrupt.  */
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_WFI;
        }
        return 0;
    case 0x0e070f58:
        /* 0,c7,c8,2: Not all pre-v6 cores implemented this WFI,
         * so this is slightly over-broad.
         */
        if (!IS_USER(s) && !arm_feature(env, ARM_FEATURE_V6)) {
            /* Wait for interrupt.  */
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_WFI;
            return 0;
        }
        /* Otherwise continue to handle via helper function.
         * In particular, on v7 and some v6 cores this is one of
         * the VA-PA registers.
         */
        break;
    case 0x0e070f3d:
        /* 0,c7,c13,1: prefetch-by-MVA in v6, NOP in v7 */
        if (arm_feature(env, ARM_FEATURE_V6)) {
            return IS_USER(s) ? 1 : 0;
        }
        break;
    case 0x0e070f95: /* 0,c7,c5,4 : ISB */
    case 0x0e070f9a: /* 0,c7,c10,4: DSB */
    case 0x0e070fba: /* 0,c7,c10,5: DMB */
        /* Barriers in both v6 and v7 */
        if (arm_feature(env, ARM_FEATURE_V6)) {
            return 0;
        }
        break;
    default:
        break;
    }

    if (IS_USER(s) && !cp15_user_ok(env, insn)) {
        return 1;
    }

    rd = (insn >> 12) & 0xf;

    if (cp15_tls_load_store(env, s, insn, rd))
        return 0;

    tmp2 = tcg_const_i32(insn);
    if (insn & ARM_CP_RW_BIT) {
        tmp = tcg_temp_new_i32();
        gen_helper_get_cp15(tmp, cpu_env, tmp2);
        /* If the destination register is r15 then sets condition codes.  */
        if (rd != 15)
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else {
        tmp = load_reg(s, rd);
        gen_helper_set_cp15(cpu_env, tmp2, tmp);
        tcg_temp_free_i32(tmp);
        /* Normally we would always end the TB here, but Linux
         * arch/arm/mach-pxa/sleep.S expects two instructions following
         * an MMU enable to execute from cache.  Imitate this behaviour.  */
        if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
                (insn & 0x0fff0fff) != 0x0e010f10)
            gen_lookup_tb(s);
    }
    tcg_temp_free_i32(tmp2);
    return 0;
}
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_feature(env, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    } \
} while (0)

#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16,  7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
#define VFP_SREG_M(insn) VFP_SREG(insn,  0,  5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
/* Move between integer and VFP cores.  */
static TCGv gen_vfp_mrs(void)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_mov_i32(tmp, cpu_F0s);
    return tmp;
}

static void gen_vfp_msr(TCGv tmp)
{
    tcg_gen_mov_i32(cpu_F0s, tmp);
    tcg_temp_free_i32(tmp);
}
static void gen_neon_dup_u8(TCGv var, int shift)
{
    TCGv tmp = tcg_temp_new_i32();
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_ext8u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 8);
    tcg_gen_or_i32(var, var, tmp);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_neon_dup_low16(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_neon_dup_high16(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(var, var, 0xffff0000);
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
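
/* Reference model, for documentation only (names are made up and the
 * translator does not call these): the host-value equivalent of the
 * gen_neon_dup_* helpers above.  An 8-bit or 16-bit element is copied
 * into every lane of the 32-bit word.
 */
static inline uint32_t neon_dup_u8_ref(uint32_t val, int shift)
{
    uint8_t byte = (val >> shift) & 0xff;
    return byte * 0x01010101u;             /* replicate into all four bytes */
}

static inline uint32_t neon_dup_low16_ref(uint32_t val)
{
    uint16_t half = val & 0xffff;
    return half | ((uint32_t)half << 16);  /* replicate into both halfwords */
}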
static TCGv gen_load_and_replicate(DisasContext *s, TCGv addr, int size)
{
    /* Load a single Neon element and replicate into a 32 bit TCG reg */
    TCGv tmp;
    switch (size) {
    case 0:
        tmp = gen_ld8u(addr, IS_USER(s));
        gen_neon_dup_u8(tmp, 0);
        break;
    case 1:
        tmp = gen_ld16u(addr, IS_USER(s));
        gen_neon_dup_low16(tmp);
        break;
    case 2:
        tmp = gen_ld32(addr, IS_USER(s));
        break;
    default: /* Avoid compiler warnings.  */
        abort();
    }
    return tmp;
}
/* Disassemble a VFP instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction).  */
static int disas_vfp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
{
    uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2758 if (!arm_feature(env
, ARM_FEATURE_VFP
))
2761 if (!s
->vfp_enabled
) {
2762 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
2763 if ((insn
& 0x0fe00fff) != 0x0ee00a10)
2765 rn
= (insn
>> 16) & 0xf;
2766 if (rn
!= ARM_VFP_FPSID
&& rn
!= ARM_VFP_FPEXC
2767 && rn
!= ARM_VFP_MVFR1
&& rn
!= ARM_VFP_MVFR0
)
2770 dp
= ((insn
& 0xf00) == 0xb00);
2771 switch ((insn
>> 24) & 0xf) {
2773 if (insn
& (1 << 4)) {
2774 /* single register transfer */
2775 rd
= (insn
>> 12) & 0xf;
2780 VFP_DREG_N(rn
, insn
);
2783 if (insn
& 0x00c00060
2784 && !arm_feature(env
, ARM_FEATURE_NEON
))
2787 pass
= (insn
>> 21) & 1;
2788 if (insn
& (1 << 22)) {
2790 offset
= ((insn
>> 5) & 3) * 8;
2791 } else if (insn
& (1 << 5)) {
2793 offset
= (insn
& (1 << 6)) ? 16 : 0;
2798 if (insn
& ARM_CP_RW_BIT
) {
2800 tmp
= neon_load_reg(rn
, pass
);
2804 tcg_gen_shri_i32(tmp
, tmp
, offset
);
2805 if (insn
& (1 << 23))
2811 if (insn
& (1 << 23)) {
2813 tcg_gen_shri_i32(tmp
, tmp
, 16);
2819 tcg_gen_sari_i32(tmp
, tmp
, 16);
2828 store_reg(s
, rd
, tmp
);
2831 tmp
= load_reg(s
, rd
);
2832 if (insn
& (1 << 23)) {
2835 gen_neon_dup_u8(tmp
, 0);
2836 } else if (size
== 1) {
2837 gen_neon_dup_low16(tmp
);
2839 for (n
= 0; n
<= pass
* 2; n
++) {
2840 tmp2
= tcg_temp_new_i32();
2841 tcg_gen_mov_i32(tmp2
, tmp
);
2842 neon_store_reg(rn
, n
, tmp2
);
2844 neon_store_reg(rn
, n
, tmp
);
2849 tmp2
= neon_load_reg(rn
, pass
);
2850 gen_bfi(tmp
, tmp2
, tmp
, offset
, 0xff);
2851 tcg_temp_free_i32(tmp2
);
2854 tmp2
= neon_load_reg(rn
, pass
);
2855 gen_bfi(tmp
, tmp2
, tmp
, offset
, 0xffff);
2856 tcg_temp_free_i32(tmp2
);
2861 neon_store_reg(rn
, pass
, tmp
);
2865 if ((insn
& 0x6f) != 0x00)
2867 rn
= VFP_SREG_N(insn
);
2868 if (insn
& ARM_CP_RW_BIT
) {
2870 if (insn
& (1 << 21)) {
2871 /* system register */
2876 /* VFP2 allows access to FSID from userspace.
2877 VFP3 restricts all id registers to privileged
2880 && arm_feature(env
, ARM_FEATURE_VFP3
))
2882 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
2887 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
2889 case ARM_VFP_FPINST
:
2890 case ARM_VFP_FPINST2
:
2891 /* Not present in VFP3. */
2893 || arm_feature(env
, ARM_FEATURE_VFP3
))
2895 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
2899 tmp
= load_cpu_field(vfp
.xregs
[ARM_VFP_FPSCR
]);
2900 tcg_gen_andi_i32(tmp
, tmp
, 0xf0000000);
2902 tmp
= tcg_temp_new_i32();
2903 gen_helper_vfp_get_fpscr(tmp
, cpu_env
);
2909 || !arm_feature(env
, ARM_FEATURE_VFP3
))
2911 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
2917 gen_mov_F0_vreg(0, rn
);
2918 tmp
= gen_vfp_mrs();
2921 /* Set the 4 flag bits in the CPSR. */
2923 tcg_temp_free_i32(tmp
);
2925 store_reg(s
, rd
, tmp
);
2929 tmp
= load_reg(s
, rd
);
2930 if (insn
& (1 << 21)) {
2932 /* system register */
2937 /* Writes are ignored. */
2940 gen_helper_vfp_set_fpscr(cpu_env
, tmp
);
2941 tcg_temp_free_i32(tmp
);
2947 /* TODO: VFP subarchitecture support.
2948 * For now, keep the EN bit only */
2949 tcg_gen_andi_i32(tmp
, tmp
, 1 << 30);
2950 store_cpu_field(tmp
, vfp
.xregs
[rn
]);
2953 case ARM_VFP_FPINST
:
2954 case ARM_VFP_FPINST2
:
2955 store_cpu_field(tmp
, vfp
.xregs
[rn
]);
2962 gen_mov_vreg_F0(0, rn
);
2967 /* data processing */
2968 /* The opcode is in bits 23, 21, 20 and 6. */
2969 op
= ((insn
>> 20) & 8) | ((insn
>> 19) & 6) | ((insn
>> 6) & 1);
2973 rn
= ((insn
>> 15) & 0x1e) | ((insn
>> 7) & 1);
2975 /* rn is register number */
2976 VFP_DREG_N(rn
, insn
);
2979 if (op
== 15 && (rn
== 15 || ((rn
& 0x1c) == 0x18))) {
2980 /* Integer or single precision destination. */
2981 rd
= VFP_SREG_D(insn
);
2983 VFP_DREG_D(rd
, insn
);
2986 (((rn
& 0x1c) == 0x10) || ((rn
& 0x14) == 0x14))) {
2987 /* VCVT from int is always from S reg regardless of dp bit.
2988 * VCVT with immediate frac_bits has same format as SREG_M
2990 rm
= VFP_SREG_M(insn
);
2992 VFP_DREG_M(rm
, insn
);
2995 rn
= VFP_SREG_N(insn
);
2996 if (op
== 15 && rn
== 15) {
2997 /* Double precision destination. */
2998 VFP_DREG_D(rd
, insn
);
3000 rd
= VFP_SREG_D(insn
);
3002 /* NB that we implicitly rely on the encoding for the frac_bits
3003 * in VCVT of fixed to float being the same as that of an SREG_M
3005 rm
= VFP_SREG_M(insn
);
3008 veclen
= s
->vec_len
;
3009 if (op
== 15 && rn
> 3)
3012 /* Shut up compiler warnings. */
3023 /* Figure out what type of vector operation this is. */
3024 if ((rd
& bank_mask
) == 0) {
3029 delta_d
= (s
->vec_stride
>> 1) + 1;
3031 delta_d
= s
->vec_stride
+ 1;
3033 if ((rm
& bank_mask
) == 0) {
3034 /* mixed scalar/vector */
3043 /* Load the initial operands. */
3048 /* Integer source */
3049 gen_mov_F0_vreg(0, rm
);
3054 gen_mov_F0_vreg(dp
, rd
);
3055 gen_mov_F1_vreg(dp
, rm
);
3059 /* Compare with zero */
3060 gen_mov_F0_vreg(dp
, rd
);
3071 /* Source and destination the same. */
3072 gen_mov_F0_vreg(dp
, rd
);
3078 /* VCVTB, VCVTT: only present with the halfprec extension,
3079 * UNPREDICTABLE if bit 8 is set (we choose to UNDEF)
3081 if (dp
|| !arm_feature(env
, ARM_FEATURE_VFP_FP16
)) {
3084 /* Otherwise fall through */
3086 /* One source operand. */
3087 gen_mov_F0_vreg(dp
, rm
);
3091 /* Two source operands. */
3092 gen_mov_F0_vreg(dp
, rn
);
3093 gen_mov_F1_vreg(dp
, rm
);
3097 /* Perform the calculation. */
3099 case 0: /* VMLA: fd + (fn * fm) */
3100 /* Note that order of inputs to the add matters for NaNs */
3102 gen_mov_F0_vreg(dp
, rd
);
3105 case 1: /* VMLS: fd + -(fn * fm) */
3108 gen_mov_F0_vreg(dp
, rd
);
3111 case 2: /* VNMLS: -fd + (fn * fm) */
3112 /* Note that it isn't valid to replace (-A + B) with (B - A)
3113 * or similar plausible looking simplifications
3114 * because this will give wrong results for NaNs.
3117 gen_mov_F0_vreg(dp
, rd
);
3121 case 3: /* VNMLA: -fd + -(fn * fm) */
3124 gen_mov_F0_vreg(dp
, rd
);
3128 case 4: /* mul: fn * fm */
3131 case 5: /* nmul: -(fn * fm) */
3135 case 6: /* add: fn + fm */
3138 case 7: /* sub: fn - fm */
3141 case 8: /* div: fn / fm */
3144 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
3145 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
3146 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
3147 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
3148 /* These are fused multiply-add, and must be done as one
3149 * floating point operation with no rounding between the
3150 * multiplication and addition steps.
3151 * NB that doing the negations here as separate steps is
3152 * correct : an input NaN should come out with its sign bit
3153 * flipped if it is a negated-input.
3155 if (!arm_feature(env
, ARM_FEATURE_VFP4
)) {
3163 gen_helper_vfp_negd(cpu_F0d
, cpu_F0d
);
3165 frd
= tcg_temp_new_i64();
3166 tcg_gen_ld_f64(frd
, cpu_env
, vfp_reg_offset(dp
, rd
));
3169 gen_helper_vfp_negd(frd
, frd
);
3171 fpst
= get_fpstatus_ptr(0);
3172 gen_helper_vfp_muladdd(cpu_F0d
, cpu_F0d
,
3173 cpu_F1d
, frd
, fpst
);
3174 tcg_temp_free_ptr(fpst
);
3175 tcg_temp_free_i64(frd
);
3181 gen_helper_vfp_negs(cpu_F0s
, cpu_F0s
);
3183 frd
= tcg_temp_new_i32();
3184 tcg_gen_ld_f32(frd
, cpu_env
, vfp_reg_offset(dp
, rd
));
3186 gen_helper_vfp_negs(frd
, frd
);
3188 fpst
= get_fpstatus_ptr(0);
3189 gen_helper_vfp_muladds(cpu_F0s
, cpu_F0s
,
3190 cpu_F1s
, frd
, fpst
);
3191 tcg_temp_free_ptr(fpst
);
3192 tcg_temp_free_i32(frd
);
3195 case 14: /* fconst */
3196 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3199 n
= (insn
<< 12) & 0x80000000;
3200 i
= ((insn
>> 12) & 0x70) | (insn
& 0xf);
3207 tcg_gen_movi_i64(cpu_F0d
, ((uint64_t)n
) << 32);
3214 tcg_gen_movi_i32(cpu_F0s
, n
);
3217 case 15: /* extension space */
3231 case 4: /* vcvtb.f32.f16 */
3232 tmp
= gen_vfp_mrs();
3233 tcg_gen_ext16u_i32(tmp
, tmp
);
3234 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s
, tmp
, cpu_env
);
3235 tcg_temp_free_i32(tmp
);
3237 case 5: /* vcvtt.f32.f16 */
3238 tmp
= gen_vfp_mrs();
3239 tcg_gen_shri_i32(tmp
, tmp
, 16);
3240 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s
, tmp
, cpu_env
);
3241 tcg_temp_free_i32(tmp
);
3243 case 6: /* vcvtb.f16.f32 */
3244 tmp
= tcg_temp_new_i32();
3245 gen_helper_vfp_fcvt_f32_to_f16(tmp
, cpu_F0s
, cpu_env
);
3246 gen_mov_F0_vreg(0, rd
);
3247 tmp2
= gen_vfp_mrs();
3248 tcg_gen_andi_i32(tmp2
, tmp2
, 0xffff0000);
3249 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
3250 tcg_temp_free_i32(tmp2
);
3253 case 7: /* vcvtt.f16.f32 */
3254 tmp
= tcg_temp_new_i32();
3255 gen_helper_vfp_fcvt_f32_to_f16(tmp
, cpu_F0s
, cpu_env
);
3256 tcg_gen_shli_i32(tmp
, tmp
, 16);
3257 gen_mov_F0_vreg(0, rd
);
3258 tmp2
= gen_vfp_mrs();
3259 tcg_gen_ext16u_i32(tmp2
, tmp2
);
3260 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
3261 tcg_temp_free_i32(tmp2
);
3273 case 11: /* cmpez */
3277 case 15: /* single<->double conversion */
3279 gen_helper_vfp_fcvtsd(cpu_F0s
, cpu_F0d
, cpu_env
);
3281 gen_helper_vfp_fcvtds(cpu_F0d
, cpu_F0s
, cpu_env
);
3283 case 16: /* fuito */
3284 gen_vfp_uito(dp
, 0);
3286 case 17: /* fsito */
3287 gen_vfp_sito(dp
, 0);
3289 case 20: /* fshto */
3290 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3292 gen_vfp_shto(dp
, 16 - rm
, 0);
3294 case 21: /* fslto */
3295 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3297 gen_vfp_slto(dp
, 32 - rm
, 0);
3299 case 22: /* fuhto */
3300 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3302 gen_vfp_uhto(dp
, 16 - rm
, 0);
3304 case 23: /* fulto */
3305 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3307 gen_vfp_ulto(dp
, 32 - rm
, 0);
3309 case 24: /* ftoui */
3310 gen_vfp_toui(dp
, 0);
3312 case 25: /* ftouiz */
3313 gen_vfp_touiz(dp
, 0);
3315 case 26: /* ftosi */
3316 gen_vfp_tosi(dp
, 0);
3318 case 27: /* ftosiz */
3319 gen_vfp_tosiz(dp
, 0);
3321 case 28: /* ftosh */
3322 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3324 gen_vfp_tosh(dp
, 16 - rm
, 0);
3326 case 29: /* ftosl */
3327 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3329 gen_vfp_tosl(dp
, 32 - rm
, 0);
3331 case 30: /* ftouh */
3332 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3334 gen_vfp_touh(dp
, 16 - rm
, 0);
3336 case 31: /* ftoul */
3337 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3339 gen_vfp_toul(dp
, 32 - rm
, 0);
3341 default: /* undefined */
3345 default: /* undefined */
3349 /* Write back the result. */
3350 if (op
== 15 && (rn
>= 8 && rn
<= 11))
3351 ; /* Comparison, do nothing. */
3352 else if (op
== 15 && dp
&& ((rn
& 0x1c) == 0x18))
3353 /* VCVT double to int: always integer result. */
3354 gen_mov_vreg_F0(0, rd
);
3355 else if (op
== 15 && rn
== 15)
3357 gen_mov_vreg_F0(!dp
, rd
);
3359 gen_mov_vreg_F0(dp
, rd
);
3361 /* break out of the loop if we have finished */
3365 if (op
== 15 && delta_m
== 0) {
3366 /* single source one-many */
3368 rd
= ((rd
+ delta_d
) & (bank_mask
- 1))
3370 gen_mov_vreg_F0(dp
, rd
);
3374 /* Setup the next operands. */
3376 rd
= ((rd
+ delta_d
) & (bank_mask
- 1))
3380 /* One source operand. */
3381 rm
= ((rm
+ delta_m
) & (bank_mask
- 1))
3383 gen_mov_F0_vreg(dp
, rm
);
3385 /* Two source operands. */
3386 rn
= ((rn
+ delta_d
) & (bank_mask
- 1))
3388 gen_mov_F0_vreg(dp
, rn
);
3390 rm
= ((rm
+ delta_m
) & (bank_mask
- 1))
3392 gen_mov_F1_vreg(dp
, rm
);
3400 if ((insn
& 0x03e00000) == 0x00400000) {
3401 /* two-register transfer */
3402 rn
= (insn
>> 16) & 0xf;
3403 rd
= (insn
>> 12) & 0xf;
3405 VFP_DREG_M(rm
, insn
);
3407 rm
= VFP_SREG_M(insn
);
3410 if (insn
& ARM_CP_RW_BIT
) {
3413 gen_mov_F0_vreg(0, rm
* 2);
3414 tmp
= gen_vfp_mrs();
3415 store_reg(s
, rd
, tmp
);
3416 gen_mov_F0_vreg(0, rm
* 2 + 1);
3417 tmp
= gen_vfp_mrs();
3418 store_reg(s
, rn
, tmp
);
3420 gen_mov_F0_vreg(0, rm
);
3421 tmp
= gen_vfp_mrs();
3422 store_reg(s
, rd
, tmp
);
3423 gen_mov_F0_vreg(0, rm
+ 1);
3424 tmp
= gen_vfp_mrs();
3425 store_reg(s
, rn
, tmp
);
3430 tmp
= load_reg(s
, rd
);
3432 gen_mov_vreg_F0(0, rm
* 2);
3433 tmp
= load_reg(s
, rn
);
3435 gen_mov_vreg_F0(0, rm
* 2 + 1);
3437 tmp
= load_reg(s
, rd
);
3439 gen_mov_vreg_F0(0, rm
);
3440 tmp
= load_reg(s
, rn
);
3442 gen_mov_vreg_F0(0, rm
+ 1);
3447 rn
= (insn
>> 16) & 0xf;
3449 VFP_DREG_D(rd
, insn
);
3451 rd
= VFP_SREG_D(insn
);
3452 if ((insn
& 0x01200000) == 0x01000000) {
3453 /* Single load/store */
3454 offset
= (insn
& 0xff) << 2;
3455 if ((insn
& (1 << 23)) == 0)
3457 if (s
->thumb
&& rn
== 15) {
3458 /* This is actually UNPREDICTABLE */
3459 addr
= tcg_temp_new_i32();
3460 tcg_gen_movi_i32(addr
, s
->pc
& ~2);
3462 addr
= load_reg(s
, rn
);
3464 tcg_gen_addi_i32(addr
, addr
, offset
);
3465 if (insn
& (1 << 20)) {
3466 gen_vfp_ld(s
, dp
, addr
);
3467 gen_mov_vreg_F0(dp
, rd
);
3469 gen_mov_F0_vreg(dp
, rd
);
3470 gen_vfp_st(s
, dp
, addr
);
3472 tcg_temp_free_i32(addr
);
3474 /* load/store multiple */
3475 int w
= insn
& (1 << 21);
3477 n
= (insn
>> 1) & 0x7f;
3481 if (w
&& !(((insn
>> 23) ^ (insn
>> 24)) & 1)) {
3482 /* P == U , W == 1 => UNDEF */
3485 if (n
== 0 || (rd
+ n
) > 32 || (dp
&& n
> 16)) {
3486 /* UNPREDICTABLE cases for bad immediates: we choose to
3487 * UNDEF to avoid generating huge numbers of TCG ops
3491 if (rn
== 15 && w
) {
3492 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
3496 if (s
->thumb
&& rn
== 15) {
3497 /* This is actually UNPREDICTABLE */
3498 addr
= tcg_temp_new_i32();
3499 tcg_gen_movi_i32(addr
, s
->pc
& ~2);
3501 addr
= load_reg(s
, rn
);
3503 if (insn
& (1 << 24)) /* pre-decrement */
3504 tcg_gen_addi_i32(addr
, addr
, -((insn
& 0xff) << 2));
3510 for (i
= 0; i
< n
; i
++) {
3511 if (insn
& ARM_CP_RW_BIT
) {
3513 gen_vfp_ld(s
, dp
, addr
);
3514 gen_mov_vreg_F0(dp
, rd
+ i
);
3517 gen_mov_F0_vreg(dp
, rd
+ i
);
3518 gen_vfp_st(s
, dp
, addr
);
3520 tcg_gen_addi_i32(addr
, addr
, offset
);
3524 if (insn
& (1 << 24))
3525 offset
= -offset
* n
;
3526 else if (dp
&& (insn
& 1))
3532 tcg_gen_addi_i32(addr
, addr
, offset
);
3533 store_reg(s
, rn
, addr
);
3535 tcg_temp_free_i32(addr
);
3541 /* Should never happen. */
static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
{
    TranslationBlock *tb;

    tb = s->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        tcg_gen_goto_tb(n);
        gen_set_pc_im(dest);
        tcg_gen_exit_tb((tcg_target_long)tb + n);
    } else {
        gen_set_pc_im(dest);
        tcg_gen_exit_tb(0);
    }
}
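
/* Minimal sketch of the chaining condition used by gen_goto_tb() above
 * (illustration only; this helper is hypothetical and unused): a direct,
 * patchable TB-to-TB jump is only emitted when the destination stays in
 * the same guest page as the current TB, otherwise the PC is set and an
 * ordinary exit is taken.
 */
static inline int gen_goto_tb_same_page_example(uint32_t tb_pc, uint32_t dest)
{
    return (tb_pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
}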
static inline void gen_jmp (DisasContext *s, uint32_t dest)
{
    if (unlikely(s->singlestep_enabled)) {
        /* An indirect jump so that we still trigger the debug exception.  */
        if (s->thumb)
            dest |= 1;
        gen_bx_im(s, dest);
    } else {
        gen_goto_tb(s, 0, dest);
        s->is_jmp = DISAS_TB_JUMP;
    }
}
static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
{
    if (x)
        tcg_gen_sari_i32(t0, t0, 16);
    else
        gen_sxth(t0);
    if (y)
        tcg_gen_sari_i32(t1, t1, 16);
    else
        gen_sxth(t1);
    tcg_gen_mul_i32(t0, t0, t1);
}
/* Return the mask of PSR bits set by a MSR instruction.  */
static uint32_t msr_mask(CPUARMState *env, DisasContext *s, int flags, int spsr) {
    uint32_t mask;

    mask = 0;
    if (flags & (1 << 0))
        mask |= 0xff;
    if (flags & (1 << 1))
        mask |= 0xff00;
    if (flags & (1 << 2))
        mask |= 0xff0000;
    if (flags & (1 << 3))
        mask |= 0xff000000;

    /* Mask out undefined bits.  */
    mask &= ~CPSR_RESERVED;
    if (!arm_feature(env, ARM_FEATURE_V4T))
        mask &= ~CPSR_T;
    if (!arm_feature(env, ARM_FEATURE_V5))
        mask &= ~CPSR_Q; /* V5TE in reality*/
    if (!arm_feature(env, ARM_FEATURE_V6))
        mask &= ~(CPSR_E | CPSR_GE);
    if (!arm_feature(env, ARM_FEATURE_THUMB2))
        mask &= ~CPSR_IT;
    /* Mask out execution state bits.  */
    if (!spsr)
        mask &= ~CPSR_EXEC;
    /* Mask out privileged bits.  */
    if (IS_USER(s))
        mask &= CPSR_USER;
    return mask;
}
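
/* Illustrative sketch (hypothetical helper, not used by msr_mask() above):
 * the MSR field flags select whole bytes of the PSR before the
 * feature-dependent masking is applied - c (bits 0..7), x (8..15),
 * s (16..23) and f (24..31).
 */
static inline uint32_t msr_field_bytes_example(int flags)
{
    uint32_t mask = 0;
    if (flags & (1 << 0))
        mask |= 0xff;               /* c: control field */
    if (flags & (1 << 1))
        mask |= 0xff00;             /* x: extension field */
    if (flags & (1 << 2))
        mask |= 0xff0000;           /* s: status field */
    if (flags & (1 << 3))
        mask |= 0xff000000;         /* f: flags field */
    return mask;
}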
/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
{
    TCGv tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;

        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, spsr);
    } else {
        gen_set_cpsr(t0, mask);
    }
    tcg_temp_free_i32(t0);
    gen_lookup_tb(s);
    return 0;
}
/* Returns nonzero if access to the PSR is not permitted.  */
static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
{
    TCGv tmp;
    tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, val);
    return gen_set_psr(s, mask, spsr, tmp);
}
/* Generate an old-style exception return. Marks pc as dead. */
static void gen_exception_return(DisasContext *s, TCGv pc)
{
    TCGv tmp;
    store_reg(s, 15, pc);
    tmp = load_cpu_field(spsr);
    gen_set_cpsr(tmp, 0xffffffff);
    tcg_temp_free_i32(tmp);
    s->is_jmp = DISAS_UPDATE;
}
/* Generate a v6 exception return.  Marks both values as dead.  */
static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
{
    gen_set_cpsr(cpsr, 0xffffffff);
    tcg_temp_free_i32(cpsr);
    store_reg(s, 15, pc);
    s->is_jmp = DISAS_UPDATE;
}
static void
gen_set_condexec (DisasContext *s)
{
    if (s->condexec_mask) {
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}
static void gen_exception_insn(DisasContext *s, int offset, int excp)
{
    gen_set_condexec(s);
    gen_set_pc_im(s->pc - offset);
    gen_exception(excp);
    s->is_jmp = DISAS_JUMP;
}
static void gen_nop_hint(DisasContext *s, int val)
{
    switch (val) {
    case 3: /* wfi */
        gen_set_pc_im(s->pc);
        s->is_jmp = DISAS_WFI;
        break;
    case 2: /* wfe */
    case 4: /* sev */
        /* TODO: Implement SEV and WFE.  May help SMP performance.  */
    default: /* nop */
        break;
    }
}
#define CPU_V001 cpu_V0, cpu_V0, cpu_V1

static inline void gen_neon_add(int size, TCGv t0, TCGv t1)
{
    switch (size) {
    case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
    case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
    case 2: tcg_gen_add_i32(t0, t0, t1); break;
    default: abort();
    }
}
static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
{
    switch (size) {
    case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
    case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
    case 2: tcg_gen_sub_i32(t0, t1, t0); break;
    default: abort();
    }
}
/* 32-bit pairwise ops end up the same as the elementwise versions.  */
#define gen_helper_neon_pmax_s32  gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32  gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32  gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32  gen_helper_neon_min_u32

#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)

#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
static TCGv neon_load_scratch(int scratch)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    return tmp;
}

static void neon_store_scratch(int scratch, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    tcg_temp_free_i32(var);
}
static inline TCGv neon_get_scalar(int size, int reg)
{
    TCGv tmp;
    if (size == 1) {
        tmp = neon_load_reg(reg & 7, reg >> 4);
        if (reg & 8) {
            gen_neon_dup_high16(tmp);
        } else {
            gen_neon_dup_low16(tmp);
        }
    } else {
        tmp = neon_load_reg(reg & 15, reg >> 4);
    }
    return tmp;
}
static int gen_neon_unzip(int rd, int rm, int size, int q)
{
    TCGv tmp, tmp2;
    if (!q && size == 2) {
        return 1;
    }
    tmp = tcg_const_i32(rd);
    tmp2 = tcg_const_i32(rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
            break;
        case 2:
            gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(tmp2);
    return 0;
}
static int gen_neon_zip(int rd, int rm, int size, int q)
{
    TCGv tmp, tmp2;
    if (!q && size == 2) {
        return 1;
    }
    tmp = tcg_const_i32(rd);
    tmp2 = tcg_const_i32(rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
            break;
        case 2:
            gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_zip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_zip16(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(tmp2);
    return 0;
}
static void gen_neon_trn_u8(TCGv t0, TCGv t1)
{
    TCGv rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 8);
    tcg_gen_andi_i32(rd, rd, 0xff00ff00);
    tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
    tcg_gen_or_i32(rd, rd, tmp);

    tcg_gen_shri_i32(t1, t1, 8);
    tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
    tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
static void gen_neon_trn_u16(TCGv t0, TCGv t1)
{
    TCGv rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 16);
    tcg_gen_andi_i32(tmp, t1, 0xffff);
    tcg_gen_or_i32(rd, rd, tmp);
    tcg_gen_shri_i32(t1, t1, 16);
    tcg_gen_andi_i32(tmp, t0, 0xffff0000);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
} neon_ls_element_type[11] = {
/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid.  */
static int disas_neon_ls_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
{
3968 if (!s
->vfp_enabled
)
3970 VFP_DREG_D(rd
, insn
);
3971 rn
= (insn
>> 16) & 0xf;
3973 load
= (insn
& (1 << 21)) != 0;
3974 if ((insn
& (1 << 23)) == 0) {
3975 /* Load store all elements. */
3976 op
= (insn
>> 8) & 0xf;
3977 size
= (insn
>> 6) & 3;
3980 /* Catch UNDEF cases for bad values of align field */
3983 if (((insn
>> 5) & 1) == 1) {
3988 if (((insn
>> 4) & 3) == 3) {
3995 nregs
= neon_ls_element_type
[op
].nregs
;
3996 interleave
= neon_ls_element_type
[op
].interleave
;
3997 spacing
= neon_ls_element_type
[op
].spacing
;
3998 if (size
== 3 && (interleave
| spacing
) != 1)
4000 addr
= tcg_temp_new_i32();
4001 load_reg_var(s
, addr
, rn
);
4002 stride
= (1 << size
) * interleave
;
4003 for (reg
= 0; reg
< nregs
; reg
++) {
4004 if (interleave
> 2 || (interleave
== 2 && nregs
== 2)) {
4005 load_reg_var(s
, addr
, rn
);
4006 tcg_gen_addi_i32(addr
, addr
, (1 << size
) * reg
);
4007 } else if (interleave
== 2 && nregs
== 4 && reg
== 2) {
4008 load_reg_var(s
, addr
, rn
);
4009 tcg_gen_addi_i32(addr
, addr
, 1 << size
);
4013 tmp64
= gen_ld64(addr
, IS_USER(s
));
4014 neon_store_reg64(tmp64
, rd
);
4015 tcg_temp_free_i64(tmp64
);
4017 tmp64
= tcg_temp_new_i64();
4018 neon_load_reg64(tmp64
, rd
);
4019 gen_st64(tmp64
, addr
, IS_USER(s
));
4021 tcg_gen_addi_i32(addr
, addr
, stride
);
4023 for (pass
= 0; pass
< 2; pass
++) {
4026 tmp
= gen_ld32(addr
, IS_USER(s
));
4027 neon_store_reg(rd
, pass
, tmp
);
4029 tmp
= neon_load_reg(rd
, pass
);
4030 gen_st32(tmp
, addr
, IS_USER(s
));
4032 tcg_gen_addi_i32(addr
, addr
, stride
);
4033 } else if (size
== 1) {
4035 tmp
= gen_ld16u(addr
, IS_USER(s
));
4036 tcg_gen_addi_i32(addr
, addr
, stride
);
4037 tmp2
= gen_ld16u(addr
, IS_USER(s
));
4038 tcg_gen_addi_i32(addr
, addr
, stride
);
4039 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
4040 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
4041 tcg_temp_free_i32(tmp2
);
4042 neon_store_reg(rd
, pass
, tmp
);
4044 tmp
= neon_load_reg(rd
, pass
);
4045 tmp2
= tcg_temp_new_i32();
4046 tcg_gen_shri_i32(tmp2
, tmp
, 16);
4047 gen_st16(tmp
, addr
, IS_USER(s
));
4048 tcg_gen_addi_i32(addr
, addr
, stride
);
4049 gen_st16(tmp2
, addr
, IS_USER(s
));
4050 tcg_gen_addi_i32(addr
, addr
, stride
);
4052 } else /* size == 0 */ {
4055 for (n
= 0; n
< 4; n
++) {
4056 tmp
= gen_ld8u(addr
, IS_USER(s
));
4057 tcg_gen_addi_i32(addr
, addr
, stride
);
4061 tcg_gen_shli_i32(tmp
, tmp
, n
* 8);
4062 tcg_gen_or_i32(tmp2
, tmp2
, tmp
);
4063 tcg_temp_free_i32(tmp
);
4066 neon_store_reg(rd
, pass
, tmp2
);
4068 tmp2
= neon_load_reg(rd
, pass
);
4069 for (n
= 0; n
< 4; n
++) {
4070 tmp
= tcg_temp_new_i32();
4072 tcg_gen_mov_i32(tmp
, tmp2
);
4074 tcg_gen_shri_i32(tmp
, tmp2
, n
* 8);
4076 gen_st8(tmp
, addr
, IS_USER(s
));
4077 tcg_gen_addi_i32(addr
, addr
, stride
);
4079 tcg_temp_free_i32(tmp2
);
4086 tcg_temp_free_i32(addr
);
4089 size
= (insn
>> 10) & 3;
4091 /* Load single element to all lanes. */
4092 int a
= (insn
>> 4) & 1;
4096 size
= (insn
>> 6) & 3;
4097 nregs
= ((insn
>> 8) & 3) + 1;
4100 if (nregs
!= 4 || a
== 0) {
4103 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
4106 if (nregs
== 1 && a
== 1 && size
== 0) {
4109 if (nregs
== 3 && a
== 1) {
4112 addr
= tcg_temp_new_i32();
4113 load_reg_var(s
, addr
, rn
);
4115 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
4116 tmp
= gen_load_and_replicate(s
, addr
, size
);
4117 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
, 0));
4118 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
, 1));
4119 if (insn
& (1 << 5)) {
4120 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
+ 1, 0));
4121 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
+ 1, 1));
4123 tcg_temp_free_i32(tmp
);
4125 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
4126 stride
= (insn
& (1 << 5)) ? 2 : 1;
4127 for (reg
= 0; reg
< nregs
; reg
++) {
4128 tmp
= gen_load_and_replicate(s
, addr
, size
);
4129 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
, 0));
4130 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
, 1));
4131 tcg_temp_free_i32(tmp
);
4132 tcg_gen_addi_i32(addr
, addr
, 1 << size
);
4136 tcg_temp_free_i32(addr
);
4137 stride
= (1 << size
) * nregs
;
4139 /* Single element. */
4140 int idx
= (insn
>> 4) & 0xf;
4141 pass
= (insn
>> 7) & 1;
4144 shift
= ((insn
>> 5) & 3) * 8;
4148 shift
= ((insn
>> 6) & 1) * 16;
4149 stride
= (insn
& (1 << 5)) ? 2 : 1;
4153 stride
= (insn
& (1 << 6)) ? 2 : 1;
4158 nregs
= ((insn
>> 8) & 3) + 1;
4159 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
4162 if (((idx
& (1 << size
)) != 0) ||
4163 (size
== 2 && ((idx
& 3) == 1 || (idx
& 3) == 2))) {
4168 if ((idx
& 1) != 0) {
4173 if (size
== 2 && (idx
& 2) != 0) {
4178 if ((size
== 2) && ((idx
& 3) == 3)) {
4185 if ((rd
+ stride
* (nregs
- 1)) > 31) {
4186 /* Attempts to write off the end of the register file
4187 * are UNPREDICTABLE; we choose to UNDEF because otherwise
4188 * the neon_load_reg() would write off the end of the array.
4192 addr
= tcg_temp_new_i32();
4193 load_reg_var(s
, addr
, rn
);
4194 for (reg
= 0; reg
< nregs
; reg
++) {
4198 tmp
= gen_ld8u(addr
, IS_USER(s
));
4201 tmp
= gen_ld16u(addr
, IS_USER(s
));
4204 tmp
= gen_ld32(addr
, IS_USER(s
));
4206 default: /* Avoid compiler warnings. */
4210 tmp2
= neon_load_reg(rd
, pass
);
4211 gen_bfi(tmp
, tmp2
, tmp
, shift
, size
? 0xffff : 0xff);
4212 tcg_temp_free_i32(tmp2
);
4214 neon_store_reg(rd
, pass
, tmp
);
4215 } else { /* Store */
4216 tmp
= neon_load_reg(rd
, pass
);
4218 tcg_gen_shri_i32(tmp
, tmp
, shift
);
4221 gen_st8(tmp
, addr
, IS_USER(s
));
4224 gen_st16(tmp
, addr
, IS_USER(s
));
4227 gen_st32(tmp
, addr
, IS_USER(s
));
4232 tcg_gen_addi_i32(addr
, addr
, 1 << size
);
4234 tcg_temp_free_i32(addr
);
4235 stride
= nregs
* (1 << size
);
4241 base
= load_reg(s
, rn
);
4243 tcg_gen_addi_i32(base
, base
, stride
);
4246 index
= load_reg(s
, rm
);
4247 tcg_gen_add_i32(base
, base
, index
);
4248 tcg_temp_free_i32(index
);
4250 store_reg(s
, rn
, base
);
/* Bitwise select.  dest = c ? t : f.  Clobbers T and F.  */
static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
{
    tcg_gen_and_i32(t, t, c);
    tcg_gen_andc_i32(f, f, c);
    tcg_gen_or_i32(dest, t, f);
}
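
/* Reference model for gen_neon_bsl() above (documentation only, the helper
 * below is hypothetical and unused): each result bit is taken from t where
 * the control bit is set and from f where it is clear.
 */
static inline uint32_t neon_bsl_ref(uint32_t c, uint32_t t, uint32_t f)
{
    return (t & c) | (f & ~c);
}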
static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_u8(dest, src); break;
    case 1: gen_helper_neon_narrow_u16(dest, src); break;
    case 2: tcg_gen_trunc_i64_i32(dest, src); break;
    default: abort();
    }
}

static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
    default: abort();
    }
}

static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
    default: abort();
    }
}

static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
    default: abort();
    }
}
static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
                                         int q, int u)
{
    if (q) {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
            default: abort();
            }
        }
    } else {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_shl_u16(var, var, shift); break;
            case 2: gen_helper_neon_shl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_shl_s16(var, var, shift); break;
            case 2: gen_helper_neon_shl_s32(var, var, shift); break;
            default: abort();
            }
        }
    }
}
static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
{
    if (u) {
        switch (size) {
        case 0: gen_helper_neon_widen_u8(dest, src); break;
        case 1: gen_helper_neon_widen_u16(dest, src); break;
        case 2: tcg_gen_extu_i32_i64(dest, src); break;
        default: abort();
        }
    } else {
        switch (size) {
        case 0: gen_helper_neon_widen_s8(dest, src); break;
        case 1: gen_helper_neon_widen_s16(dest, src); break;
        case 2: tcg_gen_ext_i32_i64(dest, src); break;
        default: abort();
        }
    }
    tcg_temp_free_i32(src);
}
static inline void gen_neon_addl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_addl_u16(CPU_V001); break;
    case 1: gen_helper_neon_addl_u32(CPU_V001); break;
    case 2: tcg_gen_add_i64(CPU_V001); break;
    default: abort();
    }
}

static inline void gen_neon_subl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_subl_u16(CPU_V001); break;
    case 1: gen_helper_neon_subl_u32(CPU_V001); break;
    case 2: tcg_gen_sub_i64(CPU_V001); break;
    default: abort();
    }
}

static inline void gen_neon_negl(TCGv_i64 var, int size)
{
    switch (size) {
    case 0: gen_helper_neon_negl_u16(var, var); break;
    case 1: gen_helper_neon_negl_u32(var, var); break;
    case 2: gen_helper_neon_negl_u64(var, var); break;
    default: abort();
    }
}
static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
{
    switch (size) {
    case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
    case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
    default: abort();
    }
}

static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
{
    TCGv_i64 tmp;

    switch ((size << 1) | u) {
    case 0: gen_helper_neon_mull_s8(dest, a, b); break;
    case 1: gen_helper_neon_mull_u8(dest, a, b); break;
    case 2: gen_helper_neon_mull_s16(dest, a, b); break;
    case 3: gen_helper_neon_mull_u16(dest, a, b); break;
    case 4:
        tmp = gen_muls_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 5:
        tmp = gen_mulu_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    default: abort();
    }

    /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
       Don't forget to clean them now.  */
    if (size < 2) {
        tcg_temp_free_i32(a);
        tcg_temp_free_i32(b);
    }
}
static void gen_neon_narrow_op(int op, int u, int size, TCGv dest, TCGv_i64 src)
{
    if (op) {
        if (u) {
            gen_neon_unarrow_sats(size, dest, src);
        } else {
            gen_neon_narrow(size, dest, src);
        }
    } else {
        if (u) {
            gen_neon_narrow_satu(size, dest, src);
        } else {
            gen_neon_narrow_sats(size, dest, src);
        }
    }
}
/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
 * table A7-9.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD 23
#define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_VRECPS_VRSQRTS 31 /* float VRECPS, VRSQRTS */

static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD] = 0x7,
    [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_VRECPS_VRSQRTS] = 0x5, /* size bit 1 encodes op */
};
/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
 * table A7-13.
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63

static int neon_2rm_is_float_op(int op)
{
    /* Return true if this neon 2reg-misc op is float-to-float */
    return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
            op >= NEON_2RM_VRECPE_F);
}
/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF). Since unallocated
 * op values will have no bits set they always UNDEF.
 */
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};
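
/* Minimal sketch (hypothetical helper mirroring how the decoder consults
 * the table above): an op/size combination is legal only if the matching
 * size bit is set; unallocated ops have no bits set and therefore UNDEF.
 */
static inline int neon_2rm_size_ok_example(int op, int size)
{
    return (neon_2rm_sizes[op] & (1 << size)) != 0;
}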
/* Translate a NEON data processing instruction.  Return nonzero if the
   instruction is invalid.
   We process data in a mixture of 32-bit and 64-bit chunks.
   Mostly we use 32-bit chunks so we can use normal scalar instructions.  */

static int disas_neon_data_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
{
    TCGv tmp, tmp2, tmp3, tmp4, tmp5;
4640 if (!s
->vfp_enabled
)
4642 q
= (insn
& (1 << 6)) != 0;
4643 u
= (insn
>> 24) & 1;
4644 VFP_DREG_D(rd
, insn
);
4645 VFP_DREG_N(rn
, insn
);
4646 VFP_DREG_M(rm
, insn
);
4647 size
= (insn
>> 20) & 3;
4648 if ((insn
& (1 << 23)) == 0) {
4649 /* Three register same length. */
4650 op
= ((insn
>> 7) & 0x1e) | ((insn
>> 4) & 1);
4651 /* Catch invalid op and bad size combinations: UNDEF */
4652 if ((neon_3r_sizes
[op
] & (1 << size
)) == 0) {
4655 /* All insns of this form UNDEF for either this condition or the
4656 * superset of cases "Q==1"; we catch the latter later.
4658 if (q
&& ((rd
| rn
| rm
) & 1)) {
4661 if (size
== 3 && op
!= NEON_3R_LOGIC
) {
4662 /* 64-bit element instructions. */
4663 for (pass
= 0; pass
< (q
? 2 : 1); pass
++) {
4664 neon_load_reg64(cpu_V0
, rn
+ pass
);
4665 neon_load_reg64(cpu_V1
, rm
+ pass
);
4669 gen_helper_neon_qadd_u64(cpu_V0
, cpu_env
,
4672 gen_helper_neon_qadd_s64(cpu_V0
, cpu_env
,
4678 gen_helper_neon_qsub_u64(cpu_V0
, cpu_env
,
4681 gen_helper_neon_qsub_s64(cpu_V0
, cpu_env
,
4687 gen_helper_neon_shl_u64(cpu_V0
, cpu_V1
, cpu_V0
);
4689 gen_helper_neon_shl_s64(cpu_V0
, cpu_V1
, cpu_V0
);
4694 gen_helper_neon_qshl_u64(cpu_V0
, cpu_env
,
4697 gen_helper_neon_qshl_s64(cpu_V0
, cpu_env
,
4703 gen_helper_neon_rshl_u64(cpu_V0
, cpu_V1
, cpu_V0
);
4705 gen_helper_neon_rshl_s64(cpu_V0
, cpu_V1
, cpu_V0
);
4708 case NEON_3R_VQRSHL
:
4710 gen_helper_neon_qrshl_u64(cpu_V0
, cpu_env
,
4713 gen_helper_neon_qrshl_s64(cpu_V0
, cpu_env
,
4717 case NEON_3R_VADD_VSUB
:
4719 tcg_gen_sub_i64(CPU_V001
);
4721 tcg_gen_add_i64(CPU_V001
);
4727 neon_store_reg64(cpu_V0
, rd
+ pass
);
4736 case NEON_3R_VQRSHL
:
4739 /* Shift instruction operands are reversed. */
4754 case NEON_3R_FLOAT_ARITH
:
4755 pairwise
= (u
&& size
< 2); /* if VPADD (float) */
4757 case NEON_3R_FLOAT_MINMAX
:
4758 pairwise
= u
; /* if VPMIN/VPMAX (float) */
4760 case NEON_3R_FLOAT_CMP
:
4762 /* no encoding for U=0 C=1x */
4766 case NEON_3R_FLOAT_ACMP
:
4771 case NEON_3R_VRECPS_VRSQRTS
:
4777 if (u
&& (size
!= 0)) {
4778 /* UNDEF on invalid size for polynomial subcase */
4783 if (!arm_feature(env
, ARM_FEATURE_VFP4
) || u
) {
4791 if (pairwise
&& q
) {
4792 /* All the pairwise insns UNDEF if Q is set */
4796 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
4801 tmp
= neon_load_reg(rn
, 0);
4802 tmp2
= neon_load_reg(rn
, 1);
4804 tmp
= neon_load_reg(rm
, 0);
4805 tmp2
= neon_load_reg(rm
, 1);
4809 tmp
= neon_load_reg(rn
, pass
);
4810 tmp2
= neon_load_reg(rm
, pass
);
4814 GEN_NEON_INTEGER_OP(hadd
);
4817 GEN_NEON_INTEGER_OP_ENV(qadd
);
4819 case NEON_3R_VRHADD
:
4820 GEN_NEON_INTEGER_OP(rhadd
);
4822 case NEON_3R_LOGIC
: /* Logic ops. */
4823 switch ((u
<< 2) | size
) {
4825 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
4828 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
4831 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
4834 tcg_gen_orc_i32(tmp
, tmp
, tmp2
);
4837 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
4840 tmp3
= neon_load_reg(rd
, pass
);
4841 gen_neon_bsl(tmp
, tmp
, tmp2
, tmp3
);
4842 tcg_temp_free_i32(tmp3
);
4845 tmp3
= neon_load_reg(rd
, pass
);
4846 gen_neon_bsl(tmp
, tmp
, tmp3
, tmp2
);
4847 tcg_temp_free_i32(tmp3
);
4850 tmp3
= neon_load_reg(rd
, pass
);
4851 gen_neon_bsl(tmp
, tmp3
, tmp
, tmp2
);
4852 tcg_temp_free_i32(tmp3
);
4857 GEN_NEON_INTEGER_OP(hsub
);
4860 GEN_NEON_INTEGER_OP_ENV(qsub
);
4863 GEN_NEON_INTEGER_OP(cgt
);
4866 GEN_NEON_INTEGER_OP(cge
);
4869 GEN_NEON_INTEGER_OP(shl
);
4872 GEN_NEON_INTEGER_OP_ENV(qshl
);
4875 GEN_NEON_INTEGER_OP(rshl
);
4877 case NEON_3R_VQRSHL
:
4878 GEN_NEON_INTEGER_OP_ENV(qrshl
);
4881 GEN_NEON_INTEGER_OP(max
);
4884 GEN_NEON_INTEGER_OP(min
);
4887 GEN_NEON_INTEGER_OP(abd
);
4890 GEN_NEON_INTEGER_OP(abd
);
4891 tcg_temp_free_i32(tmp2
);
4892 tmp2
= neon_load_reg(rd
, pass
);
4893 gen_neon_add(size
, tmp
, tmp2
);
4895 case NEON_3R_VADD_VSUB
:
4896 if (!u
) { /* VADD */
4897 gen_neon_add(size
, tmp
, tmp2
);
4900 case 0: gen_helper_neon_sub_u8(tmp
, tmp
, tmp2
); break;
4901 case 1: gen_helper_neon_sub_u16(tmp
, tmp
, tmp2
); break;
4902 case 2: tcg_gen_sub_i32(tmp
, tmp
, tmp2
); break;
4907 case NEON_3R_VTST_VCEQ
:
4908 if (!u
) { /* VTST */
4910 case 0: gen_helper_neon_tst_u8(tmp
, tmp
, tmp2
); break;
4911 case 1: gen_helper_neon_tst_u16(tmp
, tmp
, tmp2
); break;
4912 case 2: gen_helper_neon_tst_u32(tmp
, tmp
, tmp2
); break;
4917 case 0: gen_helper_neon_ceq_u8(tmp
, tmp
, tmp2
); break;
4918 case 1: gen_helper_neon_ceq_u16(tmp
, tmp
, tmp2
); break;
4919 case 2: gen_helper_neon_ceq_u32(tmp
, tmp
, tmp2
); break;
4924 case NEON_3R_VML
: /* VMLA, VMLAL, VMLS,VMLSL */
4926 case 0: gen_helper_neon_mul_u8(tmp
, tmp
, tmp2
); break;
4927 case 1: gen_helper_neon_mul_u16(tmp
, tmp
, tmp2
); break;
4928 case 2: tcg_gen_mul_i32(tmp
, tmp
, tmp2
); break;
4931 tcg_temp_free_i32(tmp2
);
4932 tmp2
= neon_load_reg(rd
, pass
);
4934 gen_neon_rsb(size
, tmp
, tmp2
);
4936 gen_neon_add(size
, tmp
, tmp2
);
4940 if (u
) { /* polynomial */
4941 gen_helper_neon_mul_p8(tmp
, tmp
, tmp2
);
4942 } else { /* Integer */
4944 case 0: gen_helper_neon_mul_u8(tmp
, tmp
, tmp2
); break;
4945 case 1: gen_helper_neon_mul_u16(tmp
, tmp
, tmp2
); break;
4946 case 2: tcg_gen_mul_i32(tmp
, tmp
, tmp2
); break;
4952 GEN_NEON_INTEGER_OP(pmax
);
4955 GEN_NEON_INTEGER_OP(pmin
);
4957 case NEON_3R_VQDMULH_VQRDMULH
: /* Multiply high. */
4958 if (!u
) { /* VQDMULH */
4961 gen_helper_neon_qdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
4964 gen_helper_neon_qdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
4968 } else { /* VQRDMULH */
4971 gen_helper_neon_qrdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
4974 gen_helper_neon_qrdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
4982 case 0: gen_helper_neon_padd_u8(tmp
, tmp
, tmp2
); break;
4983 case 1: gen_helper_neon_padd_u16(tmp
, tmp
, tmp2
); break;
4984 case 2: tcg_gen_add_i32(tmp
, tmp
, tmp2
); break;
4988 case NEON_3R_FLOAT_ARITH
: /* Floating point arithmetic. */
4990 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
4991 switch ((u
<< 2) | size
) {
4994 gen_helper_vfp_adds(tmp
, tmp
, tmp2
, fpstatus
);
4997 gen_helper_vfp_subs(tmp
, tmp
, tmp2
, fpstatus
);
5000 gen_helper_neon_abd_f32(tmp
, tmp
, tmp2
, fpstatus
);
5005 tcg_temp_free_ptr(fpstatus
);
5008 case NEON_3R_FLOAT_MULTIPLY
:
5010 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5011 gen_helper_vfp_muls(tmp
, tmp
, tmp2
, fpstatus
);
5013 tcg_temp_free_i32(tmp2
);
5014 tmp2
= neon_load_reg(rd
, pass
);
5016 gen_helper_vfp_adds(tmp
, tmp
, tmp2
, fpstatus
);
5018 gen_helper_vfp_subs(tmp
, tmp2
, tmp
, fpstatus
);
5021 tcg_temp_free_ptr(fpstatus
);
5024 case NEON_3R_FLOAT_CMP
:
5026 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5028 gen_helper_neon_ceq_f32(tmp
, tmp
, tmp2
, fpstatus
);
5031 gen_helper_neon_cge_f32(tmp
, tmp
, tmp2
, fpstatus
);
5033 gen_helper_neon_cgt_f32(tmp
, tmp
, tmp2
, fpstatus
);
5036 tcg_temp_free_ptr(fpstatus
);
5039 case NEON_3R_FLOAT_ACMP
:
5041 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5043 gen_helper_neon_acge_f32(tmp
, tmp
, tmp2
, fpstatus
);
5045 gen_helper_neon_acgt_f32(tmp
, tmp
, tmp2
, fpstatus
);
5047 tcg_temp_free_ptr(fpstatus
);
5050 case NEON_3R_FLOAT_MINMAX
:
5052 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5054 gen_helper_neon_max_f32(tmp
, tmp
, tmp2
, fpstatus
);
5056 gen_helper_neon_min_f32(tmp
, tmp
, tmp2
, fpstatus
);
5058 tcg_temp_free_ptr(fpstatus
);
5061 case NEON_3R_VRECPS_VRSQRTS
:
5063 gen_helper_recps_f32(tmp
, tmp
, tmp2
, cpu_env
);
5065 gen_helper_rsqrts_f32(tmp
, tmp
, tmp2
, cpu_env
);
5069 /* VFMA, VFMS: fused multiply-add */
5070 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5071 TCGv_i32 tmp3
= neon_load_reg(rd
, pass
);
5074 gen_helper_vfp_negs(tmp
, tmp
);
5076 gen_helper_vfp_muladds(tmp
, tmp
, tmp2
, tmp3
, fpstatus
);
5077 tcg_temp_free_i32(tmp3
);
5078 tcg_temp_free_ptr(fpstatus
);
5084 tcg_temp_free_i32(tmp2
);
5086 /* Save the result. For elementwise operations we can put it
5087 straight into the destination register. For pairwise operations
5088 we have to be careful to avoid clobbering the source operands. */
5089 if (pairwise
&& rd
== rm
) {
5090 neon_store_scratch(pass
, tmp
);
5092 neon_store_reg(rd
, pass
, tmp
);
5096 if (pairwise
&& rd
== rm
) {
5097 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
5098 tmp
= neon_load_scratch(pass
);
5099 neon_store_reg(rd
, pass
, tmp
);
5102 /* End of 3 register same size operations. */
5103 } else if (insn
& (1 << 4)) {
5104 if ((insn
& 0x00380080) != 0) {
5105 /* Two registers and shift. */
5106 op
= (insn
>> 8) & 0xf;
5107 if (insn
& (1 << 7)) {
5115 while ((insn
& (1 << (size
+ 19))) == 0)
5118 shift
= (insn
>> 16) & ((1 << (3 + size
)) - 1);
5119 /* To avoid excessive dumplication of ops we implement shift
5120 by immediate using the variable shift operations. */
5122 /* Shift by immediate:
5123 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
5124 if (q
&& ((rd
| rm
) & 1)) {
5127 if (!u
&& (op
== 4 || op
== 6)) {
5130 /* Right shifts are encoded as N - shift, where N is the
5131 element size in bits. */
5133 shift
= shift
- (1 << (size
+ 3));
5141 imm
= (uint8_t) shift
;
5146 imm
= (uint16_t) shift
;
5157 for (pass
= 0; pass
< count
; pass
++) {
5159 neon_load_reg64(cpu_V0
, rm
+ pass
);
5160 tcg_gen_movi_i64(cpu_V1
, imm
);
5165 gen_helper_neon_shl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
5167 gen_helper_neon_shl_s64(cpu_V0
, cpu_V0
, cpu_V1
);
5172 gen_helper_neon_rshl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
5174 gen_helper_neon_rshl_s64(cpu_V0
, cpu_V0
, cpu_V1
);
5177 case 5: /* VSHL, VSLI */
5178 gen_helper_neon_shl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
5180 case 6: /* VQSHLU */
5181 gen_helper_neon_qshlu_s64(cpu_V0
, cpu_env
,
5186 gen_helper_neon_qshl_u64(cpu_V0
, cpu_env
,
5189 gen_helper_neon_qshl_s64(cpu_V0
, cpu_env
,
5194 if (op
== 1 || op
== 3) {
5196 neon_load_reg64(cpu_V1
, rd
+ pass
);
5197 tcg_gen_add_i64(cpu_V0
, cpu_V0
, cpu_V1
);
5198 } else if (op
== 4 || (op
== 5 && u
)) {
5200 neon_load_reg64(cpu_V1
, rd
+ pass
);
5202 if (shift
< -63 || shift
> 63) {
5206 mask
= 0xffffffffffffffffull
>> -shift
;
5208 mask
= 0xffffffffffffffffull
<< shift
;
5211 tcg_gen_andi_i64(cpu_V1
, cpu_V1
, ~mask
);
5212 tcg_gen_or_i64(cpu_V0
, cpu_V0
, cpu_V1
);
5214 neon_store_reg64(cpu_V0
, rd
+ pass
);
5215 } else { /* size < 3 */
5216 /* Operands in T0 and T1. */
5217 tmp
= neon_load_reg(rm
, pass
);
5218 tmp2
= tcg_temp_new_i32();
5219 tcg_gen_movi_i32(tmp2
, imm
);
5223 GEN_NEON_INTEGER_OP(shl
);
5227 GEN_NEON_INTEGER_OP(rshl
);
5230 case 5: /* VSHL, VSLI */
5232 case 0: gen_helper_neon_shl_u8(tmp
, tmp
, tmp2
); break;
5233 case 1: gen_helper_neon_shl_u16(tmp
, tmp
, tmp2
); break;
5234 case 2: gen_helper_neon_shl_u32(tmp
, tmp
, tmp2
); break;
5238 case 6: /* VQSHLU */
5241 gen_helper_neon_qshlu_s8(tmp
, cpu_env
,
5245 gen_helper_neon_qshlu_s16(tmp
, cpu_env
,
5249 gen_helper_neon_qshlu_s32(tmp
, cpu_env
,
5257 GEN_NEON_INTEGER_OP_ENV(qshl
);
5260 tcg_temp_free_i32(tmp2
);
5262 if (op
== 1 || op
== 3) {
5264 tmp2
= neon_load_reg(rd
, pass
);
5265 gen_neon_add(size
, tmp
, tmp2
);
5266 tcg_temp_free_i32(tmp2
);
5267 } else if (op
== 4 || (op
== 5 && u
)) {
5272 mask
= 0xff >> -shift
;
5274 mask
= (uint8_t)(0xff << shift
);
5280 mask
= 0xffff >> -shift
;
5282 mask
= (uint16_t)(0xffff << shift
);
5286 if (shift
< -31 || shift
> 31) {
5290 mask
= 0xffffffffu
>> -shift
;
5292 mask
= 0xffffffffu
<< shift
;
5298 tmp2
= neon_load_reg(rd
, pass
);
5299 tcg_gen_andi_i32(tmp
, tmp
, mask
);
5300 tcg_gen_andi_i32(tmp2
, tmp2
, ~mask
);
5301 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
5302 tcg_temp_free_i32(tmp2
);
5304 neon_store_reg(rd
, pass
, tmp
);
5307 } else if (op
< 10) {
5308 /* Shift by immediate and narrow:
5309 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
5310 int input_unsigned
= (op
== 8) ? !u
: u
;
5314 shift
= shift
- (1 << (size
+ 3));
5317 tmp64
= tcg_const_i64(shift
);
5318 neon_load_reg64(cpu_V0
, rm
);
5319 neon_load_reg64(cpu_V1
, rm
+ 1);
5320 for (pass
= 0; pass
< 2; pass
++) {
5328 if (input_unsigned
) {
5329 gen_helper_neon_rshl_u64(cpu_V0
, in
, tmp64
);
5331 gen_helper_neon_rshl_s64(cpu_V0
, in
, tmp64
);
5334 if (input_unsigned
) {
5335 gen_helper_neon_shl_u64(cpu_V0
, in
, tmp64
);
5337 gen_helper_neon_shl_s64(cpu_V0
, in
, tmp64
);
5340 tmp
= tcg_temp_new_i32();
5341 gen_neon_narrow_op(op
== 8, u
, size
- 1, tmp
, cpu_V0
);
5342 neon_store_reg(rd
, pass
, tmp
);
5344 tcg_temp_free_i64(tmp64
);
5347 imm
= (uint16_t)shift
;
5351 imm
= (uint32_t)shift
;
5353 tmp2
= tcg_const_i32(imm
);
5354 tmp4
= neon_load_reg(rm
+ 1, 0);
5355 tmp5
= neon_load_reg(rm
+ 1, 1);
5356 for (pass
= 0; pass
< 2; pass
++) {
5358 tmp
= neon_load_reg(rm
, 0);
5362 gen_neon_shift_narrow(size
, tmp
, tmp2
, q
,
5365 tmp3
= neon_load_reg(rm
, 1);
5369 gen_neon_shift_narrow(size
, tmp3
, tmp2
, q
,
5371 tcg_gen_concat_i32_i64(cpu_V0
, tmp
, tmp3
);
5372 tcg_temp_free_i32(tmp
);
5373 tcg_temp_free_i32(tmp3
);
5374 tmp
= tcg_temp_new_i32();
5375 gen_neon_narrow_op(op
== 8, u
, size
- 1, tmp
, cpu_V0
);
5376 neon_store_reg(rd
, pass
, tmp
);
5378 tcg_temp_free_i32(tmp2
);
5380 } else if (op
== 10) {
5382 if (q
|| (rd
& 1)) {
5385 tmp
= neon_load_reg(rm
, 0);
5386 tmp2
= neon_load_reg(rm
, 1);
5387 for (pass
= 0; pass
< 2; pass
++) {
5391 gen_neon_widen(cpu_V0
, tmp
, size
, u
);
5394 /* The shift is less than the width of the source
5395 type, so we can just shift the whole register. */
5396 tcg_gen_shli_i64(cpu_V0
, cpu_V0
, shift
);
                        /* Widen the result of the shift: we need to clear
                         * the potential overflow bits resulting from
                         * left bits of the narrow input appearing as
                         * right bits of the left neighbour narrow input. */
5402 if (size
< 2 || !u
) {
5405 imm
= (0xffu
>> (8 - shift
));
5407 } else if (size
== 1) {
5408 imm
= 0xffff >> (16 - shift
);
5411 imm
= 0xffffffff >> (32 - shift
);
5414 imm64
= imm
| (((uint64_t)imm
) << 32);
5418 tcg_gen_andi_i64(cpu_V0
, cpu_V0
, ~imm64
);
5421 neon_store_reg64(cpu_V0
, rd
+ pass
);
5423 } else if (op
>= 14) {
5424 /* VCVT fixed-point. */
5425 if (!(insn
& (1 << 21)) || (q
&& ((rd
| rm
) & 1))) {
5428 /* We have already masked out the must-be-1 top bit of imm6,
5429 * hence this 32-shift where the ARM ARM has 64-imm6.
5432 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
5433 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, pass
));
5436 gen_vfp_ulto(0, shift
, 1);
5438 gen_vfp_slto(0, shift
, 1);
5441 gen_vfp_toul(0, shift
, 1);
5443 gen_vfp_tosl(0, shift
, 1);
5445 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, pass
));
5450 } else { /* (insn & 0x00380080) == 0 */
5452 if (q
&& (rd
& 1)) {
5456 op
= (insn
>> 8) & 0xf;
5457 /* One register and immediate. */
5458 imm
= (u
<< 7) | ((insn
>> 12) & 0x70) | (insn
& 0xf);
5459 invert
= (insn
& (1 << 5)) != 0;
5460 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5461 * We choose to not special-case this and will behave as if a
5462 * valid constant encoding of 0 had been given.
5481 imm
= (imm
<< 8) | (imm
<< 24);
5484 imm
= (imm
<< 8) | 0xff;
5487 imm
= (imm
<< 16) | 0xffff;
5490 imm
|= (imm
<< 8) | (imm
<< 16) | (imm
<< 24);
5498 imm
= ((imm
& 0x80) << 24) | ((imm
& 0x3f) << 19)
5499 | ((imm
& 0x40) ? (0x1f << 25) : (1 << 30));
5505 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
5506 if (op
& 1 && op
< 12) {
5507 tmp
= neon_load_reg(rd
, pass
);
5509 /* The immediate value has already been inverted, so
5511 tcg_gen_andi_i32(tmp
, tmp
, imm
);
5513 tcg_gen_ori_i32(tmp
, tmp
, imm
);
5517 tmp
= tcg_temp_new_i32();
5518 if (op
== 14 && invert
) {
5522 for (n
= 0; n
< 4; n
++) {
5523 if (imm
& (1 << (n
+ (pass
& 1) * 4)))
5524 val
|= 0xff << (n
* 8);
5526 tcg_gen_movi_i32(tmp
, val
);
5528 tcg_gen_movi_i32(tmp
, imm
);
5531 neon_store_reg(rd
, pass
, tmp
);
5534 } else { /* (insn & 0x00800010 == 0x00800000) */
5536 op
= (insn
>> 8) & 0xf;
5537 if ((insn
& (1 << 6)) == 0) {
5538 /* Three registers of different lengths. */
5542 /* undefreq: bit 0 : UNDEF if size != 0
5543 * bit 1 : UNDEF if size == 0
5544 * bit 2 : UNDEF if U == 1
5545 * Note that [1:0] set implies 'always UNDEF'
5548 /* prewiden, src1_wide, src2_wide, undefreq */
5549 static const int neon_3reg_wide
[16][4] = {
5550 {1, 0, 0, 0}, /* VADDL */
5551 {1, 1, 0, 0}, /* VADDW */
5552 {1, 0, 0, 0}, /* VSUBL */
5553 {1, 1, 0, 0}, /* VSUBW */
5554 {0, 1, 1, 0}, /* VADDHN */
5555 {0, 0, 0, 0}, /* VABAL */
5556 {0, 1, 1, 0}, /* VSUBHN */
5557 {0, 0, 0, 0}, /* VABDL */
5558 {0, 0, 0, 0}, /* VMLAL */
5559 {0, 0, 0, 6}, /* VQDMLAL */
5560 {0, 0, 0, 0}, /* VMLSL */
5561 {0, 0, 0, 6}, /* VQDMLSL */
5562 {0, 0, 0, 0}, /* Integer VMULL */
5563 {0, 0, 0, 2}, /* VQDMULL */
5564 {0, 0, 0, 5}, /* Polynomial VMULL */
5565 {0, 0, 0, 3}, /* Reserved: always UNDEF */
            prewiden = neon_3reg_wide[op][0];
            src1_wide = neon_3reg_wide[op][1];
            src2_wide = neon_3reg_wide[op][2];
            undefreq = neon_3reg_wide[op][3];
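            /* For example, VQDMLAL (op == 9) has undefreq == 6, i.e. it is
             * UNDEF both when size == 0 and when U == 1, which is exactly
             * what the checks below implement. */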
5573 if (((undefreq
& 1) && (size
!= 0)) ||
5574 ((undefreq
& 2) && (size
== 0)) ||
5575 ((undefreq
& 4) && u
)) {
5578 if ((src1_wide
&& (rn
& 1)) ||
5579 (src2_wide
&& (rm
& 1)) ||
5580 (!src2_wide
&& (rd
& 1))) {
5584 /* Avoid overlapping operands. Wide source operands are
5585 always aligned so will never overlap with wide
5586 destinations in problematic ways. */
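            /* Illustrative note: with a narrow source overlapping the wide
             * destination (rd == rn or rd == rm), pass 0 overwrites that
             * source's D register, so its second half is stashed in scratch
             * slot 2 here and reloaded on pass 1. */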
5587 if (rd
== rm
&& !src2_wide
) {
5588 tmp
= neon_load_reg(rm
, 1);
5589 neon_store_scratch(2, tmp
);
5590 } else if (rd
== rn
&& !src1_wide
) {
5591 tmp
= neon_load_reg(rn
, 1);
5592 neon_store_scratch(2, tmp
);
5595 for (pass
= 0; pass
< 2; pass
++) {
5597 neon_load_reg64(cpu_V0
, rn
+ pass
);
5600 if (pass
== 1 && rd
== rn
) {
5601 tmp
= neon_load_scratch(2);
5603 tmp
= neon_load_reg(rn
, pass
);
5606 gen_neon_widen(cpu_V0
, tmp
, size
, u
);
5610 neon_load_reg64(cpu_V1
, rm
+ pass
);
5613 if (pass
== 1 && rd
== rm
) {
5614 tmp2
= neon_load_scratch(2);
5616 tmp2
= neon_load_reg(rm
, pass
);
5619 gen_neon_widen(cpu_V1
, tmp2
, size
, u
);
5623 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
5624 gen_neon_addl(size
);
5626 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
5627 gen_neon_subl(size
);
5629 case 5: case 7: /* VABAL, VABDL */
5630 switch ((size
<< 1) | u
) {
5632 gen_helper_neon_abdl_s16(cpu_V0
, tmp
, tmp2
);
5635 gen_helper_neon_abdl_u16(cpu_V0
, tmp
, tmp2
);
5638 gen_helper_neon_abdl_s32(cpu_V0
, tmp
, tmp2
);
5641 gen_helper_neon_abdl_u32(cpu_V0
, tmp
, tmp2
);
5644 gen_helper_neon_abdl_s64(cpu_V0
, tmp
, tmp2
);
5647 gen_helper_neon_abdl_u64(cpu_V0
, tmp
, tmp2
);
5651 tcg_temp_free_i32(tmp2
);
5652 tcg_temp_free_i32(tmp
);
5654 case 8: case 9: case 10: case 11: case 12: case 13:
5655 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
5656 gen_neon_mull(cpu_V0
, tmp
, tmp2
, size
, u
);
5658 case 14: /* Polynomial VMULL */
5659 gen_helper_neon_mull_p8(cpu_V0
, tmp
, tmp2
);
5660 tcg_temp_free_i32(tmp2
);
5661 tcg_temp_free_i32(tmp
);
5663 default: /* 15 is RESERVED: caught earlier */
5668 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
5669 neon_store_reg64(cpu_V0
, rd
+ pass
);
5670 } else if (op
== 5 || (op
>= 8 && op
<= 11)) {
5672 neon_load_reg64(cpu_V1
, rd
+ pass
);
5674 case 10: /* VMLSL */
5675 gen_neon_negl(cpu_V0
, size
);
5677 case 5: case 8: /* VABAL, VMLAL */
5678 gen_neon_addl(size
);
5680 case 9: case 11: /* VQDMLAL, VQDMLSL */
5681 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
5683 gen_neon_negl(cpu_V0
, size
);
5685 gen_neon_addl_saturate(cpu_V0
, cpu_V1
, size
);
5690 neon_store_reg64(cpu_V0
, rd
+ pass
);
5691 } else if (op
== 4 || op
== 6) {
5692 /* Narrowing operation. */
5693 tmp
= tcg_temp_new_i32();
5697 gen_helper_neon_narrow_high_u8(tmp
, cpu_V0
);
5700 gen_helper_neon_narrow_high_u16(tmp
, cpu_V0
);
5703 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
5704 tcg_gen_trunc_i64_i32(tmp
, cpu_V0
);
5711 gen_helper_neon_narrow_round_high_u8(tmp
, cpu_V0
);
5714 gen_helper_neon_narrow_round_high_u16(tmp
, cpu_V0
);
5717 tcg_gen_addi_i64(cpu_V0
, cpu_V0
, 1u << 31);
5718 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
5719 tcg_gen_trunc_i64_i32(tmp
, cpu_V0
);
5727 neon_store_reg(rd
, 0, tmp3
);
5728 neon_store_reg(rd
, 1, tmp
);
5731 /* Write back the result. */
5732 neon_store_reg64(cpu_V0
, rd
+ pass
);
            /* Two registers and a scalar. NB that for ops of this form
             * the ARM ARM labels bit 24 as Q, but it is in our variable
             * 'u', not 'q'.
             */
5744 case 1: /* Float VMLA scalar */
5745 case 5: /* Floating point VMLS scalar */
5746 case 9: /* Floating point VMUL scalar */
5751 case 0: /* Integer VMLA scalar */
5752 case 4: /* Integer VMLS scalar */
5753 case 8: /* Integer VMUL scalar */
5754 case 12: /* VQDMULH scalar */
5755 case 13: /* VQRDMULH scalar */
5756 if (u
&& ((rd
| rn
) & 1)) {
5759 tmp
= neon_get_scalar(size
, rm
);
5760 neon_store_scratch(0, tmp
);
5761 for (pass
= 0; pass
< (u
? 4 : 2); pass
++) {
5762 tmp
= neon_load_scratch(0);
5763 tmp2
= neon_load_reg(rn
, pass
);
5766 gen_helper_neon_qdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
5768 gen_helper_neon_qdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
5770 } else if (op
== 13) {
5772 gen_helper_neon_qrdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
5774 gen_helper_neon_qrdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
5776 } else if (op
& 1) {
5777 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5778 gen_helper_vfp_muls(tmp
, tmp
, tmp2
, fpstatus
);
5779 tcg_temp_free_ptr(fpstatus
);
5782 case 0: gen_helper_neon_mul_u8(tmp
, tmp
, tmp2
); break;
5783 case 1: gen_helper_neon_mul_u16(tmp
, tmp
, tmp2
); break;
5784 case 2: tcg_gen_mul_i32(tmp
, tmp
, tmp2
); break;
5788 tcg_temp_free_i32(tmp2
);
5791 tmp2
= neon_load_reg(rd
, pass
);
5794 gen_neon_add(size
, tmp
, tmp2
);
5798 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5799 gen_helper_vfp_adds(tmp
, tmp
, tmp2
, fpstatus
);
5800 tcg_temp_free_ptr(fpstatus
);
5804 gen_neon_rsb(size
, tmp
, tmp2
);
5808 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5809 gen_helper_vfp_subs(tmp
, tmp2
, tmp
, fpstatus
);
5810 tcg_temp_free_ptr(fpstatus
);
5816 tcg_temp_free_i32(tmp2
);
5818 neon_store_reg(rd
, pass
, tmp
);
5821 case 3: /* VQDMLAL scalar */
5822 case 7: /* VQDMLSL scalar */
5823 case 11: /* VQDMULL scalar */
        case 2: /* VMLAL scalar */
5829 case 6: /* VMLSL scalar */
5830 case 10: /* VMULL scalar */
5834 tmp2
= neon_get_scalar(size
, rm
);
5835 /* We need a copy of tmp2 because gen_neon_mull
5836 * deletes it during pass 0. */
5837 tmp4
= tcg_temp_new_i32();
5838 tcg_gen_mov_i32(tmp4
, tmp2
);
5839 tmp3
= neon_load_reg(rn
, 1);
5841 for (pass
= 0; pass
< 2; pass
++) {
5843 tmp
= neon_load_reg(rn
, 0);
5848 gen_neon_mull(cpu_V0
, tmp
, tmp2
, size
, u
);
5850 neon_load_reg64(cpu_V1
, rd
+ pass
);
5854 gen_neon_negl(cpu_V0
, size
);
5857 gen_neon_addl(size
);
5860 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
5862 gen_neon_negl(cpu_V0
, size
);
5864 gen_neon_addl_saturate(cpu_V0
, cpu_V1
, size
);
5870 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
5875 neon_store_reg64(cpu_V0
, rd
+ pass
);
5880 default: /* 14 and 15 are RESERVED */
5884 } else { /* size == 3 */
5887 imm
= (insn
>> 8) & 0xf;
5892 if (q
&& ((rd
| rn
| rm
) & 1)) {
5897 neon_load_reg64(cpu_V0
, rn
);
5899 neon_load_reg64(cpu_V1
, rn
+ 1);
5901 } else if (imm
== 8) {
5902 neon_load_reg64(cpu_V0
, rn
+ 1);
5904 neon_load_reg64(cpu_V1
, rm
);
5907 tmp64
= tcg_temp_new_i64();
5909 neon_load_reg64(cpu_V0
, rn
);
5910 neon_load_reg64(tmp64
, rn
+ 1);
5912 neon_load_reg64(cpu_V0
, rn
+ 1);
5913 neon_load_reg64(tmp64
, rm
);
5915 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, (imm
& 7) * 8);
5916 tcg_gen_shli_i64(cpu_V1
, tmp64
, 64 - ((imm
& 7) * 8));
5917 tcg_gen_or_i64(cpu_V0
, cpu_V0
, cpu_V1
);
5919 neon_load_reg64(cpu_V1
, rm
);
5921 neon_load_reg64(cpu_V1
, rm
+ 1);
5924 tcg_gen_shli_i64(cpu_V1
, cpu_V1
, 64 - (imm
* 8));
5925 tcg_gen_shri_i64(tmp64
, tmp64
, imm
* 8);
5926 tcg_gen_or_i64(cpu_V1
, cpu_V1
, tmp64
);
5927 tcg_temp_free_i64(tmp64
);
5930 neon_load_reg64(cpu_V0
, rn
);
5931 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, imm
* 8);
5932 neon_load_reg64(cpu_V1
, rm
);
5933 tcg_gen_shli_i64(cpu_V1
, cpu_V1
, 64 - (imm
* 8));
5934 tcg_gen_or_i64(cpu_V0
, cpu_V0
, cpu_V1
);
5936 neon_store_reg64(cpu_V0
, rd
);
5938 neon_store_reg64(cpu_V1
, rd
+ 1);
5940 } else if ((insn
& (1 << 11)) == 0) {
5941 /* Two register misc. */
5942 op
= ((insn
>> 12) & 0x30) | ((insn
>> 7) & 0xf);
5943 size
= (insn
>> 18) & 3;
5944 /* UNDEF for unknown op values and bad op-size combinations */
5945 if ((neon_2rm_sizes
[op
] & (1 << size
)) == 0) {
5948 if ((op
!= NEON_2RM_VMOVN
&& op
!= NEON_2RM_VQMOVN
) &&
5949 q
&& ((rm
| rd
) & 1)) {
5953 case NEON_2RM_VREV64
:
5954 for (pass
= 0; pass
< (q
? 2 : 1); pass
++) {
5955 tmp
= neon_load_reg(rm
, pass
* 2);
5956 tmp2
= neon_load_reg(rm
, pass
* 2 + 1);
5958 case 0: tcg_gen_bswap32_i32(tmp
, tmp
); break;
5959 case 1: gen_swap_half(tmp
); break;
5960 case 2: /* no-op */ break;
5963 neon_store_reg(rd
, pass
* 2 + 1, tmp
);
5965 neon_store_reg(rd
, pass
* 2, tmp2
);
5968 case 0: tcg_gen_bswap32_i32(tmp2
, tmp2
); break;
5969 case 1: gen_swap_half(tmp2
); break;
5972 neon_store_reg(rd
, pass
* 2, tmp2
);
5976 case NEON_2RM_VPADDL
: case NEON_2RM_VPADDL_U
:
5977 case NEON_2RM_VPADAL
: case NEON_2RM_VPADAL_U
:
5978 for (pass
= 0; pass
< q
+ 1; pass
++) {
5979 tmp
= neon_load_reg(rm
, pass
* 2);
5980 gen_neon_widen(cpu_V0
, tmp
, size
, op
& 1);
5981 tmp
= neon_load_reg(rm
, pass
* 2 + 1);
5982 gen_neon_widen(cpu_V1
, tmp
, size
, op
& 1);
5984 case 0: gen_helper_neon_paddl_u16(CPU_V001
); break;
5985 case 1: gen_helper_neon_paddl_u32(CPU_V001
); break;
5986 case 2: tcg_gen_add_i64(CPU_V001
); break;
5989 if (op
>= NEON_2RM_VPADAL
) {
5991 neon_load_reg64(cpu_V1
, rd
+ pass
);
5992 gen_neon_addl(size
);
5994 neon_store_reg64(cpu_V0
, rd
+ pass
);
6000 for (n
= 0; n
< (q
? 4 : 2); n
+= 2) {
6001 tmp
= neon_load_reg(rm
, n
);
6002 tmp2
= neon_load_reg(rd
, n
+ 1);
6003 neon_store_reg(rm
, n
, tmp2
);
6004 neon_store_reg(rd
, n
+ 1, tmp
);
6011 if (gen_neon_unzip(rd
, rm
, size
, q
)) {
6016 if (gen_neon_zip(rd
, rm
, size
, q
)) {
6020 case NEON_2RM_VMOVN
: case NEON_2RM_VQMOVN
:
6021 /* also VQMOVUN; op field and mnemonics don't line up */
6026 for (pass
= 0; pass
< 2; pass
++) {
6027 neon_load_reg64(cpu_V0
, rm
+ pass
);
6028 tmp
= tcg_temp_new_i32();
6029 gen_neon_narrow_op(op
== NEON_2RM_VMOVN
, q
, size
,
6034 neon_store_reg(rd
, 0, tmp2
);
6035 neon_store_reg(rd
, 1, tmp
);
6039 case NEON_2RM_VSHLL
:
6040 if (q
|| (rd
& 1)) {
6043 tmp
= neon_load_reg(rm
, 0);
6044 tmp2
= neon_load_reg(rm
, 1);
6045 for (pass
= 0; pass
< 2; pass
++) {
6048 gen_neon_widen(cpu_V0
, tmp
, size
, 1);
6049 tcg_gen_shli_i64(cpu_V0
, cpu_V0
, 8 << size
);
6050 neon_store_reg64(cpu_V0
, rd
+ pass
);
6053 case NEON_2RM_VCVT_F16_F32
:
6054 if (!arm_feature(env
, ARM_FEATURE_VFP_FP16
) ||
6058 tmp
= tcg_temp_new_i32();
6059 tmp2
= tcg_temp_new_i32();
6060 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 0));
6061 gen_helper_neon_fcvt_f32_to_f16(tmp
, cpu_F0s
, cpu_env
);
6062 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 1));
6063 gen_helper_neon_fcvt_f32_to_f16(tmp2
, cpu_F0s
, cpu_env
);
6064 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
6065 tcg_gen_or_i32(tmp2
, tmp2
, tmp
);
6066 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 2));
6067 gen_helper_neon_fcvt_f32_to_f16(tmp
, cpu_F0s
, cpu_env
);
6068 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 3));
6069 neon_store_reg(rd
, 0, tmp2
);
6070 tmp2
= tcg_temp_new_i32();
6071 gen_helper_neon_fcvt_f32_to_f16(tmp2
, cpu_F0s
, cpu_env
);
6072 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
6073 tcg_gen_or_i32(tmp2
, tmp2
, tmp
);
6074 neon_store_reg(rd
, 1, tmp2
);
6075 tcg_temp_free_i32(tmp
);
6077 case NEON_2RM_VCVT_F32_F16
:
6078 if (!arm_feature(env
, ARM_FEATURE_VFP_FP16
) ||
6082 tmp3
= tcg_temp_new_i32();
6083 tmp
= neon_load_reg(rm
, 0);
6084 tmp2
= neon_load_reg(rm
, 1);
6085 tcg_gen_ext16u_i32(tmp3
, tmp
);
6086 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s
, tmp3
, cpu_env
);
6087 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 0));
6088 tcg_gen_shri_i32(tmp3
, tmp
, 16);
6089 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s
, tmp3
, cpu_env
);
6090 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 1));
6091 tcg_temp_free_i32(tmp
);
6092 tcg_gen_ext16u_i32(tmp3
, tmp2
);
6093 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s
, tmp3
, cpu_env
);
6094 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 2));
6095 tcg_gen_shri_i32(tmp3
, tmp2
, 16);
6096 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s
, tmp3
, cpu_env
);
6097 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 3));
6098 tcg_temp_free_i32(tmp2
);
6099 tcg_temp_free_i32(tmp3
);
6103 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
6104 if (neon_2rm_is_float_op(op
)) {
6105 tcg_gen_ld_f32(cpu_F0s
, cpu_env
,
6106 neon_reg_offset(rm
, pass
));
6109 tmp
= neon_load_reg(rm
, pass
);
6112 case NEON_2RM_VREV32
:
6114 case 0: tcg_gen_bswap32_i32(tmp
, tmp
); break;
6115 case 1: gen_swap_half(tmp
); break;
6119 case NEON_2RM_VREV16
:
6124 case 0: gen_helper_neon_cls_s8(tmp
, tmp
); break;
6125 case 1: gen_helper_neon_cls_s16(tmp
, tmp
); break;
6126 case 2: gen_helper_neon_cls_s32(tmp
, tmp
); break;
6132 case 0: gen_helper_neon_clz_u8(tmp
, tmp
); break;
6133 case 1: gen_helper_neon_clz_u16(tmp
, tmp
); break;
6134 case 2: gen_helper_clz(tmp
, tmp
); break;
6139 gen_helper_neon_cnt_u8(tmp
, tmp
);
6142 tcg_gen_not_i32(tmp
, tmp
);
6144 case NEON_2RM_VQABS
:
6147 gen_helper_neon_qabs_s8(tmp
, cpu_env
, tmp
);
6150 gen_helper_neon_qabs_s16(tmp
, cpu_env
, tmp
);
6153 gen_helper_neon_qabs_s32(tmp
, cpu_env
, tmp
);
6158 case NEON_2RM_VQNEG
:
6161 gen_helper_neon_qneg_s8(tmp
, cpu_env
, tmp
);
6164 gen_helper_neon_qneg_s16(tmp
, cpu_env
, tmp
);
6167 gen_helper_neon_qneg_s32(tmp
, cpu_env
, tmp
);
6172 case NEON_2RM_VCGT0
: case NEON_2RM_VCLE0
:
6173 tmp2
= tcg_const_i32(0);
6175 case 0: gen_helper_neon_cgt_s8(tmp
, tmp
, tmp2
); break;
6176 case 1: gen_helper_neon_cgt_s16(tmp
, tmp
, tmp2
); break;
6177 case 2: gen_helper_neon_cgt_s32(tmp
, tmp
, tmp2
); break;
6180 tcg_temp_free(tmp2
);
6181 if (op
== NEON_2RM_VCLE0
) {
6182 tcg_gen_not_i32(tmp
, tmp
);
6185 case NEON_2RM_VCGE0
: case NEON_2RM_VCLT0
:
6186 tmp2
= tcg_const_i32(0);
6188 case 0: gen_helper_neon_cge_s8(tmp
, tmp
, tmp2
); break;
6189 case 1: gen_helper_neon_cge_s16(tmp
, tmp
, tmp2
); break;
6190 case 2: gen_helper_neon_cge_s32(tmp
, tmp
, tmp2
); break;
6193 tcg_temp_free(tmp2
);
6194 if (op
== NEON_2RM_VCLT0
) {
6195 tcg_gen_not_i32(tmp
, tmp
);
6198 case NEON_2RM_VCEQ0
:
6199 tmp2
= tcg_const_i32(0);
6201 case 0: gen_helper_neon_ceq_u8(tmp
, tmp
, tmp2
); break;
6202 case 1: gen_helper_neon_ceq_u16(tmp
, tmp
, tmp2
); break;
6203 case 2: gen_helper_neon_ceq_u32(tmp
, tmp
, tmp2
); break;
6206 tcg_temp_free(tmp2
);
6210 case 0: gen_helper_neon_abs_s8(tmp
, tmp
); break;
6211 case 1: gen_helper_neon_abs_s16(tmp
, tmp
); break;
6212 case 2: tcg_gen_abs_i32(tmp
, tmp
); break;
6217 tmp2
= tcg_const_i32(0);
6218 gen_neon_rsb(size
, tmp
, tmp2
);
6219 tcg_temp_free(tmp2
);
6221 case NEON_2RM_VCGT0_F
:
6223 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6224 tmp2
= tcg_const_i32(0);
6225 gen_helper_neon_cgt_f32(tmp
, tmp
, tmp2
, fpstatus
);
6226 tcg_temp_free(tmp2
);
6227 tcg_temp_free_ptr(fpstatus
);
6230 case NEON_2RM_VCGE0_F
:
6232 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6233 tmp2
= tcg_const_i32(0);
6234 gen_helper_neon_cge_f32(tmp
, tmp
, tmp2
, fpstatus
);
6235 tcg_temp_free(tmp2
);
6236 tcg_temp_free_ptr(fpstatus
);
6239 case NEON_2RM_VCEQ0_F
:
6241 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6242 tmp2
= tcg_const_i32(0);
6243 gen_helper_neon_ceq_f32(tmp
, tmp
, tmp2
, fpstatus
);
6244 tcg_temp_free(tmp2
);
6245 tcg_temp_free_ptr(fpstatus
);
6248 case NEON_2RM_VCLE0_F
:
6250 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6251 tmp2
= tcg_const_i32(0);
6252 gen_helper_neon_cge_f32(tmp
, tmp2
, tmp
, fpstatus
);
6253 tcg_temp_free(tmp2
);
6254 tcg_temp_free_ptr(fpstatus
);
6257 case NEON_2RM_VCLT0_F
:
6259 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6260 tmp2
= tcg_const_i32(0);
6261 gen_helper_neon_cgt_f32(tmp
, tmp2
, tmp
, fpstatus
);
6262 tcg_temp_free(tmp2
);
6263 tcg_temp_free_ptr(fpstatus
);
6266 case NEON_2RM_VABS_F
:
6269 case NEON_2RM_VNEG_F
:
6273 tmp2
= neon_load_reg(rd
, pass
);
6274 neon_store_reg(rm
, pass
, tmp2
);
6277 tmp2
= neon_load_reg(rd
, pass
);
6279 case 0: gen_neon_trn_u8(tmp
, tmp2
); break;
6280 case 1: gen_neon_trn_u16(tmp
, tmp2
); break;
6283 neon_store_reg(rm
, pass
, tmp2
);
6285 case NEON_2RM_VRECPE
:
6286 gen_helper_recpe_u32(tmp
, tmp
, cpu_env
);
6288 case NEON_2RM_VRSQRTE
:
6289 gen_helper_rsqrte_u32(tmp
, tmp
, cpu_env
);
6291 case NEON_2RM_VRECPE_F
:
6292 gen_helper_recpe_f32(cpu_F0s
, cpu_F0s
, cpu_env
);
6294 case NEON_2RM_VRSQRTE_F
:
6295 gen_helper_rsqrte_f32(cpu_F0s
, cpu_F0s
, cpu_env
);
6297 case NEON_2RM_VCVT_FS
: /* VCVT.F32.S32 */
6300 case NEON_2RM_VCVT_FU
: /* VCVT.F32.U32 */
6303 case NEON_2RM_VCVT_SF
: /* VCVT.S32.F32 */
6304 gen_vfp_tosiz(0, 1);
6306 case NEON_2RM_VCVT_UF
: /* VCVT.U32.F32 */
6307 gen_vfp_touiz(0, 1);
6310 /* Reserved op values were caught by the
6311 * neon_2rm_sizes[] check earlier.
6315 if (neon_2rm_is_float_op(op
)) {
6316 tcg_gen_st_f32(cpu_F0s
, cpu_env
,
6317 neon_reg_offset(rd
, pass
));
6319 neon_store_reg(rd
, pass
, tmp
);
6324 } else if ((insn
& (1 << 10)) == 0) {
6326 int n
= ((insn
>> 8) & 3) + 1;
6327 if ((rn
+ n
) > 32) {
6328 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6329 * helper function running off the end of the register file.
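             * For example, a three-register table list starting at d30 would
             * need d30..d32, which runs past the end of the register file
             * (rn + n == 33 > 32). */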
6334 if (insn
& (1 << 6)) {
6335 tmp
= neon_load_reg(rd
, 0);
6337 tmp
= tcg_temp_new_i32();
6338 tcg_gen_movi_i32(tmp
, 0);
6340 tmp2
= neon_load_reg(rm
, 0);
6341 tmp4
= tcg_const_i32(rn
);
6342 tmp5
= tcg_const_i32(n
);
6343 gen_helper_neon_tbl(tmp2
, tmp2
, tmp
, tmp4
, tmp5
);
6344 tcg_temp_free_i32(tmp
);
6345 if (insn
& (1 << 6)) {
6346 tmp
= neon_load_reg(rd
, 1);
6348 tmp
= tcg_temp_new_i32();
6349 tcg_gen_movi_i32(tmp
, 0);
6351 tmp3
= neon_load_reg(rm
, 1);
6352 gen_helper_neon_tbl(tmp3
, tmp3
, tmp
, tmp4
, tmp5
);
6353 tcg_temp_free_i32(tmp5
);
6354 tcg_temp_free_i32(tmp4
);
6355 neon_store_reg(rd
, 0, tmp2
);
6356 neon_store_reg(rd
, 1, tmp3
);
6357 tcg_temp_free_i32(tmp
);
6358 } else if ((insn
& 0x380) == 0) {
6360 if ((insn
& (7 << 16)) == 0 || (q
&& (rd
& 1))) {
6363 if (insn
& (1 << 19)) {
6364 tmp
= neon_load_reg(rm
, 1);
6366 tmp
= neon_load_reg(rm
, 0);
6368 if (insn
& (1 << 16)) {
6369 gen_neon_dup_u8(tmp
, ((insn
>> 17) & 3) * 8);
6370 } else if (insn
& (1 << 17)) {
6371 if ((insn
>> 18) & 1)
6372 gen_neon_dup_high16(tmp
);
6374 gen_neon_dup_low16(tmp
);
6376 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
6377 tmp2
= tcg_temp_new_i32();
6378 tcg_gen_mov_i32(tmp2
, tmp
);
6379 neon_store_reg(rd
, pass
, tmp2
);
6381 tcg_temp_free_i32(tmp
);
6390 static int disas_cp14_read(CPUARMState
* env
, DisasContext
*s
, uint32_t insn
)
6392 int crn
= (insn
>> 16) & 0xf;
6393 int crm
= insn
& 0xf;
6394 int op1
= (insn
>> 21) & 7;
6395 int op2
= (insn
>> 5) & 7;
6396 int rt
= (insn
>> 12) & 0xf;
6399 /* Minimal set of debug registers, since we don't support debug */
6400 if (op1
== 0 && crn
== 0 && op2
== 0) {
6403 /* DBGDIDR: just RAZ. In particular this means the
6404 * "debug architecture version" bits will read as
6405 * a reserved value, which should cause Linux to
6406 * not try to use the debug hardware.
6408 tmp
= tcg_const_i32(0);
6409 store_reg(s
, rt
, tmp
);
6413 /* DBGDRAR and DBGDSAR: v7 only. Always RAZ since we
6414 * don't implement memory mapped debug components
6416 if (ENABLE_ARCH_7
) {
6417 tmp
= tcg_const_i32(0);
6418 store_reg(s
, rt
, tmp
);
6427 if (arm_feature(env
, ARM_FEATURE_THUMB2EE
)) {
6428 if (op1
== 6 && crn
== 0 && crm
== 0 && op2
== 0) {
6432 tmp
= load_cpu_field(teecr
);
6433 store_reg(s
, rt
, tmp
);
6436 if (op1
== 6 && crn
== 1 && crm
== 0 && op2
== 0) {
6438 if (IS_USER(s
) && (env
->teecr
& 1))
6440 tmp
= load_cpu_field(teehbr
);
6441 store_reg(s
, rt
, tmp
);
6448 static int disas_cp14_write(CPUARMState
* env
, DisasContext
*s
, uint32_t insn
)
6450 int crn
= (insn
>> 16) & 0xf;
6451 int crm
= insn
& 0xf;
6452 int op1
= (insn
>> 21) & 7;
6453 int op2
= (insn
>> 5) & 7;
6454 int rt
= (insn
>> 12) & 0xf;
6457 if (arm_feature(env
, ARM_FEATURE_THUMB2EE
)) {
6458 if (op1
== 6 && crn
== 0 && crm
== 0 && op2
== 0) {
6462 tmp
= load_reg(s
, rt
);
6463 gen_helper_set_teecr(cpu_env
, tmp
);
6464 tcg_temp_free_i32(tmp
);
6467 if (op1
== 6 && crn
== 1 && crm
== 0 && op2
== 0) {
6469 if (IS_USER(s
) && (env
->teecr
& 1))
6471 tmp
= load_reg(s
, rt
);
6472 store_cpu_field(tmp
, teehbr
);
6479 static int disas_coproc_insn(CPUARMState
* env
, DisasContext
*s
, uint32_t insn
)
6483 cpnum
= (insn
>> 8) & 0xf;
6484 if (arm_feature(env
, ARM_FEATURE_XSCALE
)
6485 && ((env
->cp15
.c15_cpar
^ 0x3fff) & (1 << cpnum
)))
6491 if (arm_feature(env
, ARM_FEATURE_IWMMXT
)) {
6492 return disas_iwmmxt_insn(env
, s
, insn
);
6493 } else if (arm_feature(env
, ARM_FEATURE_XSCALE
)) {
6494 return disas_dsp_insn(env
, s
, insn
);
6499 return disas_vfp_insn (env
, s
, insn
);
6501 /* Coprocessors 7-15 are architecturally reserved by ARM.
6502 Unfortunately Intel decided to ignore this. */
6503 if (arm_feature(env
, ARM_FEATURE_XSCALE
))
6505 if (insn
& (1 << 20))
6506 return disas_cp14_read(env
, s
, insn
);
6508 return disas_cp14_write(env
, s
, insn
);
6510 return disas_cp15_insn (env
, s
, insn
);
6513 /* Unknown coprocessor. See if the board has hooked it. */
6514 return disas_cp_insn (env
, s
, insn
);
/* Store a 64-bit value to a register pair.  Clobbers val.  */
static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
{
    TCGv tmp;
    tmp = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(tmp, val);
    store_reg(s, rlow, tmp);
    tmp = tcg_temp_new_i32();
    tcg_gen_shri_i64(val, val, 32);
    tcg_gen_trunc_i64_i32(tmp, val);
    store_reg(s, rhigh, tmp);
}
/* load a 32-bit value from a register and perform a 64-bit accumulate.  */
static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
{
    TCGv_i64 tmp;
    TCGv tmp2;

    /* Load value and extend to 64 bits.  */
    tmp = tcg_temp_new_i64();
    tmp2 = load_reg(s, rlow);
    tcg_gen_extu_i32_i64(tmp, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_add_i64(val, val, tmp);
    tcg_temp_free_i64(tmp);
}
/* load and add a 64-bit value from a register pair.  */
static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
{
    TCGv_i64 tmp;
    TCGv tmpl;
    TCGv tmph;

    /* Load 64-bit value rd:rn.  */
    tmpl = load_reg(s, rlow);
    tmph = load_reg(s, rhigh);
    tmp = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
    tcg_temp_free_i32(tmpl);
    tcg_temp_free_i32(tmph);
    tcg_gen_add_i64(val, val, tmp);
    tcg_temp_free_i64(tmp);
}
/* Set N and Z flags from a 64-bit value.  */
static void gen_logicq_cc(TCGv_i64 val)
{
    TCGv tmp = tcg_temp_new_i32();
    gen_helper_logicq_cc(tmp, val);
    gen_logic_CC(tmp);
    tcg_temp_free_i32(tmp);
}
/* Load/Store exclusive instructions are implemented by remembering
   the value/address loaded, and seeing if these are the same
   when the store is performed.  This should be sufficient to implement
   the architecturally mandated semantics, and avoids having to monitor
   regular stores.

   In system emulation mode only one CPU will be running at once, so
   this sequence is effectively atomic.  In user emulation mode we
   throw an exception and handle the atomic operation elsewhere.  */
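/* Rough sketch of the scheme (illustrative, not the exact generated code):
 *
 *   LDREX:  cpu_exclusive_addr = addr; cpu_exclusive_val = [addr]; Rt = [addr];
 *   STREX:  if (addr == cpu_exclusive_addr && [addr] == cpu_exclusive_val) {
 *               [addr] = Rt; Rd = 0;
 *           } else {
 *               Rd = 1;
 *           }
 *   CLREX:  cpu_exclusive_addr = -1;
 */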
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv addr, int size)
{
    TCGv tmp;

    switch (size) {
    case 0:
        tmp = gen_ld8u(addr, IS_USER(s));
        break;
    case 1:
        tmp = gen_ld16u(addr, IS_USER(s));
        break;
    case 2:
    case 3:
        tmp = gen_ld32(addr, IS_USER(s));
        break;
    default:
        abort();
    }
    tcg_gen_mov_i32(cpu_exclusive_val, tmp);
    store_reg(s, rt, tmp);
    if (size == 3) {
        TCGv tmp2 = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp2, addr, 4);
        tmp = gen_ld32(tmp2, IS_USER(s));
        tcg_temp_free_i32(tmp2);
        tcg_gen_mov_i32(cpu_exclusive_high, tmp);
        store_reg(s, rt2, tmp);
    }
    tcg_gen_mov_i32(cpu_exclusive_addr, addr);
}
static void gen_clrex(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_exclusive_addr, -1);
}
6620 #ifdef CONFIG_USER_ONLY
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv addr, int size)
{
    tcg_gen_mov_i32(cpu_exclusive_test, addr);
    tcg_gen_movi_i32(cpu_exclusive_info,
                     size | (rd << 4) | (rt << 8) | (rt2 << 12));
    gen_exception_insn(s, 4, EXCP_STREX);
}
6630 static void gen_store_exclusive(DisasContext
*s
, int rd
, int rt
, int rt2
,
6631 TCGv addr
, int size
)
    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
         [addr] = {Rt};
         {Rd} = 0;
       } else {
         {Rd} = 1;
       } */
6643 fail_label
= gen_new_label();
6644 done_label
= gen_new_label();
6645 tcg_gen_brcond_i32(TCG_COND_NE
, addr
, cpu_exclusive_addr
, fail_label
);
6648 tmp
= gen_ld8u(addr
, IS_USER(s
));
6651 tmp
= gen_ld16u(addr
, IS_USER(s
));
6655 tmp
= gen_ld32(addr
, IS_USER(s
));
6660 tcg_gen_brcond_i32(TCG_COND_NE
, tmp
, cpu_exclusive_val
, fail_label
);
6661 tcg_temp_free_i32(tmp
);
6663 TCGv tmp2
= tcg_temp_new_i32();
6664 tcg_gen_addi_i32(tmp2
, addr
, 4);
6665 tmp
= gen_ld32(tmp2
, IS_USER(s
));
6666 tcg_temp_free_i32(tmp2
);
6667 tcg_gen_brcond_i32(TCG_COND_NE
, tmp
, cpu_exclusive_high
, fail_label
);
6668 tcg_temp_free_i32(tmp
);
6670 tmp
= load_reg(s
, rt
);
6673 gen_st8(tmp
, addr
, IS_USER(s
));
6676 gen_st16(tmp
, addr
, IS_USER(s
));
6680 gen_st32(tmp
, addr
, IS_USER(s
));
6686 tcg_gen_addi_i32(addr
, addr
, 4);
6687 tmp
= load_reg(s
, rt2
);
6688 gen_st32(tmp
, addr
, IS_USER(s
));
6690 tcg_gen_movi_i32(cpu_R
[rd
], 0);
6691 tcg_gen_br(done_label
);
6692 gen_set_label(fail_label
);
6693 tcg_gen_movi_i32(cpu_R
[rd
], 1);
6694 gen_set_label(done_label
);
6695 tcg_gen_movi_i32(cpu_exclusive_addr
, -1);
6699 static void disas_arm_insn(CPUARMState
* env
, DisasContext
*s
)
6701 unsigned int cond
, insn
, val
, op1
, i
, shift
, rm
, rs
, rn
, rd
, sh
;
6708 insn
= ldl_code(s
->pc
);
6711 /* M variants do not implement ARM mode. */
6716 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
6717 * choose to UNDEF. In ARMv5 and above the space is used
6718 * for miscellaneous unconditional instructions.
6722 /* Unconditional instructions. */
6723 if (((insn
>> 25) & 7) == 1) {
6724 /* NEON Data processing. */
6725 if (!arm_feature(env
, ARM_FEATURE_NEON
))
6728 if (disas_neon_data_insn(env
, s
, insn
))
6732 if ((insn
& 0x0f100000) == 0x04000000) {
6733 /* NEON load/store. */
6734 if (!arm_feature(env
, ARM_FEATURE_NEON
))
6737 if (disas_neon_ls_insn(env
, s
, insn
))
6741 if (((insn
& 0x0f30f000) == 0x0510f000) ||
6742 ((insn
& 0x0f30f010) == 0x0710f000)) {
6743 if ((insn
& (1 << 22)) == 0) {
6745 if (!arm_feature(env
, ARM_FEATURE_V7MP
)) {
6749 /* Otherwise PLD; v5TE+ */
6753 if (((insn
& 0x0f70f000) == 0x0450f000) ||
6754 ((insn
& 0x0f70f010) == 0x0650f000)) {
6756 return; /* PLI; V7 */
6758 if (((insn
& 0x0f700000) == 0x04100000) ||
6759 ((insn
& 0x0f700010) == 0x06100000)) {
6760 if (!arm_feature(env
, ARM_FEATURE_V7MP
)) {
6763 return; /* v7MP: Unallocated memory hint: must NOP */
6766 if ((insn
& 0x0ffffdff) == 0x01010000) {
6769 if (insn
& (1 << 9)) {
6770 /* BE8 mode not implemented. */
6774 } else if ((insn
& 0x0fffff00) == 0x057ff000) {
6775 switch ((insn
>> 4) & 0xf) {
6784 /* We don't emulate caches so these are a no-op. */
6789 } else if ((insn
& 0x0e5fffe0) == 0x084d0500) {
6795 op1
= (insn
& 0x1f);
6796 addr
= tcg_temp_new_i32();
6797 tmp
= tcg_const_i32(op1
);
6798 gen_helper_get_r13_banked(addr
, cpu_env
, tmp
);
6799 tcg_temp_free_i32(tmp
);
6800 i
= (insn
>> 23) & 3;
6802 case 0: offset
= -4; break; /* DA */
6803 case 1: offset
= 0; break; /* IA */
6804 case 2: offset
= -8; break; /* DB */
6805 case 3: offset
= 4; break; /* IB */
6809 tcg_gen_addi_i32(addr
, addr
, offset
);
6810 tmp
= load_reg(s
, 14);
6811 gen_st32(tmp
, addr
, 0);
6812 tmp
= load_cpu_field(spsr
);
6813 tcg_gen_addi_i32(addr
, addr
, 4);
6814 gen_st32(tmp
, addr
, 0);
6815 if (insn
& (1 << 21)) {
6816 /* Base writeback. */
6818 case 0: offset
= -8; break;
6819 case 1: offset
= 4; break;
6820 case 2: offset
= -4; break;
6821 case 3: offset
= 0; break;
6825 tcg_gen_addi_i32(addr
, addr
, offset
);
6826 tmp
= tcg_const_i32(op1
);
6827 gen_helper_set_r13_banked(cpu_env
, tmp
, addr
);
6828 tcg_temp_free_i32(tmp
);
6829 tcg_temp_free_i32(addr
);
6831 tcg_temp_free_i32(addr
);
6834 } else if ((insn
& 0x0e50ffe0) == 0x08100a00) {
6840 rn
= (insn
>> 16) & 0xf;
6841 addr
= load_reg(s
, rn
);
6842 i
= (insn
>> 23) & 3;
6844 case 0: offset
= -4; break; /* DA */
6845 case 1: offset
= 0; break; /* IA */
6846 case 2: offset
= -8; break; /* DB */
6847 case 3: offset
= 4; break; /* IB */
6851 tcg_gen_addi_i32(addr
, addr
, offset
);
6852 /* Load PC into tmp and CPSR into tmp2. */
6853 tmp
= gen_ld32(addr
, 0);
6854 tcg_gen_addi_i32(addr
, addr
, 4);
6855 tmp2
= gen_ld32(addr
, 0);
6856 if (insn
& (1 << 21)) {
6857 /* Base writeback. */
6859 case 0: offset
= -8; break;
6860 case 1: offset
= 4; break;
6861 case 2: offset
= -4; break;
6862 case 3: offset
= 0; break;
6866 tcg_gen_addi_i32(addr
, addr
, offset
);
6867 store_reg(s
, rn
, addr
);
6869 tcg_temp_free_i32(addr
);
6871 gen_rfe(s
, tmp
, tmp2
);
6873 } else if ((insn
& 0x0e000000) == 0x0a000000) {
6874 /* branch link and change to thumb (blx <offset>) */
6877 val
= (uint32_t)s
->pc
;
6878 tmp
= tcg_temp_new_i32();
6879 tcg_gen_movi_i32(tmp
, val
);
6880 store_reg(s
, 14, tmp
);
            /* Sign-extend the 24-bit offset */
            offset = (((int32_t)insn) << 8) >> 8;
            /* offset * 4 + bit24 * 2 + (thumb bit) */
            val += (offset << 2) | ((insn >> 23) & 2) | 1;
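            /* Illustrative note: the low bit set here selects Thumb state at
             * the branch target, and bit 24 of the instruction (the H bit)
             * supplies the extra halfword of offset that BLX <imm> encodes. */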
6885 /* pipeline offset */
6887 /* protected by ARCH(5); above, near the start of uncond block */
6890 } else if ((insn
& 0x0e000f00) == 0x0c000100) {
6891 if (arm_feature(env
, ARM_FEATURE_IWMMXT
)) {
6892 /* iWMMXt register transfer. */
6893 if (env
->cp15
.c15_cpar
& (1 << 1))
6894 if (!disas_iwmmxt_insn(env
, s
, insn
))
6897 } else if ((insn
& 0x0fe00000) == 0x0c400000) {
6898 /* Coprocessor double register transfer. */
6900 } else if ((insn
& 0x0f000010) == 0x0e000010) {
6901 /* Additional coprocessor register transfer. */
6902 } else if ((insn
& 0x0ff10020) == 0x01000000) {
6905 /* cps (privileged) */
6909 if (insn
& (1 << 19)) {
6910 if (insn
& (1 << 8))
6912 if (insn
& (1 << 7))
6914 if (insn
& (1 << 6))
6916 if (insn
& (1 << 18))
6919 if (insn
& (1 << 17)) {
6921 val
|= (insn
& 0x1f);
6924 gen_set_psr_im(s
, mask
, 0, val
);
6931 /* if not always execute, we generate a conditional jump to
6933 s
->condlabel
= gen_new_label();
6934 gen_test_cc(cond
^ 1, s
->condlabel
);
6937 if ((insn
& 0x0f900000) == 0x03000000) {
6938 if ((insn
& (1 << 21)) == 0) {
6940 rd
= (insn
>> 12) & 0xf;
6941 val
= ((insn
>> 4) & 0xf000) | (insn
& 0xfff);
6942 if ((insn
& (1 << 22)) == 0) {
6944 tmp
= tcg_temp_new_i32();
6945 tcg_gen_movi_i32(tmp
, val
);
6948 tmp
= load_reg(s
, rd
);
6949 tcg_gen_ext16u_i32(tmp
, tmp
);
6950 tcg_gen_ori_i32(tmp
, tmp
, val
<< 16);
6952 store_reg(s
, rd
, tmp
);
6954 if (((insn
>> 12) & 0xf) != 0xf)
6956 if (((insn
>> 16) & 0xf) == 0) {
6957 gen_nop_hint(s
, insn
& 0xff);
6959 /* CPSR = immediate */
6961 shift
= ((insn
>> 8) & 0xf) * 2;
6963 val
= (val
>> shift
) | (val
<< (32 - shift
));
6964 i
= ((insn
& (1 << 22)) != 0);
6965 if (gen_set_psr_im(s
, msr_mask(env
, s
, (insn
>> 16) & 0xf, i
), i
, val
))
6969 } else if ((insn
& 0x0f900000) == 0x01000000
6970 && (insn
& 0x00000090) != 0x00000090) {
6971 /* miscellaneous instructions */
6972 op1
= (insn
>> 21) & 3;
6973 sh
= (insn
>> 4) & 0xf;
6976 case 0x0: /* move program status register */
6979 tmp
= load_reg(s
, rm
);
6980 i
= ((op1
& 2) != 0);
6981 if (gen_set_psr(s
, msr_mask(env
, s
, (insn
>> 16) & 0xf, i
), i
, tmp
))
6985 rd
= (insn
>> 12) & 0xf;
6989 tmp
= load_cpu_field(spsr
);
6991 tmp
= tcg_temp_new_i32();
6992 gen_helper_cpsr_read(tmp
);
6994 store_reg(s
, rd
, tmp
);
6999 /* branch/exchange thumb (bx). */
7001 tmp
= load_reg(s
, rm
);
7003 } else if (op1
== 3) {
7006 rd
= (insn
>> 12) & 0xf;
7007 tmp
= load_reg(s
, rm
);
7008 gen_helper_clz(tmp
, tmp
);
7009 store_reg(s
, rd
, tmp
);
7017 /* Trivial implementation equivalent to bx. */
7018 tmp
= load_reg(s
, rm
);
7029 /* branch link/exchange thumb (blx) */
7030 tmp
= load_reg(s
, rm
);
7031 tmp2
= tcg_temp_new_i32();
7032 tcg_gen_movi_i32(tmp2
, s
->pc
);
7033 store_reg(s
, 14, tmp2
);
7036 case 0x5: /* saturating add/subtract */
7038 rd
= (insn
>> 12) & 0xf;
7039 rn
= (insn
>> 16) & 0xf;
7040 tmp
= load_reg(s
, rm
);
7041 tmp2
= load_reg(s
, rn
);
7043 gen_helper_double_saturate(tmp2
, tmp2
);
7045 gen_helper_sub_saturate(tmp
, tmp
, tmp2
);
7047 gen_helper_add_saturate(tmp
, tmp
, tmp2
);
7048 tcg_temp_free_i32(tmp2
);
7049 store_reg(s
, rd
, tmp
);
7052 /* SMC instruction (op1 == 3)
7053 and undefined instructions (op1 == 0 || op1 == 2)
7060 gen_exception_insn(s
, 4, EXCP_BKPT
);
7062 case 0x8: /* signed multiply */
7067 rs
= (insn
>> 8) & 0xf;
7068 rn
= (insn
>> 12) & 0xf;
7069 rd
= (insn
>> 16) & 0xf;
7071 /* (32 * 16) >> 16 */
7072 tmp
= load_reg(s
, rm
);
7073 tmp2
= load_reg(s
, rs
);
7075 tcg_gen_sari_i32(tmp2
, tmp2
, 16);
7078 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
7079 tcg_gen_shri_i64(tmp64
, tmp64
, 16);
7080 tmp
= tcg_temp_new_i32();
7081 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
7082 tcg_temp_free_i64(tmp64
);
7083 if ((sh
& 2) == 0) {
7084 tmp2
= load_reg(s
, rn
);
7085 gen_helper_add_setq(tmp
, tmp
, tmp2
);
7086 tcg_temp_free_i32(tmp2
);
7088 store_reg(s
, rd
, tmp
);
7091 tmp
= load_reg(s
, rm
);
7092 tmp2
= load_reg(s
, rs
);
7093 gen_mulxy(tmp
, tmp2
, sh
& 2, sh
& 4);
7094 tcg_temp_free_i32(tmp2
);
7096 tmp64
= tcg_temp_new_i64();
7097 tcg_gen_ext_i32_i64(tmp64
, tmp
);
7098 tcg_temp_free_i32(tmp
);
7099 gen_addq(s
, tmp64
, rn
, rd
);
7100 gen_storeq_reg(s
, rn
, rd
, tmp64
);
7101 tcg_temp_free_i64(tmp64
);
7104 tmp2
= load_reg(s
, rn
);
7105 gen_helper_add_setq(tmp
, tmp
, tmp2
);
7106 tcg_temp_free_i32(tmp2
);
7108 store_reg(s
, rd
, tmp
);
7115 } else if (((insn
& 0x0e000000) == 0 &&
7116 (insn
& 0x00000090) != 0x90) ||
7117 ((insn
& 0x0e000000) == (1 << 25))) {
7118 int set_cc
, logic_cc
, shiftop
;
7120 op1
= (insn
>> 21) & 0xf;
7121 set_cc
= (insn
>> 20) & 1;
7122 logic_cc
= table_logic_cc
[op1
] & set_cc
;
7124 /* data processing instruction */
7125 if (insn
& (1 << 25)) {
            /* immediate operand */
            val = insn & 0xff;
            shift = ((insn >> 8) & 0xf) * 2;
            if (shift) {
                val = (val >> shift) | (val << (32 - shift));
            }
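            /* e.g. (illustrative) imm8 = 0xff with a rotate field of 4 gives
             * shift = 8, so val becomes 0xff ror 8 = 0xff000000. */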
7132 tmp2
= tcg_temp_new_i32();
7133 tcg_gen_movi_i32(tmp2
, val
);
7134 if (logic_cc
&& shift
) {
7135 gen_set_CF_bit31(tmp2
);
7140 tmp2
= load_reg(s
, rm
);
7141 shiftop
= (insn
>> 5) & 3;
7142 if (!(insn
& (1 << 4))) {
7143 shift
= (insn
>> 7) & 0x1f;
7144 gen_arm_shift_im(tmp2
, shiftop
, shift
, logic_cc
);
7146 rs
= (insn
>> 8) & 0xf;
7147 tmp
= load_reg(s
, rs
);
7148 gen_arm_shift_reg(tmp2
, shiftop
, tmp
, logic_cc
);
7151 if (op1
!= 0x0f && op1
!= 0x0d) {
7152 rn
= (insn
>> 16) & 0xf;
7153 tmp
= load_reg(s
, rn
);
7157 rd
= (insn
>> 12) & 0xf;
7160 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
7164 store_reg_bx(env
, s
, rd
, tmp
);
7167 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
7171 store_reg_bx(env
, s
, rd
, tmp
);
7174 if (set_cc
&& rd
== 15) {
7175 /* SUBS r15, ... is used for exception return. */
7179 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
7180 gen_exception_return(s
, tmp
);
7183 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
7185 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
7187 store_reg_bx(env
, s
, rd
, tmp
);
7192 gen_helper_sub_cc(tmp
, tmp2
, tmp
);
7194 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
7196 store_reg_bx(env
, s
, rd
, tmp
);
7200 gen_helper_add_cc(tmp
, tmp
, tmp2
);
7202 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7204 store_reg_bx(env
, s
, rd
, tmp
);
7208 gen_helper_adc_cc(tmp
, tmp
, tmp2
);
7210 gen_add_carry(tmp
, tmp
, tmp2
);
7212 store_reg_bx(env
, s
, rd
, tmp
);
7216 gen_helper_sbc_cc(tmp
, tmp
, tmp2
);
7218 gen_sub_carry(tmp
, tmp
, tmp2
);
7220 store_reg_bx(env
, s
, rd
, tmp
);
7224 gen_helper_sbc_cc(tmp
, tmp2
, tmp
);
7226 gen_sub_carry(tmp
, tmp2
, tmp
);
7228 store_reg_bx(env
, s
, rd
, tmp
);
7232 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
7235 tcg_temp_free_i32(tmp
);
7239 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
7242 tcg_temp_free_i32(tmp
);
7246 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
7248 tcg_temp_free_i32(tmp
);
7252 gen_helper_add_cc(tmp
, tmp
, tmp2
);
7254 tcg_temp_free_i32(tmp
);
7257 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
7261 store_reg_bx(env
, s
, rd
, tmp
);
7264 if (logic_cc
&& rd
== 15) {
7265 /* MOVS r15, ... is used for exception return. */
7269 gen_exception_return(s
, tmp2
);
7274 store_reg_bx(env
, s
, rd
, tmp2
);
7278 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
7282 store_reg_bx(env
, s
, rd
, tmp
);
7286 tcg_gen_not_i32(tmp2
, tmp2
);
7290 store_reg_bx(env
, s
, rd
, tmp2
);
7293 if (op1
!= 0x0f && op1
!= 0x0d) {
7294 tcg_temp_free_i32(tmp2
);
7297 /* other instructions */
7298 op1
= (insn
>> 24) & 0xf;
7302 /* multiplies, extra load/stores */
7303 sh
= (insn
>> 5) & 3;
7306 rd
= (insn
>> 16) & 0xf;
7307 rn
= (insn
>> 12) & 0xf;
7308 rs
= (insn
>> 8) & 0xf;
7310 op1
= (insn
>> 20) & 0xf;
7312 case 0: case 1: case 2: case 3: case 6:
7314 tmp
= load_reg(s
, rs
);
7315 tmp2
= load_reg(s
, rm
);
7316 tcg_gen_mul_i32(tmp
, tmp
, tmp2
);
7317 tcg_temp_free_i32(tmp2
);
7318 if (insn
& (1 << 22)) {
7319 /* Subtract (mls) */
7321 tmp2
= load_reg(s
, rn
);
7322 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
7323 tcg_temp_free_i32(tmp2
);
7324 } else if (insn
& (1 << 21)) {
7326 tmp2
= load_reg(s
, rn
);
7327 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7328 tcg_temp_free_i32(tmp2
);
7330 if (insn
& (1 << 20))
7332 store_reg(s
, rd
, tmp
);
7335 /* 64 bit mul double accumulate (UMAAL) */
7337 tmp
= load_reg(s
, rs
);
7338 tmp2
= load_reg(s
, rm
);
7339 tmp64
= gen_mulu_i64_i32(tmp
, tmp2
);
7340 gen_addq_lo(s
, tmp64
, rn
);
7341 gen_addq_lo(s
, tmp64
, rd
);
7342 gen_storeq_reg(s
, rn
, rd
, tmp64
);
7343 tcg_temp_free_i64(tmp64
);
7345 case 8: case 9: case 10: case 11:
7346 case 12: case 13: case 14: case 15:
7347 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
7348 tmp
= load_reg(s
, rs
);
7349 tmp2
= load_reg(s
, rm
);
7350 if (insn
& (1 << 22)) {
7351 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
7353 tmp64
= gen_mulu_i64_i32(tmp
, tmp2
);
7355 if (insn
& (1 << 21)) { /* mult accumulate */
7356 gen_addq(s
, tmp64
, rn
, rd
);
7358 if (insn
& (1 << 20)) {
7359 gen_logicq_cc(tmp64
);
7361 gen_storeq_reg(s
, rn
, rd
, tmp64
);
7362 tcg_temp_free_i64(tmp64
);
7368 rn
= (insn
>> 16) & 0xf;
7369 rd
= (insn
>> 12) & 0xf;
7370 if (insn
& (1 << 23)) {
7371 /* load/store exclusive */
7372 op1
= (insn
>> 21) & 0x3;
7377 addr
= tcg_temp_local_new_i32();
7378 load_reg_var(s
, addr
, rn
);
7379 if (insn
& (1 << 20)) {
7382 gen_load_exclusive(s
, rd
, 15, addr
, 2);
7384 case 1: /* ldrexd */
7385 gen_load_exclusive(s
, rd
, rd
+ 1, addr
, 3);
7387 case 2: /* ldrexb */
7388 gen_load_exclusive(s
, rd
, 15, addr
, 0);
7390 case 3: /* ldrexh */
7391 gen_load_exclusive(s
, rd
, 15, addr
, 1);
7400 gen_store_exclusive(s
, rd
, rm
, 15, addr
, 2);
7402 case 1: /* strexd */
7403 gen_store_exclusive(s
, rd
, rm
, rm
+ 1, addr
, 3);
7405 case 2: /* strexb */
7406 gen_store_exclusive(s
, rd
, rm
, 15, addr
, 0);
7408 case 3: /* strexh */
7409 gen_store_exclusive(s
, rd
, rm
, 15, addr
, 1);
7415 tcg_temp_free(addr
);
7417 /* SWP instruction */
7420 /* ??? This is not really atomic. However we know
7421 we never have multiple CPUs running in parallel,
7422 so it is good enough. */
7423 addr
= load_reg(s
, rn
);
7424 tmp
= load_reg(s
, rm
);
7425 if (insn
& (1 << 22)) {
7426 tmp2
= gen_ld8u(addr
, IS_USER(s
));
7427 gen_st8(tmp
, addr
, IS_USER(s
));
7429 tmp2
= gen_ld32(addr
, IS_USER(s
));
7430 gen_st32(tmp
, addr
, IS_USER(s
));
7432 tcg_temp_free_i32(addr
);
7433 store_reg(s
, rd
, tmp2
);
7439 /* Misc load/store */
7440 rn
= (insn
>> 16) & 0xf;
7441 rd
= (insn
>> 12) & 0xf;
7442 addr
= load_reg(s
, rn
);
7443 if (insn
& (1 << 24))
7444 gen_add_datah_offset(s
, insn
, 0, addr
);
7446 if (insn
& (1 << 20)) {
7450 tmp
= gen_ld16u(addr
, IS_USER(s
));
7453 tmp
= gen_ld8s(addr
, IS_USER(s
));
7457 tmp
= gen_ld16s(addr
, IS_USER(s
));
7461 } else if (sh
& 2) {
7466 tmp
= load_reg(s
, rd
);
7467 gen_st32(tmp
, addr
, IS_USER(s
));
7468 tcg_gen_addi_i32(addr
, addr
, 4);
7469 tmp
= load_reg(s
, rd
+ 1);
7470 gen_st32(tmp
, addr
, IS_USER(s
));
7474 tmp
= gen_ld32(addr
, IS_USER(s
));
7475 store_reg(s
, rd
, tmp
);
7476 tcg_gen_addi_i32(addr
, addr
, 4);
7477 tmp
= gen_ld32(addr
, IS_USER(s
));
7481 address_offset
= -4;
7484 tmp
= load_reg(s
, rd
);
7485 gen_st16(tmp
, addr
, IS_USER(s
));
7488 /* Perform base writeback before the loaded value to
7489 ensure correct behavior with overlapping index registers.
               ldrd with base writeback is undefined if the
7491 destination and index registers overlap. */
7492 if (!(insn
& (1 << 24))) {
7493 gen_add_datah_offset(s
, insn
, address_offset
, addr
);
7494 store_reg(s
, rn
, addr
);
7495 } else if (insn
& (1 << 21)) {
7497 tcg_gen_addi_i32(addr
, addr
, address_offset
);
7498 store_reg(s
, rn
, addr
);
7500 tcg_temp_free_i32(addr
);
7503 /* Complete the load. */
7504 store_reg(s
, rd
, tmp
);
7513 if (insn
& (1 << 4)) {
7515 /* Armv6 Media instructions. */
7517 rn
= (insn
>> 16) & 0xf;
7518 rd
= (insn
>> 12) & 0xf;
7519 rs
= (insn
>> 8) & 0xf;
7520 switch ((insn
>> 23) & 3) {
7521 case 0: /* Parallel add/subtract. */
7522 op1
= (insn
>> 20) & 7;
7523 tmp
= load_reg(s
, rn
);
7524 tmp2
= load_reg(s
, rm
);
7525 sh
= (insn
>> 5) & 7;
7526 if ((op1
& 3) == 0 || sh
== 5 || sh
== 6)
7528 gen_arm_parallel_addsub(op1
, sh
, tmp
, tmp2
);
7529 tcg_temp_free_i32(tmp2
);
7530 store_reg(s
, rd
, tmp
);
7533 if ((insn
& 0x00700020) == 0) {
7534 /* Halfword pack. */
7535 tmp
= load_reg(s
, rn
);
7536 tmp2
= load_reg(s
, rm
);
7537 shift
= (insn
>> 7) & 0x1f;
7538 if (insn
& (1 << 6)) {
7542 tcg_gen_sari_i32(tmp2
, tmp2
, shift
);
7543 tcg_gen_andi_i32(tmp
, tmp
, 0xffff0000);
7544 tcg_gen_ext16u_i32(tmp2
, tmp2
);
7548 tcg_gen_shli_i32(tmp2
, tmp2
, shift
);
7549 tcg_gen_ext16u_i32(tmp
, tmp
);
7550 tcg_gen_andi_i32(tmp2
, tmp2
, 0xffff0000);
7552 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
7553 tcg_temp_free_i32(tmp2
);
7554 store_reg(s
, rd
, tmp
);
7555 } else if ((insn
& 0x00200020) == 0x00200000) {
7557 tmp
= load_reg(s
, rm
);
7558 shift
= (insn
>> 7) & 0x1f;
7559 if (insn
& (1 << 6)) {
7562 tcg_gen_sari_i32(tmp
, tmp
, shift
);
7564 tcg_gen_shli_i32(tmp
, tmp
, shift
);
7566 sh
= (insn
>> 16) & 0x1f;
7567 tmp2
= tcg_const_i32(sh
);
7568 if (insn
& (1 << 22))
7569 gen_helper_usat(tmp
, tmp
, tmp2
);
7571 gen_helper_ssat(tmp
, tmp
, tmp2
);
7572 tcg_temp_free_i32(tmp2
);
7573 store_reg(s
, rd
, tmp
);
7574 } else if ((insn
& 0x00300fe0) == 0x00200f20) {
7576 tmp
= load_reg(s
, rm
);
7577 sh
= (insn
>> 16) & 0x1f;
7578 tmp2
= tcg_const_i32(sh
);
7579 if (insn
& (1 << 22))
7580 gen_helper_usat16(tmp
, tmp
, tmp2
);
7582 gen_helper_ssat16(tmp
, tmp
, tmp2
);
7583 tcg_temp_free_i32(tmp2
);
7584 store_reg(s
, rd
, tmp
);
7585 } else if ((insn
& 0x00700fe0) == 0x00000fa0) {
7587 tmp
= load_reg(s
, rn
);
7588 tmp2
= load_reg(s
, rm
);
7589 tmp3
= tcg_temp_new_i32();
7590 tcg_gen_ld_i32(tmp3
, cpu_env
, offsetof(CPUARMState
, GE
));
7591 gen_helper_sel_flags(tmp
, tmp3
, tmp
, tmp2
);
7592 tcg_temp_free_i32(tmp3
);
7593 tcg_temp_free_i32(tmp2
);
7594 store_reg(s
, rd
, tmp
);
7595 } else if ((insn
& 0x000003e0) == 0x00000060) {
7596 tmp
= load_reg(s
, rm
);
7597 shift
= (insn
>> 10) & 3;
7598 /* ??? In many cases it's not necessary to do a
7599 rotate, a shift is sufficient. */
7601 tcg_gen_rotri_i32(tmp
, tmp
, shift
* 8);
7602 op1
= (insn
>> 20) & 7;
7604 case 0: gen_sxtb16(tmp
); break;
7605 case 2: gen_sxtb(tmp
); break;
7606 case 3: gen_sxth(tmp
); break;
7607 case 4: gen_uxtb16(tmp
); break;
7608 case 6: gen_uxtb(tmp
); break;
7609 case 7: gen_uxth(tmp
); break;
7610 default: goto illegal_op
;
7613 tmp2
= load_reg(s
, rn
);
7614 if ((op1
& 3) == 0) {
7615 gen_add16(tmp
, tmp2
);
7617 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7618 tcg_temp_free_i32(tmp2
);
7621 store_reg(s
, rd
, tmp
);
7622 } else if ((insn
& 0x003f0f60) == 0x003f0f20) {
7624 tmp
= load_reg(s
, rm
);
7625 if (insn
& (1 << 22)) {
7626 if (insn
& (1 << 7)) {
7630 gen_helper_rbit(tmp
, tmp
);
7633 if (insn
& (1 << 7))
7636 tcg_gen_bswap32_i32(tmp
, tmp
);
7638 store_reg(s
, rd
, tmp
);
7643 case 2: /* Multiplies (Type 3). */
7644 switch ((insn
>> 20) & 0x7) {
7646 if (((insn
>> 6) ^ (insn
>> 7)) & 1) {
7647 /* op2 not 00x or 11x : UNDEF */
7650 /* Signed multiply most significant [accumulate].
7651 (SMMUL, SMMLA, SMMLS) */
7652 tmp
= load_reg(s
, rm
);
7653 tmp2
= load_reg(s
, rs
);
7654 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
7657 tmp
= load_reg(s
, rd
);
7658 if (insn
& (1 << 6)) {
7659 tmp64
= gen_subq_msw(tmp64
, tmp
);
7661 tmp64
= gen_addq_msw(tmp64
, tmp
);
7664 if (insn
& (1 << 5)) {
7665 tcg_gen_addi_i64(tmp64
, tmp64
, 0x80000000u
);
7667 tcg_gen_shri_i64(tmp64
, tmp64
, 32);
7668 tmp
= tcg_temp_new_i32();
7669 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
7670 tcg_temp_free_i64(tmp64
);
7671 store_reg(s
, rn
, tmp
);
7675 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
7676 if (insn
& (1 << 7)) {
7679 tmp
= load_reg(s
, rm
);
7680 tmp2
= load_reg(s
, rs
);
7681 if (insn
& (1 << 5))
7682 gen_swap_half(tmp2
);
7683 gen_smul_dual(tmp
, tmp2
);
7684 if (insn
& (1 << 6)) {
7685 /* This subtraction cannot overflow. */
7686 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
7688 /* This addition cannot overflow 32 bits;
7689 * however it may overflow considered as a signed
7690 * operation, in which case we must set the Q flag.
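                 * e.g. (illustrative) two products of 0x40000000 sum to
                 * 0x80000000: there is no 32-bit carry, but as a signed
                 * addition the result overflows to negative, so Q is set. */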
7692 gen_helper_add_setq(tmp
, tmp
, tmp2
);
7694 tcg_temp_free_i32(tmp2
);
7695 if (insn
& (1 << 22)) {
7696 /* smlald, smlsld */
7697 tmp64
= tcg_temp_new_i64();
7698 tcg_gen_ext_i32_i64(tmp64
, tmp
);
7699 tcg_temp_free_i32(tmp
);
7700 gen_addq(s
, tmp64
, rd
, rn
);
7701 gen_storeq_reg(s
, rd
, rn
, tmp64
);
7702 tcg_temp_free_i64(tmp64
);
7704 /* smuad, smusd, smlad, smlsd */
7707 tmp2
= load_reg(s
, rd
);
7708 gen_helper_add_setq(tmp
, tmp
, tmp2
);
7709 tcg_temp_free_i32(tmp2
);
7711 store_reg(s
, rn
, tmp
);
7717 if (!arm_feature(env
, ARM_FEATURE_ARM_DIV
)) {
7720 if (((insn
>> 5) & 7) || (rd
!= 15)) {
7723 tmp
= load_reg(s
, rm
);
7724 tmp2
= load_reg(s
, rs
);
7725 if (insn
& (1 << 21)) {
7726 gen_helper_udiv(tmp
, tmp
, tmp2
);
7728 gen_helper_sdiv(tmp
, tmp
, tmp2
);
7730 tcg_temp_free_i32(tmp2
);
7731 store_reg(s
, rn
, tmp
);
7738 op1
= ((insn
>> 17) & 0x38) | ((insn
>> 5) & 7);
7740 case 0: /* Unsigned sum of absolute differences. */
7742 tmp
= load_reg(s
, rm
);
7743 tmp2
= load_reg(s
, rs
);
7744 gen_helper_usad8(tmp
, tmp
, tmp2
);
7745 tcg_temp_free_i32(tmp2
);
7747 tmp2
= load_reg(s
, rd
);
7748 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7749 tcg_temp_free_i32(tmp2
);
7751 store_reg(s
, rn
, tmp
);
7753 case 0x20: case 0x24: case 0x28: case 0x2c:
7754 /* Bitfield insert/clear. */
7756 shift
= (insn
>> 7) & 0x1f;
7757 i
= (insn
>> 16) & 0x1f;
7760 tmp
= tcg_temp_new_i32();
7761 tcg_gen_movi_i32(tmp
, 0);
7763 tmp
= load_reg(s
, rm
);
7766 tmp2
= load_reg(s
, rd
);
7767 gen_bfi(tmp
, tmp2
, tmp
, shift
, (1u << i
) - 1);
7768 tcg_temp_free_i32(tmp2
);
7770 store_reg(s
, rd
, tmp
);
7772 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7773 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
7775 tmp
= load_reg(s
, rm
);
7776 shift
= (insn
>> 7) & 0x1f;
7777 i
= ((insn
>> 16) & 0x1f) + 1;
7782 gen_ubfx(tmp
, shift
, (1u << i
) - 1);
7784 gen_sbfx(tmp
, shift
, i
);
7787 store_reg(s
, rd
, tmp
);
7797 /* Check for undefined extension instructions
7798 * per the ARM Bible IE:
7799 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7801 sh
= (0xf << 20) | (0xf << 4);
7802 if (op1
== 0x7 && ((insn
& sh
) == sh
))
7806 /* load/store byte/word */
7807 rn
= (insn
>> 16) & 0xf;
7808 rd
= (insn
>> 12) & 0xf;
7809 tmp2
= load_reg(s
, rn
);
7810 i
= (IS_USER(s
) || (insn
& 0x01200000) == 0x00200000);
7811 if (insn
& (1 << 24))
7812 gen_add_data_offset(s
, insn
, tmp2
);
7813 if (insn
& (1 << 20)) {
7815 if (insn
& (1 << 22)) {
7816 tmp
= gen_ld8u(tmp2
, i
);
7818 tmp
= gen_ld32(tmp2
, i
);
7822 tmp
= load_reg(s
, rd
);
7823 if (insn
& (1 << 22))
7824 gen_st8(tmp
, tmp2
, i
);
7826 gen_st32(tmp
, tmp2
, i
);
7828 if (!(insn
& (1 << 24))) {
7829 gen_add_data_offset(s
, insn
, tmp2
);
7830 store_reg(s
, rn
, tmp2
);
7831 } else if (insn
& (1 << 21)) {
7832 store_reg(s
, rn
, tmp2
);
7834 tcg_temp_free_i32(tmp2
);
7836 if (insn
& (1 << 20)) {
7837 /* Complete the load. */
7838 store_reg_from_load(env
, s
, rd
, tmp
);
7844 int j
, n
, user
, loaded_base
;
7846 /* load/store multiple words */
7847 /* XXX: store correct base if write back */
7849 if (insn
& (1 << 22)) {
7851 goto illegal_op
; /* only usable in supervisor mode */
7853 if ((insn
& (1 << 15)) == 0)
7856 rn
= (insn
>> 16) & 0xf;
7857 addr
= load_reg(s
, rn
);
7859 /* compute total size */
7861 TCGV_UNUSED(loaded_var
);
7864 if (insn
& (1 << i
))
7867 /* XXX: test invalid n == 0 case ? */
7868 if (insn
& (1 << 23)) {
7869 if (insn
& (1 << 24)) {
7871 tcg_gen_addi_i32(addr
, addr
, 4);
7873 /* post increment */
7876 if (insn
& (1 << 24)) {
7878 tcg_gen_addi_i32(addr
, addr
, -(n
* 4));
7880 /* post decrement */
7882 tcg_gen_addi_i32(addr
, addr
, -((n
- 1) * 4));
7887 if (insn
& (1 << i
)) {
7888 if (insn
& (1 << 20)) {
7890 tmp
= gen_ld32(addr
, IS_USER(s
));
7892 tmp2
= tcg_const_i32(i
);
7893 gen_helper_set_user_reg(tmp2
, tmp
);
7894 tcg_temp_free_i32(tmp2
);
7895 tcg_temp_free_i32(tmp
);
7896 } else if (i
== rn
) {
7900 store_reg_from_load(env
, s
, i
, tmp
);
7905 /* special case: r15 = PC + 8 */
7906 val
= (long)s
->pc
+ 4;
7907 tmp
= tcg_temp_new_i32();
7908 tcg_gen_movi_i32(tmp
, val
);
7910 tmp
= tcg_temp_new_i32();
7911 tmp2
= tcg_const_i32(i
);
7912 gen_helper_get_user_reg(tmp
, tmp2
);
7913 tcg_temp_free_i32(tmp2
);
7915 tmp
= load_reg(s
, i
);
7917 gen_st32(tmp
, addr
, IS_USER(s
));
7920 /* no need to add after the last transfer */
7922 tcg_gen_addi_i32(addr
, addr
, 4);
7925 if (insn
& (1 << 21)) {
7927 if (insn
& (1 << 23)) {
7928 if (insn
& (1 << 24)) {
7931 /* post increment */
7932 tcg_gen_addi_i32(addr
, addr
, 4);
7935 if (insn
& (1 << 24)) {
7938 tcg_gen_addi_i32(addr
, addr
, -((n
- 1) * 4));
7940 /* post decrement */
7941 tcg_gen_addi_i32(addr
, addr
, -(n
* 4));
7944 store_reg(s
, rn
, addr
);
7946 tcg_temp_free_i32(addr
);
7949 store_reg(s
, rn
, loaded_var
);
7951 if ((insn
& (1 << 22)) && !user
) {
7952 /* Restore CPSR from SPSR. */
7953 tmp
= load_cpu_field(spsr
);
7954 gen_set_cpsr(tmp
, 0xffffffff);
7955 tcg_temp_free_i32(tmp
);
7956 s
->is_jmp
= DISAS_UPDATE
;
7965 /* branch (and link) */
7966 val
= (int32_t)s
->pc
;
7967 if (insn
& (1 << 24)) {
7968 tmp
= tcg_temp_new_i32();
7969 tcg_gen_movi_i32(tmp
, val
);
7970 store_reg(s
, 14, tmp
);
7972 offset
= (((int32_t)insn
<< 8) >> 8);
7973 val
+= (offset
<< 2) + 4;
7981 if (disas_coproc_insn(env
, s
, insn
))
7986 gen_set_pc_im(s
->pc
);
7987 s
->is_jmp
= DISAS_SWI
;
7991 gen_exception_insn(s
, 4, EXCP_UDEF
);
/* Return true if this is a Thumb-2 logical op.  */
static int
thumb2_logic_op(int op)
{
    return (op < 8);
}

/* Generate code for a Thumb-2 data processing operation.  If CONDS is nonzero
   then set condition code flags based on the result of the operation.
   If SHIFTER_OUT is nonzero then set the carry flag for logical operations
   to the high bit of T1.
   Returns zero if the opcode is valid.  */
static int
gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
                   TCGv t0, TCGv t1)
{
    int logic_cc;

    logic_cc = 0;
    switch (op) {
    case 0: /* and */
        tcg_gen_and_i32(t0, t0, t1);
        logic_cc = conds;
        break;
    case 1: /* bic */
        tcg_gen_andc_i32(t0, t0, t1);
        logic_cc = conds;
        break;
    case 2: /* orr */
        tcg_gen_or_i32(t0, t0, t1);
        logic_cc = conds;
        break;
    case 3: /* orn */
        tcg_gen_orc_i32(t0, t0, t1);
        logic_cc = conds;
        break;
    case 4: /* eor */
        tcg_gen_xor_i32(t0, t0, t1);
        logic_cc = conds;
        break;
    case 8: /* add */
        if (conds)
            gen_helper_add_cc(t0, t0, t1);
        else
            tcg_gen_add_i32(t0, t0, t1);
        break;
    case 10: /* adc */
        if (conds)
            gen_helper_adc_cc(t0, t0, t1);
        else
            gen_adc(t0, t1);
        break;
    case 11: /* sbc */
        if (conds)
            gen_helper_sbc_cc(t0, t0, t1);
        else
            gen_sub_carry(t0, t0, t1);
        break;
    case 13: /* sub */
        if (conds)
            gen_helper_sub_cc(t0, t0, t1);
        else
            tcg_gen_sub_i32(t0, t0, t1);
        break;
    case 14: /* rsb */
        if (conds)
            gen_helper_sub_cc(t0, t1, t0);
        else
            tcg_gen_sub_i32(t0, t1, t0);
        break;
    default: /* 5, 6, 7, 9, 12, 15. */
        return 1;
    }
    if (logic_cc) {
        gen_logic_CC(t0);
        if (shifter_out)
            gen_set_CF_bit31(t1);
    }
    return 0;
}
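/* For the logical ops above, SHIFTER_OUT selects whether the carry flag is
   taken from the shifter output (bit 31 of T1); opcodes 5, 6, 7, 9, 12 and 15
   make this helper return nonzero and are expected to be treated as special
   cases (or as illegal) by the caller. */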
/* Translate a 32-bit thumb instruction.  Returns nonzero if the instruction
   is not legal.  */
static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
{
    uint32_t insn, imm, shift, offset;
    uint32_t rd, rn, rm, rs;
    TCGv tmp, tmp2, tmp3, addr;
    TCGv_i64 tmp64;
    int op, shiftop, conds, logic_cc;

    if (!(arm_feature(env, ARM_FEATURE_THUMB2)
          || arm_feature(env, ARM_FEATURE_M))) {
        /* Thumb-1 cores may need to treat bl and blx as a pair of
           16-bit instructions to get correct prefetch abort behavior.  */
        insn = insn_hw1;
        if ((insn & (1 << 12)) == 0) {
            /* Second half of blx.  */
            offset = ((insn & 0x7ff) << 1);
            tmp = load_reg(s, 14);
            tcg_gen_addi_i32(tmp, tmp, offset);
            tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);

            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, s->pc | 1);
            store_reg(s, 14, tmp2);
            gen_bx(s, tmp);
            return 0;
        }
        if (insn & (1 << 11)) {
            /* Second half of bl.  */
            offset = ((insn & 0x7ff) << 1) | 1;
            tmp = load_reg(s, 14);
            tcg_gen_addi_i32(tmp, tmp, offset);

            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, s->pc | 1);
            store_reg(s, 14, tmp2);
            gen_bx(s, tmp);
            return 0;
        }
        if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
            /* Instruction spans a page boundary.  Implement it as two
               16-bit instructions in case the second half causes an
               prefetch abort.  */
            offset = ((int32_t)insn << 21) >> 9;
            tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
            return 0;
        }
        /* Fall through to 32-bit decode.  */
    }

    insn = lduw_code(s->pc);
    s->pc += 2;
    insn |= (uint32_t)insn_hw1 << 16;

    if ((insn & 0xf800e800) != 0xf000e800) {
        ARCH(6T2);
    }

    rn = (insn >> 16) & 0xf;
    rs = (insn >> 12) & 0xf;
    rd = (insn >> 8) & 0xf;
    rm = insn & 0xf;
    switch ((insn >> 25) & 0xf) {
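    /* Bits [28:25] of the reassembled 32-bit opcode select the major Thumb-2
       instruction group; groups 0-3 correspond to 16-bit encodings and should
       never reach this decoder. */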
    case 0: case 1: case 2: case 3:
        /* 16-bit instructions.  Should never happen.  */
        abort();
    case 4:
        if (insn & (1 << 22)) {
            /* Other load/store, table branch.  */
            if (insn & 0x01200000) {
                /* Load/store doubleword.  */
                if (rn == 15) {
                    addr = tcg_temp_new_i32();
                    tcg_gen_movi_i32(addr, s->pc & ~3);
                } else {
                    addr = load_reg(s, rn);
                }
                offset = (insn & 0xff) * 4;
                if ((insn & (1 << 23)) == 0)
                    offset = -offset;
                if (insn & (1 << 24)) {
                    tcg_gen_addi_i32(addr, addr, offset);
                    offset = 0;
                }
                if (insn & (1 << 20)) {
                    /* ldrd */
                    tmp = gen_ld32(addr, IS_USER(s));
                    store_reg(s, rs, tmp);
                    tcg_gen_addi_i32(addr, addr, 4);
                    tmp = gen_ld32(addr, IS_USER(s));
                    store_reg(s, rd, tmp);
                } else {
                    /* strd */
                    tmp = load_reg(s, rs);
                    gen_st32(tmp, addr, IS_USER(s));
                    tcg_gen_addi_i32(addr, addr, 4);
                    tmp = load_reg(s, rd);
                    gen_st32(tmp, addr, IS_USER(s));
                }
                if (insn & (1 << 21)) {
                    /* Base writeback.  */
                    if (rn == 15)
                        goto illegal_op;
                    tcg_gen_addi_i32(addr, addr, offset - 4);
                    store_reg(s, rn, addr);
                } else {
                    tcg_temp_free_i32(addr);
                }
            } else if ((insn & (1 << 23)) == 0) {
                /* Load/store exclusive word.  */
                addr = tcg_temp_local_new();
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
                if (insn & (1 << 20)) {
                    gen_load_exclusive(s, rs, 15, addr, 2);
                } else {
                    gen_store_exclusive(s, rd, rs, 15, addr, 2);
                }
                tcg_temp_free(addr);
            } else if ((insn & (1 << 6)) == 0) {
                /* Table Branch.  */
                if (rn == 15) {
                    addr = tcg_temp_new_i32();
                    tcg_gen_movi_i32(addr, s->pc);
                } else {
                    addr = load_reg(s, rn);
                }
                tmp = load_reg(s, rm);
                tcg_gen_add_i32(addr, addr, tmp);
                if (insn & (1 << 4)) {
                    /* tbh */
                    tcg_gen_add_i32(addr, addr, tmp);
                    tcg_temp_free_i32(tmp);
                    tmp = gen_ld16u(addr, IS_USER(s));
                } else { /* tbb */
                    tcg_temp_free_i32(tmp);
                    tmp = gen_ld8u(addr, IS_USER(s));
                }
                tcg_temp_free_i32(addr);
                tcg_gen_shli_i32(tmp, tmp, 1);
                tcg_gen_addi_i32(tmp, tmp, s->pc);
                store_reg(s, 15, tmp);
            } else {
                /* Load/store exclusive byte/halfword/doubleword.  */
                ARCH(7);
                op = (insn >> 4) & 0x3;
                if (op == 2) {
                    goto illegal_op;
                }
                addr = tcg_temp_local_new();
                load_reg_var(s, addr, rn);
                if (insn & (1 << 20)) {
                    gen_load_exclusive(s, rs, rd, addr, op);
                } else {
                    gen_store_exclusive(s, rm, rs, rd, addr, op);
                }
                tcg_temp_free(addr);
            }
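            /* The word-sized exclusives were handled above with a fixed size
               argument of 2; here op = (insn >> 4) & 3 (byte, halfword or
               doubleword, 2 being rejected) is passed straight through as the
               size to the gen_{load,store}_exclusive helpers. */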
8245 /* Load/store multiple, RFE, SRS. */
8246 if (((insn
>> 23) & 1) == ((insn
>> 24) & 1)) {
8247 /* Not available in user mode. */
8250 if (insn
& (1 << 20)) {
8252 addr
= load_reg(s
, rn
);
8253 if ((insn
& (1 << 24)) == 0)
8254 tcg_gen_addi_i32(addr
, addr
, -8);
8255 /* Load PC into tmp and CPSR into tmp2. */
8256 tmp
= gen_ld32(addr
, 0);
8257 tcg_gen_addi_i32(addr
, addr
, 4);
8258 tmp2
= gen_ld32(addr
, 0);
8259 if (insn
& (1 << 21)) {
8260 /* Base writeback. */
8261 if (insn
& (1 << 24)) {
8262 tcg_gen_addi_i32(addr
, addr
, 4);
8264 tcg_gen_addi_i32(addr
, addr
, -4);
8266 store_reg(s
, rn
, addr
);
8268 tcg_temp_free_i32(addr
);
8270 gen_rfe(s
, tmp
, tmp2
);
8274 addr
= tcg_temp_new_i32();
8275 tmp
= tcg_const_i32(op
);
8276 gen_helper_get_r13_banked(addr
, cpu_env
, tmp
);
8277 tcg_temp_free_i32(tmp
);
8278 if ((insn
& (1 << 24)) == 0) {
8279 tcg_gen_addi_i32(addr
, addr
, -8);
8281 tmp
= load_reg(s
, 14);
8282 gen_st32(tmp
, addr
, 0);
8283 tcg_gen_addi_i32(addr
, addr
, 4);
8284 tmp
= tcg_temp_new_i32();
8285 gen_helper_cpsr_read(tmp
);
8286 gen_st32(tmp
, addr
, 0);
8287 if (insn
& (1 << 21)) {
8288 if ((insn
& (1 << 24)) == 0) {
8289 tcg_gen_addi_i32(addr
, addr
, -4);
8291 tcg_gen_addi_i32(addr
, addr
, 4);
8293 tmp
= tcg_const_i32(op
);
8294 gen_helper_set_r13_banked(cpu_env
, tmp
, addr
);
8295 tcg_temp_free_i32(tmp
);
8297 tcg_temp_free_i32(addr
);
8301 int i
, loaded_base
= 0;
8303 /* Load/store multiple. */
8304 addr
= load_reg(s
, rn
);
8306 for (i
= 0; i
< 16; i
++) {
8307 if (insn
& (1 << i
))
8310 if (insn
& (1 << 24)) {
8311 tcg_gen_addi_i32(addr
, addr
, -offset
);
8314 TCGV_UNUSED(loaded_var
);
8315 for (i
= 0; i
< 16; i
++) {
8316 if ((insn
& (1 << i
)) == 0)
8318 if (insn
& (1 << 20)) {
8320 tmp
= gen_ld32(addr
, IS_USER(s
));
8323 } else if (i
== rn
) {
8327 store_reg(s
, i
, tmp
);
8331 tmp
= load_reg(s
, i
);
8332 gen_st32(tmp
, addr
, IS_USER(s
));
8334 tcg_gen_addi_i32(addr
, addr
, 4);
8337 store_reg(s
, rn
, loaded_var
);
8339 if (insn
& (1 << 21)) {
8340 /* Base register writeback. */
8341 if (insn
& (1 << 24)) {
8342 tcg_gen_addi_i32(addr
, addr
, -offset
);
8344 /* Fault if writeback register is in register list. */
8345 if (insn
& (1 << rn
))
8347 store_reg(s
, rn
, addr
);
8349 tcg_temp_free_i32(addr
);
8356 op
= (insn
>> 21) & 0xf;
8358 /* Halfword pack. */
8359 tmp
= load_reg(s
, rn
);
8360 tmp2
= load_reg(s
, rm
);
8361 shift
= ((insn
>> 10) & 0x1c) | ((insn
>> 6) & 0x3);
8362 if (insn
& (1 << 5)) {
8366 tcg_gen_sari_i32(tmp2
, tmp2
, shift
);
8367 tcg_gen_andi_i32(tmp
, tmp
, 0xffff0000);
8368 tcg_gen_ext16u_i32(tmp2
, tmp2
);
8372 tcg_gen_shli_i32(tmp2
, tmp2
, shift
);
8373 tcg_gen_ext16u_i32(tmp
, tmp
);
8374 tcg_gen_andi_i32(tmp2
, tmp2
, 0xffff0000);
8376 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
8377 tcg_temp_free_i32(tmp2
);
8378 store_reg(s
, rd
, tmp
);
8380 /* Data processing register constant shift. */
8382 tmp
= tcg_temp_new_i32();
8383 tcg_gen_movi_i32(tmp
, 0);
8385 tmp
= load_reg(s
, rn
);
8387 tmp2
= load_reg(s
, rm
);
8389 shiftop
= (insn
>> 4) & 3;
8390 shift
= ((insn
>> 6) & 3) | ((insn
>> 10) & 0x1c);
8391 conds
= (insn
& (1 << 20)) != 0;
8392 logic_cc
= (conds
&& thumb2_logic_op(op
));
8393 gen_arm_shift_im(tmp2
, shiftop
, shift
, logic_cc
);
8394 if (gen_thumb2_data_op(s
, op
, conds
, 0, tmp
, tmp2
))
8396 tcg_temp_free_i32(tmp2
);
8398 store_reg(s
, rd
, tmp
);
8400 tcg_temp_free_i32(tmp
);
8404 case 13: /* Misc data processing. */
8405 op
= ((insn
>> 22) & 6) | ((insn
>> 7) & 1);
8406 if (op
< 4 && (insn
& 0xf000) != 0xf000)
8409 case 0: /* Register controlled shift. */
8410 tmp
= load_reg(s
, rn
);
8411 tmp2
= load_reg(s
, rm
);
8412 if ((insn
& 0x70) != 0)
8414 op
= (insn
>> 21) & 3;
8415 logic_cc
= (insn
& (1 << 20)) != 0;
8416 gen_arm_shift_reg(tmp
, op
, tmp2
, logic_cc
);
8419 store_reg_bx(env
, s
, rd
, tmp
);
8421 case 1: /* Sign/zero extend. */
8422 tmp
= load_reg(s
, rm
);
8423 shift
= (insn
>> 4) & 3;
8424 /* ??? In many cases it's not necessary to do a
8425 rotate, a shift is sufficient. */
8427 tcg_gen_rotri_i32(tmp
, tmp
, shift
* 8);
8428 op
= (insn
>> 20) & 7;
8430 case 0: gen_sxth(tmp
); break;
8431 case 1: gen_uxth(tmp
); break;
8432 case 2: gen_sxtb16(tmp
); break;
8433 case 3: gen_uxtb16(tmp
); break;
8434 case 4: gen_sxtb(tmp
); break;
8435 case 5: gen_uxtb(tmp
); break;
8436 default: goto illegal_op
;
8439 tmp2
= load_reg(s
, rn
);
8440 if ((op
>> 1) == 1) {
8441 gen_add16(tmp
, tmp2
);
8443 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
8444 tcg_temp_free_i32(tmp2
);
8447 store_reg(s
, rd
, tmp
);
8449 case 2: /* SIMD add/subtract. */
8450 op
= (insn
>> 20) & 7;
8451 shift
= (insn
>> 4) & 7;
8452 if ((op
& 3) == 3 || (shift
& 3) == 3)
8454 tmp
= load_reg(s
, rn
);
8455 tmp2
= load_reg(s
, rm
);
8456 gen_thumb2_parallel_addsub(op
, shift
, tmp
, tmp2
);
8457 tcg_temp_free_i32(tmp2
);
8458 store_reg(s
, rd
, tmp
);
8460 case 3: /* Other data processing. */
8461 op
= ((insn
>> 17) & 0x38) | ((insn
>> 4) & 7);
8463 /* Saturating add/subtract. */
8464 tmp
= load_reg(s
, rn
);
8465 tmp2
= load_reg(s
, rm
);
8467 gen_helper_double_saturate(tmp
, tmp
);
8469 gen_helper_sub_saturate(tmp
, tmp2
, tmp
);
8471 gen_helper_add_saturate(tmp
, tmp
, tmp2
);
8472 tcg_temp_free_i32(tmp2
);
8474 tmp
= load_reg(s
, rn
);
8476 case 0x0a: /* rbit */
8477 gen_helper_rbit(tmp
, tmp
);
8479 case 0x08: /* rev */
8480 tcg_gen_bswap32_i32(tmp
, tmp
);
8482 case 0x09: /* rev16 */
8485 case 0x0b: /* revsh */
8488 case 0x10: /* sel */
8489 tmp2
= load_reg(s
, rm
);
8490 tmp3
= tcg_temp_new_i32();
8491 tcg_gen_ld_i32(tmp3
, cpu_env
, offsetof(CPUARMState
, GE
));
8492 gen_helper_sel_flags(tmp
, tmp3
, tmp
, tmp2
);
8493 tcg_temp_free_i32(tmp3
);
8494 tcg_temp_free_i32(tmp2
);
8496 case 0x18: /* clz */
8497 gen_helper_clz(tmp
, tmp
);
8503 store_reg(s
, rd
, tmp
);
8505 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
8506 op
= (insn
>> 4) & 0xf;
8507 tmp
= load_reg(s
, rn
);
8508 tmp2
= load_reg(s
, rm
);
8509 switch ((insn
>> 20) & 7) {
8510 case 0: /* 32 x 32 -> 32 */
8511 tcg_gen_mul_i32(tmp
, tmp
, tmp2
);
8512 tcg_temp_free_i32(tmp2
);
8514 tmp2
= load_reg(s
, rs
);
8516 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
8518 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
8519 tcg_temp_free_i32(tmp2
);
8522 case 1: /* 16 x 16 -> 32 */
8523 gen_mulxy(tmp
, tmp2
, op
& 2, op
& 1);
8524 tcg_temp_free_i32(tmp2
);
8526 tmp2
= load_reg(s
, rs
);
8527 gen_helper_add_setq(tmp
, tmp
, tmp2
);
8528 tcg_temp_free_i32(tmp2
);
8531 case 2: /* Dual multiply add. */
8532 case 4: /* Dual multiply subtract. */
8534 gen_swap_half(tmp2
);
8535 gen_smul_dual(tmp
, tmp2
);
8536 if (insn
& (1 << 22)) {
8537 /* This subtraction cannot overflow. */
8538 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
8540 /* This addition cannot overflow 32 bits;
8541 * however it may overflow considered as a signed
8542 * operation, in which case we must set the Q flag.
8544 gen_helper_add_setq(tmp
, tmp
, tmp2
);
8546 tcg_temp_free_i32(tmp2
);
8549 tmp2
= load_reg(s
, rs
);
8550 gen_helper_add_setq(tmp
, tmp
, tmp2
);
8551 tcg_temp_free_i32(tmp2
);
8554 case 3: /* 32 * 16 -> 32msb */
8556 tcg_gen_sari_i32(tmp2
, tmp2
, 16);
8559 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
8560 tcg_gen_shri_i64(tmp64
, tmp64
, 16);
8561 tmp
= tcg_temp_new_i32();
8562 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
8563 tcg_temp_free_i64(tmp64
);
8566 tmp2
= load_reg(s
, rs
);
8567 gen_helper_add_setq(tmp
, tmp
, tmp2
);
8568 tcg_temp_free_i32(tmp2
);
8571 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
8572 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
8574 tmp
= load_reg(s
, rs
);
8575 if (insn
& (1 << 20)) {
8576 tmp64
= gen_addq_msw(tmp64
, tmp
);
8578 tmp64
= gen_subq_msw(tmp64
, tmp
);
8581 if (insn
& (1 << 4)) {
8582 tcg_gen_addi_i64(tmp64
, tmp64
, 0x80000000u
);
8584 tcg_gen_shri_i64(tmp64
, tmp64
, 32);
8585 tmp
= tcg_temp_new_i32();
8586 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
8587 tcg_temp_free_i64(tmp64
);
8589 case 7: /* Unsigned sum of absolute differences. */
8590 gen_helper_usad8(tmp
, tmp
, tmp2
);
8591 tcg_temp_free_i32(tmp2
);
8593 tmp2
= load_reg(s
, rs
);
8594 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
8595 tcg_temp_free_i32(tmp2
);
8599 store_reg(s
, rd
, tmp
);
8601 case 6: case 7: /* 64-bit multiply, Divide. */
8602 op
= ((insn
>> 4) & 0xf) | ((insn
>> 16) & 0x70);
8603 tmp
= load_reg(s
, rn
);
8604 tmp2
= load_reg(s
, rm
);
8605 if ((op
& 0x50) == 0x10) {
8607 if (!arm_feature(env
, ARM_FEATURE_THUMB_DIV
)) {
8611 gen_helper_udiv(tmp
, tmp
, tmp2
);
8613 gen_helper_sdiv(tmp
, tmp
, tmp2
);
8614 tcg_temp_free_i32(tmp2
);
8615 store_reg(s
, rd
, tmp
);
8616 } else if ((op
& 0xe) == 0xc) {
8617 /* Dual multiply accumulate long. */
8619 gen_swap_half(tmp2
);
8620 gen_smul_dual(tmp
, tmp2
);
8622 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
8624 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
8626 tcg_temp_free_i32(tmp2
);
8628 tmp64
= tcg_temp_new_i64();
8629 tcg_gen_ext_i32_i64(tmp64
, tmp
);
8630 tcg_temp_free_i32(tmp
);
8631 gen_addq(s
, tmp64
, rs
, rd
);
8632 gen_storeq_reg(s
, rs
, rd
, tmp64
);
8633 tcg_temp_free_i64(tmp64
);
8636 /* Unsigned 64-bit multiply */
8637 tmp64
= gen_mulu_i64_i32(tmp
, tmp2
);
8641 gen_mulxy(tmp
, tmp2
, op
& 2, op
& 1);
8642 tcg_temp_free_i32(tmp2
);
8643 tmp64
= tcg_temp_new_i64();
8644 tcg_gen_ext_i32_i64(tmp64
, tmp
);
8645 tcg_temp_free_i32(tmp
);
8647 /* Signed 64-bit multiply */
8648 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
8653 gen_addq_lo(s
, tmp64
, rs
);
8654 gen_addq_lo(s
, tmp64
, rd
);
8655 } else if (op
& 0x40) {
8656 /* 64-bit accumulate. */
8657 gen_addq(s
, tmp64
, rs
, rd
);
8659 gen_storeq_reg(s
, rs
, rd
, tmp64
);
8660 tcg_temp_free_i64(tmp64
);
8665 case 6: case 7: case 14: case 15:
8667 if (((insn
>> 24) & 3) == 3) {
8668 /* Translate into the equivalent ARM encoding. */
8669 insn
= (insn
& 0xe2ffffff) | ((insn
& (1 << 28)) >> 4) | (1 << 28);
8670 if (disas_neon_data_insn(env
, s
, insn
))
8673 if (insn
& (1 << 28))
8675 if (disas_coproc_insn (env
, s
, insn
))
8679 case 8: case 9: case 10: case 11:
8680 if (insn
& (1 << 15)) {
8681 /* Branches, misc control. */
8682 if (insn
& 0x5000) {
8683 /* Unconditional branch. */
8684 /* signextend(hw1[10:0]) -> offset[:12]. */
8685 offset
= ((int32_t)insn
<< 5) >> 9 & ~(int32_t)0xfff;
8686 /* hw1[10:0] -> offset[11:1]. */
8687 offset
|= (insn
& 0x7ff) << 1;
8688 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
8689 offset[24:22] already have the same value because of the
8690 sign extension above. */
8691 offset
^= ((~insn
) & (1 << 13)) << 10;
8692 offset
^= ((~insn
) & (1 << 11)) << 11;
8694 if (insn
& (1 << 14)) {
8695 /* Branch and link. */
8696 tcg_gen_movi_i32(cpu_R
[14], s
->pc
| 1);
8700 if (insn
& (1 << 12)) {
8705 offset
&= ~(uint32_t)2;
8706 /* thumb2 bx, no need to check */
8707 gen_bx_im(s
, offset
);
8709 } else if (((insn
>> 23) & 7) == 7) {
8711 if (insn
& (1 << 13))
8714 if (insn
& (1 << 26)) {
8715 /* Secure monitor call (v6Z) */
8716 goto illegal_op
; /* not implemented. */
8718 op
= (insn
>> 20) & 7;
8720 case 0: /* msr cpsr. */
8722 tmp
= load_reg(s
, rn
);
8723 addr
= tcg_const_i32(insn
& 0xff);
8724 gen_helper_v7m_msr(cpu_env
, addr
, tmp
);
8725 tcg_temp_free_i32(addr
);
8726 tcg_temp_free_i32(tmp
);
8731 case 1: /* msr spsr. */
8734 tmp
= load_reg(s
, rn
);
8736 msr_mask(env
, s
, (insn
>> 8) & 0xf, op
== 1),
8740 case 2: /* cps, nop-hint. */
8741 if (((insn
>> 8) & 7) == 0) {
8742 gen_nop_hint(s
, insn
& 0xff);
8744 /* Implemented as NOP in user mode. */
8749 if (insn
& (1 << 10)) {
8750 if (insn
& (1 << 7))
8752 if (insn
& (1 << 6))
8754 if (insn
& (1 << 5))
8756 if (insn
& (1 << 9))
8757 imm
= CPSR_A
| CPSR_I
| CPSR_F
;
8759 if (insn
& (1 << 8)) {
8761 imm
|= (insn
& 0x1f);
8764 gen_set_psr_im(s
, offset
, 0, imm
);
8767 case 3: /* Special control operations. */
8769 op
= (insn
>> 4) & 0xf;
8777 /* These execute as NOPs. */
8784 /* Trivial implementation equivalent to bx. */
8785 tmp
= load_reg(s
, rn
);
8788 case 5: /* Exception return. */
8792 if (rn
!= 14 || rd
!= 15) {
8795 tmp
= load_reg(s
, rn
);
8796 tcg_gen_subi_i32(tmp
, tmp
, insn
& 0xff);
8797 gen_exception_return(s
, tmp
);
8799 case 6: /* mrs cpsr. */
8800 tmp
= tcg_temp_new_i32();
8802 addr
= tcg_const_i32(insn
& 0xff);
8803 gen_helper_v7m_mrs(tmp
, cpu_env
, addr
);
8804 tcg_temp_free_i32(addr
);
8806 gen_helper_cpsr_read(tmp
);
8808 store_reg(s
, rd
, tmp
);
8810 case 7: /* mrs spsr. */
8811 /* Not accessible in user mode. */
8812 if (IS_USER(s
) || IS_M(env
))
8814 tmp
= load_cpu_field(spsr
);
8815 store_reg(s
, rd
, tmp
);
8820 /* Conditional branch. */
8821 op
= (insn
>> 22) & 0xf;
8822 /* Generate a conditional jump to next instruction. */
8823 s
->condlabel
= gen_new_label();
8824 gen_test_cc(op
^ 1, s
->condlabel
);
8827 /* offset[11:1] = insn[10:0] */
8828 offset
= (insn
& 0x7ff) << 1;
8829 /* offset[17:12] = insn[21:16]. */
8830 offset
|= (insn
& 0x003f0000) >> 4;
8831 /* offset[31:20] = insn[26]. */
8832 offset
|= ((int32_t)((insn
<< 5) & 0x80000000)) >> 11;
8833 /* offset[18] = insn[13]. */
8834 offset
|= (insn
& (1 << 13)) << 5;
8835 /* offset[19] = insn[11]. */
8836 offset
|= (insn
& (1 << 11)) << 8;
8838 /* jump to the offset */
8839 gen_jmp(s
, s
->pc
+ offset
);
8842 /* Data processing immediate. */
8843 if (insn
& (1 << 25)) {
8844 if (insn
& (1 << 24)) {
8845 if (insn
& (1 << 20))
8847 /* Bitfield/Saturate. */
8848 op
= (insn
>> 21) & 7;
8850 shift
= ((insn
>> 6) & 3) | ((insn
>> 10) & 0x1c);
8852 tmp
= tcg_temp_new_i32();
8853 tcg_gen_movi_i32(tmp
, 0);
8855 tmp
= load_reg(s
, rn
);
8858 case 2: /* Signed bitfield extract. */
8860 if (shift
+ imm
> 32)
8863 gen_sbfx(tmp
, shift
, imm
);
8865 case 6: /* Unsigned bitfield extract. */
8867 if (shift
+ imm
> 32)
8870 gen_ubfx(tmp
, shift
, (1u << imm
) - 1);
8872 case 3: /* Bitfield insert/clear. */
8875 imm
= imm
+ 1 - shift
;
8877 tmp2
= load_reg(s
, rd
);
8878 gen_bfi(tmp
, tmp2
, tmp
, shift
, (1u << imm
) - 1);
8879 tcg_temp_free_i32(tmp2
);
8884 default: /* Saturate. */
8887 tcg_gen_sari_i32(tmp
, tmp
, shift
);
8889 tcg_gen_shli_i32(tmp
, tmp
, shift
);
8891 tmp2
= tcg_const_i32(imm
);
8894 if ((op
& 1) && shift
== 0)
8895 gen_helper_usat16(tmp
, tmp
, tmp2
);
8897 gen_helper_usat(tmp
, tmp
, tmp2
);
8900 if ((op
& 1) && shift
== 0)
8901 gen_helper_ssat16(tmp
, tmp
, tmp2
);
8903 gen_helper_ssat(tmp
, tmp
, tmp2
);
8905 tcg_temp_free_i32(tmp2
);
8908 store_reg(s
, rd
, tmp
);
8910 imm
= ((insn
& 0x04000000) >> 15)
8911 | ((insn
& 0x7000) >> 4) | (insn
& 0xff);
8912 if (insn
& (1 << 22)) {
8913 /* 16-bit immediate. */
8914 imm
|= (insn
>> 4) & 0xf000;
8915 if (insn
& (1 << 23)) {
8917 tmp
= load_reg(s
, rd
);
8918 tcg_gen_ext16u_i32(tmp
, tmp
);
8919 tcg_gen_ori_i32(tmp
, tmp
, imm
<< 16);
8922 tmp
= tcg_temp_new_i32();
8923 tcg_gen_movi_i32(tmp
, imm
);
8926 /* Add/sub 12-bit immediate. */
8928 offset
= s
->pc
& ~(uint32_t)3;
8929 if (insn
& (1 << 23))
8933 tmp
= tcg_temp_new_i32();
8934 tcg_gen_movi_i32(tmp
, offset
);
8936 tmp
= load_reg(s
, rn
);
8937 if (insn
& (1 << 23))
8938 tcg_gen_subi_i32(tmp
, tmp
, imm
);
8940 tcg_gen_addi_i32(tmp
, tmp
, imm
);
8943 store_reg(s
, rd
, tmp
);
8946 int shifter_out
= 0;
8947 /* modified 12-bit immediate. */
8948 shift
= ((insn
& 0x04000000) >> 23) | ((insn
& 0x7000) >> 12);
8949 imm
= (insn
& 0xff);
8952 /* Nothing to do. */
8954 case 1: /* 00XY00XY */
8957 case 2: /* XY00XY00 */
8961 case 3: /* XYXYXYXY */
8965 default: /* Rotated constant. */
8966 shift
= (shift
<< 1) | (imm
>> 7);
8968 imm
= imm
<< (32 - shift
);
8972 tmp2
= tcg_temp_new_i32();
8973 tcg_gen_movi_i32(tmp2
, imm
);
8974 rn
= (insn
>> 16) & 0xf;
8976 tmp
= tcg_temp_new_i32();
8977 tcg_gen_movi_i32(tmp
, 0);
8979 tmp
= load_reg(s
, rn
);
8981 op
= (insn
>> 21) & 0xf;
8982 if (gen_thumb2_data_op(s
, op
, (insn
& (1 << 20)) != 0,
8983 shifter_out
, tmp
, tmp2
))
8985 tcg_temp_free_i32(tmp2
);
8986 rd
= (insn
>> 8) & 0xf;
8988 store_reg(s
, rd
, tmp
);
8990 tcg_temp_free_i32(tmp
);
8995 case 12: /* Load/store single data item. */
9000 if ((insn
& 0x01100000) == 0x01000000) {
9001 if (disas_neon_ls_insn(env
, s
, insn
))
9005 op
= ((insn
>> 21) & 3) | ((insn
>> 22) & 4);
9007 if (!(insn
& (1 << 20))) {
9011 /* Byte or halfword load space with dest == r15 : memory hints.
9012 * Catch them early so we don't emit pointless addressing code.
9013 * This space is a mix of:
9014 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
9015 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
9017 * unallocated hints, which must be treated as NOPs
9018 * UNPREDICTABLE space, which we NOP or UNDEF depending on
9019 * which is easiest for the decoding logic
9020 * Some space which must UNDEF
9022 int op1
= (insn
>> 23) & 3;
9023 int op2
= (insn
>> 6) & 0x3f;
9028 /* UNPREDICTABLE, unallocated hint or
9029 * PLD/PLDW/PLI (literal)
9034 return 0; /* PLD/PLDW/PLI or unallocated hint */
9036 if ((op2
== 0) || ((op2
& 0x3c) == 0x30)) {
9037 return 0; /* PLD/PLDW/PLI or unallocated hint */
9039 /* UNDEF space, or an UNPREDICTABLE */
9045 addr
= tcg_temp_new_i32();
9047 /* s->pc has already been incremented by 4. */
9048 imm
= s
->pc
& 0xfffffffc;
9049 if (insn
& (1 << 23))
9050 imm
+= insn
& 0xfff;
9052 imm
-= insn
& 0xfff;
9053 tcg_gen_movi_i32(addr
, imm
);
9055 addr
= load_reg(s
, rn
);
9056 if (insn
& (1 << 23)) {
9057 /* Positive offset. */
9059 tcg_gen_addi_i32(addr
, addr
, imm
);
9062 switch ((insn
>> 8) & 0xf) {
9063 case 0x0: /* Shifted Register. */
9064 shift
= (insn
>> 4) & 0xf;
9066 tcg_temp_free_i32(addr
);
9069 tmp
= load_reg(s
, rm
);
9071 tcg_gen_shli_i32(tmp
, tmp
, shift
);
9072 tcg_gen_add_i32(addr
, addr
, tmp
);
9073 tcg_temp_free_i32(tmp
);
9075 case 0xc: /* Negative offset. */
9076 tcg_gen_addi_i32(addr
, addr
, -imm
);
9078 case 0xe: /* User privilege. */
9079 tcg_gen_addi_i32(addr
, addr
, imm
);
9082 case 0x9: /* Post-decrement. */
9085 case 0xb: /* Post-increment. */
9089 case 0xd: /* Pre-decrement. */
9092 case 0xf: /* Pre-increment. */
9093 tcg_gen_addi_i32(addr
, addr
, imm
);
9097 tcg_temp_free_i32(addr
);
9102 if (insn
& (1 << 20)) {
9105 case 0: tmp
= gen_ld8u(addr
, user
); break;
9106 case 4: tmp
= gen_ld8s(addr
, user
); break;
9107 case 1: tmp
= gen_ld16u(addr
, user
); break;
9108 case 5: tmp
= gen_ld16s(addr
, user
); break;
9109 case 2: tmp
= gen_ld32(addr
, user
); break;
9111 tcg_temp_free_i32(addr
);
9117 store_reg(s
, rs
, tmp
);
9121 tmp
= load_reg(s
, rs
);
9123 case 0: gen_st8(tmp
, addr
, user
); break;
9124 case 1: gen_st16(tmp
, addr
, user
); break;
9125 case 2: gen_st32(tmp
, addr
, user
); break;
9127 tcg_temp_free_i32(addr
);
9132 tcg_gen_addi_i32(addr
, addr
, imm
);
9134 store_reg(s
, rn
, addr
);
9136 tcg_temp_free_i32(addr
);
static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
{
    uint32_t val, insn, op, rm, rn, rd, shift, cond;
    int32_t offset;
    int i;
    TCGv tmp, tmp2, addr;

    if (s->condexec_mask) {
        cond = s->condexec_cond;
        if (cond != 0x0e) {     /* Skip conditional when condition is AL. */
            s->condlabel = gen_new_label();
            gen_test_cc(cond ^ 1, s->condlabel);
            s->condjmp = 1;
        }
    }

    insn = lduw_code(s->pc);
    s->pc += 2;

    switch (insn >> 12) {
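    /* The top four bits of the 16-bit opcode select the Thumb instruction
       group decoded by the cases below (shift/add/sub forms, large
       immediates, load/store variants, misc operations and branches). */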
9173 op
= (insn
>> 11) & 3;
9176 rn
= (insn
>> 3) & 7;
9177 tmp
= load_reg(s
, rn
);
9178 if (insn
& (1 << 10)) {
9180 tmp2
= tcg_temp_new_i32();
9181 tcg_gen_movi_i32(tmp2
, (insn
>> 6) & 7);
9184 rm
= (insn
>> 6) & 7;
9185 tmp2
= load_reg(s
, rm
);
9187 if (insn
& (1 << 9)) {
9188 if (s
->condexec_mask
)
9189 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
9191 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
9193 if (s
->condexec_mask
)
9194 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
9196 gen_helper_add_cc(tmp
, tmp
, tmp2
);
9198 tcg_temp_free_i32(tmp2
);
9199 store_reg(s
, rd
, tmp
);
9201 /* shift immediate */
9202 rm
= (insn
>> 3) & 7;
9203 shift
= (insn
>> 6) & 0x1f;
9204 tmp
= load_reg(s
, rm
);
9205 gen_arm_shift_im(tmp
, op
, shift
, s
->condexec_mask
== 0);
9206 if (!s
->condexec_mask
)
9208 store_reg(s
, rd
, tmp
);
9212 /* arithmetic large immediate */
9213 op
= (insn
>> 11) & 3;
9214 rd
= (insn
>> 8) & 0x7;
9215 if (op
== 0) { /* mov */
9216 tmp
= tcg_temp_new_i32();
9217 tcg_gen_movi_i32(tmp
, insn
& 0xff);
9218 if (!s
->condexec_mask
)
9220 store_reg(s
, rd
, tmp
);
9222 tmp
= load_reg(s
, rd
);
9223 tmp2
= tcg_temp_new_i32();
9224 tcg_gen_movi_i32(tmp2
, insn
& 0xff);
9227 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
9228 tcg_temp_free_i32(tmp
);
9229 tcg_temp_free_i32(tmp2
);
9232 if (s
->condexec_mask
)
9233 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
9235 gen_helper_add_cc(tmp
, tmp
, tmp2
);
9236 tcg_temp_free_i32(tmp2
);
9237 store_reg(s
, rd
, tmp
);
9240 if (s
->condexec_mask
)
9241 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
9243 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
9244 tcg_temp_free_i32(tmp2
);
9245 store_reg(s
, rd
, tmp
);
9251 if (insn
& (1 << 11)) {
9252 rd
= (insn
>> 8) & 7;
9253 /* load pc-relative. Bit 1 of PC is ignored. */
9254 val
= s
->pc
+ 2 + ((insn
& 0xff) * 4);
9255 val
&= ~(uint32_t)2;
9256 addr
= tcg_temp_new_i32();
9257 tcg_gen_movi_i32(addr
, val
);
9258 tmp
= gen_ld32(addr
, IS_USER(s
));
9259 tcg_temp_free_i32(addr
);
9260 store_reg(s
, rd
, tmp
);
9263 if (insn
& (1 << 10)) {
9264 /* data processing extended or blx */
9265 rd
= (insn
& 7) | ((insn
>> 4) & 8);
9266 rm
= (insn
>> 3) & 0xf;
9267 op
= (insn
>> 8) & 3;
9270 tmp
= load_reg(s
, rd
);
9271 tmp2
= load_reg(s
, rm
);
9272 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
9273 tcg_temp_free_i32(tmp2
);
9274 store_reg(s
, rd
, tmp
);
9277 tmp
= load_reg(s
, rd
);
9278 tmp2
= load_reg(s
, rm
);
9279 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
9280 tcg_temp_free_i32(tmp2
);
9281 tcg_temp_free_i32(tmp
);
9283 case 2: /* mov/cpy */
9284 tmp
= load_reg(s
, rm
);
9285 store_reg(s
, rd
, tmp
);
9287 case 3:/* branch [and link] exchange thumb register */
9288 tmp
= load_reg(s
, rm
);
9289 if (insn
& (1 << 7)) {
9291 val
= (uint32_t)s
->pc
| 1;
9292 tmp2
= tcg_temp_new_i32();
9293 tcg_gen_movi_i32(tmp2
, val
);
9294 store_reg(s
, 14, tmp2
);
9296 /* already thumb, no need to check */
9303 /* data processing register */
9305 rm
= (insn
>> 3) & 7;
9306 op
= (insn
>> 6) & 0xf;
9307 if (op
== 2 || op
== 3 || op
== 4 || op
== 7) {
9308 /* the shift/rotate ops want the operands backwards */
9317 if (op
== 9) { /* neg */
9318 tmp
= tcg_temp_new_i32();
9319 tcg_gen_movi_i32(tmp
, 0);
9320 } else if (op
!= 0xf) { /* mvn doesn't read its first operand */
9321 tmp
= load_reg(s
, rd
);
9326 tmp2
= load_reg(s
, rm
);
9329 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
9330 if (!s
->condexec_mask
)
9334 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
9335 if (!s
->condexec_mask
)
9339 if (s
->condexec_mask
) {
9340 gen_helper_shl(tmp2
, tmp2
, tmp
);
9342 gen_helper_shl_cc(tmp2
, tmp2
, tmp
);
9347 if (s
->condexec_mask
) {
9348 gen_helper_shr(tmp2
, tmp2
, tmp
);
9350 gen_helper_shr_cc(tmp2
, tmp2
, tmp
);
9355 if (s
->condexec_mask
) {
9356 gen_helper_sar(tmp2
, tmp2
, tmp
);
9358 gen_helper_sar_cc(tmp2
, tmp2
, tmp
);
9363 if (s
->condexec_mask
)
9366 gen_helper_adc_cc(tmp
, tmp
, tmp2
);
9369 if (s
->condexec_mask
)
9370 gen_sub_carry(tmp
, tmp
, tmp2
);
9372 gen_helper_sbc_cc(tmp
, tmp
, tmp2
);
9375 if (s
->condexec_mask
) {
9376 tcg_gen_andi_i32(tmp
, tmp
, 0x1f);
9377 tcg_gen_rotr_i32(tmp2
, tmp2
, tmp
);
9379 gen_helper_ror_cc(tmp2
, tmp2
, tmp
);
9384 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
9389 if (s
->condexec_mask
)
9390 tcg_gen_neg_i32(tmp
, tmp2
);
9392 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
9395 gen_helper_sub_cc(tmp
, tmp
, tmp2
);
9399 gen_helper_add_cc(tmp
, tmp
, tmp2
);
9403 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
9404 if (!s
->condexec_mask
)
9408 tcg_gen_mul_i32(tmp
, tmp
, tmp2
);
9409 if (!s
->condexec_mask
)
9413 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
9414 if (!s
->condexec_mask
)
9418 tcg_gen_not_i32(tmp2
, tmp2
);
9419 if (!s
->condexec_mask
)
9427 store_reg(s
, rm
, tmp2
);
9429 tcg_temp_free_i32(tmp
);
9431 store_reg(s
, rd
, tmp
);
9432 tcg_temp_free_i32(tmp2
);
9435 tcg_temp_free_i32(tmp
);
9436 tcg_temp_free_i32(tmp2
);
9441 /* load/store register offset. */
9443 rn
= (insn
>> 3) & 7;
9444 rm
= (insn
>> 6) & 7;
9445 op
= (insn
>> 9) & 7;
9446 addr
= load_reg(s
, rn
);
9447 tmp
= load_reg(s
, rm
);
9448 tcg_gen_add_i32(addr
, addr
, tmp
);
9449 tcg_temp_free_i32(tmp
);
9451 if (op
< 3) /* store */
9452 tmp
= load_reg(s
, rd
);
9456 gen_st32(tmp
, addr
, IS_USER(s
));
9459 gen_st16(tmp
, addr
, IS_USER(s
));
9462 gen_st8(tmp
, addr
, IS_USER(s
));
9465 tmp
= gen_ld8s(addr
, IS_USER(s
));
9468 tmp
= gen_ld32(addr
, IS_USER(s
));
9471 tmp
= gen_ld16u(addr
, IS_USER(s
));
9474 tmp
= gen_ld8u(addr
, IS_USER(s
));
9477 tmp
= gen_ld16s(addr
, IS_USER(s
));
9480 if (op
>= 3) /* load */
9481 store_reg(s
, rd
, tmp
);
9482 tcg_temp_free_i32(addr
);
9486 /* load/store word immediate offset */
9488 rn
= (insn
>> 3) & 7;
9489 addr
= load_reg(s
, rn
);
9490 val
= (insn
>> 4) & 0x7c;
9491 tcg_gen_addi_i32(addr
, addr
, val
);
9493 if (insn
& (1 << 11)) {
9495 tmp
= gen_ld32(addr
, IS_USER(s
));
9496 store_reg(s
, rd
, tmp
);
9499 tmp
= load_reg(s
, rd
);
9500 gen_st32(tmp
, addr
, IS_USER(s
));
9502 tcg_temp_free_i32(addr
);
9506 /* load/store byte immediate offset */
9508 rn
= (insn
>> 3) & 7;
9509 addr
= load_reg(s
, rn
);
9510 val
= (insn
>> 6) & 0x1f;
9511 tcg_gen_addi_i32(addr
, addr
, val
);
9513 if (insn
& (1 << 11)) {
9515 tmp
= gen_ld8u(addr
, IS_USER(s
));
9516 store_reg(s
, rd
, tmp
);
9519 tmp
= load_reg(s
, rd
);
9520 gen_st8(tmp
, addr
, IS_USER(s
));
9522 tcg_temp_free_i32(addr
);
9526 /* load/store halfword immediate offset */
9528 rn
= (insn
>> 3) & 7;
9529 addr
= load_reg(s
, rn
);
9530 val
= (insn
>> 5) & 0x3e;
9531 tcg_gen_addi_i32(addr
, addr
, val
);
9533 if (insn
& (1 << 11)) {
9535 tmp
= gen_ld16u(addr
, IS_USER(s
));
9536 store_reg(s
, rd
, tmp
);
9539 tmp
= load_reg(s
, rd
);
9540 gen_st16(tmp
, addr
, IS_USER(s
));
9542 tcg_temp_free_i32(addr
);
9546 /* load/store from stack */
9547 rd
= (insn
>> 8) & 7;
9548 addr
= load_reg(s
, 13);
9549 val
= (insn
& 0xff) * 4;
9550 tcg_gen_addi_i32(addr
, addr
, val
);
9552 if (insn
& (1 << 11)) {
9554 tmp
= gen_ld32(addr
, IS_USER(s
));
9555 store_reg(s
, rd
, tmp
);
9558 tmp
= load_reg(s
, rd
);
9559 gen_st32(tmp
, addr
, IS_USER(s
));
9561 tcg_temp_free_i32(addr
);
9565 /* add to high reg */
9566 rd
= (insn
>> 8) & 7;
9567 if (insn
& (1 << 11)) {
9569 tmp
= load_reg(s
, 13);
9571 /* PC. bit 1 is ignored. */
9572 tmp
= tcg_temp_new_i32();
9573 tcg_gen_movi_i32(tmp
, (s
->pc
+ 2) & ~(uint32_t)2);
9575 val
= (insn
& 0xff) * 4;
9576 tcg_gen_addi_i32(tmp
, tmp
, val
);
9577 store_reg(s
, rd
, tmp
);
9582 op
= (insn
>> 8) & 0xf;
9585 /* adjust stack pointer */
9586 tmp
= load_reg(s
, 13);
9587 val
= (insn
& 0x7f) * 4;
9588 if (insn
& (1 << 7))
9589 val
= -(int32_t)val
;
9590 tcg_gen_addi_i32(tmp
, tmp
, val
);
9591 store_reg(s
, 13, tmp
);
9594 case 2: /* sign/zero extend. */
9597 rm
= (insn
>> 3) & 7;
9598 tmp
= load_reg(s
, rm
);
9599 switch ((insn
>> 6) & 3) {
9600 case 0: gen_sxth(tmp
); break;
9601 case 1: gen_sxtb(tmp
); break;
9602 case 2: gen_uxth(tmp
); break;
9603 case 3: gen_uxtb(tmp
); break;
9605 store_reg(s
, rd
, tmp
);
9607 case 4: case 5: case 0xc: case 0xd:
9609 addr
= load_reg(s
, 13);
9610 if (insn
& (1 << 8))
9614 for (i
= 0; i
< 8; i
++) {
9615 if (insn
& (1 << i
))
9618 if ((insn
& (1 << 11)) == 0) {
9619 tcg_gen_addi_i32(addr
, addr
, -offset
);
9621 for (i
= 0; i
< 8; i
++) {
9622 if (insn
& (1 << i
)) {
9623 if (insn
& (1 << 11)) {
9625 tmp
= gen_ld32(addr
, IS_USER(s
));
9626 store_reg(s
, i
, tmp
);
9629 tmp
= load_reg(s
, i
);
9630 gen_st32(tmp
, addr
, IS_USER(s
));
9632 /* advance to the next address. */
9633 tcg_gen_addi_i32(addr
, addr
, 4);
9637 if (insn
& (1 << 8)) {
9638 if (insn
& (1 << 11)) {
9640 tmp
= gen_ld32(addr
, IS_USER(s
));
9641 /* don't set the pc until the rest of the instruction
9645 tmp
= load_reg(s
, 14);
9646 gen_st32(tmp
, addr
, IS_USER(s
));
9648 tcg_gen_addi_i32(addr
, addr
, 4);
9650 if ((insn
& (1 << 11)) == 0) {
9651 tcg_gen_addi_i32(addr
, addr
, -offset
);
9653 /* write back the new stack pointer */
9654 store_reg(s
, 13, addr
);
9655 /* set the new PC value */
9656 if ((insn
& 0x0900) == 0x0900) {
9657 store_reg_from_load(env
, s
, 15, tmp
);
9661 case 1: case 3: case 9: case 11: /* czb */
9663 tmp
= load_reg(s
, rm
);
9664 s
->condlabel
= gen_new_label();
9666 if (insn
& (1 << 11))
9667 tcg_gen_brcondi_i32(TCG_COND_EQ
, tmp
, 0, s
->condlabel
);
9669 tcg_gen_brcondi_i32(TCG_COND_NE
, tmp
, 0, s
->condlabel
);
9670 tcg_temp_free_i32(tmp
);
9671 offset
= ((insn
& 0xf8) >> 2) | (insn
& 0x200) >> 3;
9672 val
= (uint32_t)s
->pc
+ 2;
9677 case 15: /* IT, nop-hint. */
9678 if ((insn
& 0xf) == 0) {
9679 gen_nop_hint(s
, (insn
>> 4) & 0xf);
9683 s
->condexec_cond
= (insn
>> 4) & 0xe;
9684 s
->condexec_mask
= insn
& 0x1f;
9685 /* No actual code generated for this insn, just setup state. */
9688 case 0xe: /* bkpt */
9690 gen_exception_insn(s
, 2, EXCP_BKPT
);
9695 rn
= (insn
>> 3) & 0x7;
9697 tmp
= load_reg(s
, rn
);
9698 switch ((insn
>> 6) & 3) {
9699 case 0: tcg_gen_bswap32_i32(tmp
, tmp
); break;
9700 case 1: gen_rev16(tmp
); break;
9701 case 3: gen_revsh(tmp
); break;
9702 default: goto illegal_op
;
9704 store_reg(s
, rd
, tmp
);
9712 tmp
= tcg_const_i32((insn
& (1 << 4)) != 0);
9715 addr
= tcg_const_i32(19);
9716 gen_helper_v7m_msr(cpu_env
, addr
, tmp
);
9717 tcg_temp_free_i32(addr
);
9721 addr
= tcg_const_i32(16);
9722 gen_helper_v7m_msr(cpu_env
, addr
, tmp
);
9723 tcg_temp_free_i32(addr
);
9725 tcg_temp_free_i32(tmp
);
9728 if (insn
& (1 << 4))
9729 shift
= CPSR_A
| CPSR_I
| CPSR_F
;
9732 gen_set_psr_im(s
, ((insn
& 7) << 6), 0, shift
);
9743 /* load/store multiple */
9745 TCGV_UNUSED(loaded_var
);
9746 rn
= (insn
>> 8) & 0x7;
9747 addr
= load_reg(s
, rn
);
9748 for (i
= 0; i
< 8; i
++) {
9749 if (insn
& (1 << i
)) {
9750 if (insn
& (1 << 11)) {
9752 tmp
= gen_ld32(addr
, IS_USER(s
));
9756 store_reg(s
, i
, tmp
);
9760 tmp
= load_reg(s
, i
);
9761 gen_st32(tmp
, addr
, IS_USER(s
));
9763 /* advance to the next address */
9764 tcg_gen_addi_i32(addr
, addr
, 4);
9767 if ((insn
& (1 << rn
)) == 0) {
9768 /* base reg not in list: base register writeback */
9769 store_reg(s
, rn
, addr
);
9771 /* base reg in list: if load, complete it now */
9772 if (insn
& (1 << 11)) {
9773 store_reg(s
, rn
, loaded_var
);
9775 tcg_temp_free_i32(addr
);
9780 /* conditional branch or swi */
9781 cond
= (insn
>> 8) & 0xf;
9787 gen_set_pc_im(s
->pc
);
9788 s
->is_jmp
= DISAS_SWI
;
9791 /* generate a conditional jump to next instruction */
9792 s
->condlabel
= gen_new_label();
9793 gen_test_cc(cond
^ 1, s
->condlabel
);
9796 /* jump to the offset */
9797 val
= (uint32_t)s
->pc
+ 2;
9798 offset
= ((int32_t)insn
<< 24) >> 24;
9804 if (insn
& (1 << 11)) {
9805 if (disas_thumb2_insn(env
, s
, insn
))
9809 /* unconditional branch */
9810 val
= (uint32_t)s
->pc
;
9811 offset
= ((int32_t)insn
<< 21) >> 21;
9812 val
+= (offset
<< 1) + 2;
9817 if (disas_thumb2_insn(env
, s
, insn
))
9823 gen_exception_insn(s
, 4, EXCP_UDEF
);
9827 gen_exception_insn(s
, 2, EXCP_UDEF
);
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
   basic block 'tb'. If search_pc is TRUE, also generate PC
   information for each intermediate instruction. */
static inline void gen_intermediate_code_internal(CPUARMState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext dc1, *dc = &dc1;
    CPUBreakpoint *bp;
    uint16_t *gen_opc_end;
    int j, lj;
    target_ulong pc_start;
    uint32_t next_page_start;
    int num_insns;
    int max_insns;

    /* generate intermediate code */
    pc_start = tb->pc;

    dc->tb = tb;

    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = env->singlestep_enabled;
    dc->condjmp = 0;
    dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
    dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
    dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
#if !defined(CONFIG_USER_ONLY)
    dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
#endif
    dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
    dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
    dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    /* FIXME: cpu_M0 can probably be the same as cpu_V0.  */
    cpu_M0 = tcg_temp_new_i64();
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_icount_start();

    tcg_clear_temp_count();

    /* A note on handling of the condexec (IT) bits:
     *
     * We want to avoid the overhead of having to write the updated condexec
     * bits back to the CPUARMState for every instruction in an IT block. So:
     * (1) if the condexec bits are not already zero then we write
     * zero back into the CPUARMState now. This avoids complications trying
     * to do it at the end of the block. (For example if we don't do this
     * it's hard to identify whether we can safely skip writing condexec
     * at the end of the TB, which we definitely want to do for the case
     * where a TB doesn't do anything with the IT state at all.)
     * (2) if we are going to leave the TB then we call gen_set_condexec()
     * which will write the correct value into CPUARMState if zero is wrong.
     * This is done both for leaving the TB at the end, and for leaving
     * it because of an exception we know will happen, which is done in
     * gen_exception_insn(). The latter is necessary because we need to
     * leave the TB with the PC/IT state just prior to execution of the
     * instruction which caused the exception.
     * (3) if we leave the TB unexpectedly (eg a data abort on a load)
     * then the CPUARMState will be wrong and we need to reset it.
     * This is handled in the same way as restoration of the
     * PC in these situations: we will be called again with search_pc=1
     * and generate a mapping of the condexec bits for each PC in
     * gen_opc_condexec_bits[]. restore_state_to_opc() then uses
     * this to restore the condexec bits.
     *
     * Note that there are no instructions which can read the condexec
     * bits, and none which can write non-static values to them, so
     * we don't need to care about whether CPUARMState is correct in the
     * middle of a TB.
     */

    /* Reset the conditional execution bits immediately. This avoids
       complications trying to do it at the end of the block.  */
    if (dc->condexec_mask || dc->condexec_cond) {
        TCGv tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, 0);
        store_cpu_field(tmp, condexec_bits);
    }
    do {
#ifdef CONFIG_USER_ONLY
        /* Intercept jump to the magic kernel page.  */
        if (dc->pc >= 0xffff0000) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception(EXCP_KERNEL_TRAP);
            dc->is_jmp = DISAS_UPDATE;
            break;
        }
#else
        if (dc->pc >= 0xfffffff0 && IS_M(env)) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception(EXCP_EXCEPTION_EXIT);
            dc->is_jmp = DISAS_UPDATE;
            break;
        }
#endif

        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    gen_exception_insn(dc, 0, EXCP_DEBUG);
                    /* Advance PC so that clearing the breakpoint will
                       invalidate this TB.  */
                    dc->pc += 2;
                    goto done_generating;
                }
            }
        }
        if (search_pc) {
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = dc->pc;
            gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4)
                                        | (dc->condexec_mask >> 1);
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }

        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
            tcg_gen_debug_insn_start(dc->pc);
        }

        if (dc->thumb) {
            disas_thumb_insn(env, dc);
            if (dc->condexec_mask) {
                dc->condexec_cond = (dc->condexec_cond & 0xe)
                                    | ((dc->condexec_mask >> 4) & 1);
                dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
                if (dc->condexec_mask == 0) {
                    dc->condexec_cond = 0;
                }
            }
        } else {
            disas_arm_insn(env, dc);
        }

        if (dc->condjmp && !dc->is_jmp) {
            gen_set_label(dc->condlabel);
            dc->condjmp = 0;
        }

        if (tcg_check_temp_count()) {
            fprintf(stderr, "TCG temporary leak before %08x\n", dc->pc);
        }

        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached.  This
         * ensures prefetch aborts occur at the right place.  */
        num_insns++;
    } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
             !env->singlestep_enabled &&
             !singlestep &&
             dc->pc < next_page_start &&
             num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO) {
        if (dc->condjmp) {
            /* FIXME:  This can theoretically happen with self-modifying
               code.  */
            cpu_abort(env, "IO on conditional branch instruction");
        }
        gen_io_end();
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written.  */
    if (unlikely(env->singlestep_enabled)) {
        /* Make sure the pc is updated, and raise a debug exception.  */
        if (dc->condjmp) {
            gen_set_condexec(dc);
            if (dc->is_jmp == DISAS_SWI) {
                gen_exception(EXCP_SWI);
            } else {
                gen_exception(EXCP_DEBUG);
            }
            gen_set_label(dc->condlabel);
        }
        if (dc->condjmp || !dc->is_jmp) {
            gen_set_pc_im(dc->pc);
            dc->condjmp = 0;
        }
        gen_set_condexec(dc);
        if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
            gen_exception(EXCP_SWI);
        } else {
            /* FIXME: Single stepping a WFI insn will not halt
               the CPU.  */
            gen_exception(EXCP_DEBUG);
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        gen_set_condexec(dc);
        switch (dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI);
            break;
        }
        if (dc->condjmp) {
            gen_set_label(dc->condlabel);
            gen_set_condexec(dc);
            gen_goto_tb(dc, 1, dc->pc);
            dc->condjmp = 0;
        }
    }

done_generating:
    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;
#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(pc_start, dc->pc - pc_start, dc->thumb);
        qemu_log("\n");
    }
#endif
    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }
}

void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc(CPUARMState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}

static const char *cpu_mode_names[16] = {
    "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
    "???", "???", "???", "und", "???", "???", "???", "sys"
};

void cpu_dump_state(CPUARMState *env, FILE *f, fprintf_function cpu_fprintf,
                    int flags)
{
    int i;
#if 0
    union {
        uint32_t i;
        float s;
    } s0, s1;
    CPU_DoubleU d;
    /* ??? This assumes float64 and double have the same layout.
       Oh well, it's only debug dumps.  */
    union {
        float64 f64;
        double d;
    } d0;
#endif
    uint32_t psr;

    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3)
            cpu_fprintf(f, "\n");
        else
            cpu_fprintf(f, " ");
    }
    psr = cpsr_read(env);
    cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
                psr,
                psr & (1 << 31) ? 'N' : '-',
                psr & (1 << 30) ? 'Z' : '-',
                psr & (1 << 29) ? 'C' : '-',
                psr & (1 << 28) ? 'V' : '-',
                psr & CPSR_T ? 'T' : 'A',
                cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);

#if 0
    for (i = 0; i < 16; i++) {
        d.d = env->vfp.regs[i];
        s0.i = d.l.lower;
        s1.i = d.l.upper;
        d0.f64 = d.d;
        cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
                    i * 2, (int)s0.i, s0.s,
                    i * 2 + 1, (int)s1.i, s1.s,
                    i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
                    d0.d);
    }
    cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
#endif
}

void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, int pc_pos)
{
    env->regs[15] = gen_opc_pc[pc_pos];
    env->condexec_bits = gen_opc_condexec_bits[pc_pos];
}