/*
 *  ARM translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2005-2007 CodeSourcery
 *  Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "disas.h"
#include "tcg-op.h"
#include "qemu-log.h"

#include "helpers.h"
#define GEN_HELPER 1
#include "helpers.h"

#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_feature(env, ARM_FEATURE_V7)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
/* internal defines */
typedef struct DisasContext {
    target_ulong pc;
    int is_jmp;
    /* Nonzero if this instruction has been conditionally skipped.  */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped.  */
    int condlabel;
    /* Thumb-2 conditional execution bits.  */
    int condexec_mask;
    int condexec_cond;
    struct TranslationBlock *tb;
    int singlestep_enabled;
    int thumb;
#if !defined(CONFIG_USER_ONLY)
    int user;
#endif
} DisasContext;

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated.  */
#define DISAS_WFI 4
#define DISAS_SWI 5
static TCGv_ptr cpu_env;
/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
static TCGv_i32 cpu_exclusive_addr;
static TCGv_i32 cpu_exclusive_val;
static TCGv_i32 cpu_exclusive_high;
#ifdef CONFIG_USER_ONLY
static TCGv_i32 cpu_exclusive_test;
static TCGv_i32 cpu_exclusive_info;
#endif

/* FIXME:  These should be removed.  */
static TCGv cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "gen-icount.h"

static const char *regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
/* initialize TCG globals.  */
void arm_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                          offsetof(CPUState, regs[i]),
                                          regnames[i]);
    }
    cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_val), "exclusive_val");
    cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_high), "exclusive_high");
#ifdef CONFIG_USER_ONLY
    cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_test), "exclusive_test");
    cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_info), "exclusive_info");
#endif

#define GEN_HELPER 2
#include "helpers.h"
}

static int num_temps;

/* Allocate a temporary variable.  */
static TCGv_i32 new_tmp(void)
{
    num_temps++;
    return tcg_temp_new_i32();
}

/* Release a temporary variable.  */
static void dead_tmp(TCGv tmp)
{
    tcg_temp_free(tmp);
    num_temps--;
}
static inline TCGv load_cpu_offset(int offset)
{
    TCGv tmp = new_tmp();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))

static inline void store_cpu_offset(TCGv var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    dead_tmp(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUState, name))

/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv load_reg(DisasContext *s, int reg)
{
    TCGv tmp = new_tmp();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 15) {
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    dead_tmp(var);
}
/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)

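/* Copy var into the CPSR fields selected by mask.  */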
static inline void gen_set_cpsr(TCGv var, uint32_t mask)
{
    TCGv tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}
/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)

static void gen_exception(int excp)
{
    TCGv tmp = new_tmp();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(tmp);
    dead_tmp(tmp);
}

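/* Dual 16x16->32 signed multiply: leaves the product of the low
   halfwords in a and the product of the high halfwords in b.  */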
static void gen_smul_dual(TCGv a, TCGv b)
{
    TCGv tmp1 = new_tmp();
    TCGv tmp2 = new_tmp();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    dead_tmp(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    dead_tmp(tmp1);
}

/* Byteswap each halfword.  */
static void gen_rev16(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}

/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_ext8s_i32(var, var);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}

/* Unsigned bitfield extract.  */
static void gen_ubfx(TCGv var, int shift, uint32_t mask)
{
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_andi_i32(var, var, mask);
}

/* Signed bitfield extract.  */
static void gen_sbfx(TCGv var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}

/* Bitfield insertion.  Insert val into base.  Clobbers base and val.  */
static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
{
    tcg_gen_andi_i32(val, val, mask);
    tcg_gen_shli_i32(val, val, shift);
    tcg_gen_andi_i32(base, base, ~(mask << shift));
    tcg_gen_or_i32(dest, base, val);
}

/* Round the top 32 bits of a 64-bit value.  */
static void gen_roundqd(TCGv a, TCGv b)
{
    tcg_gen_shri_i32(a, a, 31);
    tcg_gen_add_i32(a, a, b);
}
/* FIXME: Most targets have native widening multiplication.
   It would be good to use that instead of a full wide multiply.  */
/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp1, a);
    dead_tmp(a);
    tcg_gen_extu_i32_i64(tmp2, b);
    dead_tmp(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}

static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    dead_tmp(a);
    tcg_gen_ext_i32_i64(tmp2, b);
    dead_tmp(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}

/* Signed 32x32->64 multiply.  */
static void gen_imull(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    tcg_gen_ext_i32_i64(tmp2, b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    tcg_gen_trunc_i64_i32(a, tmp1);
    tcg_gen_shri_i64(tmp1, tmp1, 32);
    tcg_gen_trunc_i64_i32(b, tmp1);
    tcg_temp_free_i64(tmp1);
}

/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}
/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
   tmp = (t0 ^ t1) & 0x8000;
   t0 &= ~0x8000;
   t1 &= ~0x8000;
   t0 = (t0 + t1) ^ tmp;
 */

static void gen_add16(TCGv t0, TCGv t1)
{
    TCGv tmp = new_tmp();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    dead_tmp(tmp);
    dead_tmp(t1);
}

#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))

/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 31);
    gen_set_CF(tmp);
    dead_tmp(tmp);
}
/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
}

/* T0 += T1 + CF.  */
static void gen_adc(TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(t0, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(t0, t0, tmp);
    dead_tmp(tmp);
}

/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    dead_tmp(tmp);
}

/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_sub_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_gen_subi_i32(dest, dest, 1);
    dead_tmp(tmp);
}

/* FIXME:  Implement this natively.  */
#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)

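/* Set the carry flag to bit (shift) of var, i.e. the last bit shifted
   out by an immediate shift.  shift == 0 selects bit 0 (used for RRX).  */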
static void shifter_out_im(TCGv var, int shift)
{
    TCGv tmp = new_tmp();
    if (shift == 0) {
        tcg_gen_andi_i32(tmp, var, 1);
    } else {
        tcg_gen_shri_i32(tmp, var, shift);
        if (shift != 31)
            tcg_gen_andi_i32(tmp, tmp, 1);
    }
    gen_set_CF(tmp);
    dead_tmp(tmp);
}
/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(var, var, 31);
                gen_set_CF(var);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift); break;
        } else {
            TCGv tmp = load_cpu_field(CF);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_shli_i32(tmp, tmp, 31);
            tcg_gen_or_i32(var, var, tmp);
            dead_tmp(tmp);
        }
    }
}
static inline void gen_arm_shift_reg(TCGv var, int shiftop,
                                     TCGv shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, var, shift); break;
        case 1: gen_helper_shr_cc(var, var, shift); break;
        case 2: gen_helper_sar_cc(var, var, shift); break;
        case 3: gen_helper_ror_cc(var, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0: gen_helper_shl(var, var, shift); break;
        case 1: gen_helper_shr(var, var, shift); break;
        case 2: gen_helper_sar(var, var, shift); break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
                tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    dead_tmp(shift);
}
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
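/* Generate a parallel add/subtract media op: op1 selects the signed,
   unsigned, saturating or halving variant, op2 the byte/halfword
   add/sub pattern expanded by PAS_OP above.  */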
static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.  */
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

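/* Generate a branch to label if condition code cc holds, testing the
   cached NF/ZF/CF/VF flag values held in the CPU state.  */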
static void gen_test_cc(int cc, int label)
{
    TCGv tmp;
    TCGv tmp2;
    int inv;

    switch (cc) {
    case 0: /* eq: Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 1: /* ne: !Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 2: /* cs: C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 3: /* cc: !C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 4: /* mi: N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 5: /* pl: !N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 6: /* vs: V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 7: /* vc: !V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 8: /* hi: C && !Z */
        inv = gen_new_label();
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 9: /* ls: !C || Z */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 10: /* ge: N == V -> N ^ V == 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 11: /* lt: N != V -> N ^ V != 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 12: /* gt: !Z && N == V */
        inv = gen_new_label();
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 13: /* le: Z || N != V */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }
    dead_tmp(tmp);
}

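/* For each data processing opcode, nonzero if the op is a logic op that
   sets only the N and Z flags from its result (via gen_logic_CC), zero
   for arithmetic ops that compute full NZCV flags.  */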
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};
/* Set PC and Thumb state from an immediate address.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv tmp;

    s->is_jmp = DISAS_UPDATE;
    if (s->thumb != (addr & 1)) {
        tmp = new_tmp();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
        dead_tmp(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}

/* Set PC and Thumb state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv var)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}

/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above. The source must be a temporary
   and will be marked as dead. */
static inline void store_reg_bx(CPUState *env, DisasContext *s,
                                int reg, TCGv var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

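/* Memory access helpers: each gen_ldXX returns a fresh temporary holding
   the loaded value; each gen_stXX consumes and frees val after storing.  */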
static inline TCGv gen_ld8s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld8u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld32(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld32u(tmp, addr, index);
    return tmp;
}
static inline TCGv_i64 gen_ld64(TCGv addr, int index)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(tmp, addr, index);
    return tmp;
}
static inline void gen_st8(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st8(val, addr, index);
    dead_tmp(val);
}
static inline void gen_st16(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st16(val, addr, index);
    dead_tmp(val);
}
static inline void gen_st32(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st32(val, addr, index);
    dead_tmp(val);
}
static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
{
    tcg_gen_qemu_st64(val, addr, index);
    tcg_temp_free_i64(val);
}
static inline void gen_set_pc_im(uint32_t val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}

/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->is_jmp = DISAS_UPDATE;
}

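/* Add the addressing mode 2 offset encoded in insn (12-bit immediate or
   shifted register, with bit 23 giving the sign) to the base address in
   var.  */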
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv var)
{
    int val, rm, shift, shiftop;
    TCGv offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        dead_tmp(offset);
    }
}

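/* As above, but for addressing mode 3 (split 8-bit immediate or plain
   register offset); extra is an additional displacement folded in.  */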
static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv var)
{
    int val, rm;
    TCGv offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = (insn) & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        dead_tmp(offset);
    }
}
#define VFP_OP2(name) \
static inline void gen_vfp_##name(int dp) \
{ \
    if (dp) \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
    else \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2

static inline void gen_vfp_abs(int dp)
{
    if (dp)
        gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_neg(int dp)
{
    if (dp)
        gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_sqrt(int dp)
{
    if (dp)
        gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_cmp(int dp)
{
    if (dp)
        gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_cmpe(int dp)
{
    if (dp)
        gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_F1_ld0(int dp)
{
    if (dp)
        tcg_gen_movi_i64(cpu_F1d, 0);
    else
        tcg_gen_movi_i32(cpu_F1s, 0);
}

static inline void gen_vfp_uito(int dp)
{
    if (dp)
        gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
    else
        gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_sito(int dp)
{
    if (dp)
        gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
    else
        gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_toui(int dp)
{
    if (dp)
        gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_touiz(int dp)
{
    if (dp)
        gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_tosi(int dp)
{
    if (dp)
        gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_tosiz(int dp)
{
    if (dp)
        gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
}

#define VFP_GEN_FIX(name) \
static inline void gen_vfp_##name(int dp, int shift) \
{ \
    TCGv tmp_shift = tcg_const_i32(shift); \
    if (dp) \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, cpu_env);\
    else \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, cpu_env);\
    tcg_temp_free_i32(tmp_shift); \
}
VFP_GEN_FIX(tosh)
VFP_GEN_FIX(tosl)
VFP_GEN_FIX(touh)
VFP_GEN_FIX(toul)
VFP_GEN_FIX(shto)
VFP_GEN_FIX(slto)
VFP_GEN_FIX(uhto)
VFP_GEN_FIX(ulto)
#undef VFP_GEN_FIX
static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
{
    if (dp)
        tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
}

static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
{
    if (dp)
        tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
}

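/* Offset of VFP register reg within CPUARMState: dp selects the
   double-precision bank; single-precision registers map onto the upper
   and lower halves of the doubles.  */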
static inline long
vfp_reg_offset (int dp, int reg)
{
    if (dp)
        return offsetof(CPUARMState, vfp.regs[reg]);
    else if (reg & 1) {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.upper);
    } else {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.lower);
    }
}

/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}

static TCGv neon_load_reg(int reg, int pass)
{
    TCGv tmp = new_tmp();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}

static void neon_store_reg(int reg, int pass, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    dead_tmp(var);
}

static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64

static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}
#define ARM_CP_RW_BIT	(1 << 20)

static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}

static inline TCGv iwmmxt_load_creg(int reg)
{
    TCGv var = new_tmp();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
    return var;
}

static inline void iwmmxt_store_creg(int reg, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
    dead_tmp(var);
}

static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}

#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}

IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP(msadb)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)
static void gen_op_iwmmxt_set_mup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_set_cup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv tmp = new_tmp();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}

static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}

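/* Compute the effective address of an iwMMXt load/store into dest,
   applying pre/post indexing and base register writeback as encoded in
   insn; returns nonzero for an invalid addressing mode.  */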
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
{
    int rd;
    uint32_t offset;
    TCGv tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);
        else
            dead_tmp(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}

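/* Fetch the shift amount for an iwMMXt shift instruction into dest,
   taken either from a wCGR control register or from the low half of a
   wR register, masked to the legal range; returns nonzero for an
   invalid encoding.  */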
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = new_tmp();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_trunc_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    dead_tmp(tmp);
    return 0;
}
/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    int rd, wrd;
    int rdhi, rdlo, rd0, rd1, i;
    TCGv addr;
    TCGv tmp, tmp2, tmp3;

    if ((insn & 0x0e000e00) == 0x0c000000) {
        if ((insn & 0x0fe00ff0) == 0x0c400000) {
            wrd = insn & 0xf;
            rdlo = (insn >> 12) & 0xf;
            rdhi = (insn >> 16) & 0xf;
            if (insn & ARM_CP_RW_BIT) { /* TMRRC */
                iwmmxt_load_reg(cpu_V0, wrd);
                tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
            } else { /* TMCRR */
                tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
                iwmmxt_store_reg(cpu_V0, wrd);
                gen_op_iwmmxt_set_mup();
            }
            return 0;
        }

        wrd = (insn >> 12) & 0xf;
        addr = new_tmp();
        if (gen_iwmmxt_address(s, insn, addr)) {
            dead_tmp(addr);
            return 1;
        }
        if (insn & ARM_CP_RW_BIT) {
            if ((insn >> 28) == 0xf) { /* WLDRW wCx */
                tmp = new_tmp();
                tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
                iwmmxt_store_creg(wrd, tmp);
            } else {
                i = 1;
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) { /* WLDRD */
                        tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
                        i = 0;
                    } else { /* WLDRW wRd */
                        tmp = gen_ld32(addr, IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) { /* WLDRH */
                        tmp = gen_ld16u(addr, IS_USER(s));
                    } else { /* WLDRB */
                        tmp = gen_ld8u(addr, IS_USER(s));
                    }
                }
                if (i) {
                    tcg_gen_extu_i32_i64(cpu_M0, tmp);
                    dead_tmp(tmp);
                }
                gen_op_iwmmxt_movq_wRn_M0(wrd);
            }
        } else {
            if ((insn >> 28) == 0xf) { /* WSTRW wCx */
                tmp = iwmmxt_load_creg(wrd);
                gen_st32(tmp, addr, IS_USER(s));
            } else {
                gen_op_iwmmxt_movq_M0_wRn(wrd);
                tmp = new_tmp();
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) { /* WSTRD */
                        dead_tmp(tmp);
                        tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
                    } else { /* WSTRW wRd */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st32(tmp, addr, IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) { /* WSTRH */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st16(tmp, addr, IS_USER(s));
                    } else { /* WSTRB */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st8(tmp, addr, IS_USER(s));
                    }
                }
            }
        }
        dead_tmp(addr);
        return 0;
    }
    if ((insn & 0x0f000000) != 0x0e000000)
        return 1;

    switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
    case 0x000: /* WOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_orq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x011: /* TMCR */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        switch (wrd) {
        case ARM_IWMMXT_wCID:
        case ARM_IWMMXT_wCASF:
            break;
        case ARM_IWMMXT_wCon:
            gen_op_iwmmxt_set_cup();
            /* Fall through.  */
        case ARM_IWMMXT_wCSSF:
            tmp = iwmmxt_load_creg(wrd);
            tmp2 = load_reg(s, rd);
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            dead_tmp(tmp2);
            iwmmxt_store_creg(wrd, tmp);
            break;
        case ARM_IWMMXT_wCGR0:
        case ARM_IWMMXT_wCGR1:
        case ARM_IWMMXT_wCGR2:
        case ARM_IWMMXT_wCGR3:
            gen_op_iwmmxt_set_cup();
            tmp = load_reg(s, rd);
            iwmmxt_store_creg(wrd, tmp);
            break;
        default:
            return 1;
        }
        break;
    case 0x100: /* WXOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_xorq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x111: /* TMRC */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = iwmmxt_load_creg(wrd);
        store_reg(s, rd, tmp);
        break;
    case 0x300: /* WANDN */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tcg_gen_neg_i64(cpu_M0, cpu_M0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x200: /* WAND */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x810: case 0xa10: /* WMADD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_maddsq_M0_wRn(rd1);
        else
            gen_op_iwmmxt_madduq_M0_wRn(rd1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackll_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22))
            gen_op_iwmmxt_sadw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_sadb_M0_wRn(rd1);
        if (!(insn & (1 << 20)))
            gen_op_iwmmxt_addl_M0_wRn(wrd);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_mulshw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mulslw_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_muluhw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mululw_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_macsw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_macuw_M0_wRn(rd1);
        if (!(insn & (1 << 20))) {
            iwmmxt_load_reg(cpu_V1, wrd);
            tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgw1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgw0_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgb1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgb0_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
        tcg_gen_andi_i32(tmp, tmp, 7);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        switch ((insn >> 6) & 3) {
        case 0:
            tmp2 = tcg_const_i32(0xff);
            tmp3 = tcg_const_i32((insn & 7) << 3);
            break;
        case 1:
            tmp2 = tcg_const_i32(0xffff);
            tmp3 = tcg_const_i32((insn & 3) << 4);
            break;
        case 2:
            tmp2 = tcg_const_i32(0xffffffff);
            tmp3 = tcg_const_i32((insn & 1) << 5);
            break;
        default:
            TCGV_UNUSED(tmp2);
            TCGV_UNUSED(tmp3);
        }
        gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
        tcg_temp_free(tmp3);
        tcg_temp_free(tmp2);
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        if (rd == 15 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = new_tmp();
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext8s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xff);
            }
            break;
        case 1:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext16s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xffff);
            }
            break;
        case 2:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
        if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
            break;
        case 1:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
            break;
        case 2:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
            break;
        }
        tcg_gen_shli_i32(tmp, tmp, 28);
        gen_set_nzcv(tmp);
        dead_tmp(tmp);
        break;
    case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        switch ((insn >> 6) & 3) {
        case 0:
            gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
            break;
        case 1:
            gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
            break;
        }
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = new_tmp();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_and_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        dead_tmp(tmp2);
        dead_tmp(tmp);
        break;
    case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = new_tmp();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_or_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        dead_tmp(tmp2);
        dead_tmp(tmp);
        break;
    case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
        rd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = new_tmp();
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_msbb(tmp, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_msbw(tmp, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_msbl(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
    case 0x906: case 0xb06: case 0xd06: case 0xf06:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
    case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsb_M0();
            else
                gen_op_iwmmxt_unpacklub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsw_M0();
            else
                gen_op_iwmmxt_unpackluw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsl_M0();
            else
                gen_op_iwmmxt_unpacklul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
    case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsb_M0();
            else
                gen_op_iwmmxt_unpackhub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsw_M0();
            else
                gen_op_iwmmxt_unpackhuw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsl_M0();
            else
                gen_op_iwmmxt_unpackhul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
    case 0x214: case 0x614: case 0xa14: case 0xe14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = new_tmp();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            dead_tmp(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
    case 0x014: case 0x414: case 0x814: case 0xc14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = new_tmp();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            dead_tmp(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
    case 0x114: case 0x514: case 0x914: case 0xd14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = new_tmp();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            dead_tmp(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
    case 0x314: case 0x714: case 0xb14: case 0xf14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = new_tmp();
        switch ((insn >> 22) & 3) {
        case 1:
            if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
                dead_tmp(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
                dead_tmp(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
                dead_tmp(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
    case 0x916: case 0xb16: case 0xd16: case 0xf16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
    case 0x816: case 0xa16: case 0xc16: case 0xe16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
    case 0x402: case 0x502: case 0x602: case 0x702:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_const_i32((insn >> 20) & 3);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        tcg_temp_free(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
2160 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2161 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2162 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2163 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2164 wrd = (insn >> 12) & 0xf;
2165 rd0 = (insn >> 16) & 0xf;
2166 rd1 = (insn >> 0) & 0xf;
2167 gen_op_iwmmxt_movq_M0_wRn(rd0);
2168 switch ((insn >> 20) & 0xf) {
2169 case 0x0:
2170 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2171 break;
2172 case 0x1:
2173 gen_op_iwmmxt_subub_M0_wRn(rd1);
2174 break;
2175 case 0x3:
2176 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2177 break;
2178 case 0x4:
2179 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2180 break;
2181 case 0x5:
2182 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2183 break;
2184 case 0x7:
2185 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2186 break;
2187 case 0x8:
2188 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2189 break;
2190 case 0x9:
2191 gen_op_iwmmxt_subul_M0_wRn(rd1);
2192 break;
2193 case 0xb:
2194 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2195 break;
2196 default:
2197 return 1;
2198 }
2199 gen_op_iwmmxt_movq_wRn_M0(wrd);
2200 gen_op_iwmmxt_set_mup();
2201 gen_op_iwmmxt_set_cup();
2202 break;
2203 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2204 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2205 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2206 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2207 wrd = (insn >> 12) & 0xf;
2208 rd0 = (insn >> 16) & 0xf;
2209 gen_op_iwmmxt_movq_M0_wRn(rd0);
2210 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2211 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
2212 tcg_temp_free(tmp);
2213 gen_op_iwmmxt_movq_wRn_M0(wrd);
2214 gen_op_iwmmxt_set_mup();
2215 gen_op_iwmmxt_set_cup();
2216 break;
2217 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2218 case 0x418: case 0x518: case 0x618: case 0x718:
2219 case 0x818: case 0x918: case 0xa18: case 0xb18:
2220 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2221 wrd = (insn >> 12) & 0xf;
2222 rd0 = (insn >> 16) & 0xf;
2223 rd1 = (insn >> 0) & 0xf;
2224 gen_op_iwmmxt_movq_M0_wRn(rd0);
2225 switch ((insn >> 20) & 0xf) {
2226 case 0x0:
2227 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2228 break;
2229 case 0x1:
2230 gen_op_iwmmxt_addub_M0_wRn(rd1);
2231 break;
2232 case 0x3:
2233 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2234 break;
2235 case 0x4:
2236 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2237 break;
2238 case 0x5:
2239 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2240 break;
2241 case 0x7:
2242 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2243 break;
2244 case 0x8:
2245 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2246 break;
2247 case 0x9:
2248 gen_op_iwmmxt_addul_M0_wRn(rd1);
2249 break;
2250 case 0xb:
2251 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2252 break;
2253 default:
2254 return 1;
2255 }
2256 gen_op_iwmmxt_movq_wRn_M0(wrd);
2257 gen_op_iwmmxt_set_mup();
2258 gen_op_iwmmxt_set_cup();
2259 break;
2260 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2261 case 0x408: case 0x508: case 0x608: case 0x708:
2262 case 0x808: case 0x908: case 0xa08: case 0xb08:
2263 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2264 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2265 return 1;
2266 wrd = (insn >> 12) & 0xf;
2267 rd0 = (insn >> 16) & 0xf;
2268 rd1 = (insn >> 0) & 0xf;
2269 gen_op_iwmmxt_movq_M0_wRn(rd0);
2270 switch ((insn >> 22) & 3) {
2271 case 1:
2272 if (insn & (1 << 21))
2273 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2274 else
2275 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2276 break;
2277 case 2:
2278 if (insn & (1 << 21))
2279 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2280 else
2281 gen_op_iwmmxt_packul_M0_wRn(rd1);
2282 break;
2283 case 3:
2284 if (insn & (1 << 21))
2285 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2286 else
2287 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2288 break;
2289 }
2290 gen_op_iwmmxt_movq_wRn_M0(wrd);
2291 gen_op_iwmmxt_set_mup();
2292 gen_op_iwmmxt_set_cup();
2293 break;
2294 case 0x201: case 0x203: case 0x205: case 0x207:
2295 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2296 case 0x211: case 0x213: case 0x215: case 0x217:
2297 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2298 wrd = (insn >> 5) & 0xf;
2299 rd0 = (insn >> 12) & 0xf;
2300 rd1 = (insn >> 0) & 0xf;
2301 if (rd0 == 0xf || rd1 == 0xf)
2302 return 1;
2303 gen_op_iwmmxt_movq_M0_wRn(wrd);
2304 tmp = load_reg(s, rd0);
2305 tmp2 = load_reg(s, rd1);
2306 switch ((insn >> 16) & 0xf) {
2307 case 0x0: /* TMIA */
2308 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2309 break;
2310 case 0x8: /* TMIAPH */
2311 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2312 break;
2313 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2314 if (insn & (1 << 16))
2315 tcg_gen_shri_i32(tmp, tmp, 16);
2316 if (insn & (1 << 17))
2317 tcg_gen_shri_i32(tmp2, tmp2, 16);
2318 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2319 break;
2320 default:
2321 dead_tmp(tmp2);
2322 dead_tmp(tmp);
2323 return 1;
2324 }
2325 dead_tmp(tmp2);
2326 dead_tmp(tmp);
2327 gen_op_iwmmxt_movq_wRn_M0(wrd);
2328 gen_op_iwmmxt_set_mup();
2329 break;
2330 default:
2331 return 1;
2332 }
2334 return 0;
2335 }
2337 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2338 (i.e. an undefined instruction). */
2339 static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2340 {
2341 int acc, rd0, rd1, rdhi, rdlo;
2342 TCGv tmp, tmp2;
2344 if ((insn & 0x0ff00f10) == 0x0e200010) {
2345 /* Multiply with Internal Accumulate Format */
2346 rd0 = (insn >> 12) & 0xf;
2347 rd1 = insn & 0xf;
2348 acc = (insn >> 5) & 7;
2350 if (acc != 0)
2351 return 1;
2353 tmp = load_reg(s, rd0);
2354 tmp2 = load_reg(s, rd1);
2355 switch ((insn >> 16) & 0xf) {
2356 case 0x0: /* MIA */
2357 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2358 break;
2359 case 0x8: /* MIAPH */
2360 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2361 break;
2362 case 0xc: /* MIABB */
2363 case 0xd: /* MIABT */
2364 case 0xe: /* MIATB */
2365 case 0xf: /* MIATT */
2366 if (insn & (1 << 16))
2367 tcg_gen_shri_i32(tmp, tmp, 16);
2368 if (insn & (1 << 17))
2369 tcg_gen_shri_i32(tmp2, tmp2, 16);
2370 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2371 break;
2372 default:
2373 return 1;
2374 }
2375 dead_tmp(tmp2);
2376 dead_tmp(tmp);
2378 gen_op_iwmmxt_movq_wRn_M0(acc);
2379 return 0;
2380 }
2382 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2383 /* Internal Accumulator Access Format */
2384 rdhi = (insn >> 16) & 0xf;
2385 rdlo = (insn >> 12) & 0xf;
2386 acc = insn & 7;
2388 if (acc != 0)
2389 return 1;
2391 if (insn & ARM_CP_RW_BIT) { /* MRA */
2392 iwmmxt_load_reg(cpu_V0, acc);
2393 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2394 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2395 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2396 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
2397 } else { /* MAR */
2398 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2399 iwmmxt_store_reg(cpu_V0, acc);
2400 }
2401 return 0;
2402 }
2404 return 1;
2405 }
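/* Note: only XScale accumulator 0 is implemented above (any other acc
   value is rejected as undefined). The MIA* forms reuse the iWMMXt
   multiply-accumulate helpers, and MRA masks the high word down to the
   40-bit accumulator width. */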
2407 /* Disassemble a system coprocessor instruction. Return nonzero if the
2408 instruction is not defined. */
2409 static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2410 {
2411 TCGv tmp, tmp2;
2412 uint32_t rd = (insn >> 12) & 0xf;
2413 uint32_t cp = (insn >> 8) & 0xf;
2414 if (IS_USER(s)) {
2415 return 1;
2416 }
2418 if (insn & ARM_CP_RW_BIT) {
2419 if (!env->cp[cp].cp_read)
2420 return 1;
2421 gen_set_pc_im(s->pc);
2422 tmp = new_tmp();
2423 tmp2 = tcg_const_i32(insn);
2424 gen_helper_get_cp(tmp, cpu_env, tmp2);
2425 tcg_temp_free(tmp2);
2426 store_reg(s, rd, tmp);
2427 } else {
2428 if (!env->cp[cp].cp_write)
2429 return 1;
2430 gen_set_pc_im(s->pc);
2431 tmp = load_reg(s, rd);
2432 tmp2 = tcg_const_i32(insn);
2433 gen_helper_set_cp(cpu_env, tmp2, tmp);
2434 tcg_temp_free(tmp2);
2435 dead_tmp(tmp);
2436 }
2437 return 0;
2438 }
2440 static int cp15_user_ok(uint32_t insn)
2441 {
2442 int cpn = (insn >> 16) & 0xf;
2443 int cpm = insn & 0xf;
2444 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2446 if (cpn == 13 && cpm == 0) {
2447 /* TLS register. */
2448 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2449 return 1;
2450 }
2451 if (cpn == 7) {
2452 /* ISB, DSB, DMB. */
2453 if ((cpm == 5 && op == 4)
2454 || (cpm == 10 && (op == 4 || op == 5)))
2455 return 1;
2456 }
2457 return 0;
2458 }
2460 static int cp15_tls_load_store(CPUState *env, DisasContext *s, uint32_t insn, uint32_t rd)
2461 {
2462 TCGv tmp;
2463 int cpn = (insn >> 16) & 0xf;
2464 int cpm = insn & 0xf;
2465 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2467 if (!arm_feature(env, ARM_FEATURE_V6K))
2468 return 0;
2470 if (!(cpn == 13 && cpm == 0))
2471 return 0;
2473 if (insn & ARM_CP_RW_BIT) {
2474 switch (op) {
2475 case 2:
2476 tmp = load_cpu_field(cp15.c13_tls1);
2477 break;
2478 case 3:
2479 tmp = load_cpu_field(cp15.c13_tls2);
2480 break;
2481 case 4:
2482 tmp = load_cpu_field(cp15.c13_tls3);
2483 break;
2484 default:
2485 return 0;
2486 }
2487 store_reg(s, rd, tmp);
2489 } else {
2490 tmp = load_reg(s, rd);
2491 switch (op) {
2492 case 2:
2493 store_cpu_field(tmp, cp15.c13_tls1);
2494 break;
2495 case 3:
2496 store_cpu_field(tmp, cp15.c13_tls2);
2497 break;
2498 case 4:
2499 store_cpu_field(tmp, cp15.c13_tls3);
2500 break;
2501 default:
2502 dead_tmp(tmp);
2503 return 0;
2504 }
2505 }
2506 return 1;
2507 }
2509 /* Disassemble a system coprocessor (cp15) instruction. Return nonzero if the
2510 instruction is not defined. */
2511 static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
2512 {
2513 uint32_t rd;
2514 TCGv tmp, tmp2;
2516 /* M profile cores use memory mapped registers instead of cp15. */
2517 if (arm_feature(env, ARM_FEATURE_M))
2518 return 1;
2520 if ((insn & (1 << 25)) == 0) {
2521 if (insn & (1 << 20)) {
2522 /* mrrc */
2523 return 1;
2524 }
2525 /* mcrr. Used for block cache operations, so implement as no-op. */
2526 return 0;
2527 }
2528 if ((insn & (1 << 4)) == 0) {
2529 /* cdp */
2530 return 1;
2531 }
2532 if (IS_USER(s) && !cp15_user_ok(insn)) {
2533 return 1;
2534 }
2535 if ((insn & 0x0fff0fff) == 0x0e070f90
2536 || (insn & 0x0fff0fff) == 0x0e070f58) {
2537 /* Wait for interrupt. */
2538 gen_set_pc_im(s->pc);
2539 s->is_jmp = DISAS_WFI;
2540 return 0;
2541 }
2542 rd = (insn >> 12) & 0xf;
2544 if (cp15_tls_load_store(env, s, insn, rd))
2545 return 0;
2547 tmp2 = tcg_const_i32(insn);
2548 if (insn & ARM_CP_RW_BIT) {
2549 tmp = new_tmp();
2550 gen_helper_get_cp15(tmp, cpu_env, tmp2);
2551 /* If the destination register is r15 then the condition codes are set. */
2552 if (rd != 15)
2553 store_reg(s, rd, tmp);
2554 else
2555 dead_tmp(tmp);
2556 } else {
2557 tmp = load_reg(s, rd);
2558 gen_helper_set_cp15(cpu_env, tmp2, tmp);
2559 dead_tmp(tmp);
2560 /* Normally we would always end the TB here, but Linux
2561 * arch/arm/mach-pxa/sleep.S expects two instructions following
2562 * an MMU enable to execute from cache. Imitate this behaviour. */
2563 if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
2564 (insn & 0x0fff0fff) != 0x0e010f10)
2565 gen_lookup_tb(s);
2566 }
2567 tcg_temp_free_i32(tmp2);
2568 return 0;
2569 }
2571 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2572 #define VFP_SREG(insn, bigbit, smallbit) \
2573 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2574 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2575 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2576 reg = (((insn) >> (bigbit)) & 0x0f) \
2577 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2578 } else { \
2579 if (insn & (1 << (smallbit))) \
2580 return 1; \
2581 reg = ((insn) >> (bigbit)) & 0x0f; \
2582 }} while (0)
2584 #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2585 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2586 #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2587 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2588 #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2589 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
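/* A VFP register number is split across the instruction: a 4-bit field
   plus one extra bit. For single-precision registers the extra bit is
   the low bit; for double-precision it is the high bit, and it is only
   valid on VFP3, which has 32 double registers. On pre-VFP3 cores a set
   high bit makes the instruction undefined, which is what the VFP_DREG
   macros check. */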
2591 /* Move between integer and VFP cores. */
2592 static TCGv gen_vfp_mrs(void)
2593 {
2594 TCGv tmp = new_tmp();
2595 tcg_gen_mov_i32(tmp, cpu_F0s);
2596 return tmp;
2597 }
2599 static void gen_vfp_msr(TCGv tmp)
2600 {
2601 tcg_gen_mov_i32(cpu_F0s, tmp);
2602 dead_tmp(tmp);
2603 }
2605 static inline int
2606 vfp_enabled(CPUState * env)
2607 {
2608 return ((env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) != 0);
2609 }
2611 static void gen_neon_dup_u8(TCGv var, int shift)
2612 {
2613 TCGv tmp = new_tmp();
2614 if (shift)
2615 tcg_gen_shri_i32(var, var, shift);
2616 tcg_gen_ext8u_i32(var, var);
2617 tcg_gen_shli_i32(tmp, var, 8);
2618 tcg_gen_or_i32(var, var, tmp);
2619 tcg_gen_shli_i32(tmp, var, 16);
2620 tcg_gen_or_i32(var, var, tmp);
2621 dead_tmp(tmp);
2622 }
2624 static void gen_neon_dup_low16(TCGv var)
2625 {
2626 TCGv tmp = new_tmp();
2627 tcg_gen_ext16u_i32(var, var);
2628 tcg_gen_shli_i32(tmp, var, 16);
2629 tcg_gen_or_i32(var, var, tmp);
2630 dead_tmp(tmp);
2631 }
2633 static void gen_neon_dup_high16(TCGv var)
2634 {
2635 TCGv tmp = new_tmp();
2636 tcg_gen_andi_i32(var, var, 0xffff0000);
2637 tcg_gen_shri_i32(tmp, var, 16);
2638 tcg_gen_or_i32(var, var, tmp);
2639 dead_tmp(tmp);
2640 }
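/* The dup helpers above build "splat" values in a 32-bit word:
   gen_neon_dup_u8 replicates one byte into all four byte lanes, and
   gen_neon_dup_low16/gen_neon_dup_high16 replicate the chosen halfword
   into both halfword lanes. */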
2642 /* Disassemble a VFP instruction. Returns nonzero if an error occurred
2643 (i.e. an undefined instruction). */
2644 static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
2645 {
2646 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2647 int dp, veclen;
2648 TCGv addr;
2649 TCGv tmp;
2650 TCGv tmp2;
2652 if (!arm_feature(env, ARM_FEATURE_VFP))
2653 return 1;
2655 if (!vfp_enabled(env)) {
2656 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
2657 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2658 return 1;
2659 rn = (insn >> 16) & 0xf;
2660 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2661 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
2662 return 1;
2663 }
2664 dp = ((insn & 0xf00) == 0xb00);
2665 switch ((insn >> 24) & 0xf) {
2666 case 0xe:
2667 if (insn & (1 << 4)) {
2668 /* single register transfer */
2669 rd = (insn >> 12) & 0xf;
2670 if (dp) {
2671 int size;
2672 int pass;
2674 VFP_DREG_N(rn, insn);
2675 if (insn & 0xf)
2676 return 1;
2677 if (insn & 0x00c00060
2678 && !arm_feature(env, ARM_FEATURE_NEON))
2679 return 1;
2681 pass = (insn >> 21) & 1;
2682 if (insn & (1 << 22)) {
2683 size = 0;
2684 offset = ((insn >> 5) & 3) * 8;
2685 } else if (insn & (1 << 5)) {
2686 size = 1;
2687 offset = (insn & (1 << 6)) ? 16 : 0;
2688 } else {
2689 size = 2;
2690 offset = 0;
2691 }
2692 if (insn & ARM_CP_RW_BIT) {
2693 /* vfp->arm */
2694 tmp = neon_load_reg(rn, pass);
2695 switch (size) {
2696 case 0:
2697 if (offset)
2698 tcg_gen_shri_i32(tmp, tmp, offset);
2699 if (insn & (1 << 23))
2700 gen_uxtb(tmp);
2701 else
2702 gen_sxtb(tmp);
2703 break;
2704 case 1:
2705 if (insn & (1 << 23)) {
2706 if (offset) {
2707 tcg_gen_shri_i32(tmp, tmp, 16);
2708 } else {
2709 gen_uxth(tmp);
2710 }
2711 } else {
2712 if (offset) {
2713 tcg_gen_sari_i32(tmp, tmp, 16);
2714 } else {
2715 gen_sxth(tmp);
2716 }
2717 }
2718 break;
2719 case 2:
2720 break;
2721 }
2722 store_reg(s, rd, tmp);
2723 } else {
2724 /* arm->vfp */
2725 tmp = load_reg(s, rd);
2726 if (insn & (1 << 23)) {
2727 /* VDUP */
2728 if (size == 0) {
2729 gen_neon_dup_u8(tmp, 0);
2730 } else if (size == 1) {
2731 gen_neon_dup_low16(tmp);
2732 }
2733 for (n = 0; n <= pass * 2; n++) {
2734 tmp2 = new_tmp();
2735 tcg_gen_mov_i32(tmp2, tmp);
2736 neon_store_reg(rn, n, tmp2);
2737 }
2738 neon_store_reg(rn, n, tmp);
2739 } else {
2740 /* VMOV */
2741 switch (size) {
2742 case 0:
2743 tmp2 = neon_load_reg(rn, pass);
2744 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
2745 dead_tmp(tmp2);
2746 break;
2747 case 1:
2748 tmp2 = neon_load_reg(rn, pass);
2749 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
2750 dead_tmp(tmp2);
2751 break;
2752 case 2:
2753 break;
2754 }
2755 neon_store_reg(rn, pass, tmp);
2756 }
2757 }
2758 } else { /* !dp */
2759 if ((insn & 0x6f) != 0x00)
2760 return 1;
2761 rn = VFP_SREG_N(insn);
2762 if (insn & ARM_CP_RW_BIT) {
2763 /* vfp->arm */
2764 if (insn & (1 << 21)) {
2765 /* system register */
2766 rn >>= 1;
2768 switch (rn) {
2769 case ARM_VFP_FPSID:
2770 /* VFP2 allows access to FSID from userspace.
2771 VFP3 restricts all id registers to privileged
2772 accesses. */
2773 if (IS_USER(s)
2774 && arm_feature(env, ARM_FEATURE_VFP3))
2775 return 1;
2776 tmp = load_cpu_field(vfp.xregs[rn]);
2777 break;
2778 case ARM_VFP_FPEXC:
2779 if (IS_USER(s))
2780 return 1;
2781 tmp = load_cpu_field(vfp.xregs[rn]);
2782 break;
2783 case ARM_VFP_FPINST:
2784 case ARM_VFP_FPINST2:
2785 /* Not present in VFP3. */
2786 if (IS_USER(s)
2787 || arm_feature(env, ARM_FEATURE_VFP3))
2788 return 1;
2789 tmp = load_cpu_field(vfp.xregs[rn]);
2790 break;
2791 case ARM_VFP_FPSCR:
2792 if (rd == 15) {
2793 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2794 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2795 } else {
2796 tmp = new_tmp();
2797 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2798 }
2799 break;
2800 case ARM_VFP_MVFR0:
2801 case ARM_VFP_MVFR1:
2802 if (IS_USER(s)
2803 || !arm_feature(env, ARM_FEATURE_VFP3))
2804 return 1;
2805 tmp = load_cpu_field(vfp.xregs[rn]);
2806 break;
2807 default:
2808 return 1;
2809 }
2810 } else {
2811 gen_mov_F0_vreg(0, rn);
2812 tmp = gen_vfp_mrs();
2813 }
2814 if (rd == 15) {
2815 /* Set the 4 flag bits in the CPSR. */
2816 gen_set_nzcv(tmp);
2817 dead_tmp(tmp);
2818 } else {
2819 store_reg(s, rd, tmp);
2820 }
2821 } else {
2822 /* arm->vfp */
2823 tmp = load_reg(s, rd);
2824 if (insn & (1 << 21)) {
2825 rn >>= 1;
2826 /* system register */
2827 switch (rn) {
2828 case ARM_VFP_FPSID:
2829 case ARM_VFP_MVFR0:
2830 case ARM_VFP_MVFR1:
2831 /* Writes are ignored. */
2832 break;
2833 case ARM_VFP_FPSCR:
2834 gen_helper_vfp_set_fpscr(cpu_env, tmp);
2835 dead_tmp(tmp);
2836 gen_lookup_tb(s);
2837 break;
2838 case ARM_VFP_FPEXC:
2839 if (IS_USER(s))
2840 return 1;
2841 /* TODO: VFP subarchitecture support.
2842 * For now, keep the EN bit only */
2843 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
2844 store_cpu_field(tmp, vfp.xregs[rn]);
2845 gen_lookup_tb(s);
2846 break;
2847 case ARM_VFP_FPINST:
2848 case ARM_VFP_FPINST2:
2849 store_cpu_field(tmp, vfp.xregs[rn]);
2850 break;
2851 default:
2852 return 1;
2853 }
2854 } else {
2855 gen_vfp_msr(tmp);
2856 gen_mov_vreg_F0(0, rn);
2857 }
2858 }
2859 }
2860 } else {
2861 /* data processing */
2862 /* The opcode is in bits 23, 21, 20 and 6. */
2863 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2864 if (dp) {
2865 if (op == 15) {
2866 /* rn is opcode */
2867 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2868 } else {
2869 /* rn is register number */
2870 VFP_DREG_N(rn, insn);
2871 }
2873 if (op == 15 && (rn == 15 || rn > 17)) {
2874 /* Integer or single precision destination. */
2875 rd = VFP_SREG_D(insn);
2876 } else {
2877 VFP_DREG_D(rd, insn);
2878 }
2880 if (op == 15 && (rn == 16 || rn == 17)) {
2881 /* Integer source. */
2882 rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
2883 } else {
2884 VFP_DREG_M(rm, insn);
2885 }
2886 } else {
2887 rn = VFP_SREG_N(insn);
2888 if (op == 15 && rn == 15) {
2889 /* Double precision destination. */
2890 VFP_DREG_D(rd, insn);
2891 } else {
2892 rd = VFP_SREG_D(insn);
2893 }
2894 rm = VFP_SREG_M(insn);
2895 }
2897 veclen = env->vfp.vec_len;
2898 if (op == 15 && rn > 3)
2899 veclen = 0;
2901 /* Shut up compiler warnings. */
2902 delta_m = 0;
2903 delta_d = 0;
2904 bank_mask = 0;
2906 if (veclen > 0) {
2907 if (dp)
2908 bank_mask = 0xc;
2909 else
2910 bank_mask = 0x18;
2912 /* Figure out what type of vector operation this is. */
2913 if ((rd & bank_mask) == 0) {
2914 /* scalar */
2915 veclen = 0;
2916 } else {
2917 if (dp)
2918 delta_d = (env->vfp.vec_stride >> 1) + 1;
2919 else
2920 delta_d = env->vfp.vec_stride + 1;
2922 if ((rm & bank_mask) == 0) {
2923 /* mixed scalar/vector */
2924 delta_m = 0;
2925 } else {
2926 /* vector */
2927 delta_m = delta_d;
2928 }
2929 }
2930 }
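/* VFP short-vector mode: the register file is divided into banks of 4
   double or 8 single registers. An operand in bank 0 is a scalar; other
   operands step through their bank by the FPSCR stride (delta_d/delta_m
   above), wrapping at the bank boundary, for veclen iterations. */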
2932 /* Load the initial operands. */
2933 if (op == 15) {
2934 switch (rn) {
2935 case 16:
2936 case 17:
2937 /* Integer source */
2938 gen_mov_F0_vreg(0, rm);
2939 break;
2940 case 8:
2941 case 9:
2942 /* Compare */
2943 gen_mov_F0_vreg(dp, rd);
2944 gen_mov_F1_vreg(dp, rm);
2945 break;
2946 case 10:
2947 case 11:
2948 /* Compare with zero */
2949 gen_mov_F0_vreg(dp, rd);
2950 gen_vfp_F1_ld0(dp);
2951 break;
2952 case 20:
2953 case 21:
2954 case 22:
2955 case 23:
2956 case 28:
2957 case 29:
2958 case 30:
2959 case 31:
2960 /* Source and destination the same. */
2961 gen_mov_F0_vreg(dp, rd);
2962 break;
2963 default:
2964 /* One source operand. */
2965 gen_mov_F0_vreg(dp, rm);
2966 break;
2967 }
2968 } else {
2969 /* Two source operands. */
2970 gen_mov_F0_vreg(dp, rn);
2971 gen_mov_F1_vreg(dp, rm);
2972 }
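/* The loop below applies the operation once per vector element; for
   scalar operations veclen is zero so the body runs exactly once. */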
2974 for (;;) {
2975 /* Perform the calculation. */
2976 switch (op) {
2977 case 0: /* mac: fd + (fn * fm) */
2978 gen_vfp_mul(dp);
2979 gen_mov_F1_vreg(dp, rd);
2980 gen_vfp_add(dp);
2981 break;
2982 case 1: /* nmac: fd - (fn * fm) */
2983 gen_vfp_mul(dp);
2984 gen_vfp_neg(dp);
2985 gen_mov_F1_vreg(dp, rd);
2986 gen_vfp_add(dp);
2987 break;
2988 case 2: /* msc: -fd + (fn * fm) */
2989 gen_vfp_mul(dp);
2990 gen_mov_F1_vreg(dp, rd);
2991 gen_vfp_sub(dp);
2992 break;
2993 case 3: /* nmsc: -fd - (fn * fm) */
2994 gen_vfp_mul(dp);
2995 gen_vfp_neg(dp);
2996 gen_mov_F1_vreg(dp, rd);
2997 gen_vfp_sub(dp);
2998 break;
2999 case 4: /* mul: fn * fm */
3000 gen_vfp_mul(dp);
3001 break;
3002 case 5: /* nmul: -(fn * fm) */
3003 gen_vfp_mul(dp);
3004 gen_vfp_neg(dp);
3005 break;
3006 case 6: /* add: fn + fm */
3007 gen_vfp_add(dp);
3008 break;
3009 case 7: /* sub: fn - fm */
3010 gen_vfp_sub(dp);
3011 break;
3012 case 8: /* div: fn / fm */
3013 gen_vfp_div(dp);
3014 break;
3015 case 14: /* fconst */
3016 if (!arm_feature(env, ARM_FEATURE_VFP3))
3017 return 1;
3019 n = (insn << 12) & 0x80000000;
3020 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3021 if (dp) {
3022 if (i & 0x40)
3023 i |= 0x3f80;
3024 else
3025 i |= 0x4000;
3026 n |= i << 16;
3027 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
3028 } else {
3029 if (i & 0x40)
3030 i |= 0x780;
3031 else
3032 i |= 0x800;
3033 n |= i << 19;
3034 tcg_gen_movi_i32(cpu_F0s, n);
3035 }
3036 break;
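/* fconst expands the 8-bit immediate abcdefgh (the VFP3 VFPExpandImm
   encoding): a is the sign, the exponent is derived from b (inverted
   top bit, then b replicated), and cdefgh supplies the top fraction
   bits, zero-padded. For doubles only the high 32 bits are nonzero. */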
3037 case 15: /* extension space */
3038 switch (rn) {
3039 case 0: /* cpy */
3040 /* no-op */
3041 break;
3042 case 1: /* abs */
3043 gen_vfp_abs(dp);
3044 break;
3045 case 2: /* neg */
3046 gen_vfp_neg(dp);
3047 break;
3048 case 3: /* sqrt */
3049 gen_vfp_sqrt(dp);
3050 break;
3051 case 4: /* vcvtb.f32.f16 */
3052 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3053 return 1;
3054 tmp = gen_vfp_mrs();
3055 tcg_gen_ext16u_i32(tmp, tmp);
3056 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3057 dead_tmp(tmp);
3058 break;
3059 case 5: /* vcvtt.f32.f16 */
3060 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3061 return 1;
3062 tmp = gen_vfp_mrs();
3063 tcg_gen_shri_i32(tmp, tmp, 16);
3064 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3065 dead_tmp(tmp);
3066 break;
3067 case 6: /* vcvtb.f16.f32 */
3068 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3069 return 1;
3070 tmp = new_tmp();
3071 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3072 gen_mov_F0_vreg(0, rd);
3073 tmp2 = gen_vfp_mrs();
3074 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3075 tcg_gen_or_i32(tmp, tmp, tmp2);
3076 dead_tmp(tmp2);
3077 gen_vfp_msr(tmp);
3078 break;
3079 case 7: /* vcvtt.f16.f32 */
3080 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3081 return 1;
3082 tmp = new_tmp();
3083 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3084 tcg_gen_shli_i32(tmp, tmp, 16);
3085 gen_mov_F0_vreg(0, rd);
3086 tmp2 = gen_vfp_mrs();
3087 tcg_gen_ext16u_i32(tmp2, tmp2);
3088 tcg_gen_or_i32(tmp, tmp, tmp2);
3089 dead_tmp(tmp2);
3090 gen_vfp_msr(tmp);
3091 break;
3092 case 8: /* cmp */
3093 gen_vfp_cmp(dp);
3094 break;
3095 case 9: /* cmpe */
3096 gen_vfp_cmpe(dp);
3097 break;
3098 case 10: /* cmpz */
3099 gen_vfp_cmp(dp);
3100 break;
3101 case 11: /* cmpez */
3102 gen_vfp_F1_ld0(dp);
3103 gen_vfp_cmpe(dp);
3104 break;
3105 case 15: /* single<->double conversion */
3106 if (dp)
3107 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
3108 else
3109 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
3110 break;
3111 case 16: /* fuito */
3112 gen_vfp_uito(dp);
3113 break;
3114 case 17: /* fsito */
3115 gen_vfp_sito(dp);
3116 break;
3117 case 20: /* fshto */
3118 if (!arm_feature(env, ARM_FEATURE_VFP3))
3119 return 1;
3120 gen_vfp_shto(dp, 16 - rm);
3121 break;
3122 case 21: /* fslto */
3123 if (!arm_feature(env, ARM_FEATURE_VFP3))
3124 return 1;
3125 gen_vfp_slto(dp, 32 - rm);
3126 break;
3127 case 22: /* fuhto */
3128 if (!arm_feature(env, ARM_FEATURE_VFP3))
3129 return 1;
3130 gen_vfp_uhto(dp, 16 - rm);
3131 break;
3132 case 23: /* fulto */
3133 if (!arm_feature(env, ARM_FEATURE_VFP3))
3134 return 1;
3135 gen_vfp_ulto(dp, 32 - rm);
3136 break;
3137 case 24: /* ftoui */
3138 gen_vfp_toui(dp);
3139 break;
3140 case 25: /* ftouiz */
3141 gen_vfp_touiz(dp);
3142 break;
3143 case 26: /* ftosi */
3144 gen_vfp_tosi(dp);
3145 break;
3146 case 27: /* ftosiz */
3147 gen_vfp_tosiz(dp);
3148 break;
3149 case 28: /* ftosh */
3150 if (!arm_feature(env, ARM_FEATURE_VFP3))
3151 return 1;
3152 gen_vfp_tosh(dp, 16 - rm);
3153 break;
3154 case 29: /* ftosl */
3155 if (!arm_feature(env, ARM_FEATURE_VFP3))
3156 return 1;
3157 gen_vfp_tosl(dp, 32 - rm);
3158 break;
3159 case 30: /* ftouh */
3160 if (!arm_feature(env, ARM_FEATURE_VFP3))
3161 return 1;
3162 gen_vfp_touh(dp, 16 - rm);
3163 break;
3164 case 31: /* ftoul */
3165 if (!arm_feature(env, ARM_FEATURE_VFP3))
3166 return 1;
3167 gen_vfp_toul(dp, 32 - rm);
3168 break;
3169 default: /* undefined */
3170 printf ("rn:%d\n", rn);
3171 return 1;
3172 }
3173 break;
3174 default: /* undefined */
3175 printf ("op:%d\n", op);
3176 return 1;
3177 }
3179 /* Write back the result. */
3180 if (op == 15 && (rn >= 8 && rn <= 11))
3181 ; /* Comparison, do nothing. */
3182 else if (op == 15 && rn > 17)
3183 /* Integer result. */
3184 gen_mov_vreg_F0(0, rd);
3185 else if (op == 15 && rn == 15)
3186 /* conversion */
3187 gen_mov_vreg_F0(!dp, rd);
3188 else
3189 gen_mov_vreg_F0(dp, rd);
3191 /* break out of the loop if we have finished */
3192 if (veclen == 0)
3193 break;
3195 if (op == 15 && delta_m == 0) {
3196 /* single source one-many */
3197 while (veclen--) {
3198 rd = ((rd + delta_d) & (bank_mask - 1))
3199 | (rd & bank_mask);
3200 gen_mov_vreg_F0(dp, rd);
3201 }
3202 break;
3203 }
3204 /* Set up the next operands. */
3205 veclen--;
3206 rd = ((rd + delta_d) & (bank_mask - 1))
3207 | (rd & bank_mask);
3209 if (op == 15) {
3210 /* One source operand. */
3211 rm = ((rm + delta_m) & (bank_mask - 1))
3212 | (rm & bank_mask);
3213 gen_mov_F0_vreg(dp, rm);
3214 } else {
3215 /* Two source operands. */
3216 rn = ((rn + delta_d) & (bank_mask - 1))
3217 | (rn & bank_mask);
3218 gen_mov_F0_vreg(dp, rn);
3219 if (delta_m) {
3220 rm = ((rm + delta_m) & (bank_mask - 1))
3221 | (rm & bank_mask);
3222 gen_mov_F1_vreg(dp, rm);
3223 }
3224 }
3225 }
3226 }
3227 break;
3228 case 0xc:
3229 case 0xd:
3230 if (dp && (insn & 0x03e00000) == 0x00400000) {
3231 /* two-register transfer */
3232 rn = (insn >> 16) & 0xf;
3233 rd = (insn >> 12) & 0xf;
3234 if (dp) {
3235 VFP_DREG_M(rm, insn);
3236 } else {
3237 rm = VFP_SREG_M(insn);
3238 }
3240 if (insn & ARM_CP_RW_BIT) {
3241 /* vfp->arm */
3242 if (dp) {
3243 gen_mov_F0_vreg(0, rm * 2);
3244 tmp = gen_vfp_mrs();
3245 store_reg(s, rd, tmp);
3246 gen_mov_F0_vreg(0, rm * 2 + 1);
3247 tmp = gen_vfp_mrs();
3248 store_reg(s, rn, tmp);
3249 } else {
3250 gen_mov_F0_vreg(0, rm);
3251 tmp = gen_vfp_mrs();
3252 store_reg(s, rn, tmp);
3253 gen_mov_F0_vreg(0, rm + 1);
3254 tmp = gen_vfp_mrs();
3255 store_reg(s, rd, tmp);
3256 }
3257 } else {
3258 /* arm->vfp */
3259 if (dp) {
3260 tmp = load_reg(s, rd);
3261 gen_vfp_msr(tmp);
3262 gen_mov_vreg_F0(0, rm * 2);
3263 tmp = load_reg(s, rn);
3264 gen_vfp_msr(tmp);
3265 gen_mov_vreg_F0(0, rm * 2 + 1);
3266 } else {
3267 tmp = load_reg(s, rn);
3268 gen_vfp_msr(tmp);
3269 gen_mov_vreg_F0(0, rm);
3270 tmp = load_reg(s, rd);
3271 gen_vfp_msr(tmp);
3272 gen_mov_vreg_F0(0, rm + 1);
3273 }
3274 }
3275 } else {
3276 /* Load/store */
3277 rn = (insn >> 16) & 0xf;
3278 if (dp)
3279 VFP_DREG_D(rd, insn);
3280 else
3281 rd = VFP_SREG_D(insn);
3282 if (s->thumb && rn == 15) {
3283 addr = new_tmp();
3284 tcg_gen_movi_i32(addr, s->pc & ~2);
3285 } else {
3286 addr = load_reg(s, rn);
3287 }
3288 if ((insn & 0x01200000) == 0x01000000) {
3289 /* Single load/store */
3290 offset = (insn & 0xff) << 2;
3291 if ((insn & (1 << 23)) == 0)
3292 offset = -offset;
3293 tcg_gen_addi_i32(addr, addr, offset);
3294 if (insn & (1 << 20)) {
3295 gen_vfp_ld(s, dp, addr);
3296 gen_mov_vreg_F0(dp, rd);
3297 } else {
3298 gen_mov_F0_vreg(dp, rd);
3299 gen_vfp_st(s, dp, addr);
3300 }
3301 dead_tmp(addr);
3302 } else {
3303 /* load/store multiple */
3304 if (dp)
3305 n = (insn >> 1) & 0x7f;
3306 else
3307 n = insn & 0xff;
3309 if (insn & (1 << 24)) /* pre-decrement */
3310 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
3312 if (dp)
3313 offset = 8;
3314 else
3315 offset = 4;
3316 for (i = 0; i < n; i++) {
3317 if (insn & ARM_CP_RW_BIT) {
3318 /* load */
3319 gen_vfp_ld(s, dp, addr);
3320 gen_mov_vreg_F0(dp, rd + i);
3321 } else {
3322 /* store */
3323 gen_mov_F0_vreg(dp, rd + i);
3324 gen_vfp_st(s, dp, addr);
3326 tcg_gen_addi_i32(addr, addr, offset);
3327 }
3328 if (insn & (1 << 21)) {
3329 /* writeback */
3330 if (insn & (1 << 24))
3331 offset = -offset * n;
3332 else if (dp && (insn & 1))
3333 offset = 4;
3334 else
3335 offset = 0;
3337 if (offset != 0)
3338 tcg_gen_addi_i32(addr, addr, offset);
3339 store_reg(s, rn, addr);
3340 } else {
3341 dead_tmp(addr);
3342 }
3343 }
3344 }
3345 break;
3346 default:
3347 /* Should never happen. */
3348 return 1;
3349 }
3350 return 0;
3351 }
3353 static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
3354 {
3355 TranslationBlock *tb;
3357 tb = s->tb;
3358 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
3359 tcg_gen_goto_tb(n);
3360 gen_set_pc_im(dest);
3361 tcg_gen_exit_tb((long)tb + n);
3362 } else {
3363 gen_set_pc_im(dest);
3364 tcg_gen_exit_tb(0);
3365 }
3366 }
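/* Direct block chaining (goto_tb) is only used while the destination
   stays on the same guest page as this TB, since other pages may be
   remapped; otherwise we just set the PC and return to the main loop.
   The value passed to tcg_gen_exit_tb encodes the TB pointer plus the
   index of the chain slot to patch. */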
3368 static inline void gen_jmp (DisasContext *s, uint32_t dest)
3369 {
3370 if (unlikely(s->singlestep_enabled)) {
3371 /* An indirect jump so that we still trigger the debug exception. */
3372 if (s->thumb)
3373 dest |= 1;
3374 gen_bx_im(s, dest);
3375 } else {
3376 gen_goto_tb(s, 0, dest);
3377 s->is_jmp = DISAS_TB_JUMP;
3378 }
3379 }
3381 static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
3382 {
3383 if (x)
3384 tcg_gen_sari_i32(t0, t0, 16);
3385 else
3386 gen_sxth(t0);
3387 if (y)
3388 tcg_gen_sari_i32(t1, t1, 16);
3389 else
3390 gen_sxth(t1);
3391 tcg_gen_mul_i32(t0, t0, t1);
3392 }
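/* Helper for the signed 16x16->32 multiplies (SMULxy and friends):
   x and y select the top or bottom halfword of each operand, which is
   sign-extended before the multiply. */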
3394 /* Return the mask of PSR bits set by an MSR instruction. */
3395 static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
3396 uint32_t mask;
3398 mask = 0;
3399 if (flags & (1 << 0))
3400 mask |= 0xff;
3401 if (flags & (1 << 1))
3402 mask |= 0xff00;
3403 if (flags & (1 << 2))
3404 mask |= 0xff0000;
3405 if (flags & (1 << 3))
3406 mask |= 0xff000000;
3408 /* Mask out undefined bits. */
3409 mask &= ~CPSR_RESERVED;
3410 if (!arm_feature(env, ARM_FEATURE_V6))
3411 mask &= ~(CPSR_E | CPSR_GE);
3412 if (!arm_feature(env, ARM_FEATURE_THUMB2))
3413 mask &= ~CPSR_IT;
3414 /* Mask out execution state bits. */
3415 if (!spsr)
3416 mask &= ~CPSR_EXEC;
3417 /* Mask out privileged bits. */
3418 if (IS_USER(s))
3419 mask &= CPSR_USER;
3420 return mask;
3421 }
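/* The four field-mask flags of MSR select byte lanes of the PSR:
   bit 0 = c (PSR[7:0]), bit 1 = x (PSR[15:8]), bit 2 = s (PSR[23:16])
   and bit 3 = f (PSR[31:24]). The result is then restricted to the bits
   that are actually writable on this CPU and in this mode. */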
3423 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3424 static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
3425 {
3426 TCGv tmp;
3427 if (spsr) {
3428 /* ??? This is also undefined in system mode. */
3429 if (IS_USER(s))
3430 return 1;
3432 tmp = load_cpu_field(spsr);
3433 tcg_gen_andi_i32(tmp, tmp, ~mask);
3434 tcg_gen_andi_i32(t0, t0, mask);
3435 tcg_gen_or_i32(tmp, tmp, t0);
3436 store_cpu_field(tmp, spsr);
3437 } else {
3438 gen_set_cpsr(t0, mask);
3439 }
3440 dead_tmp(t0);
3441 gen_lookup_tb(s);
3442 return 0;
3443 }
3445 /* Returns nonzero if access to the PSR is not permitted. */
3446 static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3447 {
3448 TCGv tmp;
3449 tmp = new_tmp();
3450 tcg_gen_movi_i32(tmp, val);
3451 return gen_set_psr(s, mask, spsr, tmp);
3452 }
3454 /* Generate an old-style exception return. Marks pc as dead. */
3455 static void gen_exception_return(DisasContext *s, TCGv pc)
3456 {
3457 TCGv tmp;
3458 store_reg(s, 15, pc);
3459 tmp = load_cpu_field(spsr);
3460 gen_set_cpsr(tmp, 0xffffffff);
3461 dead_tmp(tmp);
3462 s->is_jmp = DISAS_UPDATE;
3463 }
3465 /* Generate a v6 exception return. Marks both values as dead. */
3466 static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
3467 {
3468 gen_set_cpsr(cpsr, 0xffffffff);
3469 dead_tmp(cpsr);
3470 store_reg(s, 15, pc);
3471 s->is_jmp = DISAS_UPDATE;
3472 }
3474 static inline void
3475 gen_set_condexec (DisasContext *s)
3476 {
3477 if (s->condexec_mask) {
3478 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
3479 TCGv tmp = new_tmp();
3480 tcg_gen_movi_i32(tmp, val);
3481 store_cpu_field(tmp, condexec_bits);
3482 }
3483 }
3485 static void gen_nop_hint(DisasContext *s, int val)
3486 {
3487 switch (val) {
3488 case 3: /* wfi */
3489 gen_set_pc_im(s->pc);
3490 s->is_jmp = DISAS_WFI;
3491 break;
3492 case 2: /* wfe */
3493 case 4: /* sev */
3494 /* TODO: Implement SEV and WFE. May help SMP performance. */
3495 default: /* nop */
3496 break;
3497 }
3498 }
3500 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
3502 static inline int gen_neon_add(int size, TCGv t0, TCGv t1)
3503 {
3504 switch (size) {
3505 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3506 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3507 case 2: tcg_gen_add_i32(t0, t0, t1); break;
3508 default: return 1;
3509 }
3510 return 0;
3511 }
3513 static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
3514 {
3515 switch (size) {
3516 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3517 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3518 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
3519 default: return;
3520 }
3521 }
3523 /* 32-bit pairwise ops end up the same as the elementwise versions. */
3524 #define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3525 #define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3526 #define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3527 #define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3529 /* FIXME: This is wrong. They set the wrong overflow bit. */
3530 #define gen_helper_neon_qadd_s32(a, e, b, c) gen_helper_add_saturate(a, b, c)
3531 #define gen_helper_neon_qadd_u32(a, e, b, c) gen_helper_add_usaturate(a, b, c)
3532 #define gen_helper_neon_qsub_s32(a, e, b, c) gen_helper_sub_saturate(a, b, c)
3533 #define gen_helper_neon_qsub_u32(a, e, b, c) gen_helper_sub_usaturate(a, b, c)
3535 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
3536 switch ((size << 1) | u) { \
3537 case 0: \
3538 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
3539 break; \
3540 case 1: \
3541 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
3542 break; \
3543 case 2: \
3544 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
3545 break; \
3546 case 3: \
3547 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
3548 break; \
3549 case 4: \
3550 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
3551 break; \
3552 case 5: \
3553 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
3554 break; \
3555 default: return 1; \
3556 }} while (0)
3558 #define GEN_NEON_INTEGER_OP(name) do { \
3559 switch ((size << 1) | u) { \
3560 case 0: \
3561 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
3562 break; \
3563 case 1: \
3564 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
3565 break; \
3566 case 2: \
3567 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
3568 break; \
3569 case 3: \
3570 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
3571 break; \
3572 case 4: \
3573 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
3574 break; \
3575 case 5: \
3576 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
3577 break; \
3578 default: return 1; \
3579 }} while (0)
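/* Both macros dispatch on ((size << 1) | u) to select the signed or
   unsigned helper for 8-, 16- or 32-bit elements; the _ENV variant is
   for saturating helpers that also take cpu_env. */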
3581 static TCGv neon_load_scratch(int scratch)
3582 {
3583 TCGv tmp = new_tmp();
3584 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3585 return tmp;
3586 }
3588 static void neon_store_scratch(int scratch, TCGv var)
3589 {
3590 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3591 dead_tmp(var);
3592 }
3594 static inline TCGv neon_get_scalar(int size, int reg)
3595 {
3596 TCGv tmp;
3597 if (size == 1) {
3598 tmp = neon_load_reg(reg >> 1, reg & 1);
3599 } else {
3600 tmp = neon_load_reg(reg >> 2, (reg >> 1) & 1);
3601 if (reg & 1) {
3602 gen_neon_dup_low16(tmp);
3603 } else {
3604 gen_neon_dup_high16(tmp);
3605 }
3606 }
3607 return tmp;
3608 }
3610 static void gen_neon_unzip_u8(TCGv t0, TCGv t1)
3611 {
3612 TCGv rd, rm, tmp;
3614 rd = new_tmp();
3615 rm = new_tmp();
3616 tmp = new_tmp();
3618 tcg_gen_andi_i32(rd, t0, 0xff);
3619 tcg_gen_shri_i32(tmp, t0, 8);
3620 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3621 tcg_gen_or_i32(rd, rd, tmp);
3622 tcg_gen_shli_i32(tmp, t1, 16);
3623 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3624 tcg_gen_or_i32(rd, rd, tmp);
3625 tcg_gen_shli_i32(tmp, t1, 8);
3626 tcg_gen_andi_i32(tmp, tmp, 0xff000000);
3627 tcg_gen_or_i32(rd, rd, tmp);
3629 tcg_gen_shri_i32(rm, t0, 8);
3630 tcg_gen_andi_i32(rm, rm, 0xff);
3631 tcg_gen_shri_i32(tmp, t0, 16);
3632 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3633 tcg_gen_or_i32(rm, rm, tmp);
3634 tcg_gen_shli_i32(tmp, t1, 8);
3635 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3636 tcg_gen_or_i32(rm, rm, tmp);
3637 tcg_gen_andi_i32(tmp, t1, 0xff000000);
3638 tcg_gen_or_i32(t1, rm, tmp);
3639 tcg_gen_mov_i32(t0, rd);
3641 dead_tmp(tmp);
3642 dead_tmp(rm);
3643 dead_tmp(rd);
3644 }
3646 static void gen_neon_zip_u8(TCGv t0, TCGv t1)
3647 {
3648 TCGv rd, rm, tmp;
3650 rd = new_tmp();
3651 rm = new_tmp();
3652 tmp = new_tmp();
3654 tcg_gen_andi_i32(rd, t0, 0xff);
3655 tcg_gen_shli_i32(tmp, t1, 8);
3656 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3657 tcg_gen_or_i32(rd, rd, tmp);
3658 tcg_gen_shli_i32(tmp, t0, 16);
3659 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3660 tcg_gen_or_i32(rd, rd, tmp);
3661 tcg_gen_shli_i32(tmp, t1, 24);
3662 tcg_gen_andi_i32(tmp, tmp, 0xff000000);
3663 tcg_gen_or_i32(rd, rd, tmp);
3665 tcg_gen_andi_i32(rm, t1, 0xff000000);
3666 tcg_gen_shri_i32(tmp, t0, 8);
3667 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3668 tcg_gen_or_i32(rm, rm, tmp);
3669 tcg_gen_shri_i32(tmp, t1, 8);
3670 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3671 tcg_gen_or_i32(rm, rm, tmp);
3672 tcg_gen_shri_i32(tmp, t0, 16);
3673 tcg_gen_andi_i32(tmp, tmp, 0xff);
3674 tcg_gen_or_i32(t1, rm, tmp);
3675 tcg_gen_mov_i32(t0, rd);
3677 dead_tmp(tmp);
3678 dead_tmp(rm);
3679 dead_tmp(rd);
3680 }
3682 static void gen_neon_zip_u16(TCGv t0, TCGv t1)
3683 {
3684 TCGv tmp, tmp2;
3686 tmp = new_tmp();
3687 tmp2 = new_tmp();
3689 tcg_gen_andi_i32(tmp, t0, 0xffff);
3690 tcg_gen_shli_i32(tmp2, t1, 16);
3691 tcg_gen_or_i32(tmp, tmp, tmp2);
3692 tcg_gen_andi_i32(t1, t1, 0xffff0000);
3693 tcg_gen_shri_i32(tmp2, t0, 16);
3694 tcg_gen_or_i32(t1, t1, tmp2);
3695 tcg_gen_mov_i32(t0, tmp);
3697 dead_tmp(tmp2);
3698 dead_tmp(tmp);
3699 }
3701 static void gen_neon_unzip(int reg, int q, int tmp, int size)
3702 {
3703 int n;
3704 TCGv t0, t1;
3706 for (n = 0; n < q + 1; n += 2) {
3707 t0 = neon_load_reg(reg, n);
3708 t1 = neon_load_reg(reg, n + 1);
3709 switch (size) {
3710 case 0: gen_neon_unzip_u8(t0, t1); break;
3711 case 1: gen_neon_zip_u16(t0, t1); break; /* zip and unzip are the same. */
3712 case 2: /* no-op */; break;
3713 default: abort();
3715 neon_store_scratch(tmp + n, t0);
3716 neon_store_scratch(tmp + n + 1, t1);
3717 }
3718 }
3720 static void gen_neon_trn_u8(TCGv t0, TCGv t1)
3721 {
3722 TCGv rd, tmp;
3724 rd = new_tmp();
3725 tmp = new_tmp();
3727 tcg_gen_shli_i32(rd, t0, 8);
3728 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3729 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3730 tcg_gen_or_i32(rd, rd, tmp);
3732 tcg_gen_shri_i32(t1, t1, 8);
3733 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3734 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3735 tcg_gen_or_i32(t1, t1, tmp);
3736 tcg_gen_mov_i32(t0, rd);
3738 dead_tmp(tmp);
3739 dead_tmp(rd);
3740 }
3742 static void gen_neon_trn_u16(TCGv t0, TCGv t1)
3743 {
3744 TCGv rd, tmp;
3746 rd = new_tmp();
3747 tmp = new_tmp();
3749 tcg_gen_shli_i32(rd, t0, 16);
3750 tcg_gen_andi_i32(tmp, t1, 0xffff);
3751 tcg_gen_or_i32(rd, rd, tmp);
3752 tcg_gen_shri_i32(t1, t1, 16);
3753 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3754 tcg_gen_or_i32(t1, t1, tmp);
3755 tcg_gen_mov_i32(t0, rd);
3757 dead_tmp(tmp);
3758 dead_tmp(rd);
3759 }
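/* The unzip/zip/trn helpers above implement the VUZP, VZIP and VTRN
   element permutations on 32-bit chunks, using shift-and-mask sequences
   to move the byte and halfword lanes. */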
3762 static struct {
3763 int nregs;
3764 int interleave;
3765 int spacing;
3766 } neon_ls_element_type[11] = {
3767 {4, 4, 1},
3768 {4, 4, 2},
3769 {4, 1, 1},
3770 {4, 2, 1},
3771 {3, 3, 1},
3772 {3, 3, 2},
3773 {3, 1, 1},
3774 {1, 1, 1},
3775 {2, 2, 1},
3776 {2, 2, 2},
3777 {2, 1, 1}
3778 };
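/* Element load/store layout, indexed by the op field of the insn:
   nregs is the number of D registers transferred per structure,
   interleave the element interleave pattern, and spacing selects
   consecutive (1) or alternate (2) D registers. */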
3780 /* Translate a NEON load/store element instruction. Return nonzero if the
3781 instruction is invalid. */
3782 static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
3783 {
3784 int rd, rn, rm;
3785 int op;
3786 int nregs;
3787 int interleave;
3788 int spacing;
3789 int stride;
3790 int size;
3791 int reg;
3792 int pass;
3793 int load;
3794 int shift;
3795 int n;
3796 TCGv addr;
3797 TCGv tmp;
3798 TCGv tmp2;
3799 TCGv_i64 tmp64;
3801 if (!vfp_enabled(env))
3802 return 1;
3803 VFP_DREG_D(rd, insn);
3804 rn = (insn >> 16) & 0xf;
3805 rm = insn & 0xf;
3806 load = (insn & (1 << 21)) != 0;
3807 addr = new_tmp();
3808 if ((insn & (1 << 23)) == 0) {
3809 /* Load store all elements. */
3810 op = (insn >> 8) & 0xf;
3811 size = (insn >> 6) & 3;
3812 if (op > 10)
3813 return 1;
3814 nregs = neon_ls_element_type[op].nregs;
3815 interleave = neon_ls_element_type[op].interleave;
3816 spacing = neon_ls_element_type[op].spacing;
3817 if (size == 3 && (interleave | spacing) != 1)
3818 return 1;
3819 load_reg_var(s, addr, rn);
3820 stride = (1 << size) * interleave;
3821 for (reg = 0; reg < nregs; reg++) {
3822 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
3823 load_reg_var(s, addr, rn);
3824 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
3825 } else if (interleave == 2 && nregs == 4 && reg == 2) {
3826 load_reg_var(s, addr, rn);
3827 tcg_gen_addi_i32(addr, addr, 1 << size);
3829 if (size == 3) {
3830 if (load) {
3831 tmp64 = gen_ld64(addr, IS_USER(s));
3832 neon_store_reg64(tmp64, rd);
3833 tcg_temp_free_i64(tmp64);
3834 } else {
3835 tmp64 = tcg_temp_new_i64();
3836 neon_load_reg64(tmp64, rd);
3837 gen_st64(tmp64, addr, IS_USER(s));
3838 }
3839 tcg_gen_addi_i32(addr, addr, stride);
3840 } else {
3841 for (pass = 0; pass < 2; pass++) {
3842 if (size == 2) {
3843 if (load) {
3844 tmp = gen_ld32(addr, IS_USER(s));
3845 neon_store_reg(rd, pass, tmp);
3846 } else {
3847 tmp = neon_load_reg(rd, pass);
3848 gen_st32(tmp, addr, IS_USER(s));
3849 }
3850 tcg_gen_addi_i32(addr, addr, stride);
3851 } else if (size == 1) {
3852 if (load) {
3853 tmp = gen_ld16u(addr, IS_USER(s));
3854 tcg_gen_addi_i32(addr, addr, stride);
3855 tmp2 = gen_ld16u(addr, IS_USER(s));
3856 tcg_gen_addi_i32(addr, addr, stride);
3857 gen_bfi(tmp, tmp, tmp2, 16, 0xffff);
3858 dead_tmp(tmp2);
3859 neon_store_reg(rd, pass, tmp);
3860 } else {
3861 tmp = neon_load_reg(rd, pass);
3862 tmp2 = new_tmp();
3863 tcg_gen_shri_i32(tmp2, tmp, 16);
3864 gen_st16(tmp, addr, IS_USER(s));
3865 tcg_gen_addi_i32(addr, addr, stride);
3866 gen_st16(tmp2, addr, IS_USER(s));
3867 tcg_gen_addi_i32(addr, addr, stride);
3868 }
3869 } else /* size == 0 */ {
3870 if (load) {
3871 TCGV_UNUSED(tmp2);
3872 for (n = 0; n < 4; n++) {
3873 tmp = gen_ld8u(addr, IS_USER(s));
3874 tcg_gen_addi_i32(addr, addr, stride);
3875 if (n == 0) {
3876 tmp2 = tmp;
3877 } else {
3878 gen_bfi(tmp2, tmp2, tmp, n * 8, 0xff);
3879 dead_tmp(tmp);
3880 }
3881 }
3882 neon_store_reg(rd, pass, tmp2);
3883 } else {
3884 tmp2 = neon_load_reg(rd, pass);
3885 for (n = 0; n < 4; n++) {
3886 tmp = new_tmp();
3887 if (n == 0) {
3888 tcg_gen_mov_i32(tmp, tmp2);
3889 } else {
3890 tcg_gen_shri_i32(tmp, tmp2, n * 8);
3892 gen_st8(tmp, addr, IS_USER(s));
3893 tcg_gen_addi_i32(addr, addr, stride);
3894 }
3895 dead_tmp(tmp2);
3896 }
3897 }
3898 }
3899 }
3900 rd += spacing;
3901 }
3902 stride = nregs * 8;
3903 } else {
3904 size = (insn >> 10) & 3;
3905 if (size == 3) {
3906 /* Load single element to all lanes. */
3907 if (!load)
3908 return 1;
3909 size = (insn >> 6) & 3;
3910 nregs = ((insn >> 8) & 3) + 1;
3911 stride = (insn & (1 << 5)) ? 2 : 1;
3912 load_reg_var(s, addr, rn);
3913 for (reg = 0; reg < nregs; reg++) {
3914 switch (size) {
3915 case 0:
3916 tmp = gen_ld8u(addr, IS_USER(s));
3917 gen_neon_dup_u8(tmp, 0);
3918 break;
3919 case 1:
3920 tmp = gen_ld16u(addr, IS_USER(s));
3921 gen_neon_dup_low16(tmp);
3922 break;
3923 case 2:
3924 tmp = gen_ld32(addr, IS_USER(s));
3925 break;
3926 case 3:
3927 return 1;
3928 default: /* Avoid compiler warnings. */
3929 abort();
3931 tcg_gen_addi_i32(addr, addr, 1 << size);
3932 tmp2 = new_tmp();
3933 tcg_gen_mov_i32(tmp2, tmp);
3934 neon_store_reg(rd, 0, tmp2);
3935 neon_store_reg(rd, 1, tmp);
3936 rd += stride;
3937 }
3938 stride = (1 << size) * nregs;
3939 } else {
3940 /* Single element. */
3941 pass = (insn >> 7) & 1;
3942 switch (size) {
3943 case 0:
3944 shift = ((insn >> 5) & 3) * 8;
3945 stride = 1;
3946 break;
3947 case 1:
3948 shift = ((insn >> 6) & 1) * 16;
3949 stride = (insn & (1 << 5)) ? 2 : 1;
3950 break;
3951 case 2:
3952 shift = 0;
3953 stride = (insn & (1 << 6)) ? 2 : 1;
3954 break;
3955 default:
3956 abort();
3957 }
3958 nregs = ((insn >> 8) & 3) + 1;
3959 load_reg_var(s, addr, rn);
3960 for (reg = 0; reg < nregs; reg++) {
3961 if (load) {
3962 switch (size) {
3963 case 0:
3964 tmp = gen_ld8u(addr, IS_USER(s));
3965 break;
3966 case 1:
3967 tmp = gen_ld16u(addr, IS_USER(s));
3968 break;
3969 case 2:
3970 tmp = gen_ld32(addr, IS_USER(s));
3971 break;
3972 default: /* Avoid compiler warnings. */
3973 abort();
3974 }
3975 if (size != 2) {
3976 tmp2 = neon_load_reg(rd, pass);
3977 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
3978 dead_tmp(tmp2);
3979 }
3980 neon_store_reg(rd, pass, tmp);
3981 } else { /* Store */
3982 tmp = neon_load_reg(rd, pass);
3983 if (shift)
3984 tcg_gen_shri_i32(tmp, tmp, shift);
3985 switch (size) {
3986 case 0:
3987 gen_st8(tmp, addr, IS_USER(s));
3988 break;
3989 case 1:
3990 gen_st16(tmp, addr, IS_USER(s));
3991 break;
3992 case 2:
3993 gen_st32(tmp, addr, IS_USER(s));
3994 break;
3995 }
3996 }
3997 rd += stride;
3998 tcg_gen_addi_i32(addr, addr, 1 << size);
3999 }
4000 stride = nregs * (1 << size);
4001 }
4002 }
4003 dead_tmp(addr);
4004 if (rm != 15) {
4005 TCGv base;
4007 base = load_reg(s, rn);
4008 if (rm == 13) {
4009 tcg_gen_addi_i32(base, base, stride);
4010 } else {
4011 TCGv index;
4012 index = load_reg(s, rm);
4013 tcg_gen_add_i32(base, base, index);
4014 dead_tmp(index);
4016 store_reg(s, rn, base);
4017 }
4018 return 0;
4019 }
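/* Post-indexed writeback for element load/stores: rm == 15 means no
   writeback, rm == 13 updates the base by the transfer size, and any
   other rm adds that index register to the base. */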
4021 /* Bitwise select. dest = c ? t : f. Clobbers T and F. */
4022 static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
4023 {
4024 tcg_gen_and_i32(t, t, c);
4025 tcg_gen_andc_i32(f, f, c);
4026 tcg_gen_or_i32(dest, t, f);
4027 }
4029 static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
4030 {
4031 switch (size) {
4032 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4033 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4034 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4035 default: abort();
4036 }
4037 }
4039 static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
4040 {
4041 switch (size) {
4042 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4043 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4044 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
4045 default: abort();
4046 }
4047 }
4049 static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
4050 {
4051 switch (size) {
4052 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4053 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4054 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
4055 default: abort();
4056 }
4057 }
4059 static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
4060 int q, int u)
4061 {
4062 if (q) {
4063 if (u) {
4064 switch (size) {
4065 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4066 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4067 default: abort();
4068 }
4069 } else {
4070 switch (size) {
4071 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4072 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4073 default: abort();
4074 }
4075 }
4076 } else {
4077 if (u) {
4078 switch (size) {
4079 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4080 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4081 default: abort();
4082 }
4083 } else {
4084 switch (size) {
4085 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4086 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4087 default: abort();
4088 }
4089 }
4090 }
4091 }
4093 static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
4094 {
4095 if (u) {
4096 switch (size) {
4097 case 0: gen_helper_neon_widen_u8(dest, src); break;
4098 case 1: gen_helper_neon_widen_u16(dest, src); break;
4099 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4100 default: abort();
4101 }
4102 } else {
4103 switch (size) {
4104 case 0: gen_helper_neon_widen_s8(dest, src); break;
4105 case 1: gen_helper_neon_widen_s16(dest, src); break;
4106 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4107 default: abort();
4108 }
4109 }
4110 dead_tmp(src);
4111 }
4113 static inline void gen_neon_addl(int size)
4114 {
4115 switch (size) {
4116 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4117 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4118 case 2: tcg_gen_add_i64(CPU_V001); break;
4119 default: abort();
4120 }
4121 }
4123 static inline void gen_neon_subl(int size)
4124 {
4125 switch (size) {
4126 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4127 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4128 case 2: tcg_gen_sub_i64(CPU_V001); break;
4129 default: abort();
4130 }
4131 }
4133 static inline void gen_neon_negl(TCGv_i64 var, int size)
4134 {
4135 switch (size) {
4136 case 0: gen_helper_neon_negl_u16(var, var); break;
4137 case 1: gen_helper_neon_negl_u32(var, var); break;
4138 case 2: gen_helper_neon_negl_u64(var, var); break;
4139 default: abort();
4140 }
4141 }
4143 static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
4144 {
4145 switch (size) {
4146 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4147 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
4148 default: abort();
4149 }
4150 }
4152 static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
4153 {
4154 TCGv_i64 tmp;
4156 switch ((size << 1) | u) {
4157 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4158 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4159 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4160 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4161 case 4:
4162 tmp = gen_muls_i64_i32(a, b);
4163 tcg_gen_mov_i64(dest, tmp);
4164 break;
4165 case 5:
4166 tmp = gen_mulu_i64_i32(a, b);
4167 tcg_gen_mov_i64(dest, tmp);
4168 break;
4169 default: abort();
4170 }
4171 }
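/* Widening multiply: selects a signed or unsigned 8x8->16 or 16x16->32
   Neon helper by ((size << 1) | u); the 32x32->64 cases use the host
   64-bit multiply via gen_muls_i64_i32/gen_mulu_i64_i32. */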
4173 /* Translate a NEON data processing instruction. Return nonzero if the
4174 instruction is invalid.
4175 We process data in a mixture of 32-bit and 64-bit chunks.
4176 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
4178 static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4179 {
4180 int op;
4181 int q;
4182 int rd, rn, rm;
4183 int size;
4184 int shift;
4185 int pass;
4186 int count;
4187 int pairwise;
4188 int u;
4189 int n;
4190 uint32_t imm, mask;
4191 TCGv tmp, tmp2, tmp3, tmp4, tmp5;
4192 TCGv_i64 tmp64;
4194 if (!vfp_enabled(env))
4195 return 1;
4196 q = (insn & (1 << 6)) != 0;
4197 u = (insn >> 24) & 1;
4198 VFP_DREG_D(rd, insn);
4199 VFP_DREG_N(rn, insn);
4200 VFP_DREG_M(rm, insn);
4201 size = (insn >> 20) & 3;
4202 if ((insn & (1 << 23)) == 0) {
4203 /* Three register same length. */
4204 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
4205 if (size == 3 && (op == 1 || op == 5 || op == 8 || op == 9
4206 || op == 10 || op == 11 || op == 16)) {
4207 /* 64-bit element instructions. */
4208 for (pass = 0; pass < (q ? 2 : 1); pass++) {
4209 neon_load_reg64(cpu_V0, rn + pass);
4210 neon_load_reg64(cpu_V1, rm + pass);
4211 switch (op) {
4212 case 1: /* VQADD */
4213 if (u) {
4214 gen_helper_neon_add_saturate_u64(CPU_V001);
4215 } else {
4216 gen_helper_neon_add_saturate_s64(CPU_V001);
4217 }
4218 break;
4219 case 5: /* VQSUB */
4220 if (u) {
4221 gen_helper_neon_sub_saturate_u64(CPU_V001);
4222 } else {
4223 gen_helper_neon_sub_saturate_s64(CPU_V001);
4224 }
4225 break;
4226 case 8: /* VSHL */
4227 if (u) {
4228 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4229 } else {
4230 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4231 }
4232 break;
4233 case 9: /* VQSHL */
4234 if (u) {
4235 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4236 cpu_V1, cpu_V0);
4237 } else {
4238 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4239 cpu_V1, cpu_V0);
4240 }
4241 break;
4242 case 10: /* VRSHL */
4243 if (u) {
4244 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
4245 } else {
4246 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4247 }
4248 break;
4249 case 11: /* VQRSHL */
4250 if (u) {
4251 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4252 cpu_V1, cpu_V0);
4253 } else {
4254 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4255 cpu_V1, cpu_V0);
4256 }
4257 break;
4258 case 16:
4259 if (u) {
4260 tcg_gen_sub_i64(CPU_V001);
4261 } else {
4262 tcg_gen_add_i64(CPU_V001);
4263 }
4264 break;
4265 default:
4266 abort();
4267 }
4268 neon_store_reg64(cpu_V0, rd + pass);
4269 }
4270 return 0;
4271 }
4272 switch (op) {
4273 case 8: /* VSHL */
4274 case 9: /* VQSHL */
4275 case 10: /* VRSHL */
4276 case 11: /* VQRSHL */
4277 {
4278 int rtmp;
4279 /* Shift instruction operands are reversed. */
4280 rtmp = rn;
4281 rn = rm;
4282 rm = rtmp;
4283 pairwise = 0;
4284 }
4285 break;
4286 case 20: /* VPMAX */
4287 case 21: /* VPMIN */
4288 case 23: /* VPADD */
4289 pairwise = 1;
4290 break;
4291 case 26: /* VPADD (float) */
4292 pairwise = (u && size < 2);
4293 break;
4294 case 30: /* VPMIN/VPMAX (float) */
4295 pairwise = u;
4296 break;
4297 default:
4298 pairwise = 0;
4299 break;
4300 }
4302 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4304 if (pairwise) {
4305 /* Pairwise. */
4306 if (q)
4307 n = (pass & 1) * 2;
4308 else
4309 n = 0;
4310 if (pass < q + 1) {
4311 tmp = neon_load_reg(rn, n);
4312 tmp2 = neon_load_reg(rn, n + 1);
4313 } else {
4314 tmp = neon_load_reg(rm, n);
4315 tmp2 = neon_load_reg(rm, n + 1);
4316 }
4317 } else {
4318 /* Elementwise. */
4319 tmp = neon_load_reg(rn, pass);
4320 tmp2 = neon_load_reg(rm, pass);
4321 }
4322 switch (op) {
4323 case 0: /* VHADD */
4324 GEN_NEON_INTEGER_OP(hadd);
4325 break;
4326 case 1: /* VQADD */
4327 GEN_NEON_INTEGER_OP_ENV(qadd);
4328 break;
4329 case 2: /* VRHADD */
4330 GEN_NEON_INTEGER_OP(rhadd);
4331 break;
4332 case 3: /* Logic ops. */
4333 switch ((u << 2) | size) {
4334 case 0: /* VAND */
4335 tcg_gen_and_i32(tmp, tmp, tmp2);
4336 break;
4337 case 1: /* BIC */
4338 tcg_gen_andc_i32(tmp, tmp, tmp2);
4339 break;
4340 case 2: /* VORR */
4341 tcg_gen_or_i32(tmp, tmp, tmp2);
4342 break;
4343 case 3: /* VORN */
4344 tcg_gen_orc_i32(tmp, tmp, tmp2);
4345 break;
4346 case 4: /* VEOR */
4347 tcg_gen_xor_i32(tmp, tmp, tmp2);
4348 break;
4349 case 5: /* VBSL */
4350 tmp3 = neon_load_reg(rd, pass);
4351 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
4352 dead_tmp(tmp3);
4353 break;
4354 case 6: /* VBIT */
4355 tmp3 = neon_load_reg(rd, pass);
4356 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
4357 dead_tmp(tmp3);
4358 break;
4359 case 7: /* VBIF */
4360 tmp3 = neon_load_reg(rd, pass);
4361 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
4362 dead_tmp(tmp3);
4363 break;
4364 }
4365 break;
4366 case 4: /* VHSUB */
4367 GEN_NEON_INTEGER_OP(hsub);
4368 break;
4369 case 5: /* VQSUB */
4370 GEN_NEON_INTEGER_OP_ENV(qsub);
4371 break;
4372 case 6: /* VCGT */
4373 GEN_NEON_INTEGER_OP(cgt);
4374 break;
4375 case 7: /* VCGE */
4376 GEN_NEON_INTEGER_OP(cge);
4377 break;
4378 case 8: /* VSHL */
4379 GEN_NEON_INTEGER_OP(shl);
4380 break;
4381 case 9: /* VQSHL */
4382 GEN_NEON_INTEGER_OP_ENV(qshl);
4383 break;
4384 case 10: /* VRSHL */
4385 GEN_NEON_INTEGER_OP(rshl);
4386 break;
4387 case 11: /* VQRSHL */
4388 GEN_NEON_INTEGER_OP_ENV(qrshl);
4389 break;
4390 case 12: /* VMAX */
4391 GEN_NEON_INTEGER_OP(max);
4392 break;
4393 case 13: /* VMIN */
4394 GEN_NEON_INTEGER_OP(min);
4395 break;
4396 case 14: /* VABD */
4397 GEN_NEON_INTEGER_OP(abd);
4398 break;
4399 case 15: /* VABA */
4400 GEN_NEON_INTEGER_OP(abd);
4401 dead_tmp(tmp2);
4402 tmp2 = neon_load_reg(rd, pass);
4403 gen_neon_add(size, tmp, tmp2);
4404 break;
4405 case 16:
4406 if (!u) { /* VADD */
4407 if (gen_neon_add(size, tmp, tmp2))
4408 return 1;
4409 } else { /* VSUB */
4410 switch (size) {
4411 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4412 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4413 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
4414 default: return 1;
4415 }
4416 }
4417 break;
4418 case 17:
4419 if (!u) { /* VTST */
4420 switch (size) {
4421 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4422 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4423 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
4424 default: return 1;
4425 }
4426 } else { /* VCEQ */
4427 switch (size) {
4428 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4429 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4430 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
4431 default: return 1;
4432 }
4433 }
4434 break;
4435 case 18: /* Multiply. */
4436 switch (size) {
4437 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4438 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4439 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
4440 default: return 1;
4441 }
4442 dead_tmp(tmp2);
4443 tmp2 = neon_load_reg(rd, pass);
4444 if (u) { /* VMLS */
4445 gen_neon_rsb(size, tmp, tmp2);
4446 } else { /* VMLA */
4447 gen_neon_add(size, tmp, tmp2);
4448 }
4449 break;
4450 case 19: /* VMUL */
4451 if (u) { /* polynomial */
4452 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
4453 } else { /* Integer */
4454 switch (size) {
4455 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4456 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4457 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
4458 default: return 1;
4459 }
4460 }
4461 break;
4462 case 20: /* VPMAX */
4463 GEN_NEON_INTEGER_OP(pmax);
4464 break;
4465 case 21: /* VPMIN */
4466 GEN_NEON_INTEGER_OP(pmin);
4467 break;
4468 case 22: /* Multiply high. */
4469 if (!u) { /* VQDMULH */
4470 switch (size) {
4471 case 1: gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2); break;
4472 case 2: gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2); break;
4473 default: return 1;
4475 } else { /* VQRDMULH */
4476 switch (size) {
4477 case 1: gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2); break;
4478 case 2: gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2); break;
4479 default: return 1;
4482 break;
4483 case 23: /* VPADD */
4484 if (u)
4485 return 1;
4486 switch (size) {
4487 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4488 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4489 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
4490 default: return 1;
4492 break;
4493 case 26: /* Floating point arithmetic. */
4494 switch ((u << 2) | size) {
4495 case 0: /* VADD */
4496 gen_helper_neon_add_f32(tmp, tmp, tmp2);
4497 break;
4498 case 2: /* VSUB */
4499 gen_helper_neon_sub_f32(tmp, tmp, tmp2);
4500 break;
4501 case 4: /* VPADD */
4502 gen_helper_neon_add_f32(tmp, tmp, tmp2);
4503 break;
4504 case 6: /* VABD */
4505 gen_helper_neon_abd_f32(tmp, tmp, tmp2);
4506 break;
4507 default:
4508 return 1;
4510 break;
4511 case 27: /* Float multiply. */
4512 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
4513 if (!u) {
4514 dead_tmp(tmp2);
4515 tmp2 = neon_load_reg(rd, pass);
4516 if (size == 0) {
4517 gen_helper_neon_add_f32(tmp, tmp, tmp2);
4518 } else {
4519 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
4522 break;
4523 case 28: /* Float compare. */
4524 if (!u) {
4525 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
4526 } else {
4527 if (size == 0)
4528 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
4529 else
4530 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
4532 break;
4533 case 29: /* Float compare absolute. */
4534 if (!u)
4535 return 1;
4536 if (size == 0)
4537 gen_helper_neon_acge_f32(tmp, tmp, tmp2);
4538 else
4539 gen_helper_neon_acgt_f32(tmp, tmp, tmp2);
4540 break;
4541 case 30: /* Float min/max. */
4542 if (size == 0)
4543 gen_helper_neon_max_f32(tmp, tmp, tmp2);
4544 else
4545 gen_helper_neon_min_f32(tmp, tmp, tmp2);
4546 break;
4547 case 31:
4548 if (size == 0)
4549 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
4550 else
4551 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
4552 break;
4553 default:
4554 abort();
4556 dead_tmp(tmp2);
4558 /* Save the result. For elementwise operations we can put it
4559 straight into the destination register. For pairwise operations
4560 we have to be careful to avoid clobbering the source operands. */
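/* E.g. "vpadd d0, d1, d0" (rd == rm): writing the pass-0 result
   straight to d0 would clobber a pass-1 source, hence the scratch
   registers used below.  */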
4561 if (pairwise && rd == rm) {
4562 neon_store_scratch(pass, tmp);
4563 } else {
4564 neon_store_reg(rd, pass, tmp);
4567 } /* for pass */
4568 if (pairwise && rd == rm) {
4569 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4570 tmp = neon_load_scratch(pass);
4571 neon_store_reg(rd, pass, tmp);
4574 /* End of 3 register same size operations. */
4575 } else if (insn & (1 << 4)) {
4576 if ((insn & 0x00380080) != 0) {
4577 /* Two registers and shift. */
4578 op = (insn >> 8) & 0xf;
4579 if (insn & (1 << 7)) {
4580 /* 64-bit shift. */
4581 size = 3;
4582 } else {
4583 size = 2;
4584 while ((insn & (1 << (size + 19))) == 0)
4585 size--;
4587 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
4588 /* To avoid excessive duplication of ops we implement shift
4589 by immediate using the variable shift operations. */
4590 if (op < 8) {
4591 /* Shift by immediate:
4592 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4593 /* Right shifts are encoded as N - shift, where N is the
4594 element size in bits. */
4595 if (op <= 4)
4596 shift = shift - (1 << (size + 3));
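/* shift is now negative for right shifts: e.g. with 8-bit elements
   (size == 0) an encoded value of 5 yields shift = 5 - 8 = -3, a
   right shift by 3 when passed to the variable-shift helpers.  */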
4597 if (size == 3) {
4598 count = q + 1;
4599 } else {
4600 count = q ? 4 : 2;
4602 switch (size) {
4603 case 0:
4604 imm = (uint8_t) shift;
4605 imm |= imm << 8;
4606 imm |= imm << 16;
4607 break;
4608 case 1:
4609 imm = (uint16_t) shift;
4610 imm |= imm << 16;
4611 break;
4612 case 2:
4613 case 3:
4614 imm = shift;
4615 break;
4616 default:
4617 abort();
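/* imm now holds the (possibly negative) shift count replicated into
   every element lane, e.g. -3 with size == 0 becomes 0xfdfdfdfd, so
   one 32-bit helper call shifts all lanes of a pass at once.  */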
4620 for (pass = 0; pass < count; pass++) {
4621 if (size == 3) {
4622 neon_load_reg64(cpu_V0, rm + pass);
4623 tcg_gen_movi_i64(cpu_V1, imm);
4624 switch (op) {
4625 case 0: /* VSHR */
4626 case 1: /* VSRA */
4627 if (u)
4628 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4629 else
4630 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
4631 break;
4632 case 2: /* VRSHR */
4633 case 3: /* VRSRA */
4634 if (u)
4635 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
4636 else
4637 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
4638 break;
4639 case 4: /* VSRI */
4640 if (!u)
4641 return 1;
4642 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4643 break;
4644 case 5: /* VSHL, VSLI */
4645 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4646 break;
4647 case 6: /* VQSHL */
4648 if (u)
4649 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
4650 else
4651 gen_helper_neon_qshl_s64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
4652 break;
4653 case 7: /* VQSHLU */
4654 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
4655 break;
4657 if (op == 1 || op == 3) {
4658 /* Accumulate. */
4659 neon_load_reg64(cpu_V1, rd + pass);
4660 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
4661 } else if (op == 4 || (op == 5 && u)) {
4662 /* Insert */
4663 cpu_abort(env, "VS[LR]I.64 not implemented");
4665 neon_store_reg64(cpu_V0, rd + pass);
4666 } else { /* size < 3 */
4667 /* Operands in T0 and T1. */
4668 tmp = neon_load_reg(rm, pass);
4669 tmp2 = new_tmp();
4670 tcg_gen_movi_i32(tmp2, imm);
4671 switch (op) {
4672 case 0: /* VSHR */
4673 case 1: /* VSRA */
4674 GEN_NEON_INTEGER_OP(shl);
4675 break;
4676 case 2: /* VRSHR */
4677 case 3: /* VRSRA */
4678 GEN_NEON_INTEGER_OP(rshl);
4679 break;
4680 case 4: /* VSRI */
4681 if (!u)
4682 return 1;
4683 GEN_NEON_INTEGER_OP(shl);
4684 break;
4685 case 5: /* VSHL, VSLI */
4686 switch (size) {
4687 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
4688 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
4689 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
4690 default: return 1;
4692 break;
4693 case 6: /* VQSHL */
4694 GEN_NEON_INTEGER_OP_ENV(qshl);
4695 break;
4696 case 7: /* VQSHLU */
4697 switch (size) {
4698 case 0: gen_helper_neon_qshl_u8(tmp, cpu_env, tmp, tmp2); break;
4699 case 1: gen_helper_neon_qshl_u16(tmp, cpu_env, tmp, tmp2); break;
4700 case 2: gen_helper_neon_qshl_u32(tmp, cpu_env, tmp, tmp2); break;
4701 default: return 1;
4703 break;
4705 dead_tmp(tmp2);
4707 if (op == 1 || op == 3) {
4708 /* Accumulate. */
4709 tmp2 = neon_load_reg(rd, pass);
4710 gen_neon_add(size, tmp, tmp2);
4711 dead_tmp(tmp2);
4712 } else if (op == 4 || (op == 5 && u)) {
4713 /* Insert */
4714 switch (size) {
4715 case 0:
4716 if (op == 4)
4717 mask = 0xff >> -shift;
4718 else
4719 mask = (uint8_t)(0xff << shift);
4720 mask |= mask << 8;
4721 mask |= mask << 16;
4722 break;
4723 case 1:
4724 if (op == 4)
4725 mask = 0xffff >> -shift;
4726 else
4727 mask = (uint16_t)(0xffff << shift);
4728 mask |= mask << 16;
4729 break;
4730 case 2:
4731 if (shift < -31 || shift > 31) {
4732 mask = 0;
4733 } else {
4734 if (op == 4)
4735 mask = 0xffffffffu >> -shift;
4736 else
4737 mask = 0xffffffffu << shift;
4739 break;
4740 default:
4741 abort();
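/* mask selects the bits taken from the shifted value: e.g. VSRI.8
   with a right shift of 3 has shift == -3 here, so mask becomes
   0xff >> 3 = 0x1f, replicated to 0x1f1f1f1f.  */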
4743 tmp2 = neon_load_reg(rd, pass);
4744 tcg_gen_andi_i32(tmp, tmp, mask);
4745 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
4746 tcg_gen_or_i32(tmp, tmp, tmp2);
4747 dead_tmp(tmp2);
4749 neon_store_reg(rd, pass, tmp);
4751 } /* for pass */
4752 } else if (op < 10) {
4753 /* Shift by immediate and narrow:
4754 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
4755 shift = shift - (1 << (size + 3));
4756 size++;
4757 switch (size) {
4758 case 1:
4759 imm = (uint16_t)shift;
4760 imm |= imm << 16;
4761 tmp2 = tcg_const_i32(imm);
4762 TCGV_UNUSED_I64(tmp64);
4763 break;
4764 case 2:
4765 imm = (uint32_t)shift;
4766 tmp2 = tcg_const_i32(imm);
4767 TCGV_UNUSED_I64(tmp64);
4768 break;
4769 case 3:
4770 tmp64 = tcg_const_i64(shift);
4771 TCGV_UNUSED(tmp2);
4772 break;
4773 default:
4774 abort();
4777 for (pass = 0; pass < 2; pass++) {
4778 if (size == 3) {
4779 neon_load_reg64(cpu_V0, rm + pass);
4780 if (q) {
4781 if (u)
4782 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, tmp64);
4783 else
4784 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, tmp64);
4785 } else {
4786 if (u)
4787 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, tmp64);
4788 else
4789 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, tmp64);
4791 } else {
4792 tmp = neon_load_reg(rm + pass, 0);
4793 gen_neon_shift_narrow(size, tmp, tmp2, q, u);
4794 tmp3 = neon_load_reg(rm + pass, 1);
4795 gen_neon_shift_narrow(size, tmp3, tmp2, q, u);
4796 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
4797 dead_tmp(tmp);
4798 dead_tmp(tmp3);
4800 tmp = new_tmp();
4801 if (op == 8 && !u) {
4802 gen_neon_narrow(size - 1, tmp, cpu_V0);
4803 } else {
4804 if (op == 8)
4805 gen_neon_narrow_sats(size - 1, tmp, cpu_V0);
4806 else
4807 gen_neon_narrow_satu(size - 1, tmp, cpu_V0);
4809 neon_store_reg(rd, pass, tmp);
4810 } /* for pass */
4811 if (size == 3) {
4812 tcg_temp_free_i64(tmp64);
4813 } else {
4814 dead_tmp(tmp2);
4816 } else if (op == 10) {
4817 /* VSHLL */
4818 if (q || size == 3)
4819 return 1;
4820 tmp = neon_load_reg(rm, 0);
4821 tmp2 = neon_load_reg(rm, 1);
4822 for (pass = 0; pass < 2; pass++) {
4823 if (pass == 1)
4824 tmp = tmp2;
4826 gen_neon_widen(cpu_V0, tmp, size, u);
4828 if (shift != 0) {
4829 /* The shift is less than the width of the source
4830 type, so we can just shift the whole register. */
4831 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
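/* The whole-register shift can spill each lane's sign/zero extension
   bits into the low bits of the lane above; the masking below clears
   those low bits, which are zero in a correct per-lane result.  */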
4832 if (size < 2 || !u) {
4833 uint64_t imm64;
4834 if (size == 0) {
4835 imm = (0xffu >> (8 - shift));
4836 imm |= imm << 16;
4837 } else {
4838 imm = 0xffff >> (16 - shift);
4840 imm64 = imm | (((uint64_t)imm) << 32);
4841 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
4844 neon_store_reg64(cpu_V0, rd + pass);
4846 } else if (op == 15 || op == 16) {
4847 /* VCVT fixed-point. */
4848 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4849 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
4850 if (op & 1) {
4851 if (u)
4852 gen_vfp_ulto(0, shift);
4853 else
4854 gen_vfp_slto(0, shift);
4855 } else {
4856 if (u)
4857 gen_vfp_toul(0, shift);
4858 else
4859 gen_vfp_tosl(0, shift);
4861 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
4863 } else {
4864 return 1;
4866 } else { /* (insn & 0x00380080) == 0 */
4867 int invert;
4869 op = (insn >> 8) & 0xf;
4870 /* One register and immediate. */
4871 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
4872 invert = (insn & (1 << 5)) != 0;
4873 switch (op) {
4874 case 0: case 1:
4875 /* no-op */
4876 break;
4877 case 2: case 3:
4878 imm <<= 8;
4879 break;
4880 case 4: case 5:
4881 imm <<= 16;
4882 break;
4883 case 6: case 7:
4884 imm <<= 24;
4885 break;
4886 case 8: case 9:
4887 imm |= imm << 16;
4888 break;
4889 case 10: case 11:
4890 imm = (imm << 8) | (imm << 24);
4891 break;
4892 case 12:
4893 imm = (imm << 8) | 0xff;
4894 break;
4895 case 13:
4896 imm = (imm << 16) | 0xffff;
4897 break;
4898 case 14:
4899 imm |= (imm << 8) | (imm << 16) | (imm << 24);
4900 if (invert)
4901 imm = ~imm;
4902 break;
4903 case 15:
4904 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
4905 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
4906 break;
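/* Case 15 expands the 8-bit pattern to a single-precision float
   immediate: e.g. imm == 0x70 produces 0x3f800000, i.e. 1.0f.  */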
4908 if (invert)
4909 imm = ~imm;
4911 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4912 if (op & 1 && op < 12) {
4913 tmp = neon_load_reg(rd, pass);
4914 if (invert) {
4915 /* The immediate value has already been inverted, so
4916 BIC becomes AND. */
4917 tcg_gen_andi_i32(tmp, tmp, imm);
4918 } else {
4919 tcg_gen_ori_i32(tmp, tmp, imm);
4921 } else {
4922 /* VMOV, VMVN. */
4923 tmp = new_tmp();
4924 if (op == 14 && invert) {
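/* op 14 with invert set replicates each immediate bit to a whole
   byte: e.g. imm8 == 0xa5 expands to the 64-bit pattern
   0xff00ff0000ff00ff.  */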
4925 uint32_t val;
4926 val = 0;
4927 for (n = 0; n < 4; n++) {
4928 if (imm & (1 << (n + (pass & 1) * 4)))
4929 val |= 0xff << (n * 8);
4931 tcg_gen_movi_i32(tmp, val);
4932 } else {
4933 tcg_gen_movi_i32(tmp, imm);
4936 neon_store_reg(rd, pass, tmp);
4939 } else { /* (insn & 0x00800010 == 0x00800000) */
4940 if (size != 3) {
4941 op = (insn >> 8) & 0xf;
4942 if ((insn & (1 << 6)) == 0) {
4943 /* Three registers of different lengths. */
4944 int src1_wide;
4945 int src2_wide;
4946 int prewiden;
4947 /* prewiden, src1_wide, src2_wide */
4948 static const int neon_3reg_wide[16][3] = {
4949 {1, 0, 0}, /* VADDL */
4950 {1, 1, 0}, /* VADDW */
4951 {1, 0, 0}, /* VSUBL */
4952 {1, 1, 0}, /* VSUBW */
4953 {0, 1, 1}, /* VADDHN */
4954 {0, 0, 0}, /* VABAL */
4955 {0, 1, 1}, /* VSUBHN */
4956 {0, 0, 0}, /* VABDL */
4957 {0, 0, 0}, /* VMLAL */
4958 {0, 0, 0}, /* VQDMLAL */
4959 {0, 0, 0}, /* VMLSL */
4960 {0, 0, 0}, /* VQDMLSL */
4961 {0, 0, 0}, /* Integer VMULL */
4962 {0, 0, 0}, /* VQDMULL */
4963 {0, 0, 0} /* Polynomial VMULL */
4966 prewiden = neon_3reg_wide[op][0];
4967 src1_wide = neon_3reg_wide[op][1];
4968 src2_wide = neon_3reg_wide[op][2];
4970 if (size == 0 && (op == 9 || op == 11 || op == 13))
4971 return 1;
4973 /* Avoid overlapping operands. Wide source operands are
4974 always aligned so will never overlap with wide
4975 destinations in problematic ways. */
4976 if (rd == rm && !src2_wide) {
4977 tmp = neon_load_reg(rm, 1);
4978 neon_store_scratch(2, tmp);
4979 } else if (rd == rn && !src1_wide) {
4980 tmp = neon_load_reg(rn, 1);
4981 neon_store_scratch(2, tmp);
4983 TCGV_UNUSED(tmp3);
4984 for (pass = 0; pass < 2; pass++) {
4985 if (src1_wide) {
4986 neon_load_reg64(cpu_V0, rn + pass);
4987 TCGV_UNUSED(tmp);
4988 } else {
4989 if (pass == 1 && rd == rn) {
4990 tmp = neon_load_scratch(2);
4991 } else {
4992 tmp = neon_load_reg(rn, pass);
4994 if (prewiden) {
4995 gen_neon_widen(cpu_V0, tmp, size, u);
4998 if (src2_wide) {
4999 neon_load_reg64(cpu_V1, rm + pass);
5000 TCGV_UNUSED(tmp2);
5001 } else {
5002 if (pass == 1 && rd == rm) {
5003 tmp2 = neon_load_scratch(2);
5004 } else {
5005 tmp2 = neon_load_reg(rm, pass);
5007 if (prewiden) {
5008 gen_neon_widen(cpu_V1, tmp2, size, u);
5011 switch (op) {
5012 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
5013 gen_neon_addl(size);
5014 break;
5015 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
5016 gen_neon_subl(size);
5017 break;
5018 case 5: case 7: /* VABAL, VABDL */
5019 switch ((size << 1) | u) {
5020 case 0:
5021 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5022 break;
5023 case 1:
5024 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5025 break;
5026 case 2:
5027 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5028 break;
5029 case 3:
5030 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5031 break;
5032 case 4:
5033 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5034 break;
5035 case 5:
5036 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5037 break;
5038 default: abort();
5040 dead_tmp(tmp2);
5041 dead_tmp(tmp);
5042 break;
5043 case 8: case 9: case 10: case 11: case 12: case 13:
5044 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
5045 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5046 dead_tmp(tmp2);
5047 dead_tmp(tmp);
5048 break;
5049 case 14: /* Polynomial VMULL */
5050 cpu_abort(env, "Polynomial VMULL not implemented");
5052 default: /* 15 is RESERVED. */
5053 return 1;
5055 if (op == 5 || op == 13 || (op >= 8 && op <= 11)) {
5056 /* Accumulate. */
5057 if (op == 10 || op == 11) {
5058 gen_neon_negl(cpu_V0, size);
5061 if (op != 13) {
5062 neon_load_reg64(cpu_V1, rd + pass);
5065 switch (op) {
5066 case 5: case 8: case 10: /* VABAL, VMLAL, VMLSL */
5067 gen_neon_addl(size);
5068 break;
5069 case 9: case 11: /* VQDMLAL, VQDMLSL */
5070 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5071 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5072 break;
5074 case 13: /* VQDMULL */
5075 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5076 break;
5077 default:
5078 abort();
5080 neon_store_reg64(cpu_V0, rd + pass);
5081 } else if (op == 4 || op == 6) {
5082 /* Narrowing operation. */
5083 tmp = new_tmp();
5084 if (!u) {
5085 switch (size) {
5086 case 0:
5087 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5088 break;
5089 case 1:
5090 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5091 break;
5092 case 2:
5093 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5094 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5095 break;
5096 default: abort();
5098 } else {
5099 switch (size) {
5100 case 0:
5101 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5102 break;
5103 case 1:
5104 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5105 break;
5106 case 2:
5107 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5108 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5109 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5110 break;
5111 default: abort();
5114 if (pass == 0) {
5115 tmp3 = tmp;
5116 } else {
5117 neon_store_reg(rd, 0, tmp3);
5118 neon_store_reg(rd, 1, tmp);
5120 } else {
5121 /* Write back the result. */
5122 neon_store_reg64(cpu_V0, rd + pass);
5125 } else {
5126 /* Two registers and a scalar. */
5127 switch (op) {
5128 case 0: /* Integer VMLA scalar */
5129 case 1: /* Float VMLA scalar */
5130 case 4: /* Integer VMLS scalar */
5131 case 5: /* Floating point VMLS scalar */
5132 case 8: /* Integer VMUL scalar */
5133 case 9: /* Floating point VMUL scalar */
5134 case 12: /* VQDMULH scalar */
5135 case 13: /* VQRDMULH scalar */
5136 tmp = neon_get_scalar(size, rm);
5137 neon_store_scratch(0, tmp);
5138 for (pass = 0; pass < (u ? 4 : 2); pass++) {
5139 tmp = neon_load_scratch(0);
5140 tmp2 = neon_load_reg(rn, pass);
5141 if (op == 12) {
5142 if (size == 1) {
5143 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5144 } else {
5145 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5147 } else if (op == 13) {
5148 if (size == 1) {
5149 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5150 } else {
5151 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5153 } else if (op & 1) {
5154 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
5155 } else {
5156 switch (size) {
5157 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5158 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5159 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
5160 default: return 1;
5163 dead_tmp(tmp2);
5164 if (op < 8) {
5165 /* Accumulate. */
5166 tmp2 = neon_load_reg(rd, pass);
5167 switch (op) {
5168 case 0:
5169 gen_neon_add(size, tmp, tmp2);
5170 break;
5171 case 1:
5172 gen_helper_neon_add_f32(tmp, tmp, tmp2);
5173 break;
5174 case 4:
5175 gen_neon_rsb(size, tmp, tmp2);
5176 break;
5177 case 5:
5178 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
5179 break;
5180 default:
5181 abort();
5183 dead_tmp(tmp2);
5185 neon_store_reg(rd, pass, tmp);
5187 break;
5188 case 2: /* VMLAL scalar */
5189 case 3: /* VQDMLAL scalar */
5190 case 6: /* VMLSL scalar */
5191 case 7: /* VQDMLSL scalar */
5192 case 10: /* VMULL scalar */
5193 case 11: /* VQDMULL scalar */
5194 if (size == 0 && (op == 3 || op == 7 || op == 11))
5195 return 1;
5197 tmp2 = neon_get_scalar(size, rm);
5198 tmp3 = neon_load_reg(rn, 1);
5200 for (pass = 0; pass < 2; pass++) {
5201 if (pass == 0) {
5202 tmp = neon_load_reg(rn, 0);
5203 } else {
5204 tmp = tmp3;
5206 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5207 dead_tmp(tmp);
5208 if (op == 6 || op == 7) {
5209 gen_neon_negl(cpu_V0, size);
5211 if (op != 11) {
5212 neon_load_reg64(cpu_V1, rd + pass);
5214 switch (op) {
5215 case 2: case 6:
5216 gen_neon_addl(size);
5217 break;
5218 case 3: case 7:
5219 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5220 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5221 break;
5222 case 10:
5223 /* no-op */
5224 break;
5225 case 11:
5226 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5227 break;
5228 default:
5229 abort();
5231 neon_store_reg64(cpu_V0, rd + pass);
5234 dead_tmp(tmp2);
5236 break;
5237 default: /* 14 and 15 are RESERVED */
5238 return 1;
5241 } else { /* size == 3 */
5242 if (!u) {
5243 /* Extract. */
5244 imm = (insn >> 8) & 0xf;
5246 if (imm > 7 && !q)
5247 return 1;
5249 if (imm == 0) {
5250 neon_load_reg64(cpu_V0, rn);
5251 if (q) {
5252 neon_load_reg64(cpu_V1, rn + 1);
5254 } else if (imm == 8) {
5255 neon_load_reg64(cpu_V0, rn + 1);
5256 if (q) {
5257 neon_load_reg64(cpu_V1, rm);
5259 } else if (q) {
5260 tmp64 = tcg_temp_new_i64();
5261 if (imm < 8) {
5262 neon_load_reg64(cpu_V0, rn);
5263 neon_load_reg64(tmp64, rn + 1);
5264 } else {
5265 neon_load_reg64(cpu_V0, rn + 1);
5266 neon_load_reg64(tmp64, rm);
5268 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
5269 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
5270 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5271 if (imm < 8) {
5272 neon_load_reg64(cpu_V1, rm);
5273 } else {
5274 neon_load_reg64(cpu_V1, rm + 1);
5275 imm -= 8;
5277 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5278 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5279 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
5280 tcg_temp_free_i64(tmp64);
5281 } else {
5282 /* BUGFIX */
5283 neon_load_reg64(cpu_V0, rn);
5284 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
5285 neon_load_reg64(cpu_V1, rm);
5286 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5287 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5289 neon_store_reg64(cpu_V0, rd);
5290 if (q) {
5291 neon_store_reg64(cpu_V1, rd + 1);
5293 } else if ((insn & (1 << 11)) == 0) {
5294 /* Two register misc. */
5295 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5296 size = (insn >> 18) & 3;
5297 switch (op) {
5298 case 0: /* VREV64 */
5299 if (size == 3)
5300 return 1;
5301 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5302 tmp = neon_load_reg(rm, pass * 2);
5303 tmp2 = neon_load_reg(rm, pass * 2 + 1);
5304 switch (size) {
5305 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5306 case 1: gen_swap_half(tmp); break;
5307 case 2: /* no-op */ break;
5308 default: abort();
5310 neon_store_reg(rd, pass * 2 + 1, tmp);
5311 if (size == 2) {
5312 neon_store_reg(rd, pass * 2, tmp2);
5313 } else {
5314 switch (size) {
5315 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5316 case 1: gen_swap_half(tmp2); break;
5317 default: abort();
5319 neon_store_reg(rd, pass * 2, tmp2);
5322 break;
5323 case 4: case 5: /* VPADDL */
5324 case 12: case 13: /* VPADAL */
5325 if (size == 3)
5326 return 1;
5327 for (pass = 0; pass < q + 1; pass++) {
5328 tmp = neon_load_reg(rm, pass * 2);
5329 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5330 tmp = neon_load_reg(rm, pass * 2 + 1);
5331 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5332 switch (size) {
5333 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5334 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5335 case 2: tcg_gen_add_i64(CPU_V001); break;
5336 default: abort();
5338 if (op >= 12) {
5339 /* Accumulate. */
5340 neon_load_reg64(cpu_V1, rd + pass);
5341 gen_neon_addl(size);
5343 neon_store_reg64(cpu_V0, rd + pass);
5345 break;
5346 case 33: /* VTRN */
5347 if (size == 2) {
5348 for (n = 0; n < (q ? 4 : 2); n += 2) {
5349 tmp = neon_load_reg(rm, n);
5350 tmp2 = neon_load_reg(rd, n + 1);
5351 neon_store_reg(rm, n, tmp2);
5352 neon_store_reg(rd, n + 1, tmp);
5354 } else {
5355 goto elementwise;
5357 break;
5358 case 34: /* VUZP */
5359 /* Reg Before After
5360 Rd A3 A2 A1 A0 B2 B0 A2 A0
5361 Rm B3 B2 B1 B0 B3 B1 A3 A1
5363 if (size == 3)
5364 return 1;
5365 gen_neon_unzip(rd, q, 0, size);
5366 gen_neon_unzip(rm, q, 4, size);
5367 if (q) {
5368 static int unzip_order_q[8] =
5369 {0, 2, 4, 6, 1, 3, 5, 7};
5370 for (n = 0; n < 8; n++) {
5371 int reg = (n < 4) ? rd : rm;
5372 tmp = neon_load_scratch(unzip_order_q[n]);
5373 neon_store_reg(reg, n % 4, tmp);
5375 } else {
5376 static int unzip_order[4] =
5377 {0, 4, 1, 5};
5378 for (n = 0; n < 4; n++) {
5379 int reg = (n < 2) ? rd : rm;
5380 tmp = neon_load_scratch(unzip_order[n]);
5381 neon_store_reg(reg, n % 2, tmp);
5384 break;
5385 case 35: /* VZIP */
5386 /* Reg Before After
5387 Rd A3 A2 A1 A0 B1 A1 B0 A0
5388 Rm B3 B2 B1 B0 B3 A3 B2 A2
5390 if (size == 3)
5391 return 1;
5392 count = (q ? 4 : 2);
5393 for (n = 0; n < count; n++) {
5394 tmp = neon_load_reg(rd, n);
5395 tmp2 = neon_load_reg(rm, n);
5396 switch (size) {
5397 case 0: gen_neon_zip_u8(tmp, tmp2); break;
5398 case 1: gen_neon_zip_u16(tmp, tmp2); break;
5399 case 2: /* no-op */; break;
5400 default: abort();
5402 neon_store_scratch(n * 2, tmp);
5403 neon_store_scratch(n * 2 + 1, tmp2);
5405 for (n = 0; n < count * 2; n++) {
5406 int reg = (n < count) ? rd : rm;
5407 tmp = neon_load_scratch(n);
5408 neon_store_reg(reg, n % count, tmp);
5410 break;
5411 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
5412 if (size == 3)
5413 return 1;
5414 TCGV_UNUSED(tmp2);
5415 for (pass = 0; pass < 2; pass++) {
5416 neon_load_reg64(cpu_V0, rm + pass);
5417 tmp = new_tmp();
5418 if (op == 36 && q == 0) {
5419 gen_neon_narrow(size, tmp, cpu_V0);
5420 } else if (q) {
5421 gen_neon_narrow_satu(size, tmp, cpu_V0);
5422 } else {
5423 gen_neon_narrow_sats(size, tmp, cpu_V0);
5425 if (pass == 0) {
5426 tmp2 = tmp;
5427 } else {
5428 neon_store_reg(rd, 0, tmp2);
5429 neon_store_reg(rd, 1, tmp);
5432 break;
5433 case 38: /* VSHLL */
5434 if (q || size == 3)
5435 return 1;
5436 tmp = neon_load_reg(rm, 0);
5437 tmp2 = neon_load_reg(rm, 1);
5438 for (pass = 0; pass < 2; pass++) {
5439 if (pass == 1)
5440 tmp = tmp2;
5441 gen_neon_widen(cpu_V0, tmp, size, 1);
5442 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
5443 neon_store_reg64(cpu_V0, rd + pass);
5445 break;
5446 case 44: /* VCVT.F16.F32 */
5447 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
5448 return 1;
5449 tmp = new_tmp();
5450 tmp2 = new_tmp();
5451 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
5452 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5453 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
5454 gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5455 tcg_gen_shli_i32(tmp2, tmp2, 16);
5456 tcg_gen_or_i32(tmp2, tmp2, tmp);
5457 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
5458 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5459 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
5460 neon_store_reg(rd, 0, tmp2);
5461 tmp2 = new_tmp();
5462 gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5463 tcg_gen_shli_i32(tmp2, tmp2, 16);
5464 tcg_gen_or_i32(tmp2, tmp2, tmp);
5465 neon_store_reg(rd, 1, tmp2);
5466 dead_tmp(tmp);
5467 break;
5468 case 46: /* VCVT.F32.F16 */
5469 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
5470 return 1;
5471 tmp3 = new_tmp();
5472 tmp = neon_load_reg(rm, 0);
5473 tmp2 = neon_load_reg(rm, 1);
5474 tcg_gen_ext16u_i32(tmp3, tmp);
5475 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5476 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
5477 tcg_gen_shri_i32(tmp3, tmp, 16);
5478 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5479 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
5480 dead_tmp(tmp);
5481 tcg_gen_ext16u_i32(tmp3, tmp2);
5482 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5483 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
5484 tcg_gen_shri_i32(tmp3, tmp2, 16);
5485 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5486 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
5487 dead_tmp(tmp2);
5488 dead_tmp(tmp3);
5489 break;
5490 default:
5491 elementwise:
5492 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5493 if (op == 30 || op == 31 || op >= 58) {
5494 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5495 neon_reg_offset(rm, pass));
5496 TCGV_UNUSED(tmp);
5497 } else {
5498 tmp = neon_load_reg(rm, pass);
5500 switch (op) {
5501 case 1: /* VREV32 */
5502 switch (size) {
5503 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5504 case 1: gen_swap_half(tmp); break;
5505 default: return 1;
5507 break;
5508 case 2: /* VREV16 */
5509 if (size != 0)
5510 return 1;
5511 gen_rev16(tmp);
5512 break;
5513 case 8: /* CLS */
5514 switch (size) {
5515 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
5516 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
5517 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
5518 default: return 1;
5520 break;
5521 case 9: /* CLZ */
5522 switch (size) {
5523 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
5524 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
5525 case 2: gen_helper_clz(tmp, tmp); break;
5526 default: return 1;
5528 break;
5529 case 10: /* CNT */
5530 if (size != 0)
5531 return 1;
5532 gen_helper_neon_cnt_u8(tmp, tmp);
5533 break;
5534 case 11: /* VNOT */
5535 if (size != 0)
5536 return 1;
5537 tcg_gen_not_i32(tmp, tmp);
5538 break;
5539 case 14: /* VQABS */
5540 switch (size) {
5541 case 0: gen_helper_neon_qabs_s8(tmp, cpu_env, tmp); break;
5542 case 1: gen_helper_neon_qabs_s16(tmp, cpu_env, tmp); break;
5543 case 2: gen_helper_neon_qabs_s32(tmp, cpu_env, tmp); break;
5544 default: return 1;
5546 break;
5547 case 15: /* VQNEG */
5548 switch (size) {
5549 case 0: gen_helper_neon_qneg_s8(tmp, cpu_env, tmp); break;
5550 case 1: gen_helper_neon_qneg_s16(tmp, cpu_env, tmp); break;
5551 case 2: gen_helper_neon_qneg_s32(tmp, cpu_env, tmp); break;
5552 default: return 1;
5554 break;
5555 case 16: case 19: /* VCGT #0, VCLE #0 */
5556 tmp2 = tcg_const_i32(0);
5557 switch(size) {
5558 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
5559 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
5560 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
5561 default: return 1;
5563 tcg_temp_free(tmp2);
5564 if (op == 19)
5565 tcg_gen_not_i32(tmp, tmp);
5566 break;
5567 case 17: case 20: /* VCGE #0, VCLT #0 */
5568 tmp2 = tcg_const_i32(0);
5569 switch(size) {
5570 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
5571 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
5572 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
5573 default: return 1;
5575 tcg_temp_free(tmp2);
5576 if (op == 20)
5577 tcg_gen_not_i32(tmp, tmp);
5578 break;
5579 case 18: /* VCEQ #0 */
5580 tmp2 = tcg_const_i32(0);
5581 switch(size) {
5582 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5583 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5584 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
5585 default: return 1;
5587 tcg_temp_free(tmp2);
5588 break;
5589 case 22: /* VABS */
5590 switch(size) {
5591 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
5592 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
5593 case 2: tcg_gen_abs_i32(tmp, tmp); break;
5594 default: return 1;
5596 break;
5597 case 23: /* VNEG */
5598 if (size == 3)
5599 return 1;
5600 tmp2 = tcg_const_i32(0);
5601 gen_neon_rsb(size, tmp, tmp2);
5602 tcg_temp_free(tmp2);
5603 break;
5604 case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
5605 tmp2 = tcg_const_i32(0);
5606 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
5607 tcg_temp_free(tmp2);
5608 if (op == 27)
5609 tcg_gen_not_i32(tmp, tmp);
5610 break;
5611 case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
5612 tmp2 = tcg_const_i32(0);
5613 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
5614 tcg_temp_free(tmp2);
5615 if (op == 28)
5616 tcg_gen_not_i32(tmp, tmp);
5617 break;
5618 case 26: /* Float VCEQ #0 */
5619 tmp2 = tcg_const_i32(0);
5620 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
5621 tcg_temp_free(tmp2);
5622 break;
5623 case 30: /* Float VABS */
5624 gen_vfp_abs(0);
5625 break;
5626 case 31: /* Float VNEG */
5627 gen_vfp_neg(0);
5628 break;
5629 case 32: /* VSWP */
5630 tmp2 = neon_load_reg(rd, pass);
5631 neon_store_reg(rm, pass, tmp2);
5632 break;
5633 case 33: /* VTRN */
5634 tmp2 = neon_load_reg(rd, pass);
5635 switch (size) {
5636 case 0: gen_neon_trn_u8(tmp, tmp2); break;
5637 case 1: gen_neon_trn_u16(tmp, tmp2); break;
5638 case 2: abort();
5639 default: return 1;
5641 neon_store_reg(rm, pass, tmp2);
5642 break;
5643 case 56: /* Integer VRECPE */
5644 gen_helper_recpe_u32(tmp, tmp, cpu_env);
5645 break;
5646 case 57: /* Integer VRSQRTE */
5647 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
5648 break;
5649 case 58: /* Float VRECPE */
5650 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
5651 break;
5652 case 59: /* Float VRSQRTE */
5653 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
5654 break;
5655 case 60: /* VCVT.F32.S32 */
5656 gen_vfp_tosiz(0);
5657 break;
5658 case 61: /* VCVT.F32.U32 */
5659 gen_vfp_touiz(0);
5660 break;
5661 case 62: /* VCVT.S32.F32 */
5662 gen_vfp_sito(0);
5663 break;
5664 case 63: /* VCVT.U32.F32 */
5665 gen_vfp_uito(0);
5666 break;
5667 default:
5668 /* Reserved: 21, 29, 39-56 */
5669 return 1;
5671 if (op == 30 || op == 31 || op >= 58) {
5672 tcg_gen_st_f32(cpu_F0s, cpu_env,
5673 neon_reg_offset(rd, pass));
5674 } else {
5675 neon_store_reg(rd, pass, tmp);
5678 break;
5680 } else if ((insn & (1 << 10)) == 0) {
5681 /* VTBL, VTBX. */
5682 n = ((insn >> 5) & 0x18) + 8;
5683 if (insn & (1 << 6)) {
5684 tmp = neon_load_reg(rd, 0);
5685 } else {
5686 tmp = new_tmp();
5687 tcg_gen_movi_i32(tmp, 0);
5689 tmp2 = neon_load_reg(rm, 0);
5690 tmp4 = tcg_const_i32(rn);
5691 tmp5 = tcg_const_i32(n);
5692 gen_helper_neon_tbl(tmp2, tmp2, tmp, tmp4, tmp5);
5693 dead_tmp(tmp);
5694 if (insn & (1 << 6)) {
5695 tmp = neon_load_reg(rd, 1);
5696 } else {
5697 tmp = new_tmp();
5698 tcg_gen_movi_i32(tmp, 0);
5700 tmp3 = neon_load_reg(rm, 1);
5701 gen_helper_neon_tbl(tmp3, tmp3, tmp, tmp4, tmp5);
5702 tcg_temp_free_i32(tmp5);
5703 tcg_temp_free_i32(tmp4);
5704 neon_store_reg(rd, 0, tmp2);
5705 neon_store_reg(rd, 1, tmp3);
5706 dead_tmp(tmp);
5707 } else if ((insn & 0x380) == 0) {
5708 /* VDUP */
5709 if (insn & (1 << 19)) {
5710 tmp = neon_load_reg(rm, 1);
5711 } else {
5712 tmp = neon_load_reg(rm, 0);
5714 if (insn & (1 << 16)) {
5715 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
5716 } else if (insn & (1 << 17)) {
5717 if ((insn >> 18) & 1)
5718 gen_neon_dup_high16(tmp);
5719 else
5720 gen_neon_dup_low16(tmp);
5722 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5723 tmp2 = new_tmp();
5724 tcg_gen_mov_i32(tmp2, tmp);
5725 neon_store_reg(rd, pass, tmp2);
5727 dead_tmp(tmp);
5728 } else {
5729 return 1;
5733 return 0;
5736 static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
5738 int crn = (insn >> 16) & 0xf;
5739 int crm = insn & 0xf;
5740 int op1 = (insn >> 21) & 7;
5741 int op2 = (insn >> 5) & 7;
5742 int rt = (insn >> 12) & 0xf;
5743 TCGv tmp;
5745 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5746 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5747 /* TEECR */
5748 if (IS_USER(s))
5749 return 1;
5750 tmp = load_cpu_field(teecr);
5751 store_reg(s, rt, tmp);
5752 return 0;
5754 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5755 /* TEEHBR */
5756 if (IS_USER(s) && (env->teecr & 1))
5757 return 1;
5758 tmp = load_cpu_field(teehbr);
5759 store_reg(s, rt, tmp);
5760 return 0;
5763 fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
5764 op1, crn, crm, op2);
5765 return 1;
5768 static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
5770 int crn = (insn >> 16) & 0xf;
5771 int crm = insn & 0xf;
5772 int op1 = (insn >> 21) & 7;
5773 int op2 = (insn >> 5) & 7;
5774 int rt = (insn >> 12) & 0xf;
5775 TCGv tmp;
5777 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5778 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5779 /* TEECR */
5780 if (IS_USER(s))
5781 return 1;
5782 tmp = load_reg(s, rt);
5783 gen_helper_set_teecr(cpu_env, tmp);
5784 dead_tmp(tmp);
5785 return 0;
5787 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5788 /* TEEHBR */
5789 if (IS_USER(s) && (env->teecr & 1))
5790 return 1;
5791 tmp = load_reg(s, rt);
5792 store_cpu_field(tmp, teehbr);
5793 return 0;
5796 fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
5797 op1, crn, crm, op2);
5798 return 1;
5801 static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
5803 int cpnum;
5805 cpnum = (insn >> 8) & 0xf;
5806 if (arm_feature(env, ARM_FEATURE_XSCALE)
5807 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
5808 return 1;
5810 switch (cpnum) {
5811 case 0:
5812 case 1:
5813 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5814 return disas_iwmmxt_insn(env, s, insn);
5815 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
5816 return disas_dsp_insn(env, s, insn);
5818 return 1;
5819 case 10:
5820 case 11:
5821 return disas_vfp_insn (env, s, insn);
5822 case 14:
5823 /* Coprocessors 7-15 are architecturally reserved by ARM.
5824 Unfortunately Intel decided to ignore this. */
5825 if (arm_feature(env, ARM_FEATURE_XSCALE))
5826 goto board;
5827 if (insn & (1 << 20))
5828 return disas_cp14_read(env, s, insn);
5829 else
5830 return disas_cp14_write(env, s, insn);
5831 case 15:
5832 return disas_cp15_insn (env, s, insn);
5833 default:
5834 board:
5835 /* Unknown coprocessor. See if the board has hooked it. */
5836 return disas_cp_insn (env, s, insn);
5841 /* Store a 64-bit value to a register pair. Clobbers val. */
5842 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5844 TCGv tmp;
5845 tmp = new_tmp();
5846 tcg_gen_trunc_i64_i32(tmp, val);
5847 store_reg(s, rlow, tmp);
5848 tmp = new_tmp();
5849 tcg_gen_shri_i64(val, val, 32);
5850 tcg_gen_trunc_i64_i32(tmp, val);
5851 store_reg(s, rhigh, tmp);
5854 /* load a 32-bit value from a register and perform a 64-bit accumulate. */
5855 static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5857 TCGv_i64 tmp;
5858 TCGv tmp2;
5860 /* Load value and extend to 64 bits. */
5861 tmp = tcg_temp_new_i64();
5862 tmp2 = load_reg(s, rlow);
5863 tcg_gen_extu_i32_i64(tmp, tmp2);
5864 dead_tmp(tmp2);
5865 tcg_gen_add_i64(val, val, tmp);
5866 tcg_temp_free_i64(tmp);
5869 /* load and add a 64-bit value from a register pair. */
5870 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5872 TCGv_i64 tmp;
5873 TCGv tmpl;
5874 TCGv tmph;
5876 /* Load 64-bit value rd:rn. */
5877 tmpl = load_reg(s, rlow);
5878 tmph = load_reg(s, rhigh);
5879 tmp = tcg_temp_new_i64();
5880 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
5881 dead_tmp(tmpl);
5882 dead_tmp(tmph);
5883 tcg_gen_add_i64(val, val, tmp);
5884 tcg_temp_free_i64(tmp);
5887 /* Set N and Z flags from a 64-bit value. */
5888 static void gen_logicq_cc(TCGv_i64 val)
5890 TCGv tmp = new_tmp();
5891 gen_helper_logicq_cc(tmp, val);
5892 gen_logic_CC(tmp);
5893 dead_tmp(tmp);
5896 /* Load/Store exclusive instructions are implemented by remembering
5897 the value/address loaded, and seeing if these are the same
5898 when the store is performed. This should be sufficient to implement
5899 the architecturally mandated semantics, and avoids having to monitor
5900 regular stores.
5902 In system emulation mode only one CPU will be running at once, so
5903 this sequence is effectively atomic. In user emulation mode we
5904 throw an exception and handle the atomic operation elsewhere. */
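/* A typical guest sequence this must support (illustrative only):
 *     1: ldrex   r0, [r1]
 *        ...                  @ compute the updated value in r0
 *        strex   r2, r0, [r1]
 *        cmp     r2, #0       @ strex writes 0 on success, 1 on failure
 *        bne     1b
 */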
5905 static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
5906 TCGv addr, int size)
5908 TCGv tmp;
5910 switch (size) {
5911 case 0:
5912 tmp = gen_ld8u(addr, IS_USER(s));
5913 break;
5914 case 1:
5915 tmp = gen_ld16u(addr, IS_USER(s));
5916 break;
5917 case 2:
5918 case 3:
5919 tmp = gen_ld32(addr, IS_USER(s));
5920 break;
5921 default:
5922 abort();
5924 tcg_gen_mov_i32(cpu_exclusive_val, tmp);
5925 store_reg(s, rt, tmp);
5926 if (size == 3) {
5927 tcg_gen_addi_i32(addr, addr, 4);
5928 tmp = gen_ld32(addr, IS_USER(s));
5929 tcg_gen_mov_i32(cpu_exclusive_high, tmp);
5930 store_reg(s, rt2, tmp);
5932 tcg_gen_mov_i32(cpu_exclusive_addr, addr);
5935 static void gen_clrex(DisasContext *s)
5937 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
5940 #ifdef CONFIG_USER_ONLY
5941 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
5942 TCGv addr, int size)
5944 tcg_gen_mov_i32(cpu_exclusive_test, addr);
5945 tcg_gen_movi_i32(cpu_exclusive_info,
5946 size | (rd << 4) | (rt << 8) | (rt2 << 12));
5947 gen_set_condexec(s);
5948 gen_set_pc_im(s->pc - 4);
5949 gen_exception(EXCP_STREX);
5950 s->is_jmp = DISAS_JUMP;
5952 #else
5953 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
5954 TCGv addr, int size)
5956 TCGv tmp;
5957 int done_label;
5958 int fail_label;
5960 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
5961 [addr] = {Rt};
5962 {Rd} = 0;
5963 } else {
5964 {Rd} = 1;
5965 } */
5966 fail_label = gen_new_label();
5967 done_label = gen_new_label();
5968 tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
5969 switch (size) {
5970 case 0:
5971 tmp = gen_ld8u(addr, IS_USER(s));
5972 break;
5973 case 1:
5974 tmp = gen_ld16u(addr, IS_USER(s));
5975 break;
5976 case 2:
5977 case 3:
5978 tmp = gen_ld32(addr, IS_USER(s));
5979 break;
5980 default:
5981 abort();
5983 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
5984 dead_tmp(tmp);
5985 if (size == 3) {
5986 TCGv tmp2 = new_tmp();
5987 tcg_gen_addi_i32(tmp2, addr, 4);
5988 tmp = gen_ld32(tmp2, IS_USER(s));
5989 dead_tmp(tmp2);
5990 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
5991 dead_tmp(tmp);
5993 tmp = load_reg(s, rt);
5994 switch (size) {
5995 case 0:
5996 gen_st8(tmp, addr, IS_USER(s));
5997 break;
5998 case 1:
5999 gen_st16(tmp, addr, IS_USER(s));
6000 break;
6001 case 2:
6002 case 3:
6003 gen_st32(tmp, addr, IS_USER(s));
6004 break;
6005 default:
6006 abort();
6008 if (size == 3) {
6009 tcg_gen_addi_i32(addr, addr, 4);
6010 tmp = load_reg(s, rt2);
6011 gen_st32(tmp, addr, IS_USER(s));
6013 tcg_gen_movi_i32(cpu_R[rd], 0);
6014 tcg_gen_br(done_label);
6015 gen_set_label(fail_label);
6016 tcg_gen_movi_i32(cpu_R[rd], 1);
6017 gen_set_label(done_label);
6018 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6020 #endif
6022 static void disas_arm_insn(CPUState * env, DisasContext *s)
6024 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
6025 TCGv tmp;
6026 TCGv tmp2;
6027 TCGv tmp3;
6028 TCGv addr;
6029 TCGv_i64 tmp64;
6031 insn = ldl_code(s->pc);
6032 s->pc += 4;
6034 /* M variants do not implement ARM mode. */
6035 if (IS_M(env))
6036 goto illegal_op;
6037 cond = insn >> 28;
6038 if (cond == 0xf){
6039 /* Unconditional instructions. */
6040 if (((insn >> 25) & 7) == 1) {
6041 /* NEON Data processing. */
6042 if (!arm_feature(env, ARM_FEATURE_NEON))
6043 goto illegal_op;
6045 if (disas_neon_data_insn(env, s, insn))
6046 goto illegal_op;
6047 return;
6049 if ((insn & 0x0f100000) == 0x04000000) {
6050 /* NEON load/store. */
6051 if (!arm_feature(env, ARM_FEATURE_NEON))
6052 goto illegal_op;
6054 if (disas_neon_ls_insn(env, s, insn))
6055 goto illegal_op;
6056 return;
6058 if ((insn & 0x0d70f000) == 0x0550f000)
6059 return; /* PLD */
6060 else if ((insn & 0x0ffffdff) == 0x01010000) {
6061 ARCH(6);
6062 /* setend */
6063 if (insn & (1 << 9)) {
6064 /* BE8 mode not implemented. */
6065 goto illegal_op;
6067 return;
6068 } else if ((insn & 0x0fffff00) == 0x057ff000) {
6069 switch ((insn >> 4) & 0xf) {
6070 case 1: /* clrex */
6071 ARCH(6K);
6072 gen_clrex(s);
6073 return;
6074 case 4: /* dsb */
6075 case 5: /* dmb */
6076 case 6: /* isb */
6077 ARCH(7);
6078 /* We don't emulate caches so these are a no-op. */
6079 return;
6080 default:
6081 goto illegal_op;
6083 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
6084 /* srs */
6085 int32_t offset;
6086 if (IS_USER(s))
6087 goto illegal_op;
6088 ARCH(6);
6089 op1 = (insn & 0x1f);
6090 if (op1 == (env->uncached_cpsr & CPSR_M)) {
6091 addr = load_reg(s, 13);
6092 } else {
6093 addr = new_tmp();
6094 tmp = tcg_const_i32(op1);
6095 gen_helper_get_r13_banked(addr, cpu_env, tmp);
6096 tcg_temp_free_i32(tmp);
6098 i = (insn >> 23) & 3;
6099 switch (i) {
6100 case 0: offset = -4; break; /* DA */
6101 case 1: offset = 0; break; /* IA */
6102 case 2: offset = -8; break; /* DB */
6103 case 3: offset = 4; break; /* IB */
6104 default: abort();
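/* The offset leaves addr pointing at the lower of the two words:
   r14 and the SPSR are stored at addr and addr + 4, so e.g. DB
   ("decrement before") begins 8 bytes below the base.  */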
6106 if (offset)
6107 tcg_gen_addi_i32(addr, addr, offset);
6108 tmp = load_reg(s, 14);
6109 gen_st32(tmp, addr, 0);
6110 tmp = load_cpu_field(spsr);
6111 tcg_gen_addi_i32(addr, addr, 4);
6112 gen_st32(tmp, addr, 0);
6113 if (insn & (1 << 21)) {
6114 /* Base writeback. */
6115 switch (i) {
6116 case 0: offset = -8; break;
6117 case 1: offset = 4; break;
6118 case 2: offset = -4; break;
6119 case 3: offset = 0; break;
6120 default: abort();
6122 if (offset)
6123 tcg_gen_addi_i32(addr, addr, offset);
6124 if (op1 == (env->uncached_cpsr & CPSR_M)) {
6125 store_reg(s, 13, addr);
6126 } else {
6127 tmp = tcg_const_i32(op1);
6128 gen_helper_set_r13_banked(cpu_env, tmp, addr);
6129 tcg_temp_free_i32(tmp);
6130 dead_tmp(addr);
6132 } else {
6133 dead_tmp(addr);
6135 return;
6136 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
6137 /* rfe */
6138 int32_t offset;
6139 if (IS_USER(s))
6140 goto illegal_op;
6141 ARCH(6);
6142 rn = (insn >> 16) & 0xf;
6143 addr = load_reg(s, rn);
6144 i = (insn >> 23) & 3;
6145 switch (i) {
6146 case 0: offset = -4; break; /* DA */
6147 case 1: offset = 0; break; /* IA */
6148 case 2: offset = -8; break; /* DB */
6149 case 3: offset = 4; break; /* IB */
6150 default: abort();
6152 if (offset)
6153 tcg_gen_addi_i32(addr, addr, offset);
6154 /* Load PC into tmp and CPSR into tmp2. */
6155 tmp = gen_ld32(addr, 0);
6156 tcg_gen_addi_i32(addr, addr, 4);
6157 tmp2 = gen_ld32(addr, 0);
6158 if (insn & (1 << 21)) {
6159 /* Base writeback. */
6160 switch (i) {
6161 case 0: offset = -8; break;
6162 case 1: offset = 4; break;
6163 case 2: offset = -4; break;
6164 case 3: offset = 0; break;
6165 default: abort();
6167 if (offset)
6168 tcg_gen_addi_i32(addr, addr, offset);
6169 store_reg(s, rn, addr);
6170 } else {
6171 dead_tmp(addr);
6173 gen_rfe(s, tmp, tmp2);
6174 return;
6175 } else if ((insn & 0x0e000000) == 0x0a000000) {
6176 /* branch link and change to thumb (blx <offset>) */
6177 int32_t offset;
6179 val = (uint32_t)s->pc;
6180 tmp = new_tmp();
6181 tcg_gen_movi_i32(tmp, val);
6182 store_reg(s, 14, tmp);
6183 /* Sign-extend the 24-bit offset */
6184 offset = (((int32_t)insn) << 8) >> 8;
6185 /* offset * 4 + bit24 * 2 + (thumb bit) */
6186 val += (offset << 2) | ((insn >> 23) & 2) | 1;
6187 /* pipeline offset */
6188 val += 4;
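/* E.g. a BLX <imm> at address A with a zero offset field and bit 24
   set targets A + 8 + 2; bit 0 is set so execution continues in
   Thumb state.  */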
6189 gen_bx_im(s, val);
6190 return;
6191 } else if ((insn & 0x0e000f00) == 0x0c000100) {
6192 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6193 /* iWMMXt register transfer. */
6194 if (env->cp15.c15_cpar & (1 << 1))
6195 if (!disas_iwmmxt_insn(env, s, insn))
6196 return;
6198 } else if ((insn & 0x0fe00000) == 0x0c400000) {
6199 /* Coprocessor double register transfer. */
6200 } else if ((insn & 0x0f000010) == 0x0e000010) {
6201 /* Additional coprocessor register transfer. */
6202 } else if ((insn & 0x0ff10020) == 0x01000000) {
6203 uint32_t mask;
6204 uint32_t val;
6205 /* cps (privileged) */
6206 if (IS_USER(s))
6207 return;
6208 mask = val = 0;
6209 if (insn & (1 << 19)) {
6210 if (insn & (1 << 8))
6211 mask |= CPSR_A;
6212 if (insn & (1 << 7))
6213 mask |= CPSR_I;
6214 if (insn & (1 << 6))
6215 mask |= CPSR_F;
6216 if (insn & (1 << 18))
6217 val |= mask;
6219 if (insn & (1 << 17)) {
6220 mask |= CPSR_M;
6221 val |= (insn & 0x1f);
6223 if (mask) {
6224 gen_set_psr_im(s, mask, 0, val);
6226 return;
6228 goto illegal_op;
6230 if (cond != 0xe) {
6231 /* if not always execute, we generate a conditional jump to
6232 next instruction */
6233 s->condlabel = gen_new_label();
6234 gen_test_cc(cond ^ 1, s->condlabel);
6235 s->condjmp = 1;
6237 if ((insn & 0x0f900000) == 0x03000000) {
6238 if ((insn & (1 << 21)) == 0) {
6239 ARCH(6T2);
6240 rd = (insn >> 12) & 0xf;
6241 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
6242 if ((insn & (1 << 22)) == 0) {
6243 /* MOVW */
6244 tmp = new_tmp();
6245 tcg_gen_movi_i32(tmp, val);
6246 } else {
6247 /* MOVT */
6248 tmp = load_reg(s, rd);
6249 tcg_gen_ext16u_i32(tmp, tmp);
6250 tcg_gen_ori_i32(tmp, tmp, val << 16);
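/* MOVT keeps the low halfword, so a MOVW/MOVT pair builds a full
   32-bit constant: movw r0,#0x5678; movt r0,#0x1234 leaves
   r0 == 0x12345678.  */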
6252 store_reg(s, rd, tmp);
6253 } else {
6254 if (((insn >> 12) & 0xf) != 0xf)
6255 goto illegal_op;
6256 if (((insn >> 16) & 0xf) == 0) {
6257 gen_nop_hint(s, insn & 0xff);
6258 } else {
6259 /* CPSR = immediate */
6260 val = insn & 0xff;
6261 shift = ((insn >> 8) & 0xf) * 2;
6262 if (shift)
6263 val = (val >> shift) | (val << (32 - shift));
6264 i = ((insn & (1 << 22)) != 0);
6265 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
6266 goto illegal_op;
6269 } else if ((insn & 0x0f900000) == 0x01000000
6270 && (insn & 0x00000090) != 0x00000090) {
6271 /* miscellaneous instructions */
6272 op1 = (insn >> 21) & 3;
6273 sh = (insn >> 4) & 0xf;
6274 rm = insn & 0xf;
6275 switch (sh) {
6276 case 0x0: /* move program status register */
6277 if (op1 & 1) {
6278 /* PSR = reg */
6279 tmp = load_reg(s, rm);
6280 i = ((op1 & 2) != 0);
6281 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
6282 goto illegal_op;
6283 } else {
6284 /* reg = PSR */
6285 rd = (insn >> 12) & 0xf;
6286 if (op1 & 2) {
6287 if (IS_USER(s))
6288 goto illegal_op;
6289 tmp = load_cpu_field(spsr);
6290 } else {
6291 tmp = new_tmp();
6292 gen_helper_cpsr_read(tmp);
6294 store_reg(s, rd, tmp);
6296 break;
6297 case 0x1:
6298 if (op1 == 1) {
6299 /* branch/exchange thumb (bx). */
6300 tmp = load_reg(s, rm);
6301 gen_bx(s, tmp);
6302 } else if (op1 == 3) {
6303 /* clz */
6304 rd = (insn >> 12) & 0xf;
6305 tmp = load_reg(s, rm);
6306 gen_helper_clz(tmp, tmp);
6307 store_reg(s, rd, tmp);
6308 } else {
6309 goto illegal_op;
6311 break;
6312 case 0x2:
6313 if (op1 == 1) {
6314 ARCH(5J); /* bxj */
6315 /* Trivial implementation equivalent to bx. */
6316 tmp = load_reg(s, rm);
6317 gen_bx(s, tmp);
6318 } else {
6319 goto illegal_op;
6321 break;
6322 case 0x3:
6323 if (op1 != 1)
6324 goto illegal_op;
6326 /* branch link/exchange thumb (blx) */
6327 tmp = load_reg(s, rm);
6328 tmp2 = new_tmp();
6329 tcg_gen_movi_i32(tmp2, s->pc);
6330 store_reg(s, 14, tmp2);
6331 gen_bx(s, tmp);
6332 break;
6333 case 0x5: /* saturating add/subtract */
6334 rd = (insn >> 12) & 0xf;
6335 rn = (insn >> 16) & 0xf;
6336 tmp = load_reg(s, rm);
6337 tmp2 = load_reg(s, rn);
6338 if (op1 & 2)
6339 gen_helper_double_saturate(tmp2, tmp2);
6340 if (op1 & 1)
6341 gen_helper_sub_saturate(tmp, tmp, tmp2);
6342 else
6343 gen_helper_add_saturate(tmp, tmp, tmp2);
6344 dead_tmp(tmp2);
6345 store_reg(s, rd, tmp);
6346 break;
6347 case 7: /* bkpt */
6348 gen_set_condexec(s);
6349 gen_set_pc_im(s->pc - 4);
6350 gen_exception(EXCP_BKPT);
6351 s->is_jmp = DISAS_JUMP;
6352 break;
6353 case 0x8: /* signed multiply */
6354 case 0xa:
6355 case 0xc:
6356 case 0xe:
6357 rs = (insn >> 8) & 0xf;
6358 rn = (insn >> 12) & 0xf;
6359 rd = (insn >> 16) & 0xf;
6360 if (op1 == 1) {
6361 /* (32 * 16) >> 16 */
6362 tmp = load_reg(s, rm);
6363 tmp2 = load_reg(s, rs);
6364 if (sh & 4)
6365 tcg_gen_sari_i32(tmp2, tmp2, 16);
6366 else
6367 gen_sxth(tmp2);
6368 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6369 tcg_gen_shri_i64(tmp64, tmp64, 16);
6370 tmp = new_tmp();
6371 tcg_gen_trunc_i64_i32(tmp, tmp64);
6372 tcg_temp_free_i64(tmp64);
6373 if ((sh & 2) == 0) {
6374 tmp2 = load_reg(s, rn);
6375 gen_helper_add_setq(tmp, tmp, tmp2);
6376 dead_tmp(tmp2);
6378 store_reg(s, rd, tmp);
6379 } else {
6380 /* 16 * 16 */
6381 tmp = load_reg(s, rm);
6382 tmp2 = load_reg(s, rs);
6383 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
6384 dead_tmp(tmp2);
6385 if (op1 == 2) {
6386 tmp64 = tcg_temp_new_i64();
6387 tcg_gen_ext_i32_i64(tmp64, tmp);
6388 dead_tmp(tmp);
6389 gen_addq(s, tmp64, rn, rd);
6390 gen_storeq_reg(s, rn, rd, tmp64);
6391 tcg_temp_free_i64(tmp64);
6392 } else {
6393 if (op1 == 0) {
6394 tmp2 = load_reg(s, rn);
6395 gen_helper_add_setq(tmp, tmp, tmp2);
6396 dead_tmp(tmp2);
6398 store_reg(s, rd, tmp);
6401 break;
6402 default:
6403 goto illegal_op;
6405 } else if (((insn & 0x0e000000) == 0 &&
6406 (insn & 0x00000090) != 0x90) ||
6407 ((insn & 0x0e000000) == (1 << 25))) {
6408 int set_cc, logic_cc, shiftop;
6410 op1 = (insn >> 21) & 0xf;
6411 set_cc = (insn >> 20) & 1;
6412 logic_cc = table_logic_cc[op1] & set_cc;
6414 /* data processing instruction */
6415 if (insn & (1 << 25)) {
6416 /* immediate operand */
6417 val = insn & 0xff;
6418 shift = ((insn >> 8) & 0xf) * 2;
6419 if (shift) {
6420 val = (val >> shift) | (val << (32 - shift));
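/* E.g. imm8 == 0xff with rotate field 15 gives shift == 30, so
   val = ror(0xff, 30) = 0x000003fc.  */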
6422 tmp2 = new_tmp();
6423 tcg_gen_movi_i32(tmp2, val);
6424 if (logic_cc && shift) {
6425 gen_set_CF_bit31(tmp2);
6427 } else {
6428 /* register */
6429 rm = (insn) & 0xf;
6430 tmp2 = load_reg(s, rm);
6431 shiftop = (insn >> 5) & 3;
6432 if (!(insn & (1 << 4))) {
6433 shift = (insn >> 7) & 0x1f;
6434 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
6435 } else {
6436 rs = (insn >> 8) & 0xf;
6437 tmp = load_reg(s, rs);
6438 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
6441 if (op1 != 0x0f && op1 != 0x0d) {
6442 rn = (insn >> 16) & 0xf;
6443 tmp = load_reg(s, rn);
6444 } else {
6445 TCGV_UNUSED(tmp);
6447 rd = (insn >> 12) & 0xf;
6448 switch(op1) {
6449 case 0x00:
6450 tcg_gen_and_i32(tmp, tmp, tmp2);
6451 if (logic_cc) {
6452 gen_logic_CC(tmp);
6454 store_reg_bx(env, s, rd, tmp);
6455 break;
6456 case 0x01:
6457 tcg_gen_xor_i32(tmp, tmp, tmp2);
6458 if (logic_cc) {
6459 gen_logic_CC(tmp);
6461 store_reg_bx(env, s, rd, tmp);
6462 break;
6463 case 0x02:
6464 if (set_cc && rd == 15) {
6465 /* SUBS r15, ... is used for exception return. */
6466 if (IS_USER(s)) {
6467 goto illegal_op;
6469 gen_helper_sub_cc(tmp, tmp, tmp2);
6470 gen_exception_return(s, tmp);
6471 } else {
6472 if (set_cc) {
6473 gen_helper_sub_cc(tmp, tmp, tmp2);
6474 } else {
6475 tcg_gen_sub_i32(tmp, tmp, tmp2);
6477 store_reg_bx(env, s, rd, tmp);
6479 break;
6480 case 0x03:
6481 if (set_cc) {
6482 gen_helper_sub_cc(tmp, tmp2, tmp);
6483 } else {
6484 tcg_gen_sub_i32(tmp, tmp2, tmp);
6486 store_reg_bx(env, s, rd, tmp);
6487 break;
6488 case 0x04:
6489 if (set_cc) {
6490 gen_helper_add_cc(tmp, tmp, tmp2);
6491 } else {
6492 tcg_gen_add_i32(tmp, tmp, tmp2);
6494 store_reg_bx(env, s, rd, tmp);
6495 break;
6496 case 0x05:
6497 if (set_cc) {
6498 gen_helper_adc_cc(tmp, tmp, tmp2);
6499 } else {
6500 gen_add_carry(tmp, tmp, tmp2);
6502 store_reg_bx(env, s, rd, tmp);
6503 break;
6504 case 0x06:
6505 if (set_cc) {
6506 gen_helper_sbc_cc(tmp, tmp, tmp2);
6507 } else {
6508 gen_sub_carry(tmp, tmp, tmp2);
6510 store_reg_bx(env, s, rd, tmp);
6511 break;
6512 case 0x07:
6513 if (set_cc) {
6514 gen_helper_sbc_cc(tmp, tmp2, tmp);
6515 } else {
6516 gen_sub_carry(tmp, tmp2, tmp);
6518 store_reg_bx(env, s, rd, tmp);
6519 break;
6520 case 0x08:
6521 if (set_cc) {
6522 tcg_gen_and_i32(tmp, tmp, tmp2);
6523 gen_logic_CC(tmp);
6525 dead_tmp(tmp);
6526 break;
6527 case 0x09:
6528 if (set_cc) {
6529 tcg_gen_xor_i32(tmp, tmp, tmp2);
6530 gen_logic_CC(tmp);
6532 dead_tmp(tmp);
6533 break;
6534 case 0x0a:
6535 if (set_cc) {
6536 gen_helper_sub_cc(tmp, tmp, tmp2);
6538 dead_tmp(tmp);
6539 break;
6540 case 0x0b:
6541 if (set_cc) {
6542 gen_helper_add_cc(tmp, tmp, tmp2);
6544 dead_tmp(tmp);
6545 break;
6546 case 0x0c:
6547 tcg_gen_or_i32(tmp, tmp, tmp2);
6548 if (logic_cc) {
6549 gen_logic_CC(tmp);
6551 store_reg_bx(env, s, rd, tmp);
6552 break;
6553 case 0x0d:
6554 if (logic_cc && rd == 15) {
6555 /* MOVS r15, ... is used for exception return. */
6556 if (IS_USER(s)) {
6557 goto illegal_op;
6559 gen_exception_return(s, tmp2);
6560 } else {
6561 if (logic_cc) {
6562 gen_logic_CC(tmp2);
6564 store_reg_bx(env, s, rd, tmp2);
6566 break;
6567 case 0x0e:
6568 tcg_gen_andc_i32(tmp, tmp, tmp2);
6569 if (logic_cc) {
6570 gen_logic_CC(tmp);
6572 store_reg_bx(env, s, rd, tmp);
6573 break;
6574 default:
6575 case 0x0f:
6576 tcg_gen_not_i32(tmp2, tmp2);
6577 if (logic_cc) {
6578 gen_logic_CC(tmp2);
6580 store_reg_bx(env, s, rd, tmp2);
6581 break;
6583 if (op1 != 0x0f && op1 != 0x0d) {
6584 dead_tmp(tmp2);
6586 } else {
6587 /* other instructions */
6588 op1 = (insn >> 24) & 0xf;
6589 switch(op1) {
6590 case 0x0:
6591 case 0x1:
6592 /* multiplies, extra load/stores */
6593 sh = (insn >> 5) & 3;
6594 if (sh == 0) {
6595 if (op1 == 0x0) {
6596 rd = (insn >> 16) & 0xf;
6597 rn = (insn >> 12) & 0xf;
6598 rs = (insn >> 8) & 0xf;
6599 rm = (insn) & 0xf;
6600 op1 = (insn >> 20) & 0xf;
6601 switch (op1) {
6602 case 0: case 1: case 2: case 3: case 6:
6603 /* 32 bit mul */
6604 tmp = load_reg(s, rs);
6605 tmp2 = load_reg(s, rm);
6606 tcg_gen_mul_i32(tmp, tmp, tmp2);
6607 dead_tmp(tmp2);
6608 if (insn & (1 << 22)) {
6609 /* Subtract (mls) */
6610 ARCH(6T2);
6611 tmp2 = load_reg(s, rn);
6612 tcg_gen_sub_i32(tmp, tmp2, tmp);
6613 dead_tmp(tmp2);
6614 } else if (insn & (1 << 21)) {
6615 /* Add */
6616 tmp2 = load_reg(s, rn);
6617 tcg_gen_add_i32(tmp, tmp, tmp2);
6618 dead_tmp(tmp2);
6620 if (insn & (1 << 20))
6621 gen_logic_CC(tmp);
6622 store_reg(s, rd, tmp);
6623 break;
6624 default:
6625 /* 64 bit mul */
6626 tmp = load_reg(s, rs);
6627 tmp2 = load_reg(s, rm);
6628 if (insn & (1 << 22))
6629 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6630 else
6631 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
6632 if (insn & (1 << 21)) /* mult accumulate */
6633 gen_addq(s, tmp64, rn, rd);
6634 if (!(insn & (1 << 23))) { /* double accumulate */
6635 ARCH(6);
6636 gen_addq_lo(s, tmp64, rn);
6637 gen_addq_lo(s, tmp64, rd);
6639 if (insn & (1 << 20))
6640 gen_logicq_cc(tmp64);
6641 gen_storeq_reg(s, rn, rd, tmp64);
6642 tcg_temp_free_i64(tmp64);
6643 break;
6645 } else {
6646 rn = (insn >> 16) & 0xf;
6647 rd = (insn >> 12) & 0xf;
6648 if (insn & (1 << 23)) {
6649 /* load/store exclusive */
6650 op1 = (insn >> 21) & 0x3;
6651 if (op1)
6652 ARCH(6K);
6653 else
6654 ARCH(6);
6655 addr = tcg_temp_local_new_i32();
6656 load_reg_var(s, addr, rn);
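/* The final argument to gen_{load,store}_exclusive below is the log2 of the
   access size: 0 = byte, 1 = halfword, 2 = word, 3 = doubleword (which
   transfers a second register, rd + 1 / rm + 1). */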
6657 if (insn & (1 << 20)) {
6658 switch (op1) {
6659 case 0: /* ldrex */
6660 gen_load_exclusive(s, rd, 15, addr, 2);
6661 break;
6662 case 1: /* ldrexd */
6663 gen_load_exclusive(s, rd, rd + 1, addr, 3);
6664 break;
6665 case 2: /* ldrexb */
6666 gen_load_exclusive(s, rd, 15, addr, 0);
6667 break;
6668 case 3: /* ldrexh */
6669 gen_load_exclusive(s, rd, 15, addr, 1);
6670 break;
6671 default:
6672 abort();
6674 } else {
6675 rm = insn & 0xf;
6676 switch (op1) {
6677 case 0: /* strex */
6678 gen_store_exclusive(s, rd, rm, 15, addr, 2);
6679 break;
6680 case 1: /* strexd */
6681 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
6682 break;
6683 case 2: /* strexb */
6684 gen_store_exclusive(s, rd, rm, 15, addr, 0);
6685 break;
6686 case 3: /* strexh */
6687 gen_store_exclusive(s, rd, rm, 15, addr, 1);
6688 break;
6689 default:
6690 abort();
6693 tcg_temp_free(addr);
6694 } else {
6695 /* SWP instruction */
6696 rm = (insn) & 0xf;
6698 /* ??? This is not really atomic. However we know
6699 we never have multiple CPUs running in parallel,
6700 so it is good enough. */
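/* Note: swp/swpb were deprecated from ARMv6 onwards in favour of the
   ldrex/strex family handled above. */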
6701 addr = load_reg(s, rn);
6702 tmp = load_reg(s, rm);
6703 if (insn & (1 << 22)) {
6704 tmp2 = gen_ld8u(addr, IS_USER(s));
6705 gen_st8(tmp, addr, IS_USER(s));
6706 } else {
6707 tmp2 = gen_ld32(addr, IS_USER(s));
6708 gen_st32(tmp, addr, IS_USER(s));
6710 dead_tmp(addr);
6711 store_reg(s, rd, tmp2);
6714 } else {
6715 int address_offset;
6716 int load;
6717 /* Misc load/store */
6718 rn = (insn >> 16) & 0xf;
6719 rd = (insn >> 12) & 0xf;
6720 addr = load_reg(s, rn);
6721 if (insn & (1 << 24))
6722 gen_add_datah_offset(s, insn, 0, addr);
6723 address_offset = 0;
6724 if (insn & (1 << 20)) {
6725 /* load */
6726 switch(sh) {
6727 case 1:
6728 tmp = gen_ld16u(addr, IS_USER(s));
6729 break;
6730 case 2:
6731 tmp = gen_ld8s(addr, IS_USER(s));
6732 break;
6733 default:
6734 case 3:
6735 tmp = gen_ld16s(addr, IS_USER(s));
6736 break;
6738 load = 1;
6739 } else if (sh & 2) {
6740 /* doubleword */
6741 if (sh & 1) {
6742 /* store */
6743 tmp = load_reg(s, rd);
6744 gen_st32(tmp, addr, IS_USER(s));
6745 tcg_gen_addi_i32(addr, addr, 4);
6746 tmp = load_reg(s, rd + 1);
6747 gen_st32(tmp, addr, IS_USER(s));
6748 load = 0;
6749 } else {
6750 /* load */
6751 tmp = gen_ld32(addr, IS_USER(s));
6752 store_reg(s, rd, tmp);
6753 tcg_gen_addi_i32(addr, addr, 4);
6754 tmp = gen_ld32(addr, IS_USER(s));
6755 rd++;
6756 load = 1;
6758 address_offset = -4;
6759 } else {
6760 /* store */
6761 tmp = load_reg(s, rd);
6762 gen_st16(tmp, addr, IS_USER(s));
6763 load = 0;
6765 /* Perform base writeback before the loaded value to
6766 ensure correct behavior with overlapping index registers.
6767 ldrd with base writeback is undefined if the
6768 destination and index registers overlap. */
6769 if (!(insn & (1 << 24))) {
6770 gen_add_datah_offset(s, insn, address_offset, addr);
6771 store_reg(s, rn, addr);
6772 } else if (insn & (1 << 21)) {
6773 if (address_offset)
6774 tcg_gen_addi_i32(addr, addr, address_offset);
6775 store_reg(s, rn, addr);
6776 } else {
6777 dead_tmp(addr);
6779 if (load) {
6780 /* Complete the load. */
6781 store_reg(s, rd, tmp);
6784 break;
6785 case 0x4:
6786 case 0x5:
6787 goto do_ldst;
6788 case 0x6:
6789 case 0x7:
6790 if (insn & (1 << 4)) {
6791 ARCH(6);
6792 /* Armv6 Media instructions. */
6793 rm = insn & 0xf;
6794 rn = (insn >> 16) & 0xf;
6795 rd = (insn >> 12) & 0xf;
6796 rs = (insn >> 8) & 0xf;
6797 switch ((insn >> 23) & 3) {
6798 case 0: /* Parallel add/subtract. */
6799 op1 = (insn >> 20) & 7;
6800 tmp = load_reg(s, rn);
6801 tmp2 = load_reg(s, rm);
6802 sh = (insn >> 5) & 7;
6803 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
6804 goto illegal_op;
6805 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
6806 dead_tmp(tmp2);
6807 store_reg(s, rd, tmp);
6808 break;
6809 case 1:
6810 if ((insn & 0x00700020) == 0) {
6811 /* Halfword pack. */
6812 tmp = load_reg(s, rn);
6813 tmp2 = load_reg(s, rm);
6814 shift = (insn >> 7) & 0x1f;
6815 if (insn & (1 << 6)) {
6816 /* pkhtb */
6817 if (shift == 0)
6818 shift = 31;
6819 tcg_gen_sari_i32(tmp2, tmp2, shift);
6820 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
6821 tcg_gen_ext16u_i32(tmp2, tmp2);
6822 } else {
6823 /* pkhbt */
6824 if (shift)
6825 tcg_gen_shli_i32(tmp2, tmp2, shift);
6826 tcg_gen_ext16u_i32(tmp, tmp);
6827 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
6829 tcg_gen_or_i32(tmp, tmp, tmp2);
6830 dead_tmp(tmp2);
6831 store_reg(s, rd, tmp);
6832 } else if ((insn & 0x00200020) == 0x00200000) {
6833 /* [us]sat */
6834 tmp = load_reg(s, rm);
6835 shift = (insn >> 7) & 0x1f;
6836 if (insn & (1 << 6)) {
6837 if (shift == 0)
6838 shift = 31;
6839 tcg_gen_sari_i32(tmp, tmp, shift);
6840 } else {
6841 tcg_gen_shli_i32(tmp, tmp, shift);
6843 sh = (insn >> 16) & 0x1f;
6844 if (sh != 0) {
6845 tmp2 = tcg_const_i32(sh);
6846 if (insn & (1 << 22))
6847 gen_helper_usat(tmp, tmp, tmp2);
6848 else
6849 gen_helper_ssat(tmp, tmp, tmp2);
6850 tcg_temp_free_i32(tmp2);
6852 store_reg(s, rd, tmp);
6853 } else if ((insn & 0x00300fe0) == 0x00200f20) {
6854 /* [us]sat16 */
6855 tmp = load_reg(s, rm);
6856 sh = (insn >> 16) & 0x1f;
6857 if (sh != 0) {
6858 tmp2 = tcg_const_i32(sh);
6859 if (insn & (1 << 22))
6860 gen_helper_usat16(tmp, tmp, tmp2);
6861 else
6862 gen_helper_ssat16(tmp, tmp, tmp2);
6863 tcg_temp_free_i32(tmp2);
6865 store_reg(s, rd, tmp);
6866 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
6867 /* Select bytes. */
6868 tmp = load_reg(s, rn);
6869 tmp2 = load_reg(s, rm);
6870 tmp3 = new_tmp();
6871 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
6872 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6873 dead_tmp(tmp3);
6874 dead_tmp(tmp2);
6875 store_reg(s, rd, tmp);
6876 } else if ((insn & 0x000003e0) == 0x00000060) {
6877 tmp = load_reg(s, rm);
6878 shift = (insn >> 10) & 3;
6879 /* ??? In many cases it's not necessary to do a
6880 rotate; a shift is sufficient. */
6881 if (shift != 0)
6882 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
6883 op1 = (insn >> 20) & 7;
6884 switch (op1) {
6885 case 0: gen_sxtb16(tmp); break;
6886 case 2: gen_sxtb(tmp); break;
6887 case 3: gen_sxth(tmp); break;
6888 case 4: gen_uxtb16(tmp); break;
6889 case 6: gen_uxtb(tmp); break;
6890 case 7: gen_uxth(tmp); break;
6891 default: goto illegal_op;
6893 if (rn != 15) {
6894 tmp2 = load_reg(s, rn);
6895 if ((op1 & 3) == 0) {
6896 gen_add16(tmp, tmp2);
6897 } else {
6898 tcg_gen_add_i32(tmp, tmp, tmp2);
6899 dead_tmp(tmp2);
6902 store_reg(s, rd, tmp);
6903 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
6904 /* rev */
6905 tmp = load_reg(s, rm);
6906 if (insn & (1 << 22)) {
6907 if (insn & (1 << 7)) {
6908 gen_revsh(tmp);
6909 } else {
6910 ARCH(6T2);
6911 gen_helper_rbit(tmp, tmp);
6913 } else {
6914 if (insn & (1 << 7))
6915 gen_rev16(tmp);
6916 else
6917 tcg_gen_bswap32_i32(tmp, tmp);
6919 store_reg(s, rd, tmp);
6920 } else {
6921 goto illegal_op;
6923 break;
6924 case 2: /* Multiplies (Type 3). */
6925 tmp = load_reg(s, rm);
6926 tmp2 = load_reg(s, rs);
6927 if (insn & (1 << 20)) {
6928 /* Signed multiply most significant [accumulate]. */
6929 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6930 if (insn & (1 << 5))
6931 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
6932 tcg_gen_shri_i64(tmp64, tmp64, 32);
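/* Bit 5 selects the rounding (R) variant: adding 0x80000000 before taking
   the high word rounds the 64-bit product to nearest instead of truncating. */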
6933 tmp = new_tmp();
6934 tcg_gen_trunc_i64_i32(tmp, tmp64);
6935 tcg_temp_free_i64(tmp64);
6936 if (rd != 15) {
6937 tmp2 = load_reg(s, rd);
6938 if (insn & (1 << 6)) {
6939 tcg_gen_sub_i32(tmp, tmp, tmp2);
6940 } else {
6941 tcg_gen_add_i32(tmp, tmp, tmp2);
6943 dead_tmp(tmp2);
6945 store_reg(s, rn, tmp);
6946 } else {
6947 if (insn & (1 << 5))
6948 gen_swap_half(tmp2);
6949 gen_smul_dual(tmp, tmp2);
6950 /* This addition cannot overflow. */
6951 if (insn & (1 << 6)) {
6952 tcg_gen_sub_i32(tmp, tmp, tmp2);
6953 } else {
6954 tcg_gen_add_i32(tmp, tmp, tmp2);
6956 dead_tmp(tmp2);
6957 if (insn & (1 << 22)) {
6958 /* smlald, smlsld */
6959 tmp64 = tcg_temp_new_i64();
6960 tcg_gen_ext_i32_i64(tmp64, tmp);
6961 dead_tmp(tmp);
6962 gen_addq(s, tmp64, rd, rn);
6963 gen_storeq_reg(s, rd, rn, tmp64);
6964 tcg_temp_free_i64(tmp64);
6965 } else {
6966 /* smuad, smusd, smlad, smlsd */
6967 if (rd != 15)
6969 tmp2 = load_reg(s, rd);
6970 gen_helper_add_setq(tmp, tmp, tmp2);
6971 dead_tmp(tmp2);
6973 store_reg(s, rn, tmp);
6976 break;
6977 case 3:
6978 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
6979 switch (op1) {
6980 case 0: /* Unsigned sum of absolute differences. */
6981 ARCH(6);
6982 tmp = load_reg(s, rm);
6983 tmp2 = load_reg(s, rs);
6984 gen_helper_usad8(tmp, tmp, tmp2);
6985 dead_tmp(tmp2);
6986 if (rd != 15) {
6987 tmp2 = load_reg(s, rd);
6988 tcg_gen_add_i32(tmp, tmp, tmp2);
6989 dead_tmp(tmp2);
6991 store_reg(s, rn, tmp);
6992 break;
6993 case 0x20: case 0x24: case 0x28: case 0x2c:
6994 /* Bitfield insert/clear. */
6995 ARCH(6T2);
6996 shift = (insn >> 7) & 0x1f;
6997 i = (insn >> 16) & 0x1f;
6998 i = i + 1 - shift;
6999 if (rm == 15) {
7000 tmp = new_tmp();
7001 tcg_gen_movi_i32(tmp, 0);
7002 } else {
7003 tmp = load_reg(s, rm);
7005 if (i != 32) {
7006 tmp2 = load_reg(s, rd);
7007 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
7008 dead_tmp(tmp2);
7010 store_reg(s, rd, tmp);
7011 break;
7012 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7013 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
7014 ARCH(6T2);
7015 tmp = load_reg(s, rm);
7016 shift = (insn >> 7) & 0x1f;
7017 i = ((insn >> 16) & 0x1f) + 1;
7018 if (shift + i > 32)
7019 goto illegal_op;
7020 if (i < 32) {
7021 if (op1 & 0x20) {
7022 gen_ubfx(tmp, shift, (1u << i) - 1);
7023 } else {
7024 gen_sbfx(tmp, shift, i);
7027 store_reg(s, rd, tmp);
7028 break;
7029 default:
7030 goto illegal_op;
7032 break;
7034 break;
7036 do_ldst:
7037 /* Check for undefined extension instructions
7038 * per the ARM Bible, i.e.:
7039 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7040 */
7041 sh = (0xf << 20) | (0xf << 4);
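/* sh masks bits [23:20] and [7:4]; together with op1 == 0x7 (bits [27:24])
   this matches the undefined pattern quoted above. */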
7042 if (op1 == 0x7 && ((insn & sh) == sh))
7044 goto illegal_op;
7046 /* load/store byte/word */
7047 rn = (insn >> 16) & 0xf;
7048 rd = (insn >> 12) & 0xf;
7049 tmp2 = load_reg(s, rn);
7050 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
7051 if (insn & (1 << 24))
7052 gen_add_data_offset(s, insn, tmp2);
7053 if (insn & (1 << 20)) {
7054 /* load */
7055 if (insn & (1 << 22)) {
7056 tmp = gen_ld8u(tmp2, i);
7057 } else {
7058 tmp = gen_ld32(tmp2, i);
7060 } else {
7061 /* store */
7062 tmp = load_reg(s, rd);
7063 if (insn & (1 << 22))
7064 gen_st8(tmp, tmp2, i);
7065 else
7066 gen_st32(tmp, tmp2, i);
7068 if (!(insn & (1 << 24))) {
7069 gen_add_data_offset(s, insn, tmp2);
7070 store_reg(s, rn, tmp2);
7071 } else if (insn & (1 << 21)) {
7072 store_reg(s, rn, tmp2);
7073 } else {
7074 dead_tmp(tmp2);
7076 if (insn & (1 << 20)) {
7077 /* Complete the load. */
7078 if (rd == 15)
7079 gen_bx(s, tmp);
7080 else
7081 store_reg(s, rd, tmp);
7083 break;
7084 case 0x08:
7085 case 0x09:
7087 int j, n, user, loaded_base;
7088 TCGv loaded_var;
7089 /* load/store multiple words */
7090 /* XXX: store correct base if write back */
7091 user = 0;
7092 if (insn & (1 << 22)) {
7093 if (IS_USER(s))
7094 goto illegal_op; /* only usable in supervisor mode */
7096 if ((insn & (1 << 15)) == 0)
7097 user = 1;
7099 rn = (insn >> 16) & 0xf;
7100 addr = load_reg(s, rn);
7102 /* compute total size */
7103 loaded_base = 0;
7104 TCGV_UNUSED(loaded_var);
7105 n = 0;
7106 for(i=0;i<16;i++) {
7107 if (insn & (1 << i))
7108 n++;
7110 /* XXX: test invalid n == 0 case ? */
7111 if (insn & (1 << 23)) {
7112 if (insn & (1 << 24)) {
7113 /* pre increment */
7114 tcg_gen_addi_i32(addr, addr, 4);
7115 } else {
7116 /* post increment */
7118 } else {
7119 if (insn & (1 << 24)) {
7120 /* pre decrement */
7121 tcg_gen_addi_i32(addr, addr, -(n * 4));
7122 } else {
7123 /* post decrement */
7124 if (n != 1)
7125 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
7128 j = 0;
7129 for(i=0;i<16;i++) {
7130 if (insn & (1 << i)) {
7131 if (insn & (1 << 20)) {
7132 /* load */
7133 tmp = gen_ld32(addr, IS_USER(s));
7134 if (i == 15) {
7135 gen_bx(s, tmp);
7136 } else if (user) {
7137 tmp2 = tcg_const_i32(i);
7138 gen_helper_set_user_reg(tmp2, tmp);
7139 tcg_temp_free_i32(tmp2);
7140 dead_tmp(tmp);
7141 } else if (i == rn) {
7142 loaded_var = tmp;
7143 loaded_base = 1;
7144 } else {
7145 store_reg(s, i, tmp);
7147 } else {
7148 /* store */
7149 if (i == 15) {
7150 /* special case: r15 = PC + 8 */
7151 val = (long)s->pc + 4;
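/* s->pc already points 4 bytes past this instruction, so pc + 4 gives
   the architectural stored value of r15 (instruction address + 8). */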
7152 tmp = new_tmp();
7153 tcg_gen_movi_i32(tmp, val);
7154 } else if (user) {
7155 tmp = new_tmp();
7156 tmp2 = tcg_const_i32(i);
7157 gen_helper_get_user_reg(tmp, tmp2);
7158 tcg_temp_free_i32(tmp2);
7159 } else {
7160 tmp = load_reg(s, i);
7162 gen_st32(tmp, addr, IS_USER(s));
7164 j++;
7165 /* no need to add after the last transfer */
7166 if (j != n)
7167 tcg_gen_addi_i32(addr, addr, 4);
7170 if (insn & (1 << 21)) {
7171 /* write back */
7172 if (insn & (1 << 23)) {
7173 if (insn & (1 << 24)) {
7174 /* pre increment */
7175 } else {
7176 /* post increment */
7177 tcg_gen_addi_i32(addr, addr, 4);
7179 } else {
7180 if (insn & (1 << 24)) {
7181 /* pre decrement */
7182 if (n != 1)
7183 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
7184 } else {
7185 /* post decrement */
7186 tcg_gen_addi_i32(addr, addr, -(n * 4));
7189 store_reg(s, rn, addr);
7190 } else {
7191 dead_tmp(addr);
7193 if (loaded_base) {
7194 store_reg(s, rn, loaded_var);
7196 if ((insn & (1 << 22)) && !user) {
7197 /* Restore CPSR from SPSR. */
7198 tmp = load_cpu_field(spsr);
7199 gen_set_cpsr(tmp, 0xffffffff);
7200 dead_tmp(tmp);
7201 s->is_jmp = DISAS_UPDATE;
7204 break;
7205 case 0xa:
7206 case 0xb:
7208 int32_t offset;
7210 /* branch (and link) */
7211 val = (int32_t)s->pc;
7212 if (insn & (1 << 24)) {
7213 tmp = new_tmp();
7214 tcg_gen_movi_i32(tmp, val);
7215 store_reg(s, 14, tmp);
7217 offset = (((int32_t)insn << 8) >> 8);
7218 val += (offset << 2) + 4;
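/* insn[23:0] is a signed 24-bit word offset: the shift pair sign-extends
   it, << 2 scales it to bytes, and the extra +4 on top of s->pc (already
   instruction address + 4) yields the architectural base of PC + 8. */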
7219 gen_jmp(s, val);
7221 break;
7222 case 0xc:
7223 case 0xd:
7224 case 0xe:
7225 /* Coprocessor. */
7226 if (disas_coproc_insn(env, s, insn))
7227 goto illegal_op;
7228 break;
7229 case 0xf:
7230 /* swi */
7231 gen_set_pc_im(s->pc);
7232 s->is_jmp = DISAS_SWI;
7233 break;
7234 default:
7235 illegal_op:
7236 gen_set_condexec(s);
7237 gen_set_pc_im(s->pc - 4);
7238 gen_exception(EXCP_UDEF);
7239 s->is_jmp = DISAS_JUMP;
7240 break;
7245 /* Return true if this is a Thumb-2 logical op. */
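/* Ops 0-7 form the logical group (and, bic, orr, orn, eor and the tst/teq
   aliases); for these the carry flag comes from the shifter rather than
   from an arithmetic carry-out. */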
7246 static int
7247 thumb2_logic_op(int op)
7249 return (op < 8);
7252 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7253 then set condition code flags based on the result of the operation.
7254 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7255 to the high bit of T1.
7256 Returns zero if the opcode is valid. */
7258 static int
7259 gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
7261 int logic_cc;
7263 logic_cc = 0;
7264 switch (op) {
7265 case 0: /* and */
7266 tcg_gen_and_i32(t0, t0, t1);
7267 logic_cc = conds;
7268 break;
7269 case 1: /* bic */
7270 tcg_gen_andc_i32(t0, t0, t1);
7271 logic_cc = conds;
7272 break;
7273 case 2: /* orr */
7274 tcg_gen_or_i32(t0, t0, t1);
7275 logic_cc = conds;
7276 break;
7277 case 3: /* orn */
7278 tcg_gen_not_i32(t1, t1);
7279 tcg_gen_or_i32(t0, t0, t1);
7280 logic_cc = conds;
7281 break;
7282 case 4: /* eor */
7283 tcg_gen_xor_i32(t0, t0, t1);
7284 logic_cc = conds;
7285 break;
7286 case 8: /* add */
7287 if (conds)
7288 gen_helper_add_cc(t0, t0, t1);
7289 else
7290 tcg_gen_add_i32(t0, t0, t1);
7291 break;
7292 case 10: /* adc */
7293 if (conds)
7294 gen_helper_adc_cc(t0, t0, t1);
7295 else
7296 gen_adc(t0, t1);
7297 break;
7298 case 11: /* sbc */
7299 if (conds)
7300 gen_helper_sbc_cc(t0, t0, t1);
7301 else
7302 gen_sub_carry(t0, t0, t1);
7303 break;
7304 case 13: /* sub */
7305 if (conds)
7306 gen_helper_sub_cc(t0, t0, t1);
7307 else
7308 tcg_gen_sub_i32(t0, t0, t1);
7309 break;
7310 case 14: /* rsb */
7311 if (conds)
7312 gen_helper_sub_cc(t0, t1, t0);
7313 else
7314 tcg_gen_sub_i32(t0, t1, t0);
7315 break;
7316 default: /* 5, 6, 7, 9, 12, 15. */
7317 return 1;
7319 if (logic_cc) {
7320 gen_logic_CC(t0);
7321 if (shifter_out)
7322 gen_set_CF_bit31(t1);
7324 return 0;
7327 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7328 is not legal. */
7329 static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
7331 uint32_t insn, imm, shift, offset;
7332 uint32_t rd, rn, rm, rs;
7333 TCGv tmp;
7334 TCGv tmp2;
7335 TCGv tmp3;
7336 TCGv addr;
7337 TCGv_i64 tmp64;
7338 int op;
7339 int shiftop;
7340 int conds;
7341 int logic_cc;
7343 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
7344 || arm_feature (env, ARM_FEATURE_M))) {
7345 /* Thumb-1 cores may need to treat bl and blx as a pair of
7346 16-bit instructions to get correct prefetch abort behavior. */
7347 insn = insn_hw1;
7348 if ((insn & (1 << 12)) == 0) {
7349 /* Second half of blx. */
7350 offset = ((insn & 0x7ff) << 1);
7351 tmp = load_reg(s, 14);
7352 tcg_gen_addi_i32(tmp, tmp, offset);
7353 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
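/* The blx target is ARM code, so bits [1:0] are cleared to word-align it;
   gen_bx then sees bit 0 clear and switches to ARM state. */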
7355 tmp2 = new_tmp();
7356 tcg_gen_movi_i32(tmp2, s->pc | 1);
7357 store_reg(s, 14, tmp2);
7358 gen_bx(s, tmp);
7359 return 0;
7361 if (insn & (1 << 11)) {
7362 /* Second half of bl. */
7363 offset = ((insn & 0x7ff) << 1) | 1;
7364 tmp = load_reg(s, 14);
7365 tcg_gen_addi_i32(tmp, tmp, offset);
7367 tmp2 = new_tmp();
7368 tcg_gen_movi_i32(tmp2, s->pc | 1);
7369 store_reg(s, 14, tmp2);
7370 gen_bx(s, tmp);
7371 return 0;
7373 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
7374 /* Instruction spans a page boundary. Implement it as two
7375 16-bit instructions in case the second half causes a
7376 prefetch abort. */
7377 offset = ((int32_t)insn << 21) >> 9;
7378 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
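/* First half of bl/blx: park pc + sign_extend(hw1[10:0] << 12) in r14 and
   let the second-half decode above complete the branch. */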
7379 return 0;
7381 /* Fall through to 32-bit decode. */
7384 insn = lduw_code(s->pc);
7385 s->pc += 2;
7386 insn |= (uint32_t)insn_hw1 << 16;
7388 if ((insn & 0xf800e800) != 0xf000e800) {
7389 ARCH(6T2);
7392 rn = (insn >> 16) & 0xf;
7393 rs = (insn >> 12) & 0xf;
7394 rd = (insn >> 8) & 0xf;
7395 rm = insn & 0xf;
7396 switch ((insn >> 25) & 0xf) {
7397 case 0: case 1: case 2: case 3:
7398 /* 16-bit instructions. Should never happen. */
7399 abort();
7400 case 4:
7401 if (insn & (1 << 22)) {
7402 /* Other load/store, table branch. */
7403 if (insn & 0x01200000) {
7404 /* Load/store doubleword. */
7405 if (rn == 15) {
7406 addr = new_tmp();
7407 tcg_gen_movi_i32(addr, s->pc & ~3);
7408 } else {
7409 addr = load_reg(s, rn);
7411 offset = (insn & 0xff) * 4;
7412 if ((insn & (1 << 23)) == 0)
7413 offset = -offset;
7414 if (insn & (1 << 24)) {
7415 tcg_gen_addi_i32(addr, addr, offset);
7416 offset = 0;
7418 if (insn & (1 << 20)) {
7419 /* ldrd */
7420 tmp = gen_ld32(addr, IS_USER(s));
7421 store_reg(s, rs, tmp);
7422 tcg_gen_addi_i32(addr, addr, 4);
7423 tmp = gen_ld32(addr, IS_USER(s));
7424 store_reg(s, rd, tmp);
7425 } else {
7426 /* strd */
7427 tmp = load_reg(s, rs);
7428 gen_st32(tmp, addr, IS_USER(s));
7429 tcg_gen_addi_i32(addr, addr, 4);
7430 tmp = load_reg(s, rd);
7431 gen_st32(tmp, addr, IS_USER(s));
7433 if (insn & (1 << 21)) {
7434 /* Base writeback. */
7435 if (rn == 15)
7436 goto illegal_op;
7437 tcg_gen_addi_i32(addr, addr, offset - 4);
7438 store_reg(s, rn, addr);
7439 } else {
7440 dead_tmp(addr);
7442 } else if ((insn & (1 << 23)) == 0) {
7443 /* Load/store exclusive word. */
7444 addr = tcg_temp_local_new();
7445 load_reg_var(s, addr, rn);
7446 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
7447 if (insn & (1 << 20)) {
7448 gen_load_exclusive(s, rs, 15, addr, 2);
7449 } else {
7450 gen_store_exclusive(s, rd, rs, 15, addr, 2);
7452 tcg_temp_free(addr);
7453 } else if ((insn & (1 << 6)) == 0) {
7454 /* Table Branch. */
7455 if (rn == 15) {
7456 addr = new_tmp();
7457 tcg_gen_movi_i32(addr, s->pc);
7458 } else {
7459 addr = load_reg(s, rn);
7461 tmp = load_reg(s, rm);
7462 tcg_gen_add_i32(addr, addr, tmp);
7463 if (insn & (1 << 4)) {
7464 /* tbh */
7465 tcg_gen_add_i32(addr, addr, tmp);
7466 dead_tmp(tmp);
7467 tmp = gen_ld16u(addr, IS_USER(s));
7468 } else { /* tbb */
7469 dead_tmp(tmp);
7470 tmp = gen_ld8u(addr, IS_USER(s));
7472 dead_tmp(addr);
7473 tcg_gen_shli_i32(tmp, tmp, 1);
7474 tcg_gen_addi_i32(tmp, tmp, s->pc);
7475 store_reg(s, 15, tmp);
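/* tbb/tbh load an unsigned byte/halfword branch distance from rn + rm
   (rm counted twice for tbh), double it and add it to the pc, so table
   branches only go forwards. */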
7476 } else {
7477 /* Load/store exclusive byte/halfword/doubleword. */
7478 ARCH(7);
7479 op = (insn >> 4) & 0x3;
7480 if (op == 2) {
7481 goto illegal_op;
7483 addr = tcg_temp_local_new();
7484 load_reg_var(s, addr, rn);
7485 if (insn & (1 << 20)) {
7486 gen_load_exclusive(s, rs, rd, addr, op);
7487 } else {
7488 gen_store_exclusive(s, rm, rs, rd, addr, op);
7490 tcg_temp_free(addr);
7492 } else {
7493 /* Load/store multiple, RFE, SRS. */
7494 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
7495 /* Not available in user mode. */
7496 if (IS_USER(s))
7497 goto illegal_op;
7498 if (insn & (1 << 20)) {
7499 /* rfe */
7500 addr = load_reg(s, rn);
7501 if ((insn & (1 << 24)) == 0)
7502 tcg_gen_addi_i32(addr, addr, -8);
7503 /* Load PC into tmp and CPSR into tmp2. */
7504 tmp = gen_ld32(addr, 0);
7505 tcg_gen_addi_i32(addr, addr, 4);
7506 tmp2 = gen_ld32(addr, 0);
7507 if (insn & (1 << 21)) {
7508 /* Base writeback. */
7509 if (insn & (1 << 24)) {
7510 tcg_gen_addi_i32(addr, addr, 4);
7511 } else {
7512 tcg_gen_addi_i32(addr, addr, -4);
7514 store_reg(s, rn, addr);
7515 } else {
7516 dead_tmp(addr);
7518 gen_rfe(s, tmp, tmp2);
7519 } else {
7520 /* srs */
7521 op = (insn & 0x1f);
7522 if (op == (env->uncached_cpsr & CPSR_M)) {
7523 addr = load_reg(s, 13);
7524 } else {
7525 addr = new_tmp();
7526 tmp = tcg_const_i32(op);
7527 gen_helper_get_r13_banked(addr, cpu_env, tmp);
7528 tcg_temp_free_i32(tmp);
7530 if ((insn & (1 << 24)) == 0) {
7531 tcg_gen_addi_i32(addr, addr, -8);
7533 tmp = load_reg(s, 14);
7534 gen_st32(tmp, addr, 0);
7535 tcg_gen_addi_i32(addr, addr, 4);
7536 tmp = new_tmp();
7537 gen_helper_cpsr_read(tmp);
7538 gen_st32(tmp, addr, 0);
7539 if (insn & (1 << 21)) {
7540 if ((insn & (1 << 24)) == 0) {
7541 tcg_gen_addi_i32(addr, addr, -4);
7542 } else {
7543 tcg_gen_addi_i32(addr, addr, 4);
7545 if (op == (env->uncached_cpsr & CPSR_M)) {
7546 store_reg(s, 13, addr);
7547 } else {
7548 tmp = tcg_const_i32(op);
7549 gen_helper_set_r13_banked(cpu_env, tmp, addr);
7550 tcg_temp_free_i32(tmp);
7552 } else {
7553 dead_tmp(addr);
7556 } else {
7557 int i;
7558 /* Load/store multiple. */
7559 addr = load_reg(s, rn);
7560 offset = 0;
7561 for (i = 0; i < 16; i++) {
7562 if (insn & (1 << i))
7563 offset += 4;
7565 if (insn & (1 << 24)) {
7566 tcg_gen_addi_i32(addr, addr, -offset);
7569 for (i = 0; i < 16; i++) {
7570 if ((insn & (1 << i)) == 0)
7571 continue;
7572 if (insn & (1 << 20)) {
7573 /* Load. */
7574 tmp = gen_ld32(addr, IS_USER(s));
7575 if (i == 15) {
7576 gen_bx(s, tmp);
7577 } else {
7578 store_reg(s, i, tmp);
7580 } else {
7581 /* Store. */
7582 tmp = load_reg(s, i);
7583 gen_st32(tmp, addr, IS_USER(s));
7585 tcg_gen_addi_i32(addr, addr, 4);
7587 if (insn & (1 << 21)) {
7588 /* Base register writeback. */
7589 if (insn & (1 << 24)) {
7590 tcg_gen_addi_i32(addr, addr, -offset);
7592 /* Fault if writeback register is in register list. */
7593 if (insn & (1 << rn))
7594 goto illegal_op;
7595 store_reg(s, rn, addr);
7596 } else {
7597 dead_tmp(addr);
7601 break;
7602 case 5: /* Data processing register constant shift. */
7603 if (rn == 15) {
7604 tmp = new_tmp();
7605 tcg_gen_movi_i32(tmp, 0);
7606 } else {
7607 tmp = load_reg(s, rn);
7609 tmp2 = load_reg(s, rm);
7610 op = (insn >> 21) & 0xf;
7611 shiftop = (insn >> 4) & 3;
7612 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
7613 conds = (insn & (1 << 20)) != 0;
7614 logic_cc = (conds && thumb2_logic_op(op));
7615 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
7616 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
7617 goto illegal_op;
7618 dead_tmp(tmp2);
7619 if (rd != 15) {
7620 store_reg(s, rd, tmp);
7621 } else {
7622 dead_tmp(tmp);
7624 break;
7625 case 13: /* Misc data processing. */
7626 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
7627 if (op < 4 && (insn & 0xf000) != 0xf000)
7628 goto illegal_op;
7629 switch (op) {
7630 case 0: /* Register controlled shift. */
7631 tmp = load_reg(s, rn);
7632 tmp2 = load_reg(s, rm);
7633 if ((insn & 0x70) != 0)
7634 goto illegal_op;
7635 op = (insn >> 21) & 3;
7636 logic_cc = (insn & (1 << 20)) != 0;
7637 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
7638 if (logic_cc)
7639 gen_logic_CC(tmp);
7640 store_reg_bx(env, s, rd, tmp);
7641 break;
7642 case 1: /* Sign/zero extend. */
7643 tmp = load_reg(s, rm);
7644 shift = (insn >> 4) & 3;
7645 /* ??? In many cases it's not necessary to do a
7646 rotate; a shift is sufficient. */
7647 if (shift != 0)
7648 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
7649 op = (insn >> 20) & 7;
7650 switch (op) {
7651 case 0: gen_sxth(tmp); break;
7652 case 1: gen_uxth(tmp); break;
7653 case 2: gen_sxtb16(tmp); break;
7654 case 3: gen_uxtb16(tmp); break;
7655 case 4: gen_sxtb(tmp); break;
7656 case 5: gen_uxtb(tmp); break;
7657 default: goto illegal_op;
7659 if (rn != 15) {
7660 tmp2 = load_reg(s, rn);
7661 if ((op >> 1) == 1) {
7662 gen_add16(tmp, tmp2);
7663 } else {
7664 tcg_gen_add_i32(tmp, tmp, tmp2);
7665 dead_tmp(tmp2);
7668 store_reg(s, rd, tmp);
7669 break;
7670 case 2: /* SIMD add/subtract. */
7671 op = (insn >> 20) & 7;
7672 shift = (insn >> 4) & 7;
7673 if ((op & 3) == 3 || (shift & 3) == 3)
7674 goto illegal_op;
7675 tmp = load_reg(s, rn);
7676 tmp2 = load_reg(s, rm);
7677 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7678 dead_tmp(tmp2);
7679 store_reg(s, rd, tmp);
7680 break;
7681 case 3: /* Other data processing. */
7682 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
7683 if (op < 4) {
7684 /* Saturating add/subtract. */
7685 tmp = load_reg(s, rn);
7686 tmp2 = load_reg(s, rm);
7687 if (op & 2)
7688 gen_helper_double_saturate(tmp, tmp);
7689 if (op & 1)
7690 gen_helper_sub_saturate(tmp, tmp2, tmp);
7691 else
7692 gen_helper_add_saturate(tmp, tmp, tmp2);
7693 dead_tmp(tmp2);
7694 } else {
7695 tmp = load_reg(s, rn);
7696 switch (op) {
7697 case 0x0a: /* rbit */
7698 gen_helper_rbit(tmp, tmp);
7699 break;
7700 case 0x08: /* rev */
7701 tcg_gen_bswap32_i32(tmp, tmp);
7702 break;
7703 case 0x09: /* rev16 */
7704 gen_rev16(tmp);
7705 break;
7706 case 0x0b: /* revsh */
7707 gen_revsh(tmp);
7708 break;
7709 case 0x10: /* sel */
7710 tmp2 = load_reg(s, rm);
7711 tmp3 = new_tmp();
7712 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
7713 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7714 dead_tmp(tmp3);
7715 dead_tmp(tmp2);
7716 break;
7717 case 0x18: /* clz */
7718 gen_helper_clz(tmp, tmp);
7719 break;
7720 default:
7721 goto illegal_op;
7724 store_reg(s, rd, tmp);
7725 break;
7726 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
7727 op = (insn >> 4) & 0xf;
7728 tmp = load_reg(s, rn);
7729 tmp2 = load_reg(s, rm);
7730 switch ((insn >> 20) & 7) {
7731 case 0: /* 32 x 32 -> 32 */
7732 tcg_gen_mul_i32(tmp, tmp, tmp2);
7733 dead_tmp(tmp2);
7734 if (rs != 15) {
7735 tmp2 = load_reg(s, rs);
7736 if (op)
7737 tcg_gen_sub_i32(tmp, tmp2, tmp);
7738 else
7739 tcg_gen_add_i32(tmp, tmp, tmp2);
7740 dead_tmp(tmp2);
7742 break;
7743 case 1: /* 16 x 16 -> 32 */
7744 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7745 dead_tmp(tmp2);
7746 if (rs != 15) {
7747 tmp2 = load_reg(s, rs);
7748 gen_helper_add_setq(tmp, tmp, tmp2);
7749 dead_tmp(tmp2);
7751 break;
7752 case 2: /* Dual multiply add. */
7753 case 4: /* Dual multiply subtract. */
7754 if (op)
7755 gen_swap_half(tmp2);
7756 gen_smul_dual(tmp, tmp2);
7757 /* This addition cannot overflow. */
7758 if (insn & (1 << 22)) {
7759 tcg_gen_sub_i32(tmp, tmp, tmp2);
7760 } else {
7761 tcg_gen_add_i32(tmp, tmp, tmp2);
7763 dead_tmp(tmp2);
7764 if (rs != 15)
7766 tmp2 = load_reg(s, rs);
7767 gen_helper_add_setq(tmp, tmp, tmp2);
7768 dead_tmp(tmp2);
7770 break;
7771 case 3: /* 32 * 16 -> 32msb */
7772 if (op)
7773 tcg_gen_sari_i32(tmp2, tmp2, 16);
7774 else
7775 gen_sxth(tmp2);
7776 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7777 tcg_gen_shri_i64(tmp64, tmp64, 16);
7778 tmp = new_tmp();
7779 tcg_gen_trunc_i64_i32(tmp, tmp64);
7780 tcg_temp_free_i64(tmp64);
7781 if (rs != 15)
7783 tmp2 = load_reg(s, rs);
7784 gen_helper_add_setq(tmp, tmp, tmp2);
7785 dead_tmp(tmp2);
7787 break;
7788 case 5: case 6: /* 32 * 32 -> 32msb */
7789 gen_imull(tmp, tmp2);
7790 if (insn & (1 << 5)) {
7791 gen_roundqd(tmp, tmp2);
7792 dead_tmp(tmp2);
7793 } else {
7794 dead_tmp(tmp);
7795 tmp = tmp2;
7797 if (rs != 15) {
7798 tmp2 = load_reg(s, rs);
7799 if (insn & (1 << 21)) {
7800 tcg_gen_add_i32(tmp, tmp, tmp2);
7801 } else {
7802 tcg_gen_sub_i32(tmp, tmp2, tmp);
7804 dead_tmp(tmp2);
7806 break;
7807 case 7: /* Unsigned sum of absolute differences. */
7808 gen_helper_usad8(tmp, tmp, tmp2);
7809 dead_tmp(tmp2);
7810 if (rs != 15) {
7811 tmp2 = load_reg(s, rs);
7812 tcg_gen_add_i32(tmp, tmp, tmp2);
7813 dead_tmp(tmp2);
7815 break;
7817 store_reg(s, rd, tmp);
7818 break;
7819 case 6: case 7: /* 64-bit multiply, Divide. */
7820 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
7821 tmp = load_reg(s, rn);
7822 tmp2 = load_reg(s, rm);
7823 if ((op & 0x50) == 0x10) {
7824 /* sdiv, udiv */
7825 if (!arm_feature(env, ARM_FEATURE_DIV))
7826 goto illegal_op;
7827 if (op & 0x20)
7828 gen_helper_udiv(tmp, tmp, tmp2);
7829 else
7830 gen_helper_sdiv(tmp, tmp, tmp2);
7831 dead_tmp(tmp2);
7832 store_reg(s, rd, tmp);
7833 } else if ((op & 0xe) == 0xc) {
7834 /* Dual multiply accumulate long. */
7835 if (op & 1)
7836 gen_swap_half(tmp2);
7837 gen_smul_dual(tmp, tmp2);
7838 if (op & 0x10) {
7839 tcg_gen_sub_i32(tmp, tmp, tmp2);
7840 } else {
7841 tcg_gen_add_i32(tmp, tmp, tmp2);
7843 dead_tmp(tmp2);
7844 /* BUGFIX */
7845 tmp64 = tcg_temp_new_i64();
7846 tcg_gen_ext_i32_i64(tmp64, tmp);
7847 dead_tmp(tmp);
7848 gen_addq(s, tmp64, rs, rd);
7849 gen_storeq_reg(s, rs, rd, tmp64);
7850 tcg_temp_free_i64(tmp64);
7851 } else {
7852 if (op & 0x20) {
7853 /* Unsigned 64-bit multiply */
7854 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7855 } else {
7856 if (op & 8) {
7857 /* smlalxy */
7858 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7859 dead_tmp(tmp2);
7860 tmp64 = tcg_temp_new_i64();
7861 tcg_gen_ext_i32_i64(tmp64, tmp);
7862 dead_tmp(tmp);
7863 } else {
7864 /* Signed 64-bit multiply */
7865 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7868 if (op & 4) {
7869 /* umaal */
7870 gen_addq_lo(s, tmp64, rs);
7871 gen_addq_lo(s, tmp64, rd);
7872 } else if (op & 0x40) {
7873 /* 64-bit accumulate. */
7874 gen_addq(s, tmp64, rs, rd);
7876 gen_storeq_reg(s, rs, rd, tmp64);
7877 tcg_temp_free_i64(tmp64);
7879 break;
7881 break;
7882 case 6: case 7: case 14: case 15:
7883 /* Coprocessor. */
7884 if (((insn >> 24) & 3) == 3) {
7885 /* Translate into the equivalent ARM encoding. */
7886 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4);
7887 if (disas_neon_data_insn(env, s, insn))
7888 goto illegal_op;
7889 } else {
7890 if (insn & (1 << 28))
7891 goto illegal_op;
7892 if (disas_coproc_insn (env, s, insn))
7893 goto illegal_op;
7895 break;
7896 case 8: case 9: case 10: case 11:
7897 if (insn & (1 << 15)) {
7898 /* Branches, misc control. */
7899 if (insn & 0x5000) {
7900 /* Unconditional branch. */
7901 /* signextend(hw1[10:0]) -> offset[:12]. */
7902 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
7903 /* hw1[10:0] -> offset[11:1]. */
7904 offset |= (insn & 0x7ff) << 1;
7905 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
7906 offset[24:22] already have the same value because of the
7907 sign extension above. */
7908 offset ^= ((~insn) & (1 << 13)) << 10;
7909 offset ^= ((~insn) & (1 << 11)) << 11;
7911 if (insn & (1 << 14)) {
7912 /* Branch and link. */
7913 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
7916 offset += s->pc;
7917 if (insn & (1 << 12)) {
7918 /* b/bl */
7919 gen_jmp(s, offset);
7920 } else {
7921 /* blx */
7922 offset &= ~(uint32_t)2;
7923 gen_bx_im(s, offset);
7925 } else if (((insn >> 23) & 7) == 7) {
7926 /* Misc control */
7927 if (insn & (1 << 13))
7928 goto illegal_op;
7930 if (insn & (1 << 26)) {
7931 /* Secure monitor call (v6Z) */
7932 goto illegal_op; /* not implemented. */
7933 } else {
7934 op = (insn >> 20) & 7;
7935 switch (op) {
7936 case 0: /* msr cpsr. */
7937 if (IS_M(env)) {
7938 tmp = load_reg(s, rn);
7939 addr = tcg_const_i32(insn & 0xff);
7940 gen_helper_v7m_msr(cpu_env, addr, tmp);
7941 tcg_temp_free_i32(addr);
7942 dead_tmp(tmp);
7943 gen_lookup_tb(s);
7944 break;
7946 /* fall through */
7947 case 1: /* msr spsr. */
7948 if (IS_M(env))
7949 goto illegal_op;
7950 tmp = load_reg(s, rn);
7951 if (gen_set_psr(s,
7952 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
7953 op == 1, tmp))
7954 goto illegal_op;
7955 break;
7956 case 2: /* cps, nop-hint. */
7957 if (((insn >> 8) & 7) == 0) {
7958 gen_nop_hint(s, insn & 0xff);
7960 /* Implemented as NOP in user mode. */
7961 if (IS_USER(s))
7962 break;
7963 offset = 0;
7964 imm = 0;
7965 if (insn & (1 << 10)) {
7966 if (insn & (1 << 7))
7967 offset |= CPSR_A;
7968 if (insn & (1 << 6))
7969 offset |= CPSR_I;
7970 if (insn & (1 << 5))
7971 offset |= CPSR_F;
7972 if (insn & (1 << 9))
7973 imm = CPSR_A | CPSR_I | CPSR_F;
7975 if (insn & (1 << 8)) {
7976 offset |= 0x1f;
7977 imm |= (insn & 0x1f);
7979 if (offset) {
7980 gen_set_psr_im(s, offset, 0, imm);
7982 break;
7983 case 3: /* Special control operations. */
7984 ARCH(7);
7985 op = (insn >> 4) & 0xf;
7986 switch (op) {
7987 case 2: /* clrex */
7988 gen_clrex(s);
7989 break;
7990 case 4: /* dsb */
7991 case 5: /* dmb */
7992 case 6: /* isb */
7993 /* These execute as NOPs. */
7994 break;
7995 default:
7996 goto illegal_op;
7998 break;
7999 case 4: /* bxj */
8000 /* Trivial implementation equivalent to bx. */
8001 tmp = load_reg(s, rn);
8002 gen_bx(s, tmp);
8003 break;
8004 case 5: /* Exception return. */
8005 if (IS_USER(s)) {
8006 goto illegal_op;
8008 if (rn != 14 || rd != 15) {
8009 goto illegal_op;
8011 tmp = load_reg(s, rn);
8012 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
8013 gen_exception_return(s, tmp);
8014 break;
8015 case 6: /* mrs cpsr. */
8016 tmp = new_tmp();
8017 if (IS_M(env)) {
8018 addr = tcg_const_i32(insn & 0xff);
8019 gen_helper_v7m_mrs(tmp, cpu_env, addr);
8020 tcg_temp_free_i32(addr);
8021 } else {
8022 gen_helper_cpsr_read(tmp);
8024 store_reg(s, rd, tmp);
8025 break;
8026 case 7: /* mrs spsr. */
8027 /* Not accessible in user mode. */
8028 if (IS_USER(s) || IS_M(env))
8029 goto illegal_op;
8030 tmp = load_cpu_field(spsr);
8031 store_reg(s, rd, tmp);
8032 break;
8035 } else {
8036 /* Conditional branch. */
8037 op = (insn >> 22) & 0xf;
8038 /* Generate a conditional jump to next instruction. */
8039 s->condlabel = gen_new_label();
8040 gen_test_cc(op ^ 1, s->condlabel);
8041 s->condjmp = 1;
8043 /* offset[11:1] = insn[10:0] */
8044 offset = (insn & 0x7ff) << 1;
8045 /* offset[17:12] = insn[21:16]. */
8046 offset |= (insn & 0x003f0000) >> 4;
8047 /* offset[31:20] = insn[26]. */
8048 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
8049 /* offset[18] = insn[13]. */
8050 offset |= (insn & (1 << 13)) << 5;
8051 /* offset[19] = insn[11]. */
8052 offset |= (insn & (1 << 11)) << 8;
8054 /* jump to the offset */
8055 gen_jmp(s, s->pc + offset);
8057 } else {
8058 /* Data processing immediate. */
8059 if (insn & (1 << 25)) {
8060 if (insn & (1 << 24)) {
8061 if (insn & (1 << 20))
8062 goto illegal_op;
8063 /* Bitfield/Saturate. */
8064 op = (insn >> 21) & 7;
8065 imm = insn & 0x1f;
8066 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8067 if (rn == 15) {
8068 tmp = new_tmp();
8069 tcg_gen_movi_i32(tmp, 0);
8070 } else {
8071 tmp = load_reg(s, rn);
8073 switch (op) {
8074 case 2: /* Signed bitfield extract. */
8075 imm++;
8076 if (shift + imm > 32)
8077 goto illegal_op;
8078 if (imm < 32)
8079 gen_sbfx(tmp, shift, imm);
8080 break;
8081 case 6: /* Unsigned bitfield extract. */
8082 imm++;
8083 if (shift + imm > 32)
8084 goto illegal_op;
8085 if (imm < 32)
8086 gen_ubfx(tmp, shift, (1u << imm) - 1);
8087 break;
8088 case 3: /* Bitfield insert/clear. */
8089 if (imm < shift)
8090 goto illegal_op;
8091 imm = imm + 1 - shift;
8092 if (imm != 32) {
8093 tmp2 = load_reg(s, rd);
8094 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
8095 dead_tmp(tmp2);
8097 break;
8098 case 7:
8099 goto illegal_op;
8100 default: /* Saturate. */
8101 if (shift) {
8102 if (op & 1)
8103 tcg_gen_sari_i32(tmp, tmp, shift);
8104 else
8105 tcg_gen_shli_i32(tmp, tmp, shift);
8107 tmp2 = tcg_const_i32(imm);
8108 if (op & 4) {
8109 /* Unsigned. */
8110 if ((op & 1) && shift == 0)
8111 gen_helper_usat16(tmp, tmp, tmp2);
8112 else
8113 gen_helper_usat(tmp, tmp, tmp2);
8114 } else {
8115 /* Signed. */
8116 if ((op & 1) && shift == 0)
8117 gen_helper_ssat16(tmp, tmp, tmp2);
8118 else
8119 gen_helper_ssat(tmp, tmp, tmp2);
8121 tcg_temp_free_i32(tmp2);
8122 break;
8124 store_reg(s, rd, tmp);
8125 } else {
8126 imm = ((insn & 0x04000000) >> 15)
8127 | ((insn & 0x7000) >> 4) | (insn & 0xff);
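/* Reassemble the 12-bit immediate from i:imm3:imm8
   (insn[26], insn[14:12] and insn[7:0]). */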
8128 if (insn & (1 << 22)) {
8129 /* 16-bit immediate. */
8130 imm |= (insn >> 4) & 0xf000;
8131 if (insn & (1 << 23)) {
8132 /* movt */
8133 tmp = load_reg(s, rd);
8134 tcg_gen_ext16u_i32(tmp, tmp);
8135 tcg_gen_ori_i32(tmp, tmp, imm << 16);
8136 } else {
8137 /* movw */
8138 tmp = new_tmp();
8139 tcg_gen_movi_i32(tmp, imm);
8141 } else {
8142 /* Add/sub 12-bit immediate. */
8143 if (rn == 15) {
8144 offset = s->pc & ~(uint32_t)3;
8145 if (insn & (1 << 23))
8146 offset -= imm;
8147 else
8148 offset += imm;
8149 tmp = new_tmp();
8150 tcg_gen_movi_i32(tmp, offset);
8151 } else {
8152 tmp = load_reg(s, rn);
8153 if (insn & (1 << 23))
8154 tcg_gen_subi_i32(tmp, tmp, imm);
8155 else
8156 tcg_gen_addi_i32(tmp, tmp, imm);
8159 store_reg(s, rd, tmp);
8161 } else {
8162 int shifter_out = 0;
8163 /* modified 12-bit immediate. */
8164 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
8165 imm = (insn & 0xff);
8166 switch (shift) {
8167 case 0: /* XY */
8168 /* Nothing to do. */
8169 break;
8170 case 1: /* 00XY00XY */
8171 imm |= imm << 16;
8172 break;
8173 case 2: /* XY00XY00 */
8174 imm |= imm << 16;
8175 imm <<= 8;
8176 break;
8177 case 3: /* XYXYXYXY */
8178 imm |= imm << 16;
8179 imm |= imm << 8;
8180 break;
8181 default: /* Rotated constant. */
8182 shift = (shift << 1) | (imm >> 7);
8183 imm |= 0x80;
8184 imm = imm << (32 - shift);
8185 shifter_out = 1;
8186 break;
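/* Worked example: imm8 = 0x4d with shift == 3 expands to 0x4d4d4d4d. For
   shift values 4..15 the constant is (0x80 | imm8[6:0]) rotated right by
   i:imm3:imm8[7] (8..31), and shifter_out routes bit 31 of the constant
   to the carry flag. */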
8188 tmp2 = new_tmp();
8189 tcg_gen_movi_i32(tmp2, imm);
8190 rn = (insn >> 16) & 0xf;
8191 if (rn == 15) {
8192 tmp = new_tmp();
8193 tcg_gen_movi_i32(tmp, 0);
8194 } else {
8195 tmp = load_reg(s, rn);
8197 op = (insn >> 21) & 0xf;
8198 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
8199 shifter_out, tmp, tmp2))
8200 goto illegal_op;
8201 dead_tmp(tmp2);
8202 rd = (insn >> 8) & 0xf;
8203 if (rd != 15) {
8204 store_reg(s, rd, tmp);
8205 } else {
8206 dead_tmp(tmp);
8210 break;
8211 case 12: /* Load/store single data item. */
8213 int postinc = 0;
8214 int writeback = 0;
8215 int user;
8216 if ((insn & 0x01100000) == 0x01000000) {
8217 if (disas_neon_ls_insn(env, s, insn))
8218 goto illegal_op;
8219 break;
8221 user = IS_USER(s);
8222 if (rn == 15) {
8223 addr = new_tmp();
8224 /* PC relative. */
8225 /* s->pc has already been incremented by 4. */
8226 imm = s->pc & 0xfffffffc;
8227 if (insn & (1 << 23))
8228 imm += insn & 0xfff;
8229 else
8230 imm -= insn & 0xfff;
8231 tcg_gen_movi_i32(addr, imm);
8232 } else {
8233 addr = load_reg(s, rn);
8234 if (insn & (1 << 23)) {
8235 /* Positive offset. */
8236 imm = insn & 0xfff;
8237 tcg_gen_addi_i32(addr, addr, imm);
8238 } else {
8239 op = (insn >> 8) & 7;
8240 imm = insn & 0xff;
8241 switch (op) {
8242 case 0: case 8: /* Shifted Register. */
8243 shift = (insn >> 4) & 0xf;
8244 if (shift > 3)
8245 goto illegal_op;
8246 tmp = load_reg(s, rm);
8247 if (shift)
8248 tcg_gen_shli_i32(tmp, tmp, shift);
8249 tcg_gen_add_i32(addr, addr, tmp);
8250 dead_tmp(tmp);
8251 break;
8252 case 4: /* Negative offset. */
8253 tcg_gen_addi_i32(addr, addr, -imm);
8254 break;
8255 case 6: /* User privilege. */
8256 tcg_gen_addi_i32(addr, addr, imm);
8257 user = 1;
8258 break;
8259 case 1: /* Post-decrement. */
8260 imm = -imm;
8261 /* Fall through. */
8262 case 3: /* Post-increment. */
8263 postinc = 1;
8264 writeback = 1;
8265 break;
8266 case 5: /* Pre-decrement. */
8267 imm = -imm;
8268 /* Fall through. */
8269 case 7: /* Pre-increment. */
8270 tcg_gen_addi_i32(addr, addr, imm);
8271 writeback = 1;
8272 break;
8273 default:
8274 goto illegal_op;
8278 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
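/* op packs insn[24] (sign-extend) into bit 2 and insn[22:21] (log2 of the
   transfer size) into bits [1:0] for the load/store switches below. */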
8279 if (insn & (1 << 20)) {
8280 /* Load. */
8281 if (rs == 15 && op != 2) {
8282 if (op & 2)
8283 goto illegal_op;
8284 /* Memory hint. Implemented as NOP. */
8285 } else {
8286 switch (op) {
8287 case 0: tmp = gen_ld8u(addr, user); break;
8288 case 4: tmp = gen_ld8s(addr, user); break;
8289 case 1: tmp = gen_ld16u(addr, user); break;
8290 case 5: tmp = gen_ld16s(addr, user); break;
8291 case 2: tmp = gen_ld32(addr, user); break;
8292 default: goto illegal_op;
8294 if (rs == 15) {
8295 gen_bx(s, tmp);
8296 } else {
8297 store_reg(s, rs, tmp);
8300 } else {
8301 /* Store. */
8302 if (rs == 15)
8303 goto illegal_op;
8304 tmp = load_reg(s, rs);
8305 switch (op) {
8306 case 0: gen_st8(tmp, addr, user); break;
8307 case 1: gen_st16(tmp, addr, user); break;
8308 case 2: gen_st32(tmp, addr, user); break;
8309 default: goto illegal_op;
8312 if (postinc)
8313 tcg_gen_addi_i32(addr, addr, imm);
8314 if (writeback) {
8315 store_reg(s, rn, addr);
8316 } else {
8317 dead_tmp(addr);
8320 break;
8321 default:
8322 goto illegal_op;
8324 return 0;
8325 illegal_op:
8326 return 1;
8329 static void disas_thumb_insn(CPUState *env, DisasContext *s)
8331 uint32_t val, insn, op, rm, rn, rd, shift, cond;
8332 int32_t offset;
8333 int i;
8334 TCGv tmp;
8335 TCGv tmp2;
8336 TCGv addr;
8338 if (s->condexec_mask) {
8339 cond = s->condexec_cond;
8340 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
8341 s->condlabel = gen_new_label();
8342 gen_test_cc(cond ^ 1, s->condlabel);
8343 s->condjmp = 1;
8347 insn = lduw_code(s->pc);
8348 s->pc += 2;
8350 switch (insn >> 12) {
8351 case 0: case 1:
8353 rd = insn & 7;
8354 op = (insn >> 11) & 3;
8355 if (op == 3) {
8356 /* add/subtract */
8357 rn = (insn >> 3) & 7;
8358 tmp = load_reg(s, rn);
8359 if (insn & (1 << 10)) {
8360 /* immediate */
8361 tmp2 = new_tmp();
8362 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
8363 } else {
8364 /* reg */
8365 rm = (insn >> 6) & 7;
8366 tmp2 = load_reg(s, rm);
8368 if (insn & (1 << 9)) {
8369 if (s->condexec_mask)
8370 tcg_gen_sub_i32(tmp, tmp, tmp2);
8371 else
8372 gen_helper_sub_cc(tmp, tmp, tmp2);
8373 } else {
8374 if (s->condexec_mask)
8375 tcg_gen_add_i32(tmp, tmp, tmp2);
8376 else
8377 gen_helper_add_cc(tmp, tmp, tmp2);
8379 dead_tmp(tmp2);
8380 store_reg(s, rd, tmp);
8381 } else {
8382 /* shift immediate */
8383 rm = (insn >> 3) & 7;
8384 shift = (insn >> 6) & 0x1f;
8385 tmp = load_reg(s, rm);
8386 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
8387 if (!s->condexec_mask)
8388 gen_logic_CC(tmp);
8389 store_reg(s, rd, tmp);
8391 break;
8392 case 2: case 3:
8393 /* arithmetic large immediate */
8394 op = (insn >> 11) & 3;
8395 rd = (insn >> 8) & 0x7;
8396 if (op == 0) { /* mov */
8397 tmp = new_tmp();
8398 tcg_gen_movi_i32(tmp, insn & 0xff);
8399 if (!s->condexec_mask)
8400 gen_logic_CC(tmp);
8401 store_reg(s, rd, tmp);
8402 } else {
8403 tmp = load_reg(s, rd);
8404 tmp2 = new_tmp();
8405 tcg_gen_movi_i32(tmp2, insn & 0xff);
8406 switch (op) {
8407 case 1: /* cmp */
8408 gen_helper_sub_cc(tmp, tmp, tmp2);
8409 dead_tmp(tmp);
8410 dead_tmp(tmp2);
8411 break;
8412 case 2: /* add */
8413 if (s->condexec_mask)
8414 tcg_gen_add_i32(tmp, tmp, tmp2);
8415 else
8416 gen_helper_add_cc(tmp, tmp, tmp2);
8417 dead_tmp(tmp2);
8418 store_reg(s, rd, tmp);
8419 break;
8420 case 3: /* sub */
8421 if (s->condexec_mask)
8422 tcg_gen_sub_i32(tmp, tmp, tmp2);
8423 else
8424 gen_helper_sub_cc(tmp, tmp, tmp2);
8425 dead_tmp(tmp2);
8426 store_reg(s, rd, tmp);
8427 break;
8430 break;
8431 case 4:
8432 if (insn & (1 << 11)) {
8433 rd = (insn >> 8) & 7;
8434 /* load pc-relative. Bit 1 of PC is ignored. */
8435 val = s->pc + 2 + ((insn & 0xff) * 4);
8436 val &= ~(uint32_t)2;
8437 addr = new_tmp();
8438 tcg_gen_movi_i32(addr, val);
8439 tmp = gen_ld32(addr, IS_USER(s));
8440 dead_tmp(addr);
8441 store_reg(s, rd, tmp);
8442 break;
8444 if (insn & (1 << 10)) {
8445 /* data processing extended or blx */
8446 rd = (insn & 7) | ((insn >> 4) & 8);
8447 rm = (insn >> 3) & 0xf;
8448 op = (insn >> 8) & 3;
8449 switch (op) {
8450 case 0: /* add */
8451 tmp = load_reg(s, rd);
8452 tmp2 = load_reg(s, rm);
8453 tcg_gen_add_i32(tmp, tmp, tmp2);
8454 dead_tmp(tmp2);
8455 store_reg(s, rd, tmp);
8456 break;
8457 case 1: /* cmp */
8458 tmp = load_reg(s, rd);
8459 tmp2 = load_reg(s, rm);
8460 gen_helper_sub_cc(tmp, tmp, tmp2);
8461 dead_tmp(tmp2);
8462 dead_tmp(tmp);
8463 break;
8464 case 2: /* mov/cpy */
8465 tmp = load_reg(s, rm);
8466 store_reg(s, rd, tmp);
8467 break;
8468 case 3:/* branch [and link] exchange thumb register */
8469 tmp = load_reg(s, rm);
8470 if (insn & (1 << 7)) {
8471 val = (uint32_t)s->pc | 1;
8472 tmp2 = new_tmp();
8473 tcg_gen_movi_i32(tmp2, val);
8474 store_reg(s, 14, tmp2);
8476 gen_bx(s, tmp);
8477 break;
8479 break;
8482 /* data processing register */
8483 rd = insn & 7;
8484 rm = (insn >> 3) & 7;
8485 op = (insn >> 6) & 0xf;
8486 if (op == 2 || op == 3 || op == 4 || op == 7) {
8487 /* the shift/rotate ops want the operands backwards */
8488 val = rm;
8489 rm = rd;
8490 rd = val;
8491 val = 1;
8492 } else {
8493 val = 0;
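/* val == 1 flags that the operands were swapped and the result will be
   produced in tmp2; the store logic at the end of this case keys off it. */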
8496 if (op == 9) { /* neg */
8497 tmp = new_tmp();
8498 tcg_gen_movi_i32(tmp, 0);
8499 } else if (op != 0xf) { /* mvn doesn't read its first operand */
8500 tmp = load_reg(s, rd);
8501 } else {
8502 TCGV_UNUSED(tmp);
8505 tmp2 = load_reg(s, rm);
8506 switch (op) {
8507 case 0x0: /* and */
8508 tcg_gen_and_i32(tmp, tmp, tmp2);
8509 if (!s->condexec_mask)
8510 gen_logic_CC(tmp);
8511 break;
8512 case 0x1: /* eor */
8513 tcg_gen_xor_i32(tmp, tmp, tmp2);
8514 if (!s->condexec_mask)
8515 gen_logic_CC(tmp);
8516 break;
8517 case 0x2: /* lsl */
8518 if (s->condexec_mask) {
8519 gen_helper_shl(tmp2, tmp2, tmp);
8520 } else {
8521 gen_helper_shl_cc(tmp2, tmp2, tmp);
8522 gen_logic_CC(tmp2);
8524 break;
8525 case 0x3: /* lsr */
8526 if (s->condexec_mask) {
8527 gen_helper_shr(tmp2, tmp2, tmp);
8528 } else {
8529 gen_helper_shr_cc(tmp2, tmp2, tmp);
8530 gen_logic_CC(tmp2);
8532 break;
8533 case 0x4: /* asr */
8534 if (s->condexec_mask) {
8535 gen_helper_sar(tmp2, tmp2, tmp);
8536 } else {
8537 gen_helper_sar_cc(tmp2, tmp2, tmp);
8538 gen_logic_CC(tmp2);
8540 break;
8541 case 0x5: /* adc */
8542 if (s->condexec_mask)
8543 gen_adc(tmp, tmp2);
8544 else
8545 gen_helper_adc_cc(tmp, tmp, tmp2);
8546 break;
8547 case 0x6: /* sbc */
8548 if (s->condexec_mask)
8549 gen_sub_carry(tmp, tmp, tmp2);
8550 else
8551 gen_helper_sbc_cc(tmp, tmp, tmp2);
8552 break;
8553 case 0x7: /* ror */
8554 if (s->condexec_mask) {
8555 tcg_gen_andi_i32(tmp, tmp, 0x1f);
8556 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
8557 } else {
8558 gen_helper_ror_cc(tmp2, tmp2, tmp);
8559 gen_logic_CC(tmp2);
8561 break;
8562 case 0x8: /* tst */
8563 tcg_gen_and_i32(tmp, tmp, tmp2);
8564 gen_logic_CC(tmp);
8565 rd = 16;
8566 break;
8567 case 0x9: /* neg */
8568 if (s->condexec_mask)
8569 tcg_gen_neg_i32(tmp, tmp2);
8570 else
8571 gen_helper_sub_cc(tmp, tmp, tmp2);
8572 break;
8573 case 0xa: /* cmp */
8574 gen_helper_sub_cc(tmp, tmp, tmp2);
8575 rd = 16;
8576 break;
8577 case 0xb: /* cmn */
8578 gen_helper_add_cc(tmp, tmp, tmp2);
8579 rd = 16;
8580 break;
8581 case 0xc: /* orr */
8582 tcg_gen_or_i32(tmp, tmp, tmp2);
8583 if (!s->condexec_mask)
8584 gen_logic_CC(tmp);
8585 break;
8586 case 0xd: /* mul */
8587 tcg_gen_mul_i32(tmp, tmp, tmp2);
8588 if (!s->condexec_mask)
8589 gen_logic_CC(tmp);
8590 break;
8591 case 0xe: /* bic */
8592 tcg_gen_andc_i32(tmp, tmp, tmp2);
8593 if (!s->condexec_mask)
8594 gen_logic_CC(tmp);
8595 break;
8596 case 0xf: /* mvn */
8597 tcg_gen_not_i32(tmp2, tmp2);
8598 if (!s->condexec_mask)
8599 gen_logic_CC(tmp2);
8600 val = 1;
8601 rm = rd;
8602 break;
8604 if (rd != 16) {
8605 if (val) {
8606 store_reg(s, rm, tmp2);
8607 if (op != 0xf)
8608 dead_tmp(tmp);
8609 } else {
8610 store_reg(s, rd, tmp);
8611 dead_tmp(tmp2);
8613 } else {
8614 dead_tmp(tmp);
8615 dead_tmp(tmp2);
8617 break;
8619 case 5:
8620 /* load/store register offset. */
8621 rd = insn & 7;
8622 rn = (insn >> 3) & 7;
8623 rm = (insn >> 6) & 7;
8624 op = (insn >> 9) & 7;
8625 addr = load_reg(s, rn);
8626 tmp = load_reg(s, rm);
8627 tcg_gen_add_i32(addr, addr, tmp);
8628 dead_tmp(tmp);
8630 if (op < 3) /* store */
8631 tmp = load_reg(s, rd);
8633 switch (op) {
8634 case 0: /* str */
8635 gen_st32(tmp, addr, IS_USER(s));
8636 break;
8637 case 1: /* strh */
8638 gen_st16(tmp, addr, IS_USER(s));
8639 break;
8640 case 2: /* strb */
8641 gen_st8(tmp, addr, IS_USER(s));
8642 break;
8643 case 3: /* ldrsb */
8644 tmp = gen_ld8s(addr, IS_USER(s));
8645 break;
8646 case 4: /* ldr */
8647 tmp = gen_ld32(addr, IS_USER(s));
8648 break;
8649 case 5: /* ldrh */
8650 tmp = gen_ld16u(addr, IS_USER(s));
8651 break;
8652 case 6: /* ldrb */
8653 tmp = gen_ld8u(addr, IS_USER(s));
8654 break;
8655 case 7: /* ldrsh */
8656 tmp = gen_ld16s(addr, IS_USER(s));
8657 break;
8659 if (op >= 3) /* load */
8660 store_reg(s, rd, tmp);
8661 dead_tmp(addr);
8662 break;
8664 case 6:
8665 /* load/store word immediate offset */
8666 rd = insn & 7;
8667 rn = (insn >> 3) & 7;
8668 addr = load_reg(s, rn);
8669 val = (insn >> 4) & 0x7c;
8670 tcg_gen_addi_i32(addr, addr, val);
8672 if (insn & (1 << 11)) {
8673 /* load */
8674 tmp = gen_ld32(addr, IS_USER(s));
8675 store_reg(s, rd, tmp);
8676 } else {
8677 /* store */
8678 tmp = load_reg(s, rd);
8679 gen_st32(tmp, addr, IS_USER(s));
8681 dead_tmp(addr);
8682 break;
8684 case 7:
8685 /* load/store byte immediate offset */
8686 rd = insn & 7;
8687 rn = (insn >> 3) & 7;
8688 addr = load_reg(s, rn);
8689 val = (insn >> 6) & 0x1f;
8690 tcg_gen_addi_i32(addr, addr, val);
8692 if (insn & (1 << 11)) {
8693 /* load */
8694 tmp = gen_ld8u(addr, IS_USER(s));
8695 store_reg(s, rd, tmp);
8696 } else {
8697 /* store */
8698 tmp = load_reg(s, rd);
8699 gen_st8(tmp, addr, IS_USER(s));
8701 dead_tmp(addr);
8702 break;
8704 case 8:
8705 /* load/store halfword immediate offset */
8706 rd = insn & 7;
8707 rn = (insn >> 3) & 7;
8708 addr = load_reg(s, rn);
8709 val = (insn >> 5) & 0x3e;
8710 tcg_gen_addi_i32(addr, addr, val);
8712 if (insn & (1 << 11)) {
8713 /* load */
8714 tmp = gen_ld16u(addr, IS_USER(s));
8715 store_reg(s, rd, tmp);
8716 } else {
8717 /* store */
8718 tmp = load_reg(s, rd);
8719 gen_st16(tmp, addr, IS_USER(s));
8721 dead_tmp(addr);
8722 break;
8724 case 9:
8725 /* load/store from stack */
8726 rd = (insn >> 8) & 7;
8727 addr = load_reg(s, 13);
8728 val = (insn & 0xff) * 4;
8729 tcg_gen_addi_i32(addr, addr, val);
8731 if (insn & (1 << 11)) {
8732 /* load */
8733 tmp = gen_ld32(addr, IS_USER(s));
8734 store_reg(s, rd, tmp);
8735 } else {
8736 /* store */
8737 tmp = load_reg(s, rd);
8738 gen_st32(tmp, addr, IS_USER(s));
8740 dead_tmp(addr);
8741 break;
8743 case 10:
8744 /* add to high reg */
8745 rd = (insn >> 8) & 7;
8746 if (insn & (1 << 11)) {
8747 /* SP */
8748 tmp = load_reg(s, 13);
8749 } else {
8750 /* PC. bit 1 is ignored. */
8751 tmp = new_tmp();
8752 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
8754 val = (insn & 0xff) * 4;
8755 tcg_gen_addi_i32(tmp, tmp, val);
8756 store_reg(s, rd, tmp);
8757 break;
8759 case 11:
8760 /* misc */
8761 op = (insn >> 8) & 0xf;
8762 switch (op) {
8763 case 0:
8764 /* adjust stack pointer */
8765 tmp = load_reg(s, 13);
8766 val = (insn & 0x7f) * 4;
8767 if (insn & (1 << 7))
8768 val = -(int32_t)val;
8769 tcg_gen_addi_i32(tmp, tmp, val);
8770 store_reg(s, 13, tmp);
8771 break;
8773 case 2: /* sign/zero extend. */
8774 ARCH(6);
8775 rd = insn & 7;
8776 rm = (insn >> 3) & 7;
8777 tmp = load_reg(s, rm);
8778 switch ((insn >> 6) & 3) {
8779 case 0: gen_sxth(tmp); break;
8780 case 1: gen_sxtb(tmp); break;
8781 case 2: gen_uxth(tmp); break;
8782 case 3: gen_uxtb(tmp); break;
8784 store_reg(s, rd, tmp);
8785 break;
8786 case 4: case 5: case 0xc: case 0xd:
8787 /* push/pop */
8788 addr = load_reg(s, 13);
8789 if (insn & (1 << 8))
8790 offset = 4;
8791 else
8792 offset = 0;
8793 for (i = 0; i < 8; i++) {
8794 if (insn & (1 << i))
8795 offset += 4;
8797 if ((insn & (1 << 11)) == 0) {
8798 tcg_gen_addi_i32(addr, addr, -offset);
8800 for (i = 0; i < 8; i++) {
8801 if (insn & (1 << i)) {
8802 if (insn & (1 << 11)) {
8803 /* pop */
8804 tmp = gen_ld32(addr, IS_USER(s));
8805 store_reg(s, i, tmp);
8806 } else {
8807 /* push */
8808 tmp = load_reg(s, i);
8809 gen_st32(tmp, addr, IS_USER(s));
8811 /* advance to the next address. */
8812 tcg_gen_addi_i32(addr, addr, 4);
8815 TCGV_UNUSED(tmp);
8816 if (insn & (1 << 8)) {
8817 if (insn & (1 << 11)) {
8818 /* pop pc */
8819 tmp = gen_ld32(addr, IS_USER(s));
8820 /* don't set the pc until the rest of the instruction
8821 has completed */
8822 } else {
8823 /* push lr */
8824 tmp = load_reg(s, 14);
8825 gen_st32(tmp, addr, IS_USER(s));
8827 tcg_gen_addi_i32(addr, addr, 4);
8829 if ((insn & (1 << 11)) == 0) {
8830 tcg_gen_addi_i32(addr, addr, -offset);
8832 /* write back the new stack pointer */
8833 store_reg(s, 13, addr);
8834 /* set the new PC value */
8835 if ((insn & 0x0900) == 0x0900)
8836 gen_bx(s, tmp);
8837 break;
8839 case 1: case 3: case 9: case 11: /* cbz/cbnz */
8840 rm = insn & 7;
8841 tmp = load_reg(s, rm);
8842 s->condlabel = gen_new_label();
8843 s->condjmp = 1;
8844 if (insn & (1 << 11))
8845 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
8846 else
8847 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
8848 dead_tmp(tmp);
8849 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
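/* offset = insn[9]:insn[7:3]:'0' -- an unsigned halfword offset, so
   cbz/cbnz can only branch forwards. */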
8850 val = (uint32_t)s->pc + 2;
8851 val += offset;
8852 gen_jmp(s, val);
8853 break;
8855 case 15: /* IT, nop-hint. */
8856 if ((insn & 0xf) == 0) {
8857 gen_nop_hint(s, (insn >> 4) & 0xf);
8858 break;
8860 /* If Then. */
8861 s->condexec_cond = (insn >> 4) & 0xe;
8862 s->condexec_mask = insn & 0x1f;
8863 /* No actual code generated for this insn, just set up state. */
8864 break;
8866 case 0xe: /* bkpt */
8867 gen_set_condexec(s);
8868 gen_set_pc_im(s->pc - 2);
8869 gen_exception(EXCP_BKPT);
8870 s->is_jmp = DISAS_JUMP;
8871 break;
8873 case 0xa: /* rev */
8874 ARCH(6);
8875 rn = (insn >> 3) & 0x7;
8876 rd = insn & 0x7;
8877 tmp = load_reg(s, rn);
8878 switch ((insn >> 6) & 3) {
8879 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
8880 case 1: gen_rev16(tmp); break;
8881 case 3: gen_revsh(tmp); break;
8882 default: goto illegal_op;
8884 store_reg(s, rd, tmp);
8885 break;
8887 case 6: /* cps */
8888 ARCH(6);
8889 if (IS_USER(s))
8890 break;
8891 if (IS_M(env)) {
8892 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
8893 /* PRIMASK */
8894 if (insn & 1) {
8895 addr = tcg_const_i32(16);
8896 gen_helper_v7m_msr(cpu_env, addr, tmp);
8897 tcg_temp_free_i32(addr);
8899 /* FAULTMASK */
8900 if (insn & 2) {
8901 addr = tcg_const_i32(17);
8902 gen_helper_v7m_msr(cpu_env, addr, tmp);
8903 tcg_temp_free_i32(addr);
8905 tcg_temp_free_i32(tmp);
8906 gen_lookup_tb(s);
8907 } else {
8908 if (insn & (1 << 4))
8909 shift = CPSR_A | CPSR_I | CPSR_F;
8910 else
8911 shift = 0;
8912 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
8914 break;
8916 default:
8917 goto undef;
8919 break;
8921 case 12:
8922 /* load/store multiple */
8923 rn = (insn >> 8) & 0x7;
8924 addr = load_reg(s, rn);
8925 for (i = 0; i < 8; i++) {
8926 if (insn & (1 << i)) {
8927 if (insn & (1 << 11)) {
8928 /* load */
8929 tmp = gen_ld32(addr, IS_USER(s));
8930 store_reg(s, i, tmp);
8931 } else {
8932 /* store */
8933 tmp = load_reg(s, i);
8934 gen_st32(tmp, addr, IS_USER(s));
8936 /* advance to the next address */
8937 tcg_gen_addi_i32(addr, addr, 4);
8940 /* Base register writeback. */
8941 if ((insn & (1 << rn)) == 0) {
8942 store_reg(s, rn, addr);
8943 } else {
8944 dead_tmp(addr);
8946 break;
8948 case 13:
8949 /* conditional branch or swi */
8950 cond = (insn >> 8) & 0xf;
8951 if (cond == 0xe)
8952 goto undef;
8954 if (cond == 0xf) {
8955 /* swi */
8956 gen_set_condexec(s);
8957 gen_set_pc_im(s->pc);
8958 s->is_jmp = DISAS_SWI;
8959 break;
8961 /* generate a conditional jump to next instruction */
8962 s->condlabel = gen_new_label();
8963 gen_test_cc(cond ^ 1, s->condlabel);
8964 s->condjmp = 1;
8966 /* jump to the offset */
8967 val = (uint32_t)s->pc + 2;
8968 offset = ((int32_t)insn << 24) >> 24;
8969 val += offset << 1;
8970 gen_jmp(s, val);
8971 break;
8973 case 14:
8974 if (insn & (1 << 11)) {
8975 if (disas_thumb2_insn(env, s, insn))
8976 goto undef32;
8977 break;
8979 /* unconditional branch */
8980 val = (uint32_t)s->pc;
8981 offset = ((int32_t)insn << 21) >> 21;
8982 val += (offset << 1) + 2;
8983 gen_jmp(s, val);
8984 break;
    case 15:
        if (disas_thumb2_insn(env, s, insn))
            goto undef32;
        break;
    }
    return;
undef32:
    gen_set_condexec(s);
    gen_set_pc_im(s->pc - 4);
    gen_exception(EXCP_UDEF);
    s->is_jmp = DISAS_JUMP;
    return;
illegal_op:
undef:
    gen_set_condexec(s);
    gen_set_pc_im(s->pc - 2);
    gen_exception(EXCP_UDEF);
    s->is_jmp = DISAS_JUMP;
}

/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
   basic block 'tb'. If search_pc is TRUE, also generate PC
   information for each intermediate instruction. */
static inline void gen_intermediate_code_internal(CPUState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext dc1, *dc = &dc1;
    CPUBreakpoint *bp;
    uint16_t *gen_opc_end;
    int j, lj;
    target_ulong pc_start;
    uint32_t next_page_start;
    int num_insns;
    int max_insns;

    /* generate intermediate code */
    num_temps = 0;

    pc_start = tb->pc;

    dc->tb = tb;

    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = env->singlestep_enabled;
    dc->condjmp = 0;
    dc->thumb = env->thumb;
    dc->condexec_mask = (env->condexec_bits & 0xf) << 1;
    dc->condexec_cond = env->condexec_bits >> 4;
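    /* env->condexec_bits packs the IT state with the condition bits at
       [7:4] and the mask at [3:0]; the translator keeps the mask shifted up
       one so that bit 4 supplies the next condition LSB. */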
#if !defined(CONFIG_USER_ONLY)
    if (IS_M(env)) {
        dc->user = ((env->v7m.exception == 0) && (env->v7m.control & 1));
    } else {
        dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
    }
#endif
    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    cpu_V0 = cpu_F0d;
    cpu_V1 = cpu_F1d;
    /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
    cpu_M0 = tcg_temp_new_i64();
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;
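    /* With icount, cflags carries the number of instructions to translate
       into this TB; zero means unconstrained, so saturate to the field's
       maximum. */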
    gen_icount_start();
    /* Reset the conditional execution bits immediately. This avoids
       complications trying to do it at the end of the block. */
    if (env->condexec_bits) {
        TCGv tmp = new_tmp();
        tcg_gen_movi_i32(tmp, 0);
        store_cpu_field(tmp, condexec_bits);
    }
    do {
#ifdef CONFIG_USER_ONLY
        /* Intercept jump to the magic kernel page. */
        if (dc->pc >= 0xffff0000) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block. */
            gen_exception(EXCP_KERNEL_TRAP);
            dc->is_jmp = DISAS_UPDATE;
            break;
        }
#else
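        /* On v7-M, PC values at 0xfffffff0 and above are EXC_RETURN magic
           addresses; jumping there performs an exception return rather than
           executing code. */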
        if (dc->pc >= 0xfffffff0 && IS_M(env)) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block. */
            gen_exception(EXCP_EXCEPTION_EXIT);
            dc->is_jmp = DISAS_UPDATE;
            break;
        }
#endif

        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    gen_set_condexec(dc);
                    gen_set_pc_im(dc->pc);
                    gen_exception(EXCP_DEBUG);
                    dc->is_jmp = DISAS_JUMP;
                    /* Advance PC so that clearing the breakpoint will
                       invalidate this TB. */
                    dc->pc += 2;
                    goto done_generating;
                    break;
                }
            }
        }
        if (search_pc) {
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = dc->pc;
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }

        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();

        if (env->thumb) {
            disas_thumb_insn(env, dc);
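            /* Advance the IT state machine after each Thumb insn: bit 4 of
               the mask becomes the next condition LSB and the mask shifts
               up; when the mask empties, the IT block is finished. */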
            if (dc->condexec_mask) {
                dc->condexec_cond = (dc->condexec_cond & 0xe)
                                    | ((dc->condexec_mask >> 4) & 1);
                dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
                if (dc->condexec_mask == 0) {
                    dc->condexec_cond = 0;
                }
            }
        } else {
            disas_arm_insn(env, dc);
        }
        if (num_temps) {
            fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
            num_temps = 0;
        }

        if (dc->condjmp && !dc->is_jmp) {
            gen_set_label(dc->condlabel);
            dc->condjmp = 0;
        }
        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached. This
         * ensures prefetch aborts occur at the right place. */
        num_insns++;
    } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
             !env->singlestep_enabled &&
             !singlestep &&
             dc->pc < next_page_start &&
             num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO) {
        if (dc->condjmp) {
            /* FIXME: This can theoretically happen with self-modifying
               code. */
            cpu_abort(env, "IO on conditional branch instruction");
        }
        gen_io_end();
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written. */
    if (unlikely(env->singlestep_enabled)) {
        /* Make sure the pc is updated, and raise a debug exception. */
        if (dc->condjmp) {
            gen_set_condexec(dc);
            if (dc->is_jmp == DISAS_SWI) {
                gen_exception(EXCP_SWI);
            } else {
                gen_exception(EXCP_DEBUG);
            }
            gen_set_label(dc->condlabel);
        }
        if (dc->condjmp || !dc->is_jmp) {
            gen_set_pc_im(dc->pc);
            dc->condjmp = 0;
        }
        gen_set_condexec(dc);
        if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
            gen_exception(EXCP_SWI);
        } else {
            /* FIXME: Single stepping a WFI insn will not halt
               the CPU. */
            gen_exception(EXCP_DEBUG);
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
           - Exception generating instructions (bkpt, swi, undefined).
           - Page boundaries.
           - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        gen_set_condexec(dc);
        switch (dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
            gen_helper_wfi();
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI);
            break;
        }
        if (dc->condjmp) {
            gen_set_label(dc->condlabel);
            gen_set_condexec(dc);
            gen_goto_tb(dc, 1, dc->pc);
            dc->condjmp = 0;
        }
    }

done_generating:
    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(pc_start, dc->pc - pc_start, env->thumb);
        qemu_log("\n");
    }
#endif
    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }
}

void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}

static const char *cpu_mode_names[16] = {
    "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
    "???", "???", "???", "und", "???", "???", "???", "sys"
};

void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int i;
#if 0
    union {
        uint32_t i;
        float s;
    } s0, s1;
    CPU_DoubleU d;
    /* ??? This assumes float64 and double have the same layout.
       Oh well, it's only debug dumps. */
    union {
        float64 f64;
        double d;
    } d0;
#endif
    uint32_t psr;

    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3)
            cpu_fprintf(f, "\n");
        else
            cpu_fprintf(f, " ");
    }
    psr = cpsr_read(env);
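    /* Print the NZCV flags, T (Thumb) vs A (ARM) state, the current mode
       name, and whether the mode field selects 32-bit or legacy 26-bit
       addressing (bit 4 of the mode field). */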
    cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
                psr,
                psr & (1 << 31) ? 'N' : '-',
                psr & (1 << 30) ? 'Z' : '-',
                psr & (1 << 29) ? 'C' : '-',
                psr & (1 << 28) ? 'V' : '-',
                psr & CPSR_T ? 'T' : 'A',
                cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);

#if 0
    for (i = 0; i < 16; i++) {
        d.d = env->vfp.regs[i];
        s0.i = d.l.lower;
        s1.i = d.l.upper;
        d0.f64 = d.d;
        cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
                    i * 2, (int)s0.i, s0.s,
                    i * 2 + 1, (int)s1.i, s1.s,
                    i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
                    d0.d);
    }
    cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
#endif
}

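/* Restore CPU state for the guest PC found by a search_pc retranslation:
   for ARM only r15 needs updating, from the gen_opc_pc table filled in by
   gen_intermediate_code_internal() above. */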
void gen_pc_load(CPUState *env, TranslationBlock *tb,
                 unsigned long searched_pc, int pc_pos, void *puc)
{
    env->regs[15] = gen_opc_pc[pc_pos];
}