 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2005 CodeSourcery, LLC
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#define ENABLE_ARCH_5J  0
#define ENABLE_ARCH_6   1
#define ENABLE_ARCH_6T2 1

#define ARCH(x) if (!ENABLE_ARCH_##x) goto illegal_op;
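
/* The decoders below use ARCH(x) as a guard: if the selected core does
   not implement architecture level x, control transfers to the caller's
   illegal_op label and the instruction is treated as undefined. */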
/* internal defines */
typedef struct DisasContext {
    /* Nonzero if this instruction has been conditionally skipped. */
    /* The label that will be jumped to when the instruction is skipped. */
    struct TranslationBlock *tb;
    int singlestep_enabled;
#if !defined(CONFIG_USER_ONLY)
#if defined(CONFIG_USER_ONLY)

#define IS_USER(s) (s->user)

#define DISAS_JUMP_NEXT 4

#ifdef USE_DIRECT_JUMP

#define TBPARAM(x) (long)(x)

/* XXX: move that elsewhere */
static uint16_t *gen_opc_ptr;
static uint32_t *gen_opparam_ptr;

#define DEF(s, n, copy_size) INDEX_op_ ## s,
static GenOpFunc1 *gen_test_cc[14] = {

const uint8_t table_logic_cc[16] = {

static GenOpFunc1 *gen_shift_T1_im[4] = {

static GenOpFunc *gen_shift_T1_0[4] = {

static GenOpFunc1 *gen_shift_T2_im[4] = {

static GenOpFunc *gen_shift_T2_0[4] = {

static GenOpFunc1 *gen_shift_T1_im_cc[4] = {
    gen_op_shll_T1_im_cc,
    gen_op_shrl_T1_im_cc,
    gen_op_sarl_T1_im_cc,
    gen_op_rorl_T1_im_cc,
};

static GenOpFunc *gen_shift_T1_0_cc[4] = {

static GenOpFunc *gen_shift_T1_T0[4] = {

static GenOpFunc *gen_shift_T1_T0_cc[4] = {
    gen_op_shll_T1_T0_cc,
    gen_op_shrl_T1_T0_cc,
    gen_op_sarl_T1_T0_cc,
    gen_op_rorl_T1_T0_cc,
};

static GenOpFunc *gen_op_movl_TN_reg[3][16] = {

static GenOpFunc *gen_op_movl_reg_TN[2][16] = {

static GenOpFunc1 *gen_op_movl_TN_im[3] = {

static GenOpFunc1 *gen_shift_T0_im_thumb[3] = {
    gen_op_shll_T0_im_thumb,
    gen_op_shrl_T0_im_thumb,
    gen_op_sarl_T0_im_thumb,
};
static inline void gen_bx(DisasContext *s)
    s->is_jmp = DISAS_UPDATE;
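
/* gen_ldst(name, s) picks the memory-access op for the current
   privilege level: user-only builds always use the _raw variant, while
   system builds choose the _user or _kernel variant from IS_USER(s). */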
#if defined(CONFIG_USER_ONLY)
#define gen_ldst(name, s) gen_op_##name##_raw()

#define gen_ldst(name, s) do { \
        gen_op_##name##_user(); \
        gen_op_##name##_kernel(); \
static inline void gen_movl_TN_reg(DisasContext *s, int reg, int t)
        /* normally, since we updated PC, we need only to add one insn */
            val = (long)s->pc + 2;
            val = (long)s->pc + 4;
        gen_op_movl_TN_im[t](val);
        gen_op_movl_TN_reg[t][reg]();
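
/* Reads of r15 must see the architectural PC, which is the address of
   the current instruction plus 8 in ARM state and plus 4 in Thumb
   state.  s->pc has already been advanced past the instruction when
   this runs, so only the remaining +4 (ARM) or +2 (Thumb) is added. */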
static inline void gen_movl_T0_reg(DisasContext *s, int reg)
    gen_movl_TN_reg(s, reg, 0);

static inline void gen_movl_T1_reg(DisasContext *s, int reg)
    gen_movl_TN_reg(s, reg, 1);

static inline void gen_movl_T2_reg(DisasContext *s, int reg)
    gen_movl_TN_reg(s, reg, 2);

static inline void gen_movl_reg_TN(DisasContext *s, int reg, int t)
    gen_op_movl_reg_TN[t][reg]();
        s->is_jmp = DISAS_JUMP;

static inline void gen_movl_reg_T0(DisasContext *s, int reg)
    gen_movl_reg_TN(s, reg, 0);

static inline void gen_movl_reg_T1(DisasContext *s, int reg)
    gen_movl_reg_TN(s, reg, 1);
/* Force a TB lookup after an instruction that changes the CPU state. */
static inline void gen_lookup_tb(DisasContext *s)
    gen_op_movl_T0_im(s->pc);
    gen_movl_reg_T0(s, 15);
    s->is_jmp = DISAS_UPDATE;
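
/* gen_add_data_offset() below applies an ARM "addressing mode 2"
   offset to T1: bit 25 of the instruction selects an immediate versus a
   (possibly shifted) register offset, bit 23 selects add versus
   subtract, and bits 5-6 / 7-11 give the shift type and amount. */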
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn)
    int val, rm, shift, shiftop;

    if (!(insn & (1 << 25))) {
        if (!(insn & (1 << 23)))
        gen_op_addl_T1_im(val);

        shift = (insn >> 7) & 0x1f;
        gen_movl_T2_reg(s, rm);
        shiftop = (insn >> 5) & 3;
            gen_shift_T2_im[shiftop](shift);
        } else if (shiftop != 0) {
            gen_shift_T2_0[shiftop]();
        if (!(insn & (1 << 23)))
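
/* gen_add_datah_offset() handles the "addressing mode 3" forms used by
   the halfword and doubleword loads/stores: bit 22 selects an 8-bit
   immediate split across bits 0-3 and 8-11 versus a plain register
   offset, and bit 23 again gives the sign of the offset. */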
static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
    if (insn & (1 << 22)) {
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
        gen_op_addl_T1_im(val);
        gen_op_addl_T1_im(extra);

        gen_movl_T2_reg(s, rm);
        if (!(insn & (1 << 23)))
#define VFP_OP(name) \
static inline void gen_vfp_##name(int dp) \
        gen_op_vfp_##name##d(); \
        gen_op_vfp_##name##s(); \

static inline void gen_vfp_ld(DisasContext *s, int dp)
        gen_ldst(vfp_ldd, s);
        gen_ldst(vfp_lds, s);

static inline void gen_vfp_st(DisasContext *s, int dp)
        gen_ldst(vfp_std, s);
        gen_ldst(vfp_sts, s);
vfp_reg_offset (int dp, int reg)
        return offsetof(CPUARMState, vfp.regs[reg]);
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
            + offsetof(CPU_DoubleU, l.upper);
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
            + offsetof(CPU_DoubleU, l.lower);
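
/* VFP single-precision registers alias the halves of the
   double-precision bank in CPUARMState, so sN lives inside
   regs[N >> 1]; the CPU_DoubleU upper/lower selection keeps the layout
   correct regardless of host endianness. */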
static inline void gen_mov_F0_vreg(int dp, int reg)
        gen_op_vfp_getreg_F0d(vfp_reg_offset(dp, reg));
        gen_op_vfp_getreg_F0s(vfp_reg_offset(dp, reg));

static inline void gen_mov_F1_vreg(int dp, int reg)
        gen_op_vfp_getreg_F1d(vfp_reg_offset(dp, reg));
        gen_op_vfp_getreg_F1s(vfp_reg_offset(dp, reg));

static inline void gen_mov_vreg_F0(int dp, int reg)
        gen_op_vfp_setreg_F0d(vfp_reg_offset(dp, reg));
        gen_op_vfp_setreg_F0s(vfp_reg_offset(dp, reg));
/* Disassemble system coprocessor (cp15) instruction.  Return nonzero if
   instruction is not defined.  */
static int disas_cp15_insn(DisasContext *s, uint32_t insn)
    /* ??? Some cp15 registers are accessible from userspace. */

    if ((insn & 0x0fff0fff) == 0x0e070f90
        || (insn & 0x0fff0fff) == 0x0e070f58) {
        /* Wait for interrupt. */
        gen_op_movl_T0_im((long)s->pc);
        gen_op_movl_reg_TN[0][15]();
        s->is_jmp = DISAS_JUMP;
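
/* Both encodings above are cp15 c7 "wait for interrupt" operations
   (the architectural MCR p15, 0, Rd, c7, c0, 4 plus what appears to be
   a core-specific variant).  The PC is written back and translation
   stops so the halt can be handled outside generated code. */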
    rd = (insn >> 12) & 0xf;
    if (insn & (1 << 20)) {
        gen_op_movl_T0_cp15(insn);
        /* If the destination register is r15 then this sets the condition codes. */
        gen_movl_reg_T0(s, rd);
        gen_movl_T0_reg(s, rd);
        gen_op_movl_cp15_T0(insn);
/* Disassemble a VFP instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
static int disas_vfp_insn(CPUState *env, DisasContext *s, uint32_t insn)
    uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
    if (!arm_feature(env, ARM_FEATURE_VFP))

    if ((env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) == 0) {
        /* VFP disabled.  Only allow fmxr/fmrx to/from fpexc and fpsid. */
        if ((insn & 0x0fe00fff) != 0x0ee00a10)
        rn = (insn >> 16) & 0xf;
        if (rn != 0 && rn != 8)

    dp = ((insn & 0xf00) == 0xb00);
    switch ((insn >> 24) & 0xf) {
547 if (insn
& (1 << 4)) {
548 /* single register transfer */
549 if ((insn
& 0x6f) != 0x00)
551 rd
= (insn
>> 12) & 0xf;
555 rn
= (insn
>> 16) & 0xf;
556 /* Get the existing value even for arm->vfp moves because
557 we only set half the register. */
558 gen_mov_F0_vreg(1, rn
);
560 if (insn
& (1 << 20)) {
562 if (insn
& (1 << 21))
563 gen_movl_reg_T1(s
, rd
);
565 gen_movl_reg_T0(s
, rd
);
568 if (insn
& (1 << 21))
569 gen_movl_T1_reg(s
, rd
);
571 gen_movl_T0_reg(s
, rd
);
573 gen_mov_vreg_F0(dp
, rn
);
576 rn
= ((insn
>> 15) & 0x1e) | ((insn
>> 7) & 1);
577 if (insn
& (1 << 20)) {
579 if (insn
& (1 << 21)) {
580 /* system register */
586 case ARM_VFP_FPINST2
:
587 gen_op_vfp_movl_T0_xreg(rn
);
591 gen_op_vfp_movl_T0_fpscr_flags();
593 gen_op_vfp_movl_T0_fpscr();
599 gen_mov_F0_vreg(0, rn
);
603 /* Set the 4 flag bits in the CPSR. */
604 gen_op_movl_cpsr_T0(0xf0000000);
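/* When rd is 15 on an FMRX from the FPSCR this is FMSTAT: the VFP
   N, Z, C and V flags are copied into the top four bits of the CPSR
   (hence the 0xf0000000 mask) instead of loading a core register. */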
606 gen_movl_reg_T0(s
, rd
);
609 gen_movl_T0_reg(s
, rd
);
610 if (insn
& (1 << 21)) {
612 /* system register */
615 /* Writes are ignored. */
618 gen_op_vfp_movl_fpscr_T0();
622 gen_op_vfp_movl_xreg_T0(rn
);
626 case ARM_VFP_FPINST2
:
627 gen_op_vfp_movl_xreg_T0(rn
);
634 gen_mov_vreg_F0(0, rn
);
639 /* data processing */
640 /* The opcode is in bits 23, 21, 20 and 6. */
641 op
= ((insn
>> 20) & 8) | ((insn
>> 19) & 6) | ((insn
>> 6) & 1);
645 rn
= ((insn
>> 15) & 0x1e) | ((insn
>> 7) & 1);
647 /* rn is register number */
650 rn
= (insn
>> 16) & 0xf;
653 if (op
== 15 && (rn
== 15 || rn
> 17)) {
654 /* Integer or single precision destination. */
655 rd
= ((insn
>> 11) & 0x1e) | ((insn
>> 22) & 1);
657 if (insn
& (1 << 22))
659 rd
= (insn
>> 12) & 0xf;
662 if (op
== 15 && (rn
== 16 || rn
== 17)) {
663 /* Integer source. */
664 rm
= ((insn
<< 1) & 0x1e) | ((insn
>> 5) & 1);
671 rn
= ((insn
>> 15) & 0x1e) | ((insn
>> 7) & 1);
672 if (op
== 15 && rn
== 15) {
673 /* Double precision destination. */
674 if (insn
& (1 << 22))
676 rd
= (insn
>> 12) & 0xf;
678 rd
= ((insn
>> 11) & 0x1e) | ((insn
>> 22) & 1);
679 rm
= ((insn
<< 1) & 0x1e) | ((insn
>> 5) & 1);
682 veclen
= env
->vfp
.vec_len
;
683 if (op
== 15 && rn
> 3)
686 /* Shut up compiler warnings. */
697 /* Figure out what type of vector operation this is. */
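/* VFP short-vector handling: when FPSCR.LEN is non-zero an operation
   whose destination lies outside bank 0 repeats over successive
   registers of its bank, stepping by FPSCR.STRIDE; destinations in
   bank 0 stay scalar, and a source in bank 0 is reused on every
   iteration ("mixed scalar/vector").  delta_d and delta_m hold the
   per-iteration register steps. */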
698 if ((rd
& bank_mask
) == 0) {
703 delta_d
= (env
->vfp
.vec_stride
>> 1) + 1;
705 delta_d
= env
->vfp
.vec_stride
+ 1;
707 if ((rm
& bank_mask
) == 0) {
708 /* mixed scalar/vector */
717 /* Load the initial operands. */
723 gen_mov_F0_vreg(0, rm
);
728 gen_mov_F0_vreg(dp
, rd
);
729 gen_mov_F1_vreg(dp
, rm
);
733 /* Compare with zero */
734 gen_mov_F0_vreg(dp
, rd
);
738 /* One source operand. */
739 gen_mov_F0_vreg(dp
, rm
);
742 /* Two source operands. */
743 gen_mov_F0_vreg(dp
, rn
);
744 gen_mov_F1_vreg(dp
, rm
);
748 /* Perform the calculation. */
750 case 0: /* mac: fd + (fn * fm) */
752 gen_mov_F1_vreg(dp
, rd
);
755 case 1: /* nmac: fd - (fn * fm) */
758 gen_mov_F1_vreg(dp
, rd
);
761 case 2: /* msc: -fd + (fn * fm) */
763 gen_mov_F1_vreg(dp
, rd
);
766 case 3: /* nmsc: -fd - (fn * fm) */
768 gen_mov_F1_vreg(dp
, rd
);
772 case 4: /* mul: fn * fm */
775 case 5: /* nmul: -(fn * fm) */
779 case 6: /* add: fn + fm */
782 case 7: /* sub: fn - fm */
785 case 8: /* div: fn / fm */
788 case 15: /* extension space */
815 case 15: /* single<->double conversion */
830 case 25: /* ftouiz */
836 case 27: /* ftosiz */
839 default: /* undefined */
840 printf ("rn:%d\n", rn
);
844 default: /* undefined */
845 printf ("op:%d\n", op
);
849 /* Write back the result. */
850 if (op
== 15 && (rn
>= 8 && rn
<= 11))
851 ; /* Comparison, do nothing. */
852 else if (op
== 15 && rn
> 17)
853 /* Integer result. */
854 gen_mov_vreg_F0(0, rd
);
855 else if (op
== 15 && rn
== 15)
857 gen_mov_vreg_F0(!dp
, rd
);
859 gen_mov_vreg_F0(dp
, rd
);
861 /* break out of the loop if we have finished */
865 if (op
== 15 && delta_m
== 0) {
866 /* single source one-many */
868 rd
= ((rd
+ delta_d
) & (bank_mask
- 1))
870 gen_mov_vreg_F0(dp
, rd
);
/* Set up the next operands. */
876 rd
= ((rd
+ delta_d
) & (bank_mask
- 1))
880 /* One source operand. */
881 rm
= ((rm
+ delta_m
) & (bank_mask
- 1))
883 gen_mov_F0_vreg(dp
, rm
);
885 /* Two source operands. */
886 rn
= ((rn
+ delta_d
) & (bank_mask
- 1))
888 gen_mov_F0_vreg(dp
, rn
);
890 rm
= ((rm
+ delta_m
) & (bank_mask
- 1))
892 gen_mov_F1_vreg(dp
, rm
);
900 if (dp
&& (insn
& (1 << 22))) {
901 /* two-register transfer */
902 rn
= (insn
>> 16) & 0xf;
903 rd
= (insn
>> 12) & 0xf;
909 rm
= ((insn
<< 1) & 0x1e) | ((insn
>> 5) & 1);
911 if (insn
& (1 << 20)) {
914 gen_mov_F0_vreg(1, rm
);
916 gen_movl_reg_T0(s
, rd
);
917 gen_movl_reg_T1(s
, rn
);
919 gen_mov_F0_vreg(0, rm
);
921 gen_movl_reg_T0(s
, rn
);
922 gen_mov_F0_vreg(0, rm
+ 1);
924 gen_movl_reg_T0(s
, rd
);
929 gen_movl_T0_reg(s
, rd
);
930 gen_movl_T1_reg(s
, rn
);
932 gen_mov_vreg_F0(1, rm
);
934 gen_movl_T0_reg(s
, rn
);
936 gen_mov_vreg_F0(0, rm
);
937 gen_movl_T0_reg(s
, rd
);
939 gen_mov_vreg_F0(0, rm
+ 1);
944 rn
= (insn
>> 16) & 0xf;
946 rd
= (insn
>> 12) & 0xf;
948 rd
= ((insn
>> 11) & 0x1e) | ((insn
>> 22) & 1);
949 gen_movl_T1_reg(s
, rn
);
950 if ((insn
& 0x01200000) == 0x01000000) {
951 /* Single load/store */
952 offset
= (insn
& 0xff) << 2;
953 if ((insn
& (1 << 23)) == 0)
955 gen_op_addl_T1_im(offset
);
956 if (insn
& (1 << 20)) {
958 gen_mov_vreg_F0(dp
, rd
);
960 gen_mov_F0_vreg(dp
, rd
);
964 /* load/store multiple */
966 n
= (insn
>> 1) & 0x7f;
970 if (insn
& (1 << 24)) /* pre-decrement */
971 gen_op_addl_T1_im(-((insn
& 0xff) << 2));
977 for (i
= 0; i
< n
; i
++) {
978 if (insn
& (1 << 20)) {
981 gen_mov_vreg_F0(dp
, rd
+ i
);
984 gen_mov_F0_vreg(dp
, rd
+ i
);
987 gen_op_addl_T1_im(offset
);
989 if (insn
& (1 << 21)) {
991 if (insn
& (1 << 24))
992 offset
= -offset
* n
;
993 else if (dp
&& (insn
& 1))
999 gen_op_addl_T1_im(offset
);
1000 gen_movl_reg_T1(s
, rn
);
1006 /* Should never happen. */
static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
    TranslationBlock *tb;

    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
            gen_op_goto_tb0(TBPARAM(tb));
            gen_op_goto_tb1(TBPARAM(tb));
        gen_op_movl_T0_im(dest);
        gen_op_movl_r15_T0();
        gen_op_movl_T0_im((long)tb + n);

        gen_op_movl_T0_im(dest);
        gen_op_movl_r15_T0();
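
/* Direct block chaining: when the branch target lies on the same guest
   page as the current TB, goto_tb is used so this exit can later be
   patched to jump straight into the target TB, and T0 is set to
   (long)tb + n so the execution loop knows which of the two exits was
   taken.  Cross-page targets simply update r15 and leave the TB. */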
static inline void gen_jmp (DisasContext *s, uint32_t dest)
    if (__builtin_expect(s->singlestep_enabled, 0)) {
        /* An indirect jump so that we still trigger the debug exception. */
        gen_op_movl_T0_im(dest);

        gen_goto_tb(s, 0, dest);
        s->is_jmp = DISAS_TB_JUMP;

static inline void gen_mulxy(int x, int y)
        gen_op_sarl_T0_im(16);
        gen_op_sarl_T1_im(16);
/* Return the mask of PSR bits set by an MSR instruction. */
static uint32_t msr_mask(DisasContext *s, int flags, int spsr) {
    if (flags & (1 << 0))
    if (flags & (1 << 1))
    if (flags & (1 << 2))
    if (flags & (1 << 3))
    /* Mask out undefined bits. */
    /* Mask out state bits. */
    mask &= ~0x01000020;
    /* Mask out privileged bits. */
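
/* The four low bits of the MSR field mask select the PSR byte lanes:
   bit 0 the control byte, bit 1 the extension byte, bit 2 the status
   byte and bit 3 the flags byte.  0x01000020 covers the J and T bits,
   which cannot be written by MSR and are therefore masked out. */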
/* Returns nonzero if access to the PSR is not permitted. */
static int gen_set_psr_T0(DisasContext *s, uint32_t mask, int spsr)
        /* ??? This is also undefined in system mode. */
        gen_op_movl_spsr_T0(mask);
        gen_op_movl_cpsr_T0(mask);

static void gen_exception_return(DisasContext *s)
    gen_op_movl_reg_TN[0][15]();
    gen_op_movl_T0_spsr();
    gen_op_movl_cpsr_T0(0xffffffff);
    s->is_jmp = DISAS_UPDATE;
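
/* disas_arm_insn() decodes one ARM-state instruction at s->pc.  The
   unconditional encodings are handled first; for every other condition
   a skip label is emitted via gen_test_cc[cond ^ 1], so the generated
   code falls through to condlabel when the condition fails. */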
static void disas_arm_insn(CPUState *env, DisasContext *s)
    unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;

    insn = ldl_code(s->pc);
1117 /* Unconditional instructions. */
1118 if ((insn
& 0x0d70f000) == 0x0550f000)
1120 else if ((insn
& 0x0e000000) == 0x0a000000) {
1121 /* branch link and change to thumb (blx <offset>) */
1124 val
= (uint32_t)s
->pc
;
1125 gen_op_movl_T0_im(val
);
1126 gen_movl_reg_T0(s
, 14);
1127 /* Sign-extend the 24-bit offset */
1128 offset
= (((int32_t)insn
) << 8) >> 8;
1129 /* offset * 4 + bit24 * 2 + (thumb bit) */
1130 val
+= (offset
<< 2) | ((insn
>> 23) & 2) | 1;
1131 /* pipeline offset */
1133 gen_op_movl_T0_im(val
);
1136 } else if ((insn
& 0x0fe00000) == 0x0c400000) {
1137 /* Coprocessor double register transfer. */
1138 } else if ((insn
& 0x0f000010) == 0x0e000010) {
1139 /* Additional coprocessor register transfer. */
1140 } else if ((insn
& 0x0ff10010) == 0x01000000) {
1141 /* cps (privileged) */
1142 } else if ((insn
& 0x0ffffdff) == 0x01010000) {
1144 if (insn
& (1 << 9)) {
1145 /* BE8 mode not implemented. */
1153 /* if not always execute, we generate a conditional jump to
1155 s
->condlabel
= gen_new_label();
1156 gen_test_cc
[cond
^ 1](s
->condlabel
);
1158 //gen_test_cc[cond ^ 1]((long)s->tb, (long)s->pc);
1159 //s->is_jmp = DISAS_JUMP_NEXT;
1161 if ((insn
& 0x0f900000) == 0x03000000) {
1162 if ((insn
& 0x0fb0f000) != 0x0320f000)
1164 /* CPSR = immediate */
1166 shift
= ((insn
>> 8) & 0xf) * 2;
1168 val
= (val
>> shift
) | (val
<< (32 - shift
));
1169 gen_op_movl_T0_im(val
);
1170 i
= ((insn
& (1 << 22)) != 0);
1171 if (gen_set_psr_T0(s
, msr_mask(s
, (insn
>> 16) & 0xf, i
), i
))
1173 } else if ((insn
& 0x0f900000) == 0x01000000
1174 && (insn
& 0x00000090) != 0x00000090) {
1175 /* miscellaneous instructions */
1176 op1
= (insn
>> 21) & 3;
1177 sh
= (insn
>> 4) & 0xf;
1180 case 0x0: /* move program status register */
1183 gen_movl_T0_reg(s
, rm
);
1184 i
= ((op1
& 2) != 0);
1185 if (gen_set_psr_T0(s
, msr_mask(s
, (insn
>> 16) & 0xf, i
), i
))
1189 rd
= (insn
>> 12) & 0xf;
1193 gen_op_movl_T0_spsr();
1195 gen_op_movl_T0_cpsr();
1197 gen_movl_reg_T0(s
, rd
);
1202 /* branch/exchange thumb (bx). */
1203 gen_movl_T0_reg(s
, rm
);
1205 } else if (op1
== 3) {
1207 rd
= (insn
>> 12) & 0xf;
1208 gen_movl_T0_reg(s
, rm
);
1210 gen_movl_reg_T0(s
, rd
);
1218 /* Trivial implementation equivalent to bx. */
1219 gen_movl_T0_reg(s
, rm
);
1229 /* branch link/exchange thumb (blx) */
1230 val
= (uint32_t)s
->pc
;
1231 gen_op_movl_T0_im(val
);
1232 gen_movl_reg_T0(s
, 14);
1233 gen_movl_T0_reg(s
, rm
);
1236 case 0x5: /* saturating add/subtract */
1237 rd
= (insn
>> 12) & 0xf;
1238 rn
= (insn
>> 16) & 0xf;
1239 gen_movl_T0_reg(s
, rm
);
1240 gen_movl_T1_reg(s
, rn
);
1242 gen_op_double_T1_saturate();
1244 gen_op_subl_T0_T1_saturate();
1246 gen_op_addl_T0_T1_saturate();
1247 gen_movl_reg_T0(s
, rd
);
1250 gen_op_movl_T0_im((long)s
->pc
- 4);
1251 gen_op_movl_reg_TN
[0][15]();
1253 s
->is_jmp
= DISAS_JUMP
;
1255 case 0x8: /* signed multiply */
1259 rs
= (insn
>> 8) & 0xf;
1260 rn
= (insn
>> 12) & 0xf;
1261 rd
= (insn
>> 16) & 0xf;
1263 /* (32 * 16) >> 16 */
1264 gen_movl_T0_reg(s
, rm
);
1265 gen_movl_T1_reg(s
, rs
);
1267 gen_op_sarl_T1_im(16);
1270 gen_op_imulw_T0_T1();
1271 if ((sh
& 2) == 0) {
1272 gen_movl_T1_reg(s
, rn
);
1273 gen_op_addl_T0_T1_setq();
1275 gen_movl_reg_T0(s
, rd
);
1278 gen_movl_T0_reg(s
, rm
);
1279 gen_movl_T1_reg(s
, rs
);
1280 gen_mulxy(sh
& 2, sh
& 4);
1282 gen_op_signbit_T1_T0();
1283 gen_op_addq_T0_T1(rn
, rd
);
1284 gen_movl_reg_T0(s
, rn
);
1285 gen_movl_reg_T1(s
, rd
);
1288 gen_movl_T1_reg(s
, rn
);
1289 gen_op_addl_T0_T1_setq();
1291 gen_movl_reg_T0(s
, rd
);
1298 } else if (((insn
& 0x0e000000) == 0 &&
1299 (insn
& 0x00000090) != 0x90) ||
1300 ((insn
& 0x0e000000) == (1 << 25))) {
1301 int set_cc
, logic_cc
, shiftop
;
1303 op1
= (insn
>> 21) & 0xf;
1304 set_cc
= (insn
>> 20) & 1;
1305 logic_cc
= table_logic_cc
[op1
] & set_cc
;
1307 /* data processing instruction */
1308 if (insn
& (1 << 25)) {
1309 /* immediate operand */
1311 shift
= ((insn
>> 8) & 0xf) * 2;
1313 val
= (val
>> shift
) | (val
<< (32 - shift
));
1314 gen_op_movl_T1_im(val
);
1315 if (logic_cc
&& shift
)
1320 gen_movl_T1_reg(s
, rm
);
1321 shiftop
= (insn
>> 5) & 3;
1322 if (!(insn
& (1 << 4))) {
1323 shift
= (insn
>> 7) & 0x1f;
1326 gen_shift_T1_im_cc
[shiftop
](shift
);
1328 gen_shift_T1_im
[shiftop
](shift
);
1330 } else if (shiftop
!= 0) {
1332 gen_shift_T1_0_cc
[shiftop
]();
1334 gen_shift_T1_0
[shiftop
]();
1338 rs
= (insn
>> 8) & 0xf;
1339 gen_movl_T0_reg(s
, rs
);
1341 gen_shift_T1_T0_cc
[shiftop
]();
1343 gen_shift_T1_T0
[shiftop
]();
1347 if (op1
!= 0x0f && op1
!= 0x0d) {
1348 rn
= (insn
>> 16) & 0xf;
1349 gen_movl_T0_reg(s
, rn
);
1351 rd
= (insn
>> 12) & 0xf;
1354 gen_op_andl_T0_T1();
1355 gen_movl_reg_T0(s
, rd
);
1357 gen_op_logic_T0_cc();
1360 gen_op_xorl_T0_T1();
1361 gen_movl_reg_T0(s
, rd
);
1363 gen_op_logic_T0_cc();
1366 if (set_cc
&& rd
== 15) {
1367 /* SUBS r15, ... is used for exception return. */
1370 gen_op_subl_T0_T1_cc();
1371 gen_exception_return(s
);
1374 gen_op_subl_T0_T1_cc();
1376 gen_op_subl_T0_T1();
1377 gen_movl_reg_T0(s
, rd
);
1382 gen_op_rsbl_T0_T1_cc();
1384 gen_op_rsbl_T0_T1();
1385 gen_movl_reg_T0(s
, rd
);
1389 gen_op_addl_T0_T1_cc();
1391 gen_op_addl_T0_T1();
1392 gen_movl_reg_T0(s
, rd
);
1396 gen_op_adcl_T0_T1_cc();
1398 gen_op_adcl_T0_T1();
1399 gen_movl_reg_T0(s
, rd
);
1403 gen_op_sbcl_T0_T1_cc();
1405 gen_op_sbcl_T0_T1();
1406 gen_movl_reg_T0(s
, rd
);
1410 gen_op_rscl_T0_T1_cc();
1412 gen_op_rscl_T0_T1();
1413 gen_movl_reg_T0(s
, rd
);
1417 gen_op_andl_T0_T1();
1418 gen_op_logic_T0_cc();
1423 gen_op_xorl_T0_T1();
1424 gen_op_logic_T0_cc();
1429 gen_op_subl_T0_T1_cc();
1434 gen_op_addl_T0_T1_cc();
1439 gen_movl_reg_T0(s
, rd
);
1441 gen_op_logic_T0_cc();
1444 if (logic_cc
&& rd
== 15) {
1445 /* MOVS r15, ... is used for exception return. */
1448 gen_op_movl_T0_T1();
1449 gen_exception_return(s
);
1451 gen_movl_reg_T1(s
, rd
);
1453 gen_op_logic_T1_cc();
1457 gen_op_bicl_T0_T1();
1458 gen_movl_reg_T0(s
, rd
);
1460 gen_op_logic_T0_cc();
1465 gen_movl_reg_T1(s
, rd
);
1467 gen_op_logic_T1_cc();
1471 /* other instructions */
1472 op1
= (insn
>> 24) & 0xf;
1476 /* multiplies, extra load/stores */
1477 sh
= (insn
>> 5) & 3;
1480 rd
= (insn
>> 16) & 0xf;
1481 rn
= (insn
>> 12) & 0xf;
1482 rs
= (insn
>> 8) & 0xf;
1484 if (((insn
>> 22) & 3) == 0) {
1486 gen_movl_T0_reg(s
, rs
);
1487 gen_movl_T1_reg(s
, rm
);
1489 if (insn
& (1 << 21)) {
1490 gen_movl_T1_reg(s
, rn
);
1491 gen_op_addl_T0_T1();
1493 if (insn
& (1 << 20))
1494 gen_op_logic_T0_cc();
1495 gen_movl_reg_T0(s
, rd
);
1498 gen_movl_T0_reg(s
, rs
);
1499 gen_movl_T1_reg(s
, rm
);
1500 if (insn
& (1 << 22))
1501 gen_op_imull_T0_T1();
1503 gen_op_mull_T0_T1();
1504 if (insn
& (1 << 21)) /* mult accumulate */
1505 gen_op_addq_T0_T1(rn
, rd
);
1506 if (!(insn
& (1 << 23))) { /* double accumulate */
1508 gen_op_addq_lo_T0_T1(rn
);
1509 gen_op_addq_lo_T0_T1(rd
);
1511 if (insn
& (1 << 20))
1513 gen_movl_reg_T0(s
, rn
);
1514 gen_movl_reg_T1(s
, rd
);
1517 rn
= (insn
>> 16) & 0xf;
1518 rd
= (insn
>> 12) & 0xf;
1519 if (insn
& (1 << 23)) {
1520 /* load/store exclusive */
1523 /* SWP instruction */
1526 gen_movl_T0_reg(s
, rm
);
1527 gen_movl_T1_reg(s
, rn
);
1528 if (insn
& (1 << 22)) {
1533 gen_movl_reg_T0(s
, rd
);
1538 /* Misc load/store */
1539 rn
= (insn
>> 16) & 0xf;
1540 rd
= (insn
>> 12) & 0xf;
1541 gen_movl_T1_reg(s
, rn
);
1542 if (insn
& (1 << 24))
1543 gen_add_datah_offset(s
, insn
, 0);
1545 if (insn
& (1 << 20)) {
1559 gen_movl_reg_T0(s
, rd
);
1560 } else if (sh
& 2) {
1564 gen_movl_T0_reg(s
, rd
);
1566 gen_op_addl_T1_im(4);
1567 gen_movl_T0_reg(s
, rd
+ 1);
1572 gen_movl_reg_T0(s
, rd
);
1573 gen_op_addl_T1_im(4);
1575 gen_movl_reg_T0(s
, rd
+ 1);
1577 address_offset
= -4;
1580 gen_movl_T0_reg(s
, rd
);
1583 if (!(insn
& (1 << 24))) {
1584 gen_add_datah_offset(s
, insn
, address_offset
);
1585 gen_movl_reg_T1(s
, rn
);
1586 } else if (insn
& (1 << 21)) {
1588 gen_op_addl_T1_im(address_offset
);
1589 gen_movl_reg_T1(s
, rn
);
/* Check for undefined extension instructions
 * per the ARM Bible, i.e.:
 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
 */
sh = (0xf << 20) | (0xf << 4);
1602 if (op1
== 0x7 && ((insn
& sh
) == sh
))
1606 /* load/store byte/word */
1607 rn
= (insn
>> 16) & 0xf;
1608 rd
= (insn
>> 12) & 0xf;
1609 gen_movl_T1_reg(s
, rn
);
1610 i
= (IS_USER(s
) || (insn
& 0x01200000) == 0x00200000);
1611 if (insn
& (1 << 24))
1612 gen_add_data_offset(s
, insn
);
1613 if (insn
& (1 << 20)) {
1615 #if defined(CONFIG_USER_ONLY)
1616 if (insn
& (1 << 22))
1621 if (insn
& (1 << 22)) {
1625 gen_op_ldub_kernel();
1630 gen_op_ldl_kernel();
1636 gen_movl_reg_T0(s
, rd
);
1639 gen_movl_T0_reg(s
, rd
);
1640 #if defined(CONFIG_USER_ONLY)
1641 if (insn
& (1 << 22))
1646 if (insn
& (1 << 22)) {
1650 gen_op_stb_kernel();
1655 gen_op_stl_kernel();
1659 if (!(insn
& (1 << 24))) {
1660 gen_add_data_offset(s
, insn
);
1661 gen_movl_reg_T1(s
, rn
);
1662 } else if (insn
& (1 << 21))
1663 gen_movl_reg_T1(s
, rn
); {
1669 int j
, n
, user
, loaded_base
;
1670 /* load/store multiple words */
1671 /* XXX: store correct base if write back */
1673 if (insn
& (1 << 22)) {
1675 goto illegal_op
; /* only usable in supervisor mode */
1677 if ((insn
& (1 << 15)) == 0)
1680 rn
= (insn
>> 16) & 0xf;
1681 gen_movl_T1_reg(s
, rn
);
1683 /* compute total size */
1687 if (insn
& (1 << i
))
1690 /* XXX: test invalid n == 0 case ? */
1691 if (insn
& (1 << 23)) {
1692 if (insn
& (1 << 24)) {
1694 gen_op_addl_T1_im(4);
1696 /* post increment */
1699 if (insn
& (1 << 24)) {
1701 gen_op_addl_T1_im(-(n
* 4));
1703 /* post decrement */
1705 gen_op_addl_T1_im(-((n
- 1) * 4));
1710 if (insn
& (1 << i
)) {
1711 if (insn
& (1 << 20)) {
1717 gen_op_movl_user_T0(i
);
1718 } else if (i
== rn
) {
1719 gen_op_movl_T2_T0();
1722 gen_movl_reg_T0(s
, i
);
1727 /* special case: r15 = PC + 12 */
1728 val
= (long)s
->pc
+ 8;
1729 gen_op_movl_TN_im
[0](val
);
1731 gen_op_movl_T0_user(i
);
1733 gen_movl_T0_reg(s
, i
);
1738 /* no need to add after the last transfer */
1740 gen_op_addl_T1_im(4);
1743 if (insn
& (1 << 21)) {
1745 if (insn
& (1 << 23)) {
1746 if (insn
& (1 << 24)) {
1749 /* post increment */
1750 gen_op_addl_T1_im(4);
1753 if (insn
& (1 << 24)) {
1756 gen_op_addl_T1_im(-((n
- 1) * 4));
1758 /* post decrement */
1759 gen_op_addl_T1_im(-(n
* 4));
1762 gen_movl_reg_T1(s
, rn
);
1765 gen_op_movl_T0_T2();
1766 gen_movl_reg_T0(s
, rn
);
1768 if ((insn
& (1 << 22)) && !user
) {
1769 /* Restore CPSR from SPSR. */
1770 gen_op_movl_T0_spsr();
1771 gen_op_movl_cpsr_T0(0xffffffff);
1772 s
->is_jmp
= DISAS_UPDATE
;
1781 /* branch (and link) */
1782 val
= (int32_t)s
->pc
;
1783 if (insn
& (1 << 24)) {
1784 gen_op_movl_T0_im(val
);
1785 gen_op_movl_reg_TN
[0][14]();
1787 offset
= (((int32_t)insn
<< 8) >> 8);
1788 val
+= (offset
<< 2) + 4;
1796 op1
= (insn
>> 8) & 0xf;
1800 if (disas_vfp_insn (env
, s
, insn
))
1804 if (disas_cp15_insn (s
, insn
))
1808 /* unknown coprocessor. */
1814 gen_op_movl_T0_im((long)s
->pc
);
1815 gen_op_movl_reg_TN
[0][15]();
1817 s
->is_jmp
= DISAS_JUMP
;
1821 gen_op_movl_T0_im((long)s
->pc
- 4);
1822 gen_op_movl_reg_TN
[0][15]();
1823 gen_op_undef_insn();
1824 s
->is_jmp
= DISAS_JUMP
;
1830 static void disas_thumb_insn(DisasContext
*s
)
1832 uint32_t val
, insn
, op
, rm
, rn
, rd
, shift
, cond
;
1836 insn
= lduw_code(s
->pc
);
1839 switch (insn
>> 12) {
1842 op
= (insn
>> 11) & 3;
1845 rn
= (insn
>> 3) & 7;
1846 gen_movl_T0_reg(s
, rn
);
1847 if (insn
& (1 << 10)) {
1849 gen_op_movl_T1_im((insn
>> 6) & 7);
1852 rm
= (insn
>> 6) & 7;
1853 gen_movl_T1_reg(s
, rm
);
1855 if (insn
& (1 << 9))
1856 gen_op_subl_T0_T1_cc();
1858 gen_op_addl_T0_T1_cc();
1859 gen_movl_reg_T0(s
, rd
);
1861 /* shift immediate */
1862 rm
= (insn
>> 3) & 7;
1863 shift
= (insn
>> 6) & 0x1f;
1864 gen_movl_T0_reg(s
, rm
);
1865 gen_shift_T0_im_thumb
[op
](shift
);
1866 gen_movl_reg_T0(s
, rd
);
1870 /* arithmetic large immediate */
1871 op
= (insn
>> 11) & 3;
1872 rd
= (insn
>> 8) & 0x7;
1874 gen_op_movl_T0_im(insn
& 0xff);
1876 gen_movl_T0_reg(s
, rd
);
1877 gen_op_movl_T1_im(insn
& 0xff);
1881 gen_op_logic_T0_cc();
1884 gen_op_subl_T0_T1_cc();
1887 gen_op_addl_T0_T1_cc();
1890 gen_op_subl_T0_T1_cc();
1894 gen_movl_reg_T0(s
, rd
);
1897 if (insn
& (1 << 11)) {
1898 rd
= (insn
>> 8) & 7;
1899 /* load pc-relative. Bit 1 of PC is ignored. */
1900 val
= s
->pc
+ 2 + ((insn
& 0xff) * 4);
1901 val
&= ~(uint32_t)2;
1902 gen_op_movl_T1_im(val
);
1904 gen_movl_reg_T0(s
, rd
);
1907 if (insn
& (1 << 10)) {
1908 /* data processing extended or blx */
1909 rd
= (insn
& 7) | ((insn
>> 4) & 8);
1910 rm
= (insn
>> 3) & 0xf;
1911 op
= (insn
>> 8) & 3;
1914 gen_movl_T0_reg(s
, rd
);
1915 gen_movl_T1_reg(s
, rm
);
1916 gen_op_addl_T0_T1();
1917 gen_movl_reg_T0(s
, rd
);
1920 gen_movl_T0_reg(s
, rd
);
1921 gen_movl_T1_reg(s
, rm
);
1922 gen_op_subl_T0_T1_cc();
1924 case 2: /* mov/cpy */
1925 gen_movl_T0_reg(s
, rm
);
1926 gen_movl_reg_T0(s
, rd
);
1928 case 3:/* branch [and link] exchange thumb register */
1929 if (insn
& (1 << 7)) {
1930 val
= (uint32_t)s
->pc
| 1;
1931 gen_op_movl_T1_im(val
);
1932 gen_movl_reg_T1(s
, 14);
1934 gen_movl_T0_reg(s
, rm
);
1941 /* data processing register */
1943 rm
= (insn
>> 3) & 7;
1944 op
= (insn
>> 6) & 0xf;
1945 if (op
== 2 || op
== 3 || op
== 4 || op
== 7) {
1946 /* the shift/rotate ops want the operands backwards */
1955 if (op
== 9) /* neg */
1956 gen_op_movl_T0_im(0);
1957 else if (op
!= 0xf) /* mvn doesn't read its first operand */
1958 gen_movl_T0_reg(s
, rd
);
1960 gen_movl_T1_reg(s
, rm
);
1963 gen_op_andl_T0_T1();
1964 gen_op_logic_T0_cc();
1967 gen_op_xorl_T0_T1();
1968 gen_op_logic_T0_cc();
1971 gen_op_shll_T1_T0_cc();
1972 gen_op_logic_T1_cc();
1975 gen_op_shrl_T1_T0_cc();
1976 gen_op_logic_T1_cc();
1979 gen_op_sarl_T1_T0_cc();
1980 gen_op_logic_T1_cc();
1983 gen_op_adcl_T0_T1_cc();
1986 gen_op_sbcl_T0_T1_cc();
1989 gen_op_rorl_T1_T0_cc();
1990 gen_op_logic_T1_cc();
1993 gen_op_andl_T0_T1();
1994 gen_op_logic_T0_cc();
1998 gen_op_subl_T0_T1_cc();
2001 gen_op_subl_T0_T1_cc();
2005 gen_op_addl_T0_T1_cc();
2010 gen_op_logic_T0_cc();
2013 gen_op_mull_T0_T1();
2014 gen_op_logic_T0_cc();
2017 gen_op_bicl_T0_T1();
2018 gen_op_logic_T0_cc();
2022 gen_op_logic_T1_cc();
2029 gen_movl_reg_T1(s
, rm
);
2031 gen_movl_reg_T0(s
, rd
);
2036 /* load/store register offset. */
2038 rn
= (insn
>> 3) & 7;
2039 rm
= (insn
>> 6) & 7;
2040 op
= (insn
>> 9) & 7;
2041 gen_movl_T1_reg(s
, rn
);
2042 gen_movl_T2_reg(s
, rm
);
2043 gen_op_addl_T1_T2();
2045 if (op
< 3) /* store */
2046 gen_movl_T0_reg(s
, rd
);
2074 if (op
>= 3) /* load */
2075 gen_movl_reg_T0(s
, rd
);
2079 /* load/store word immediate offset */
2081 rn
= (insn
>> 3) & 7;
2082 gen_movl_T1_reg(s
, rn
);
2083 val
= (insn
>> 4) & 0x7c;
2084 gen_op_movl_T2_im(val
);
2085 gen_op_addl_T1_T2();
2087 if (insn
& (1 << 11)) {
2090 gen_movl_reg_T0(s
, rd
);
2093 gen_movl_T0_reg(s
, rd
);
2099 /* load/store byte immediate offset */
2101 rn
= (insn
>> 3) & 7;
2102 gen_movl_T1_reg(s
, rn
);
2103 val
= (insn
>> 6) & 0x1f;
2104 gen_op_movl_T2_im(val
);
2105 gen_op_addl_T1_T2();
2107 if (insn
& (1 << 11)) {
2110 gen_movl_reg_T0(s
, rd
);
2113 gen_movl_T0_reg(s
, rd
);
2119 /* load/store halfword immediate offset */
2121 rn
= (insn
>> 3) & 7;
2122 gen_movl_T1_reg(s
, rn
);
2123 val
= (insn
>> 5) & 0x3e;
2124 gen_op_movl_T2_im(val
);
2125 gen_op_addl_T1_T2();
2127 if (insn
& (1 << 11)) {
2130 gen_movl_reg_T0(s
, rd
);
2133 gen_movl_T0_reg(s
, rd
);
2139 /* load/store from stack */
2140 rd
= (insn
>> 8) & 7;
2141 gen_movl_T1_reg(s
, 13);
2142 val
= (insn
& 0xff) * 4;
2143 gen_op_movl_T2_im(val
);
2144 gen_op_addl_T1_T2();
2146 if (insn
& (1 << 11)) {
2149 gen_movl_reg_T0(s
, rd
);
2152 gen_movl_T0_reg(s
, rd
);
2158 /* add to high reg */
2159 rd
= (insn
>> 8) & 7;
2160 if (insn
& (1 << 11)) {
2162 gen_movl_T0_reg(s
, 13);
2164 /* PC. bit 1 is ignored. */
2165 gen_op_movl_T0_im((s
->pc
+ 2) & ~(uint32_t)2);
2167 val
= (insn
& 0xff) * 4;
2168 gen_op_movl_T1_im(val
);
2169 gen_op_addl_T0_T1();
2170 gen_movl_reg_T0(s
, rd
);
2175 op
= (insn
>> 8) & 0xf;
2178 /* adjust stack pointer */
2179 gen_movl_T1_reg(s
, 13);
2180 val
= (insn
& 0x7f) * 4;
2181 if (insn
& (1 << 7))
2182 val
= -(int32_t)val
;
2183 gen_op_movl_T2_im(val
);
2184 gen_op_addl_T1_T2();
2185 gen_movl_reg_T1(s
, 13);
2188 case 4: case 5: case 0xc: case 0xd:
2190 gen_movl_T1_reg(s
, 13);
2191 if (insn
& (1 << 8))
2195 for (i
= 0; i
< 8; i
++) {
2196 if (insn
& (1 << i
))
2199 if ((insn
& (1 << 11)) == 0) {
2200 gen_op_movl_T2_im(-offset
);
2201 gen_op_addl_T1_T2();
2203 gen_op_movl_T2_im(4);
2204 for (i
= 0; i
< 8; i
++) {
2205 if (insn
& (1 << i
)) {
2206 if (insn
& (1 << 11)) {
2209 gen_movl_reg_T0(s
, i
);
2212 gen_movl_T0_reg(s
, i
);
2215 /* advance to the next address. */
2216 gen_op_addl_T1_T2();
2219 if (insn
& (1 << 8)) {
2220 if (insn
& (1 << 11)) {
2223 /* don't set the pc until the rest of the instruction
2227 gen_movl_T0_reg(s
, 14);
2230 gen_op_addl_T1_T2();
2232 if ((insn
& (1 << 11)) == 0) {
2233 gen_op_movl_T2_im(-offset
);
2234 gen_op_addl_T1_T2();
2236 /* write back the new stack pointer */
2237 gen_movl_reg_T1(s
, 13);
2238 /* set the new PC value */
2239 if ((insn
& 0x0900) == 0x0900)
2243 case 0xe: /* bkpt */
2244 gen_op_movl_T0_im((long)s
->pc
- 2);
2245 gen_op_movl_reg_TN
[0][15]();
2247 s
->is_jmp
= DISAS_JUMP
;
2256 /* load/store multiple */
2257 rn
= (insn
>> 8) & 0x7;
2258 gen_movl_T1_reg(s
, rn
);
2259 gen_op_movl_T2_im(4);
2260 for (i
= 0; i
< 8; i
++) {
2261 if (insn
& (1 << i
)) {
2262 if (insn
& (1 << 11)) {
2265 gen_movl_reg_T0(s
, i
);
2268 gen_movl_T0_reg(s
, i
);
2271 /* advance to the next address */
2272 gen_op_addl_T1_T2();
2275 /* Base register writeback. */
2276 if ((insn
& (1 << rn
)) == 0)
2277 gen_movl_reg_T1(s
, rn
);
2281 /* conditional branch or swi */
2282 cond
= (insn
>> 8) & 0xf;
2288 gen_op_movl_T0_im((long)s
->pc
| 1);
2289 /* Don't set r15. */
2290 gen_op_movl_reg_TN
[0][15]();
2292 s
->is_jmp
= DISAS_JUMP
;
2295 /* generate a conditional jump to next instruction */
2296 s
->condlabel
= gen_new_label();
2297 gen_test_cc
[cond
^ 1](s
->condlabel
);
2299 //gen_test_cc[cond ^ 1]((long)s->tb, (long)s->pc);
2300 //s->is_jmp = DISAS_JUMP_NEXT;
2301 gen_movl_T1_reg(s
, 15);
2303 /* jump to the offset */
2304 val
= (uint32_t)s
->pc
+ 2;
2305 offset
= ((int32_t)insn
<< 24) >> 24;
2311 /* unconditional branch */
2312 if (insn
& (1 << 11)) {
2313 /* Second half of blx. */
2314 offset
= ((insn
& 0x7ff) << 1);
2315 gen_movl_T0_reg(s
, 14);
2316 gen_op_movl_T1_im(offset
);
2317 gen_op_addl_T0_T1();
2318 gen_op_movl_T1_im(0xfffffffc);
2319 gen_op_andl_T0_T1();
2321 val
= (uint32_t)s
->pc
;
2322 gen_op_movl_T1_im(val
| 1);
2323 gen_movl_reg_T1(s
, 14);
2327 val
= (uint32_t)s
->pc
;
2328 offset
= ((int32_t)insn
<< 21) >> 21;
2329 val
+= (offset
<< 1) + 2;
2334 /* branch and link [and switch to arm] */
2335 if ((s
->pc
& ~TARGET_PAGE_MASK
) == 0) {
2336 /* Instruction spans a page boundary. Implement it as two
2337 16-bit instructions in case the second half causes an
2339 offset
= ((int32_t)insn
<< 21) >> 9;
2340 val
= s
->pc
+ 2 + offset
;
2341 gen_op_movl_T0_im(val
);
2342 gen_movl_reg_T0(s
, 14);
2345 if (insn
& (1 << 11)) {
2346 /* Second half of bl. */
2347 offset
= ((insn
& 0x7ff) << 1) | 1;
2348 gen_movl_T0_reg(s
, 14);
2349 gen_op_movl_T1_im(offset
);
2350 gen_op_addl_T0_T1();
2352 val
= (uint32_t)s
->pc
;
2353 gen_op_movl_T1_im(val
| 1);
2354 gen_movl_reg_T1(s
, 14);
2358 offset
= ((int32_t)insn
<< 21) >> 10;
2359 insn
= lduw_code(s
->pc
);
2360 offset
|= insn
& 0x7ff;
2362 val
= (uint32_t)s
->pc
+ 2;
2363 gen_op_movl_T1_im(val
| 1);
2364 gen_movl_reg_T1(s
, 14);
2367 if (insn
& (1 << 12)) {
2372 val
&= ~(uint32_t)2;
2373 gen_op_movl_T0_im(val
);
2379 gen_op_movl_T0_im((long)s
->pc
- 2);
2380 gen_op_movl_reg_TN
[0][15]();
2381 gen_op_undef_insn();
2382 s
->is_jmp
= DISAS_JUMP
;
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
   basic block 'tb'.  If search_pc is TRUE, also generate PC
   information for each intermediate instruction. */
static inline int gen_intermediate_code_internal(CPUState *env,
                                                 TranslationBlock *tb,

    DisasContext dc1, *dc = &dc1;
    uint16_t *gen_opc_end;
    target_ulong pc_start;
    uint32_t next_page_start;

    /* generate intermediate code */
    gen_opc_ptr = gen_opc_buf;
    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
    gen_opparam_ptr = gen_opparam_buf;

    dc->is_jmp = DISAS_NEXT;
2409 dc
->singlestep_enabled
= env
->singlestep_enabled
;
2411 dc
->thumb
= env
->thumb
;
2412 #if !defined(CONFIG_USER_ONLY)
2413 dc
->user
= (env
->uncached_cpsr
& 0x1f) == ARM_CPU_MODE_USR
;
2415 next_page_start
= (pc_start
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
;
2419 if (env
->nb_breakpoints
> 0) {
2420 for(j
= 0; j
< env
->nb_breakpoints
; j
++) {
2421 if (env
->breakpoints
[j
] == dc
->pc
) {
2422 gen_op_movl_T0_im((long)dc
->pc
);
2423 gen_op_movl_reg_TN
[0][15]();
2425 dc
->is_jmp
= DISAS_JUMP
;
2431 j
= gen_opc_ptr
- gen_opc_buf
;
2435 gen_opc_instr_start
[lj
++] = 0;
2437 gen_opc_pc
[lj
] = dc
->pc
;
2438 gen_opc_instr_start
[lj
] = 1;
2442 disas_thumb_insn(dc
);
2444 disas_arm_insn(env
, dc
);
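/* If the instruction just translated was conditional and did not end
   the block itself, the condlabel created before decoding is bound
   here, so a failed condition simply falls through to the next
   instruction. */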
2446 if (dc
->condjmp
&& !dc
->is_jmp
) {
2447 gen_set_label(dc
->condlabel
);
/* Translation stops when a conditional branch is encountered.
 * Otherwise the subsequent code could get translated several times.
 * Also stop translation when a page boundary is reached.  This
 * ensures prefetch aborts occur at the right place. */
} while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
         !env->singlestep_enabled &&
         dc->pc < next_page_start);
2457 /* At this stage dc->condjmp will only be set when the skipped
2458 * instruction was a conditional branch, and the PC has already been
2460 if (__builtin_expect(env
->singlestep_enabled
, 0)) {
2461 /* Make sure the pc is updated, and raise a debug exception. */
2464 gen_set_label(dc
->condlabel
);
2466 if (dc
->condjmp
|| !dc
->is_jmp
) {
2467 gen_op_movl_T0_im((long)dc
->pc
);
2468 gen_op_movl_reg_TN
[0][15]();
2473 switch(dc
->is_jmp
) {
2475 gen_goto_tb(dc
, 1, dc
->pc
);
2480 /* indicate that the hash table must be used to find the next TB */
2485 /* nothing more to generate */
2489 gen_set_label(dc
->condlabel
);
2490 gen_goto_tb(dc
, 1, dc
->pc
);
2494 *gen_opc_ptr
= INDEX_op_end
;
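/* The micro-op stream is terminated with INDEX_op_end; when tracing is
   enabled the guest assembly and the generated ops are dumped to the
   log before the TB size is recorded. */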
2497 if (loglevel
& CPU_LOG_TB_IN_ASM
) {
2498 fprintf(logfile
, "----------------\n");
2499 fprintf(logfile
, "IN: %s\n", lookup_symbol(pc_start
));
2500 target_disas(logfile
, pc_start
, dc
->pc
- pc_start
, env
->thumb
);
2501 fprintf(logfile
, "\n");
2502 if (loglevel
& (CPU_LOG_TB_OP
)) {
2503 fprintf(logfile
, "OP:\n");
2504 dump_ops(gen_opc_buf
, gen_opparam_buf
);
2505 fprintf(logfile
, "\n");
2510 j
= gen_opc_ptr
- gen_opc_buf
;
2513 gen_opc_instr_start
[lj
++] = 0;
2516 tb
->size
= dc
->pc
- pc_start
;
2521 int gen_intermediate_code(CPUState
*env
, TranslationBlock
*tb
)
2523 return gen_intermediate_code_internal(env
, tb
, 0);
2526 int gen_intermediate_code_pc(CPUState
*env
, TranslationBlock
*tb
)
2528 return gen_intermediate_code_internal(env
, tb
, 1);
2531 static const char *cpu_mode_names
[16] = {
2532 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
2533 "???", "???", "???", "und", "???", "???", "???", "sys"
2535 void cpu_dump_state(CPUState
*env
, FILE *f
,
2536 int (*cpu_fprintf
)(FILE *f
, const char *fmt
, ...),
2545 /* ??? This assumes float64 and double have the same layout.
2546 Oh well, it's only debug dumps. */
2554 cpu_fprintf(f
, "R%02d=%08x", i
, env
->regs
[i
]);
2556 cpu_fprintf(f
, "\n");
2558 cpu_fprintf(f
, " ");
2560 psr
= cpsr_read(env
);
2561 cpu_fprintf(f
, "PSR=%08x %c%c%c%c %c %s%d %x\n",
2563 psr
& (1 << 31) ? 'N' : '-',
2564 psr
& (1 << 30) ? 'Z' : '-',
2565 psr
& (1 << 29) ? 'C' : '-',
2566 psr
& (1 << 28) ? 'V' : '-',
2567 psr
& CPSR_T
? 'T' : 'A',
2568 cpu_mode_names
[psr
& 0xf], (psr
& 0x10) ? 32 : 26);
2570 for (i
= 0; i
< 16; i
++) {
2571 d
.d
= env
->vfp
.regs
[i
];
2575 cpu_fprintf(f
, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
2576 i
* 2, (int)s0
.i
, s0
.s
,
2577 i
* 2 + 1, (int)s1
.i
, s1
.s
,
2578 i
, (int)(uint32_t)d
.l
.upper
, (int)(uint32_t)d
.l
.lower
,
2581 cpu_fprintf(f
, "FPSCR: %08x\n", (int)env
->vfp
.xregs
[ARM_VFP_FPSCR
]);