4 * Copyright (c) 2005 Samuel Tardieu
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 //#define SH4_SINGLE_STEP
24 #include "disas/disas.h"
/* Per-TB decoder state for the SH4 front end.
   NOTE(review): this extract is missing several members that the rest of
   the file dereferences via ctx-> (pc, flags, bstate, memidx, delayed_pc,
   opcode, ...) — confirm against the full upstream source. */
typedef struct DisasContext
{
    struct TranslationBlock *tb;   /* TB currently being translated */
    int singlestep_enabled;        /* nonzero: emit a debug trap after each insn */

#if defined(CONFIG_USER_ONLY)
#define IS_USER(ctx) 1             /* user-mode emulation is always unprivileged */
/* NOTE(review): the #else/#endif of this conditional were lost in
   extraction; this variant is the system-emulation case, where SR.MD
   distinguishes privileged (MD=1) from user mode. */
#define IS_USER(ctx) (!(ctx->flags & SR_MD))

/* Reasons for ending translation, recorded in ctx->bstate.
   NOTE(review): the enum opener/closer lines are missing from this extract. */
    BS_NONE = 0,   /* fell off the end of the TB: no branch or exception seen */
    BS_STOP = 1,   /* stop translation for any reason (e.g. mode-changing insn) */
    BS_BRANCH = 2, /* a branch condition was reached */
    BS_EXCP = 3,   /* an exception condition was reached */
/* global register indexes: TCG globals mirroring CPUSH4State fields,
   allocated once by sh4_translate_init(). */
static TCGv_ptr cpu_env;                 /* pointer to the CPU state struct */
static TCGv cpu_gregs[24];               /* R0..R15 plus the R0..R7 shadow bank */
static TCGv cpu_pc, cpu_sr, cpu_ssr, cpu_spc, cpu_gbr;
static TCGv cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl;
static TCGv cpu_pr, cpu_fpscr, cpu_fpul, cpu_ldst;
static TCGv cpu_fregs[32];               /* FP registers, two banks of 16 */

/* internal register indexes: emulation-only state, not architected */
static TCGv cpu_flags, cpu_delayed_pc;

/* per-opcode hflags snapshot used when restoring state from a TB */
static uint32_t gen_opc_hflags[OPC_BUF_SIZE];
72 #include "exec/gen-icount.h"
74 void sh4_translate_init(void)
77 static int done_init
= 0;
78 static const char * const gregnames
[24] = {
79 "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
80 "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
81 "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
82 "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
83 "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
85 static const char * const fregnames
[32] = {
86 "FPR0_BANK0", "FPR1_BANK0", "FPR2_BANK0", "FPR3_BANK0",
87 "FPR4_BANK0", "FPR5_BANK0", "FPR6_BANK0", "FPR7_BANK0",
88 "FPR8_BANK0", "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
89 "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
90 "FPR0_BANK1", "FPR1_BANK1", "FPR2_BANK1", "FPR3_BANK1",
91 "FPR4_BANK1", "FPR5_BANK1", "FPR6_BANK1", "FPR7_BANK1",
92 "FPR8_BANK1", "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
93 "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
99 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
101 for (i
= 0; i
< 24; i
++)
102 cpu_gregs
[i
] = tcg_global_mem_new_i32(TCG_AREG0
,
103 offsetof(CPUSH4State
, gregs
[i
]),
106 cpu_pc
= tcg_global_mem_new_i32(TCG_AREG0
,
107 offsetof(CPUSH4State
, pc
), "PC");
108 cpu_sr
= tcg_global_mem_new_i32(TCG_AREG0
,
109 offsetof(CPUSH4State
, sr
), "SR");
110 cpu_ssr
= tcg_global_mem_new_i32(TCG_AREG0
,
111 offsetof(CPUSH4State
, ssr
), "SSR");
112 cpu_spc
= tcg_global_mem_new_i32(TCG_AREG0
,
113 offsetof(CPUSH4State
, spc
), "SPC");
114 cpu_gbr
= tcg_global_mem_new_i32(TCG_AREG0
,
115 offsetof(CPUSH4State
, gbr
), "GBR");
116 cpu_vbr
= tcg_global_mem_new_i32(TCG_AREG0
,
117 offsetof(CPUSH4State
, vbr
), "VBR");
118 cpu_sgr
= tcg_global_mem_new_i32(TCG_AREG0
,
119 offsetof(CPUSH4State
, sgr
), "SGR");
120 cpu_dbr
= tcg_global_mem_new_i32(TCG_AREG0
,
121 offsetof(CPUSH4State
, dbr
), "DBR");
122 cpu_mach
= tcg_global_mem_new_i32(TCG_AREG0
,
123 offsetof(CPUSH4State
, mach
), "MACH");
124 cpu_macl
= tcg_global_mem_new_i32(TCG_AREG0
,
125 offsetof(CPUSH4State
, macl
), "MACL");
126 cpu_pr
= tcg_global_mem_new_i32(TCG_AREG0
,
127 offsetof(CPUSH4State
, pr
), "PR");
128 cpu_fpscr
= tcg_global_mem_new_i32(TCG_AREG0
,
129 offsetof(CPUSH4State
, fpscr
), "FPSCR");
130 cpu_fpul
= tcg_global_mem_new_i32(TCG_AREG0
,
131 offsetof(CPUSH4State
, fpul
), "FPUL");
133 cpu_flags
= tcg_global_mem_new_i32(TCG_AREG0
,
134 offsetof(CPUSH4State
, flags
), "_flags_");
135 cpu_delayed_pc
= tcg_global_mem_new_i32(TCG_AREG0
,
136 offsetof(CPUSH4State
, delayed_pc
),
138 cpu_ldst
= tcg_global_mem_new_i32(TCG_AREG0
,
139 offsetof(CPUSH4State
, ldst
), "_ldst_");
141 for (i
= 0; i
< 32; i
++)
142 cpu_fregs
[i
] = tcg_global_mem_new_i32(TCG_AREG0
,
143 offsetof(CPUSH4State
, fregs
[i
]),
146 /* register helpers */
153 void cpu_dump_state(CPUSH4State
* env
, FILE * f
,
154 int (*cpu_fprintf
) (FILE * f
, const char *fmt
, ...),
158 cpu_fprintf(f
, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
159 env
->pc
, env
->sr
, env
->pr
, env
->fpscr
);
160 cpu_fprintf(f
, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
161 env
->spc
, env
->ssr
, env
->gbr
, env
->vbr
);
162 cpu_fprintf(f
, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
163 env
->sgr
, env
->dbr
, env
->delayed_pc
, env
->fpul
);
164 for (i
= 0; i
< 24; i
+= 4) {
165 cpu_fprintf(f
, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
166 i
, env
->gregs
[i
], i
+ 1, env
->gregs
[i
+ 1],
167 i
+ 2, env
->gregs
[i
+ 2], i
+ 3, env
->gregs
[i
+ 3]);
169 if (env
->flags
& DELAY_SLOT
) {
170 cpu_fprintf(f
, "in delay slot (delayed_pc=0x%08x)\n",
172 } else if (env
->flags
& DELAY_SLOT_CONDITIONAL
) {
173 cpu_fprintf(f
, "in conditional delay slot (delayed_pc=0x%08x)\n",
/* Table of supported SH4 CPU models (matched by name in
   cpu_sh4_find_by_name).
   NOTE(review): extraction dropped most of this initializer — the
   per-entry braces, .name, and the version/identification fields of
   each entry are missing.  Only the fragments below survive; confirm
   the full table (SH7750R, SH7751R, SH7785) against upstream before
   relying on it. */
static sh4_def_t sh4_defs[] = {
    /* SH7750R */
    .id = SH_CPU_SH7750R,
    .features = SH_FEATURE_BCR3_AND_BCR4,
    /* SH7751R */
    .id = SH_CPU_SH7751R,
    .cvr = 0x00110000, /* Neutered caches, should be 0x20480000 */
    .features = SH_FEATURE_BCR3_AND_BCR4,
    /* SH7785 (SH4A core) */
    .features = SH_FEATURE_SH4A,
212 static const sh4_def_t
*cpu_sh4_find_by_name(const char *name
)
216 if (strcasecmp(name
, "any") == 0)
219 for (i
= 0; i
< ARRAY_SIZE(sh4_defs
); i
++)
220 if (strcasecmp(name
, sh4_defs
[i
].name
) == 0)
226 void sh4_cpu_list(FILE *f
, fprintf_function cpu_fprintf
)
230 for (i
= 0; i
< ARRAY_SIZE(sh4_defs
); i
++)
231 (*cpu_fprintf
)(f
, "%s\n", sh4_defs
[i
].name
);
234 static void cpu_register(CPUSH4State
*env
, const sh4_def_t
*def
)
242 SuperHCPU
*cpu_sh4_init(const char *cpu_model
)
246 const sh4_def_t
*def
;
248 def
= cpu_sh4_find_by_name(cpu_model
);
251 cpu
= SUPERH_CPU(object_new(TYPE_SUPERH_CPU
));
253 env
->features
= def
->features
;
254 env
->cpu_model_str
= cpu_model
;
255 cpu_register(env
, def
);
257 object_property_set_bool(OBJECT(cpu
), true, "realized", NULL
);
262 static void gen_goto_tb(DisasContext
* ctx
, int n
, target_ulong dest
)
264 TranslationBlock
*tb
;
267 if ((tb
->pc
& TARGET_PAGE_MASK
) == (dest
& TARGET_PAGE_MASK
) &&
268 !ctx
->singlestep_enabled
) {
269 /* Use a direct jump if in same page and singlestep not enabled */
271 tcg_gen_movi_i32(cpu_pc
, dest
);
272 tcg_gen_exit_tb((tcg_target_long
)tb
+ n
);
274 tcg_gen_movi_i32(cpu_pc
, dest
);
275 if (ctx
->singlestep_enabled
)
276 gen_helper_debug(cpu_env
);
281 static void gen_jump(DisasContext
* ctx
)
283 if (ctx
->delayed_pc
== (uint32_t) - 1) {
284 /* Target is not statically known, it comes necessarily from a
285 delayed jump as immediate jump are conditinal jumps */
286 tcg_gen_mov_i32(cpu_pc
, cpu_delayed_pc
);
287 if (ctx
->singlestep_enabled
)
288 gen_helper_debug(cpu_env
);
291 gen_goto_tb(ctx
, 0, ctx
->delayed_pc
);
295 static inline void gen_branch_slot(uint32_t delayed_pc
, int t
)
298 int label
= gen_new_label();
299 tcg_gen_movi_i32(cpu_delayed_pc
, delayed_pc
);
301 tcg_gen_andi_i32(sr
, cpu_sr
, SR_T
);
302 tcg_gen_brcondi_i32(t
? TCG_COND_EQ
:TCG_COND_NE
, sr
, 0, label
);
303 tcg_gen_ori_i32(cpu_flags
, cpu_flags
, DELAY_SLOT_TRUE
);
304 gen_set_label(label
);
307 /* Immediate conditional jump (bt or bf) */
308 static void gen_conditional_jump(DisasContext
* ctx
,
309 target_ulong ift
, target_ulong ifnott
)
314 l1
= gen_new_label();
316 tcg_gen_andi_i32(sr
, cpu_sr
, SR_T
);
317 tcg_gen_brcondi_i32(TCG_COND_NE
, sr
, 0, l1
);
318 gen_goto_tb(ctx
, 0, ifnott
);
320 gen_goto_tb(ctx
, 1, ift
);
323 /* Delayed conditional jump (bt or bf) */
324 static void gen_delayed_conditional_jump(DisasContext
* ctx
)
329 l1
= gen_new_label();
331 tcg_gen_andi_i32(ds
, cpu_flags
, DELAY_SLOT_TRUE
);
332 tcg_gen_brcondi_i32(TCG_COND_NE
, ds
, 0, l1
);
333 gen_goto_tb(ctx
, 1, ctx
->pc
+ 2);
335 tcg_gen_andi_i32(cpu_flags
, cpu_flags
, ~DELAY_SLOT_TRUE
);
339 static inline void gen_cmp(int cond
, TCGv t0
, TCGv t1
)
344 tcg_gen_setcond_i32(cond
, t
, t1
, t0
);
345 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
346 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t
);
351 static inline void gen_cmp_imm(int cond
, TCGv t0
, int32_t imm
)
356 tcg_gen_setcondi_i32(cond
, t
, t0
, imm
);
357 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
358 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t
);
363 static inline void gen_store_flags(uint32_t flags
)
365 tcg_gen_andi_i32(cpu_flags
, cpu_flags
, DELAY_SLOT_TRUE
);
366 tcg_gen_ori_i32(cpu_flags
, cpu_flags
, flags
);
369 static inline void gen_copy_bit_i32(TCGv t0
, int p0
, TCGv t1
, int p1
)
371 TCGv tmp
= tcg_temp_new();
376 tcg_gen_andi_i32(tmp
, t1
, (1 << p1
));
377 tcg_gen_andi_i32(t0
, t0
, ~(1 << p0
));
379 tcg_gen_shri_i32(tmp
, tmp
, p1
- p0
);
381 tcg_gen_shli_i32(tmp
, tmp
, p0
- p1
);
382 tcg_gen_or_i32(t0
, t0
, tmp
);
387 static inline void gen_load_fpr64(TCGv_i64 t
, int reg
)
389 tcg_gen_concat_i32_i64(t
, cpu_fregs
[reg
+ 1], cpu_fregs
[reg
]);
392 static inline void gen_store_fpr64 (TCGv_i64 t
, int reg
)
394 TCGv_i32 tmp
= tcg_temp_new_i32();
395 tcg_gen_trunc_i64_i32(tmp
, t
);
396 tcg_gen_mov_i32(cpu_fregs
[reg
+ 1], tmp
);
397 tcg_gen_shri_i64(t
, t
, 32);
398 tcg_gen_trunc_i64_i32(tmp
, t
);
399 tcg_gen_mov_i32(cpu_fregs
[reg
], tmp
);
400 tcg_temp_free_i32(tmp
);
/* Fields of the 16-bit instruction word held in ctx->opcode.
   B<hi>_<lo> extracts bits hi..lo; an 's' suffix means sign-extended. */
#define B3_0 (ctx->opcode & 0xf)
#define B6_4 ((ctx->opcode >> 4) & 0x7)
#define B7_4 ((ctx->opcode >> 4) & 0xf)
#define B7_0 (ctx->opcode & 0xff)
#define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff))
#define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
  (ctx->opcode & 0xfff))
#define B11_8 ((ctx->opcode >> 8) & 0xf)
#define B15_12 ((ctx->opcode >> 12) & 0xf)

/* General-register accessors: R0..R7 are banked.  In privileged mode
   with SR.RB set the second bank (index + 16) is active; REG selects
   the active bank, ALTREG the inactive one. */
#define REG(x) ((x) < 8 && (ctx->flags & (SR_MD | SR_RB)) == (SR_MD | SR_RB) \
                ? (cpu_gregs[x + 16]) : (cpu_gregs[x]))

#define ALTREG(x) ((x) < 8 && (ctx->flags & (SR_MD | SR_RB)) != (SR_MD | SR_RB)\
                ? (cpu_gregs[x + 16]) : (cpu_gregs[x]))

/* FP register accessors: FPSCR.FR selects the active bank (XOR 0x10);
   XHACK remaps a register number for the extended (XD) register view. */
#define FREG(x) (ctx->flags & FPSCR_FR ? (x) ^ 0x10 : (x))
#define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe))
#define XREG(x) (ctx->flags & FPSCR_FR ? XHACK(x) ^ 0x10 : XHACK(x))
#define DREG(x) FREG(x) /* Assumes lsb of (x) is always 0 */

/* Guard macros used by the decoder to raise the appropriate exception
   and stop translation.
   NOTE(review): extraction dropped several continuation lines of these
   macros (opening/closing braces, the else branches, and the trailing
   return statements) — confirm the full bodies upstream; the surviving
   lines are kept verbatim below. */
#define CHECK_NOT_DELAY_SLOT \
  if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) \
      tcg_gen_movi_i32(cpu_pc, ctx->pc); \
      gen_helper_raise_slot_illegal_instruction(cpu_env); \
      ctx->bstate = BS_BRANCH;

#define CHECK_PRIVILEGED \
  if (IS_USER(ctx)) { \
      tcg_gen_movi_i32(cpu_pc, ctx->pc); \
      if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
         gen_helper_raise_slot_illegal_instruction(cpu_env); \
         gen_helper_raise_illegal_instruction(cpu_env); \
      ctx->bstate = BS_BRANCH;

#define CHECK_FPU_ENABLED \
  if (ctx->flags & SR_FD) { \
      tcg_gen_movi_i32(cpu_pc, ctx->pc); \
      if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
          gen_helper_raise_slot_fpu_disable(cpu_env); \
          gen_helper_raise_fpu_disable(cpu_env); \
      ctx->bstate = BS_BRANCH;
457 static void _decode_opc(DisasContext
* ctx
)
459 /* This code tries to make movcal emulation sufficiently
460 accurate for Linux purposes. This instruction writes
461 memory, and prior to that, always allocates a cache line.
462 It is used in two contexts:
463 - in memcpy, where data is copied in blocks, the first write
464 of to a block uses movca.l for performance.
465 - in arch/sh/mm/cache-sh4.c, movcal.l + ocbi combination is used
466 to flush the cache. Here, the data written by movcal.l is never
467 written to memory, and the data written is just bogus.
469 To simulate this, we simulate movcal.l, we store the value to memory,
470 but we also remember the previous content. If we see ocbi, we check
471 if movcal.l for that address was done previously. If so, the write should
472 not have hit the memory, so we restore the previous content.
473 When we see an instruction that is neither movca.l
474 nor ocbi, the previous content is discarded.
476 To optimize, we only try to flush stores when we're at the start of
477 TB, or if we already saw movca.l in this TB and did not flush stores
481 int opcode
= ctx
->opcode
& 0xf0ff;
482 if (opcode
!= 0x0093 /* ocbi */
483 && opcode
!= 0x00c3 /* movca.l */)
485 gen_helper_discard_movcal_backup(cpu_env
);
491 fprintf(stderr
, "Translating opcode 0x%04x\n", ctx
->opcode
);
494 switch (ctx
->opcode
) {
495 case 0x0019: /* div0u */
496 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~(SR_M
| SR_Q
| SR_T
));
498 case 0x000b: /* rts */
500 tcg_gen_mov_i32(cpu_delayed_pc
, cpu_pr
);
501 ctx
->flags
|= DELAY_SLOT
;
502 ctx
->delayed_pc
= (uint32_t) - 1;
504 case 0x0028: /* clrmac */
505 tcg_gen_movi_i32(cpu_mach
, 0);
506 tcg_gen_movi_i32(cpu_macl
, 0);
508 case 0x0048: /* clrs */
509 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_S
);
511 case 0x0008: /* clrt */
512 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
514 case 0x0038: /* ldtlb */
516 gen_helper_ldtlb(cpu_env
);
518 case 0x002b: /* rte */
521 tcg_gen_mov_i32(cpu_sr
, cpu_ssr
);
522 tcg_gen_mov_i32(cpu_delayed_pc
, cpu_spc
);
523 ctx
->flags
|= DELAY_SLOT
;
524 ctx
->delayed_pc
= (uint32_t) - 1;
526 case 0x0058: /* sets */
527 tcg_gen_ori_i32(cpu_sr
, cpu_sr
, SR_S
);
529 case 0x0018: /* sett */
530 tcg_gen_ori_i32(cpu_sr
, cpu_sr
, SR_T
);
532 case 0xfbfd: /* frchg */
533 tcg_gen_xori_i32(cpu_fpscr
, cpu_fpscr
, FPSCR_FR
);
534 ctx
->bstate
= BS_STOP
;
536 case 0xf3fd: /* fschg */
537 tcg_gen_xori_i32(cpu_fpscr
, cpu_fpscr
, FPSCR_SZ
);
538 ctx
->bstate
= BS_STOP
;
540 case 0x0009: /* nop */
542 case 0x001b: /* sleep */
544 tcg_gen_movi_i32(cpu_pc
, ctx
->pc
+ 2);
545 gen_helper_sleep(cpu_env
);
549 switch (ctx
->opcode
& 0xf000) {
550 case 0x1000: /* mov.l Rm,@(disp,Rn) */
552 TCGv addr
= tcg_temp_new();
553 tcg_gen_addi_i32(addr
, REG(B11_8
), B3_0
* 4);
554 tcg_gen_qemu_st32(REG(B7_4
), addr
, ctx
->memidx
);
558 case 0x5000: /* mov.l @(disp,Rm),Rn */
560 TCGv addr
= tcg_temp_new();
561 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
* 4);
562 tcg_gen_qemu_ld32s(REG(B11_8
), addr
, ctx
->memidx
);
566 case 0xe000: /* mov #imm,Rn */
567 tcg_gen_movi_i32(REG(B11_8
), B7_0s
);
569 case 0x9000: /* mov.w @(disp,PC),Rn */
571 TCGv addr
= tcg_const_i32(ctx
->pc
+ 4 + B7_0
* 2);
572 tcg_gen_qemu_ld16s(REG(B11_8
), addr
, ctx
->memidx
);
576 case 0xd000: /* mov.l @(disp,PC),Rn */
578 TCGv addr
= tcg_const_i32((ctx
->pc
+ 4 + B7_0
* 4) & ~3);
579 tcg_gen_qemu_ld32s(REG(B11_8
), addr
, ctx
->memidx
);
583 case 0x7000: /* add #imm,Rn */
584 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), B7_0s
);
586 case 0xa000: /* bra disp */
588 ctx
->delayed_pc
= ctx
->pc
+ 4 + B11_0s
* 2;
589 tcg_gen_movi_i32(cpu_delayed_pc
, ctx
->delayed_pc
);
590 ctx
->flags
|= DELAY_SLOT
;
592 case 0xb000: /* bsr disp */
594 tcg_gen_movi_i32(cpu_pr
, ctx
->pc
+ 4);
595 ctx
->delayed_pc
= ctx
->pc
+ 4 + B11_0s
* 2;
596 tcg_gen_movi_i32(cpu_delayed_pc
, ctx
->delayed_pc
);
597 ctx
->flags
|= DELAY_SLOT
;
601 switch (ctx
->opcode
& 0xf00f) {
602 case 0x6003: /* mov Rm,Rn */
603 tcg_gen_mov_i32(REG(B11_8
), REG(B7_4
));
605 case 0x2000: /* mov.b Rm,@Rn */
606 tcg_gen_qemu_st8(REG(B7_4
), REG(B11_8
), ctx
->memidx
);
608 case 0x2001: /* mov.w Rm,@Rn */
609 tcg_gen_qemu_st16(REG(B7_4
), REG(B11_8
), ctx
->memidx
);
611 case 0x2002: /* mov.l Rm,@Rn */
612 tcg_gen_qemu_st32(REG(B7_4
), REG(B11_8
), ctx
->memidx
);
614 case 0x6000: /* mov.b @Rm,Rn */
615 tcg_gen_qemu_ld8s(REG(B11_8
), REG(B7_4
), ctx
->memidx
);
617 case 0x6001: /* mov.w @Rm,Rn */
618 tcg_gen_qemu_ld16s(REG(B11_8
), REG(B7_4
), ctx
->memidx
);
620 case 0x6002: /* mov.l @Rm,Rn */
621 tcg_gen_qemu_ld32s(REG(B11_8
), REG(B7_4
), ctx
->memidx
);
623 case 0x2004: /* mov.b Rm,@-Rn */
625 TCGv addr
= tcg_temp_new();
626 tcg_gen_subi_i32(addr
, REG(B11_8
), 1);
627 tcg_gen_qemu_st8(REG(B7_4
), addr
, ctx
->memidx
); /* might cause re-execution */
628 tcg_gen_mov_i32(REG(B11_8
), addr
); /* modify register status */
632 case 0x2005: /* mov.w Rm,@-Rn */
634 TCGv addr
= tcg_temp_new();
635 tcg_gen_subi_i32(addr
, REG(B11_8
), 2);
636 tcg_gen_qemu_st16(REG(B7_4
), addr
, ctx
->memidx
);
637 tcg_gen_mov_i32(REG(B11_8
), addr
);
641 case 0x2006: /* mov.l Rm,@-Rn */
643 TCGv addr
= tcg_temp_new();
644 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
645 tcg_gen_qemu_st32(REG(B7_4
), addr
, ctx
->memidx
);
646 tcg_gen_mov_i32(REG(B11_8
), addr
);
649 case 0x6004: /* mov.b @Rm+,Rn */
650 tcg_gen_qemu_ld8s(REG(B11_8
), REG(B7_4
), ctx
->memidx
);
652 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 1);
654 case 0x6005: /* mov.w @Rm+,Rn */
655 tcg_gen_qemu_ld16s(REG(B11_8
), REG(B7_4
), ctx
->memidx
);
657 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 2);
659 case 0x6006: /* mov.l @Rm+,Rn */
660 tcg_gen_qemu_ld32s(REG(B11_8
), REG(B7_4
), ctx
->memidx
);
662 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 4);
664 case 0x0004: /* mov.b Rm,@(R0,Rn) */
666 TCGv addr
= tcg_temp_new();
667 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
668 tcg_gen_qemu_st8(REG(B7_4
), addr
, ctx
->memidx
);
672 case 0x0005: /* mov.w Rm,@(R0,Rn) */
674 TCGv addr
= tcg_temp_new();
675 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
676 tcg_gen_qemu_st16(REG(B7_4
), addr
, ctx
->memidx
);
680 case 0x0006: /* mov.l Rm,@(R0,Rn) */
682 TCGv addr
= tcg_temp_new();
683 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
684 tcg_gen_qemu_st32(REG(B7_4
), addr
, ctx
->memidx
);
688 case 0x000c: /* mov.b @(R0,Rm),Rn */
690 TCGv addr
= tcg_temp_new();
691 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
692 tcg_gen_qemu_ld8s(REG(B11_8
), addr
, ctx
->memidx
);
696 case 0x000d: /* mov.w @(R0,Rm),Rn */
698 TCGv addr
= tcg_temp_new();
699 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
700 tcg_gen_qemu_ld16s(REG(B11_8
), addr
, ctx
->memidx
);
704 case 0x000e: /* mov.l @(R0,Rm),Rn */
706 TCGv addr
= tcg_temp_new();
707 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
708 tcg_gen_qemu_ld32s(REG(B11_8
), addr
, ctx
->memidx
);
712 case 0x6008: /* swap.b Rm,Rn */
715 high
= tcg_temp_new();
716 tcg_gen_andi_i32(high
, REG(B7_4
), 0xffff0000);
717 low
= tcg_temp_new();
718 tcg_gen_ext16u_i32(low
, REG(B7_4
));
719 tcg_gen_bswap16_i32(low
, low
);
720 tcg_gen_or_i32(REG(B11_8
), high
, low
);
725 case 0x6009: /* swap.w Rm,Rn */
726 tcg_gen_rotli_i32(REG(B11_8
), REG(B7_4
), 16);
728 case 0x200d: /* xtrct Rm,Rn */
731 high
= tcg_temp_new();
732 tcg_gen_shli_i32(high
, REG(B7_4
), 16);
733 low
= tcg_temp_new();
734 tcg_gen_shri_i32(low
, REG(B11_8
), 16);
735 tcg_gen_or_i32(REG(B11_8
), high
, low
);
740 case 0x300c: /* add Rm,Rn */
741 tcg_gen_add_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
743 case 0x300e: /* addc Rm,Rn */
747 tcg_gen_andi_i32(t0
, cpu_sr
, SR_T
);
749 tcg_gen_add_i32(t1
, REG(B7_4
), REG(B11_8
));
750 tcg_gen_add_i32(t0
, t0
, t1
);
752 tcg_gen_setcond_i32(TCG_COND_GTU
, t2
, REG(B11_8
), t1
);
753 tcg_gen_setcond_i32(TCG_COND_GTU
, t1
, t1
, t0
);
754 tcg_gen_or_i32(t1
, t1
, t2
);
756 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
757 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t1
);
759 tcg_gen_mov_i32(REG(B11_8
), t0
);
763 case 0x300f: /* addv Rm,Rn */
767 tcg_gen_add_i32(t0
, REG(B7_4
), REG(B11_8
));
769 tcg_gen_xor_i32(t1
, t0
, REG(B11_8
));
771 tcg_gen_xor_i32(t2
, REG(B7_4
), REG(B11_8
));
772 tcg_gen_andc_i32(t1
, t1
, t2
);
774 tcg_gen_shri_i32(t1
, t1
, 31);
775 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
776 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t1
);
778 tcg_gen_mov_i32(REG(B7_4
), t0
);
782 case 0x2009: /* and Rm,Rn */
783 tcg_gen_and_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
785 case 0x3000: /* cmp/eq Rm,Rn */
786 gen_cmp(TCG_COND_EQ
, REG(B7_4
), REG(B11_8
));
788 case 0x3003: /* cmp/ge Rm,Rn */
789 gen_cmp(TCG_COND_GE
, REG(B7_4
), REG(B11_8
));
791 case 0x3007: /* cmp/gt Rm,Rn */
792 gen_cmp(TCG_COND_GT
, REG(B7_4
), REG(B11_8
));
794 case 0x3006: /* cmp/hi Rm,Rn */
795 gen_cmp(TCG_COND_GTU
, REG(B7_4
), REG(B11_8
));
797 case 0x3002: /* cmp/hs Rm,Rn */
798 gen_cmp(TCG_COND_GEU
, REG(B7_4
), REG(B11_8
));
800 case 0x200c: /* cmp/str Rm,Rn */
802 TCGv cmp1
= tcg_temp_new();
803 TCGv cmp2
= tcg_temp_new();
804 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
805 tcg_gen_xor_i32(cmp1
, REG(B7_4
), REG(B11_8
));
806 tcg_gen_andi_i32(cmp2
, cmp1
, 0xff000000);
807 tcg_gen_setcondi_i32(TCG_COND_EQ
, cmp2
, cmp2
, 0);
808 tcg_gen_or_i32(cpu_sr
, cpu_sr
, cmp2
);
809 tcg_gen_andi_i32(cmp2
, cmp1
, 0x00ff0000);
810 tcg_gen_setcondi_i32(TCG_COND_EQ
, cmp2
, cmp2
, 0);
811 tcg_gen_or_i32(cpu_sr
, cpu_sr
, cmp2
);
812 tcg_gen_andi_i32(cmp2
, cmp1
, 0x0000ff00);
813 tcg_gen_setcondi_i32(TCG_COND_EQ
, cmp2
, cmp2
, 0);
814 tcg_gen_or_i32(cpu_sr
, cpu_sr
, cmp2
);
815 tcg_gen_andi_i32(cmp2
, cmp1
, 0x000000ff);
816 tcg_gen_setcondi_i32(TCG_COND_EQ
, cmp2
, cmp2
, 0);
817 tcg_gen_or_i32(cpu_sr
, cpu_sr
, cmp2
);
822 case 0x2007: /* div0s Rm,Rn */
824 gen_copy_bit_i32(cpu_sr
, 8, REG(B11_8
), 31); /* SR_Q */
825 gen_copy_bit_i32(cpu_sr
, 9, REG(B7_4
), 31); /* SR_M */
826 TCGv val
= tcg_temp_new();
827 tcg_gen_xor_i32(val
, REG(B7_4
), REG(B11_8
));
828 gen_copy_bit_i32(cpu_sr
, 0, val
, 31); /* SR_T */
832 case 0x3004: /* div1 Rm,Rn */
833 gen_helper_div1(REG(B11_8
), cpu_env
, REG(B7_4
), REG(B11_8
));
835 case 0x300d: /* dmuls.l Rm,Rn */
836 tcg_gen_muls2_i32(cpu_macl
, cpu_mach
, REG(B7_4
), REG(B11_8
));
838 case 0x3005: /* dmulu.l Rm,Rn */
839 tcg_gen_mulu2_i32(cpu_macl
, cpu_mach
, REG(B7_4
), REG(B11_8
));
841 case 0x600e: /* exts.b Rm,Rn */
842 tcg_gen_ext8s_i32(REG(B11_8
), REG(B7_4
));
844 case 0x600f: /* exts.w Rm,Rn */
845 tcg_gen_ext16s_i32(REG(B11_8
), REG(B7_4
));
847 case 0x600c: /* extu.b Rm,Rn */
848 tcg_gen_ext8u_i32(REG(B11_8
), REG(B7_4
));
850 case 0x600d: /* extu.w Rm,Rn */
851 tcg_gen_ext16u_i32(REG(B11_8
), REG(B7_4
));
853 case 0x000f: /* mac.l @Rm+,@Rn+ */
856 arg0
= tcg_temp_new();
857 tcg_gen_qemu_ld32s(arg0
, REG(B7_4
), ctx
->memidx
);
858 arg1
= tcg_temp_new();
859 tcg_gen_qemu_ld32s(arg1
, REG(B11_8
), ctx
->memidx
);
860 gen_helper_macl(cpu_env
, arg0
, arg1
);
863 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 4);
864 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
867 case 0x400f: /* mac.w @Rm+,@Rn+ */
870 arg0
= tcg_temp_new();
871 tcg_gen_qemu_ld32s(arg0
, REG(B7_4
), ctx
->memidx
);
872 arg1
= tcg_temp_new();
873 tcg_gen_qemu_ld32s(arg1
, REG(B11_8
), ctx
->memidx
);
874 gen_helper_macw(cpu_env
, arg0
, arg1
);
877 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 2);
878 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 2);
881 case 0x0007: /* mul.l Rm,Rn */
882 tcg_gen_mul_i32(cpu_macl
, REG(B7_4
), REG(B11_8
));
884 case 0x200f: /* muls.w Rm,Rn */
887 arg0
= tcg_temp_new();
888 tcg_gen_ext16s_i32(arg0
, REG(B7_4
));
889 arg1
= tcg_temp_new();
890 tcg_gen_ext16s_i32(arg1
, REG(B11_8
));
891 tcg_gen_mul_i32(cpu_macl
, arg0
, arg1
);
896 case 0x200e: /* mulu.w Rm,Rn */
899 arg0
= tcg_temp_new();
900 tcg_gen_ext16u_i32(arg0
, REG(B7_4
));
901 arg1
= tcg_temp_new();
902 tcg_gen_ext16u_i32(arg1
, REG(B11_8
));
903 tcg_gen_mul_i32(cpu_macl
, arg0
, arg1
);
908 case 0x600b: /* neg Rm,Rn */
909 tcg_gen_neg_i32(REG(B11_8
), REG(B7_4
));
911 case 0x600a: /* negc Rm,Rn */
915 tcg_gen_neg_i32(t0
, REG(B7_4
));
917 tcg_gen_andi_i32(t1
, cpu_sr
, SR_T
);
918 tcg_gen_sub_i32(REG(B11_8
), t0
, t1
);
919 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
920 tcg_gen_setcondi_i32(TCG_COND_GTU
, t1
, t0
, 0);
921 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t1
);
922 tcg_gen_setcond_i32(TCG_COND_GTU
, t1
, REG(B11_8
), t0
);
923 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t1
);
928 case 0x6007: /* not Rm,Rn */
929 tcg_gen_not_i32(REG(B11_8
), REG(B7_4
));
931 case 0x200b: /* or Rm,Rn */
932 tcg_gen_or_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
934 case 0x400c: /* shad Rm,Rn */
936 int label1
= gen_new_label();
937 int label2
= gen_new_label();
938 int label3
= gen_new_label();
939 int label4
= gen_new_label();
941 tcg_gen_brcondi_i32(TCG_COND_LT
, REG(B7_4
), 0, label1
);
942 /* Rm positive, shift to the left */
943 shift
= tcg_temp_new();
944 tcg_gen_andi_i32(shift
, REG(B7_4
), 0x1f);
945 tcg_gen_shl_i32(REG(B11_8
), REG(B11_8
), shift
);
946 tcg_temp_free(shift
);
948 /* Rm negative, shift to the right */
949 gen_set_label(label1
);
950 shift
= tcg_temp_new();
951 tcg_gen_andi_i32(shift
, REG(B7_4
), 0x1f);
952 tcg_gen_brcondi_i32(TCG_COND_EQ
, shift
, 0, label2
);
953 tcg_gen_not_i32(shift
, REG(B7_4
));
954 tcg_gen_andi_i32(shift
, shift
, 0x1f);
955 tcg_gen_addi_i32(shift
, shift
, 1);
956 tcg_gen_sar_i32(REG(B11_8
), REG(B11_8
), shift
);
957 tcg_temp_free(shift
);
960 gen_set_label(label2
);
961 tcg_gen_brcondi_i32(TCG_COND_LT
, REG(B11_8
), 0, label3
);
962 tcg_gen_movi_i32(REG(B11_8
), 0);
964 gen_set_label(label3
);
965 tcg_gen_movi_i32(REG(B11_8
), 0xffffffff);
966 gen_set_label(label4
);
969 case 0x400d: /* shld Rm,Rn */
971 int label1
= gen_new_label();
972 int label2
= gen_new_label();
973 int label3
= gen_new_label();
975 tcg_gen_brcondi_i32(TCG_COND_LT
, REG(B7_4
), 0, label1
);
976 /* Rm positive, shift to the left */
977 shift
= tcg_temp_new();
978 tcg_gen_andi_i32(shift
, REG(B7_4
), 0x1f);
979 tcg_gen_shl_i32(REG(B11_8
), REG(B11_8
), shift
);
980 tcg_temp_free(shift
);
982 /* Rm negative, shift to the right */
983 gen_set_label(label1
);
984 shift
= tcg_temp_new();
985 tcg_gen_andi_i32(shift
, REG(B7_4
), 0x1f);
986 tcg_gen_brcondi_i32(TCG_COND_EQ
, shift
, 0, label2
);
987 tcg_gen_not_i32(shift
, REG(B7_4
));
988 tcg_gen_andi_i32(shift
, shift
, 0x1f);
989 tcg_gen_addi_i32(shift
, shift
, 1);
990 tcg_gen_shr_i32(REG(B11_8
), REG(B11_8
), shift
);
991 tcg_temp_free(shift
);
994 gen_set_label(label2
);
995 tcg_gen_movi_i32(REG(B11_8
), 0);
996 gen_set_label(label3
);
999 case 0x3008: /* sub Rm,Rn */
1000 tcg_gen_sub_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
1002 case 0x300a: /* subc Rm,Rn */
1005 t0
= tcg_temp_new();
1006 tcg_gen_andi_i32(t0
, cpu_sr
, SR_T
);
1007 t1
= tcg_temp_new();
1008 tcg_gen_sub_i32(t1
, REG(B11_8
), REG(B7_4
));
1009 tcg_gen_sub_i32(t0
, t1
, t0
);
1010 t2
= tcg_temp_new();
1011 tcg_gen_setcond_i32(TCG_COND_LTU
, t2
, REG(B11_8
), t1
);
1012 tcg_gen_setcond_i32(TCG_COND_LTU
, t1
, t1
, t0
);
1013 tcg_gen_or_i32(t1
, t1
, t2
);
1015 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
1016 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t1
);
1018 tcg_gen_mov_i32(REG(B11_8
), t0
);
1022 case 0x300b: /* subv Rm,Rn */
1025 t0
= tcg_temp_new();
1026 tcg_gen_sub_i32(t0
, REG(B11_8
), REG(B7_4
));
1027 t1
= tcg_temp_new();
1028 tcg_gen_xor_i32(t1
, t0
, REG(B7_4
));
1029 t2
= tcg_temp_new();
1030 tcg_gen_xor_i32(t2
, REG(B11_8
), REG(B7_4
));
1031 tcg_gen_and_i32(t1
, t1
, t2
);
1033 tcg_gen_shri_i32(t1
, t1
, 31);
1034 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
1035 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t1
);
1037 tcg_gen_mov_i32(REG(B11_8
), t0
);
1041 case 0x2008: /* tst Rm,Rn */
1043 TCGv val
= tcg_temp_new();
1044 tcg_gen_and_i32(val
, REG(B7_4
), REG(B11_8
));
1045 gen_cmp_imm(TCG_COND_EQ
, val
, 0);
1049 case 0x200a: /* xor Rm,Rn */
1050 tcg_gen_xor_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
1052 case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
1054 if (ctx
->flags
& FPSCR_SZ
) {
1055 TCGv_i64 fp
= tcg_temp_new_i64();
1056 gen_load_fpr64(fp
, XREG(B7_4
));
1057 gen_store_fpr64(fp
, XREG(B11_8
));
1058 tcg_temp_free_i64(fp
);
1060 tcg_gen_mov_i32(cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B7_4
)]);
1063 case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
1065 if (ctx
->flags
& FPSCR_SZ
) {
1066 TCGv addr_hi
= tcg_temp_new();
1067 int fr
= XREG(B7_4
);
1068 tcg_gen_addi_i32(addr_hi
, REG(B11_8
), 4);
1069 tcg_gen_qemu_st32(cpu_fregs
[fr
], REG(B11_8
), ctx
->memidx
);
1070 tcg_gen_qemu_st32(cpu_fregs
[fr
+1], addr_hi
, ctx
->memidx
);
1071 tcg_temp_free(addr_hi
);
1073 tcg_gen_qemu_st32(cpu_fregs
[FREG(B7_4
)], REG(B11_8
), ctx
->memidx
);
1076 case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
1078 if (ctx
->flags
& FPSCR_SZ
) {
1079 TCGv addr_hi
= tcg_temp_new();
1080 int fr
= XREG(B11_8
);
1081 tcg_gen_addi_i32(addr_hi
, REG(B7_4
), 4);
1082 tcg_gen_qemu_ld32u(cpu_fregs
[fr
], REG(B7_4
), ctx
->memidx
);
1083 tcg_gen_qemu_ld32u(cpu_fregs
[fr
+1], addr_hi
, ctx
->memidx
);
1084 tcg_temp_free(addr_hi
);
1086 tcg_gen_qemu_ld32u(cpu_fregs
[FREG(B11_8
)], REG(B7_4
), ctx
->memidx
);
1089 case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
1091 if (ctx
->flags
& FPSCR_SZ
) {
1092 TCGv addr_hi
= tcg_temp_new();
1093 int fr
= XREG(B11_8
);
1094 tcg_gen_addi_i32(addr_hi
, REG(B7_4
), 4);
1095 tcg_gen_qemu_ld32u(cpu_fregs
[fr
], REG(B7_4
), ctx
->memidx
);
1096 tcg_gen_qemu_ld32u(cpu_fregs
[fr
+1], addr_hi
, ctx
->memidx
);
1097 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 8);
1098 tcg_temp_free(addr_hi
);
1100 tcg_gen_qemu_ld32u(cpu_fregs
[FREG(B11_8
)], REG(B7_4
), ctx
->memidx
);
1101 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 4);
1104 case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
1106 if (ctx
->flags
& FPSCR_SZ
) {
1107 TCGv addr
= tcg_temp_new_i32();
1108 int fr
= XREG(B7_4
);
1109 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1110 tcg_gen_qemu_st32(cpu_fregs
[fr
+1], addr
, ctx
->memidx
);
1111 tcg_gen_subi_i32(addr
, addr
, 4);
1112 tcg_gen_qemu_st32(cpu_fregs
[fr
], addr
, ctx
->memidx
);
1113 tcg_gen_mov_i32(REG(B11_8
), addr
);
1114 tcg_temp_free(addr
);
1117 addr
= tcg_temp_new_i32();
1118 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1119 tcg_gen_qemu_st32(cpu_fregs
[FREG(B7_4
)], addr
, ctx
->memidx
);
1120 tcg_gen_mov_i32(REG(B11_8
), addr
);
1121 tcg_temp_free(addr
);
1124 case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rm - FPSCR: Nothing */
1127 TCGv addr
= tcg_temp_new_i32();
1128 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
1129 if (ctx
->flags
& FPSCR_SZ
) {
1130 int fr
= XREG(B11_8
);
1131 tcg_gen_qemu_ld32u(cpu_fregs
[fr
], addr
, ctx
->memidx
);
1132 tcg_gen_addi_i32(addr
, addr
, 4);
1133 tcg_gen_qemu_ld32u(cpu_fregs
[fr
+1], addr
, ctx
->memidx
);
1135 tcg_gen_qemu_ld32u(cpu_fregs
[FREG(B11_8
)], addr
, ctx
->memidx
);
1137 tcg_temp_free(addr
);
1140 case 0xf007: /* fmov {F,D,X}Rn,@(R0,Rn) - FPSCR: Nothing */
1143 TCGv addr
= tcg_temp_new();
1144 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
1145 if (ctx
->flags
& FPSCR_SZ
) {
1146 int fr
= XREG(B7_4
);
1147 tcg_gen_qemu_ld32u(cpu_fregs
[fr
], addr
, ctx
->memidx
);
1148 tcg_gen_addi_i32(addr
, addr
, 4);
1149 tcg_gen_qemu_ld32u(cpu_fregs
[fr
+1], addr
, ctx
->memidx
);
1151 tcg_gen_qemu_st32(cpu_fregs
[FREG(B7_4
)], addr
, ctx
->memidx
);
1153 tcg_temp_free(addr
);
1156 case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1157 case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1158 case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1159 case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1160 case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1161 case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1164 if (ctx
->flags
& FPSCR_PR
) {
1167 if (ctx
->opcode
& 0x0110)
1168 break; /* illegal instruction */
1169 fp0
= tcg_temp_new_i64();
1170 fp1
= tcg_temp_new_i64();
1171 gen_load_fpr64(fp0
, DREG(B11_8
));
1172 gen_load_fpr64(fp1
, DREG(B7_4
));
1173 switch (ctx
->opcode
& 0xf00f) {
1174 case 0xf000: /* fadd Rm,Rn */
1175 gen_helper_fadd_DT(fp0
, cpu_env
, fp0
, fp1
);
1177 case 0xf001: /* fsub Rm,Rn */
1178 gen_helper_fsub_DT(fp0
, cpu_env
, fp0
, fp1
);
1180 case 0xf002: /* fmul Rm,Rn */
1181 gen_helper_fmul_DT(fp0
, cpu_env
, fp0
, fp1
);
1183 case 0xf003: /* fdiv Rm,Rn */
1184 gen_helper_fdiv_DT(fp0
, cpu_env
, fp0
, fp1
);
1186 case 0xf004: /* fcmp/eq Rm,Rn */
1187 gen_helper_fcmp_eq_DT(cpu_env
, fp0
, fp1
);
1189 case 0xf005: /* fcmp/gt Rm,Rn */
1190 gen_helper_fcmp_gt_DT(cpu_env
, fp0
, fp1
);
1193 gen_store_fpr64(fp0
, DREG(B11_8
));
1194 tcg_temp_free_i64(fp0
);
1195 tcg_temp_free_i64(fp1
);
1197 switch (ctx
->opcode
& 0xf00f) {
1198 case 0xf000: /* fadd Rm,Rn */
1199 gen_helper_fadd_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1200 cpu_fregs
[FREG(B11_8
)],
1201 cpu_fregs
[FREG(B7_4
)]);
1203 case 0xf001: /* fsub Rm,Rn */
1204 gen_helper_fsub_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1205 cpu_fregs
[FREG(B11_8
)],
1206 cpu_fregs
[FREG(B7_4
)]);
1208 case 0xf002: /* fmul Rm,Rn */
1209 gen_helper_fmul_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1210 cpu_fregs
[FREG(B11_8
)],
1211 cpu_fregs
[FREG(B7_4
)]);
1213 case 0xf003: /* fdiv Rm,Rn */
1214 gen_helper_fdiv_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1215 cpu_fregs
[FREG(B11_8
)],
1216 cpu_fregs
[FREG(B7_4
)]);
1218 case 0xf004: /* fcmp/eq Rm,Rn */
1219 gen_helper_fcmp_eq_FT(cpu_env
, cpu_fregs
[FREG(B11_8
)],
1220 cpu_fregs
[FREG(B7_4
)]);
1222 case 0xf005: /* fcmp/gt Rm,Rn */
1223 gen_helper_fcmp_gt_FT(cpu_env
, cpu_fregs
[FREG(B11_8
)],
1224 cpu_fregs
[FREG(B7_4
)]);
1230 case 0xf00e: /* fmac FR0,RM,Rn */
1233 if (ctx
->flags
& FPSCR_PR
) {
1234 break; /* illegal instruction */
1236 gen_helper_fmac_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1237 cpu_fregs
[FREG(0)], cpu_fregs
[FREG(B7_4
)],
1238 cpu_fregs
[FREG(B11_8
)]);
1244 switch (ctx
->opcode
& 0xff00) {
1245 case 0xc900: /* and #imm,R0 */
1246 tcg_gen_andi_i32(REG(0), REG(0), B7_0
);
1248 case 0xcd00: /* and.b #imm,@(R0,GBR) */
1251 addr
= tcg_temp_new();
1252 tcg_gen_add_i32(addr
, REG(0), cpu_gbr
);
1253 val
= tcg_temp_new();
1254 tcg_gen_qemu_ld8u(val
, addr
, ctx
->memidx
);
1255 tcg_gen_andi_i32(val
, val
, B7_0
);
1256 tcg_gen_qemu_st8(val
, addr
, ctx
->memidx
);
1258 tcg_temp_free(addr
);
1261 case 0x8b00: /* bf label */
1262 CHECK_NOT_DELAY_SLOT
1263 gen_conditional_jump(ctx
, ctx
->pc
+ 2,
1264 ctx
->pc
+ 4 + B7_0s
* 2);
1265 ctx
->bstate
= BS_BRANCH
;
1267 case 0x8f00: /* bf/s label */
1268 CHECK_NOT_DELAY_SLOT
1269 gen_branch_slot(ctx
->delayed_pc
= ctx
->pc
+ 4 + B7_0s
* 2, 0);
1270 ctx
->flags
|= DELAY_SLOT_CONDITIONAL
;
1272 case 0x8900: /* bt label */
1273 CHECK_NOT_DELAY_SLOT
1274 gen_conditional_jump(ctx
, ctx
->pc
+ 4 + B7_0s
* 2,
1276 ctx
->bstate
= BS_BRANCH
;
1278 case 0x8d00: /* bt/s label */
1279 CHECK_NOT_DELAY_SLOT
1280 gen_branch_slot(ctx
->delayed_pc
= ctx
->pc
+ 4 + B7_0s
* 2, 1);
1281 ctx
->flags
|= DELAY_SLOT_CONDITIONAL
;
1283 case 0x8800: /* cmp/eq #imm,R0 */
1284 gen_cmp_imm(TCG_COND_EQ
, REG(0), B7_0s
);
1286 case 0xc400: /* mov.b @(disp,GBR),R0 */
1288 TCGv addr
= tcg_temp_new();
1289 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
);
1290 tcg_gen_qemu_ld8s(REG(0), addr
, ctx
->memidx
);
1291 tcg_temp_free(addr
);
1294 case 0xc500: /* mov.w @(disp,GBR),R0 */
1296 TCGv addr
= tcg_temp_new();
1297 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 2);
1298 tcg_gen_qemu_ld16s(REG(0), addr
, ctx
->memidx
);
1299 tcg_temp_free(addr
);
1302 case 0xc600: /* mov.l @(disp,GBR),R0 */
1304 TCGv addr
= tcg_temp_new();
1305 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 4);
1306 tcg_gen_qemu_ld32s(REG(0), addr
, ctx
->memidx
);
1307 tcg_temp_free(addr
);
1310 case 0xc000: /* mov.b R0,@(disp,GBR) */
1312 TCGv addr
= tcg_temp_new();
1313 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
);
1314 tcg_gen_qemu_st8(REG(0), addr
, ctx
->memidx
);
1315 tcg_temp_free(addr
);
1318 case 0xc100: /* mov.w R0,@(disp,GBR) */
1320 TCGv addr
= tcg_temp_new();
1321 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 2);
1322 tcg_gen_qemu_st16(REG(0), addr
, ctx
->memidx
);
1323 tcg_temp_free(addr
);
1326 case 0xc200: /* mov.l R0,@(disp,GBR) */
1328 TCGv addr
= tcg_temp_new();
1329 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 4);
1330 tcg_gen_qemu_st32(REG(0), addr
, ctx
->memidx
);
1331 tcg_temp_free(addr
);
1334 case 0x8000: /* mov.b R0,@(disp,Rn) */
1336 TCGv addr
= tcg_temp_new();
1337 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
);
1338 tcg_gen_qemu_st8(REG(0), addr
, ctx
->memidx
);
1339 tcg_temp_free(addr
);
1342 case 0x8100: /* mov.w R0,@(disp,Rn) */
1344 TCGv addr
= tcg_temp_new();
1345 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
* 2);
1346 tcg_gen_qemu_st16(REG(0), addr
, ctx
->memidx
);
1347 tcg_temp_free(addr
);
1350 case 0x8400: /* mov.b @(disp,Rn),R0 */
1352 TCGv addr
= tcg_temp_new();
1353 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
);
1354 tcg_gen_qemu_ld8s(REG(0), addr
, ctx
->memidx
);
1355 tcg_temp_free(addr
);
1358 case 0x8500: /* mov.w @(disp,Rn),R0 */
1360 TCGv addr
= tcg_temp_new();
1361 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
* 2);
1362 tcg_gen_qemu_ld16s(REG(0), addr
, ctx
->memidx
);
1363 tcg_temp_free(addr
);
1366 case 0xc700: /* mova @(disp,PC),R0 */
1367 tcg_gen_movi_i32(REG(0), ((ctx
->pc
& 0xfffffffc) + 4 + B7_0
* 4) & ~3);
1369 case 0xcb00: /* or #imm,R0 */
1370 tcg_gen_ori_i32(REG(0), REG(0), B7_0
);
1372 case 0xcf00: /* or.b #imm,@(R0,GBR) */
1375 addr
= tcg_temp_new();
1376 tcg_gen_add_i32(addr
, REG(0), cpu_gbr
);
1377 val
= tcg_temp_new();
1378 tcg_gen_qemu_ld8u(val
, addr
, ctx
->memidx
);
1379 tcg_gen_ori_i32(val
, val
, B7_0
);
1380 tcg_gen_qemu_st8(val
, addr
, ctx
->memidx
);
1382 tcg_temp_free(addr
);
1385 case 0xc300: /* trapa #imm */
1388 CHECK_NOT_DELAY_SLOT
1389 tcg_gen_movi_i32(cpu_pc
, ctx
->pc
);
1390 imm
= tcg_const_i32(B7_0
);
1391 gen_helper_trapa(cpu_env
, imm
);
1393 ctx
->bstate
= BS_BRANCH
;
1396 case 0xc800: /* tst #imm,R0 */
1398 TCGv val
= tcg_temp_new();
1399 tcg_gen_andi_i32(val
, REG(0), B7_0
);
1400 gen_cmp_imm(TCG_COND_EQ
, val
, 0);
1404 case 0xcc00: /* tst.b #imm,@(R0,GBR) */
1406 TCGv val
= tcg_temp_new();
1407 tcg_gen_add_i32(val
, REG(0), cpu_gbr
);
1408 tcg_gen_qemu_ld8u(val
, val
, ctx
->memidx
);
1409 tcg_gen_andi_i32(val
, val
, B7_0
);
1410 gen_cmp_imm(TCG_COND_EQ
, val
, 0);
1414 case 0xca00: /* xor #imm,R0 */
1415 tcg_gen_xori_i32(REG(0), REG(0), B7_0
);
1417 case 0xce00: /* xor.b #imm,@(R0,GBR) */
1420 addr
= tcg_temp_new();
1421 tcg_gen_add_i32(addr
, REG(0), cpu_gbr
);
1422 val
= tcg_temp_new();
1423 tcg_gen_qemu_ld8u(val
, addr
, ctx
->memidx
);
1424 tcg_gen_xori_i32(val
, val
, B7_0
);
1425 tcg_gen_qemu_st8(val
, addr
, ctx
->memidx
);
1427 tcg_temp_free(addr
);
1432 switch (ctx
->opcode
& 0xf08f) {
1433 case 0x408e: /* ldc Rm,Rn_BANK */
1435 tcg_gen_mov_i32(ALTREG(B6_4
), REG(B11_8
));
1437 case 0x4087: /* ldc.l @Rm+,Rn_BANK */
1439 tcg_gen_qemu_ld32s(ALTREG(B6_4
), REG(B11_8
), ctx
->memidx
);
1440 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1442 case 0x0082: /* stc Rm_BANK,Rn */
1444 tcg_gen_mov_i32(REG(B11_8
), ALTREG(B6_4
));
1446 case 0x4083: /* stc.l Rm_BANK,@-Rn */
1449 TCGv addr
= tcg_temp_new();
1450 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1451 tcg_gen_qemu_st32(ALTREG(B6_4
), addr
, ctx
->memidx
);
1452 tcg_gen_mov_i32(REG(B11_8
), addr
);
1453 tcg_temp_free(addr
);
1458 switch (ctx
->opcode
& 0xf0ff) {
1459 case 0x0023: /* braf Rn */
1460 CHECK_NOT_DELAY_SLOT
1461 tcg_gen_addi_i32(cpu_delayed_pc
, REG(B11_8
), ctx
->pc
+ 4);
1462 ctx
->flags
|= DELAY_SLOT
;
1463 ctx
->delayed_pc
= (uint32_t) - 1;
1465 case 0x0003: /* bsrf Rn */
1466 CHECK_NOT_DELAY_SLOT
1467 tcg_gen_movi_i32(cpu_pr
, ctx
->pc
+ 4);
1468 tcg_gen_add_i32(cpu_delayed_pc
, REG(B11_8
), cpu_pr
);
1469 ctx
->flags
|= DELAY_SLOT
;
1470 ctx
->delayed_pc
= (uint32_t) - 1;
1472 case 0x4015: /* cmp/pl Rn */
1473 gen_cmp_imm(TCG_COND_GT
, REG(B11_8
), 0);
1475 case 0x4011: /* cmp/pz Rn */
1476 gen_cmp_imm(TCG_COND_GE
, REG(B11_8
), 0);
1478 case 0x4010: /* dt Rn */
1479 tcg_gen_subi_i32(REG(B11_8
), REG(B11_8
), 1);
1480 gen_cmp_imm(TCG_COND_EQ
, REG(B11_8
), 0);
1482 case 0x402b: /* jmp @Rn */
1483 CHECK_NOT_DELAY_SLOT
1484 tcg_gen_mov_i32(cpu_delayed_pc
, REG(B11_8
));
1485 ctx
->flags
|= DELAY_SLOT
;
1486 ctx
->delayed_pc
= (uint32_t) - 1;
1488 case 0x400b: /* jsr @Rn */
1489 CHECK_NOT_DELAY_SLOT
1490 tcg_gen_movi_i32(cpu_pr
, ctx
->pc
+ 4);
1491 tcg_gen_mov_i32(cpu_delayed_pc
, REG(B11_8
));
1492 ctx
->flags
|= DELAY_SLOT
;
1493 ctx
->delayed_pc
= (uint32_t) - 1;
1495 case 0x400e: /* ldc Rm,SR */
1497 tcg_gen_andi_i32(cpu_sr
, REG(B11_8
), 0x700083f3);
1498 ctx
->bstate
= BS_STOP
;
1500 case 0x4007: /* ldc.l @Rm+,SR */
1503 TCGv val
= tcg_temp_new();
1504 tcg_gen_qemu_ld32s(val
, REG(B11_8
), ctx
->memidx
);
1505 tcg_gen_andi_i32(cpu_sr
, val
, 0x700083f3);
1507 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1508 ctx
->bstate
= BS_STOP
;
1511 case 0x0002: /* stc SR,Rn */
1513 tcg_gen_mov_i32(REG(B11_8
), cpu_sr
);
1515 case 0x4003: /* stc SR,@-Rn */
1518 TCGv addr
= tcg_temp_new();
1519 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1520 tcg_gen_qemu_st32(cpu_sr
, addr
, ctx
->memidx
);
1521 tcg_gen_mov_i32(REG(B11_8
), addr
);
1522 tcg_temp_free(addr
);
1525 #define LD(reg,ldnum,ldpnum,prechk) \
1528 tcg_gen_mov_i32 (cpu_##reg, REG(B11_8)); \
1532 tcg_gen_qemu_ld32s (cpu_##reg, REG(B11_8), ctx->memidx); \
1533 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); \
1535 #define ST(reg,stnum,stpnum,prechk) \
1538 tcg_gen_mov_i32 (REG(B11_8), cpu_##reg); \
1543 TCGv addr = tcg_temp_new(); \
1544 tcg_gen_subi_i32(addr, REG(B11_8), 4); \
1545 tcg_gen_qemu_st32 (cpu_##reg, addr, ctx->memidx); \
1546 tcg_gen_mov_i32(REG(B11_8), addr); \
1547 tcg_temp_free(addr); \
1550 #define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \
1551 LD(reg,ldnum,ldpnum,prechk) \
1552 ST(reg,stnum,stpnum,prechk)
1553 LDST(gbr
, 0x401e, 0x4017, 0x0012, 0x4013, {})
1554 LDST(vbr
, 0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED
)
1555 LDST(ssr
, 0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED
)
1556 LDST(spc
, 0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED
)
1557 ST(sgr
, 0x003a, 0x4032, CHECK_PRIVILEGED
)
1558 LD(sgr
, 0x403a, 0x4036, CHECK_PRIVILEGED
if (!(ctx
->features
& SH_FEATURE_SH4A
)) break;)
1559 LDST(dbr
, 0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED
)
1560 LDST(mach
, 0x400a, 0x4006, 0x000a, 0x4002, {})
1561 LDST(macl
, 0x401a, 0x4016, 0x001a, 0x4012, {})
1562 LDST(pr
, 0x402a, 0x4026, 0x002a, 0x4022, {})
1563 LDST(fpul
, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED
})
1564 case 0x406a: /* lds Rm,FPSCR */
1566 gen_helper_ld_fpscr(cpu_env
, REG(B11_8
));
1567 ctx
->bstate
= BS_STOP
;
1569 case 0x4066: /* lds.l @Rm+,FPSCR */
1572 TCGv addr
= tcg_temp_new();
1573 tcg_gen_qemu_ld32s(addr
, REG(B11_8
), ctx
->memidx
);
1574 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1575 gen_helper_ld_fpscr(cpu_env
, addr
);
1576 tcg_temp_free(addr
);
1577 ctx
->bstate
= BS_STOP
;
1580 case 0x006a: /* sts FPSCR,Rn */
1582 tcg_gen_andi_i32(REG(B11_8
), cpu_fpscr
, 0x003fffff);
1584 case 0x4062: /* sts FPSCR,@-Rn */
1588 val
= tcg_temp_new();
1589 tcg_gen_andi_i32(val
, cpu_fpscr
, 0x003fffff);
1590 addr
= tcg_temp_new();
1591 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1592 tcg_gen_qemu_st32(val
, addr
, ctx
->memidx
);
1593 tcg_gen_mov_i32(REG(B11_8
), addr
);
1594 tcg_temp_free(addr
);
1598 case 0x00c3: /* movca.l R0,@Rm */
1600 TCGv val
= tcg_temp_new();
1601 tcg_gen_qemu_ld32u(val
, REG(B11_8
), ctx
->memidx
);
1602 gen_helper_movcal(cpu_env
, REG(B11_8
), val
);
1603 tcg_gen_qemu_st32(REG(0), REG(B11_8
), ctx
->memidx
);
1605 ctx
->has_movcal
= 1;
1608 /* MOVUA.L @Rm,R0 (Rm) -> R0
1609 Load non-boundary-aligned data */
1610 tcg_gen_qemu_ld32u(REG(0), REG(B11_8
), ctx
->memidx
);
1613 /* MOVUA.L @Rm+,R0 (Rm) -> R0, Rm + 4 -> Rm
1614 Load non-boundary-aligned data */
1615 tcg_gen_qemu_ld32u(REG(0), REG(B11_8
), ctx
->memidx
);
1616 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1618 case 0x0029: /* movt Rn */
1619 tcg_gen_andi_i32(REG(B11_8
), cpu_sr
, SR_T
);
1624 If (T == 1) R0 -> (Rn)
1627 if (ctx
->features
& SH_FEATURE_SH4A
) {
1628 int label
= gen_new_label();
1629 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
1630 tcg_gen_or_i32(cpu_sr
, cpu_sr
, cpu_ldst
);
1631 tcg_gen_brcondi_i32(TCG_COND_EQ
, cpu_ldst
, 0, label
);
1632 tcg_gen_qemu_st32(REG(0), REG(B11_8
), ctx
->memidx
);
1633 gen_set_label(label
);
1634 tcg_gen_movi_i32(cpu_ldst
, 0);
1642 When interrupt/exception
1645 if (ctx
->features
& SH_FEATURE_SH4A
) {
1646 tcg_gen_movi_i32(cpu_ldst
, 0);
1647 tcg_gen_qemu_ld32s(REG(0), REG(B11_8
), ctx
->memidx
);
1648 tcg_gen_movi_i32(cpu_ldst
, 1);
1652 case 0x0093: /* ocbi @Rn */
1654 gen_helper_ocbi(cpu_env
, REG(B11_8
));
1657 case 0x00a3: /* ocbp @Rn */
1658 case 0x00b3: /* ocbwb @Rn */
1659 /* These instructions are supposed to do nothing in case of
1660 a cache miss. Given that we only partially emulate caches
1661 it is safe to simply ignore them. */
1663 case 0x0083: /* pref @Rn */
1665 case 0x00d3: /* prefi @Rn */
1666 if (ctx
->features
& SH_FEATURE_SH4A
)
1670 case 0x00e3: /* icbi @Rn */
1671 if (ctx
->features
& SH_FEATURE_SH4A
)
1675 case 0x00ab: /* synco */
1676 if (ctx
->features
& SH_FEATURE_SH4A
)
1680 case 0x4024: /* rotcl Rn */
1682 TCGv tmp
= tcg_temp_new();
1683 tcg_gen_mov_i32(tmp
, cpu_sr
);
1684 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 31);
1685 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 1);
1686 gen_copy_bit_i32(REG(B11_8
), 0, tmp
, 0);
1690 case 0x4025: /* rotcr Rn */
1692 TCGv tmp
= tcg_temp_new();
1693 tcg_gen_mov_i32(tmp
, cpu_sr
);
1694 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 0);
1695 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 1);
1696 gen_copy_bit_i32(REG(B11_8
), 31, tmp
, 0);
1700 case 0x4004: /* rotl Rn */
1701 tcg_gen_rotli_i32(REG(B11_8
), REG(B11_8
), 1);
1702 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 0);
1704 case 0x4005: /* rotr Rn */
1705 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 0);
1706 tcg_gen_rotri_i32(REG(B11_8
), REG(B11_8
), 1);
1708 case 0x4000: /* shll Rn */
1709 case 0x4020: /* shal Rn */
1710 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 31);
1711 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 1);
1713 case 0x4021: /* shar Rn */
1714 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 0);
1715 tcg_gen_sari_i32(REG(B11_8
), REG(B11_8
), 1);
1717 case 0x4001: /* shlr Rn */
1718 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 0);
1719 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 1);
1721 case 0x4008: /* shll2 Rn */
1722 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 2);
1724 case 0x4018: /* shll8 Rn */
1725 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 8);
1727 case 0x4028: /* shll16 Rn */
1728 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 16);
1730 case 0x4009: /* shlr2 Rn */
1731 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 2);
1733 case 0x4019: /* shlr8 Rn */
1734 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 8);
1736 case 0x4029: /* shlr16 Rn */
1737 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 16);
1739 case 0x401b: /* tas.b @Rn */
1742 addr
= tcg_temp_local_new();
1743 tcg_gen_mov_i32(addr
, REG(B11_8
));
1744 val
= tcg_temp_local_new();
1745 tcg_gen_qemu_ld8u(val
, addr
, ctx
->memidx
);
1746 gen_cmp_imm(TCG_COND_EQ
, val
, 0);
1747 tcg_gen_ori_i32(val
, val
, 0x80);
1748 tcg_gen_qemu_st8(val
, addr
, ctx
->memidx
);
1750 tcg_temp_free(addr
);
1753 case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
1755 tcg_gen_mov_i32(cpu_fregs
[FREG(B11_8
)], cpu_fpul
);
1757 case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
1759 tcg_gen_mov_i32(cpu_fpul
, cpu_fregs
[FREG(B11_8
)]);
1761 case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
1763 if (ctx
->flags
& FPSCR_PR
) {
1765 if (ctx
->opcode
& 0x0100)
1766 break; /* illegal instruction */
1767 fp
= tcg_temp_new_i64();
1768 gen_helper_float_DT(fp
, cpu_env
, cpu_fpul
);
1769 gen_store_fpr64(fp
, DREG(B11_8
));
1770 tcg_temp_free_i64(fp
);
1773 gen_helper_float_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
, cpu_fpul
);
1776 case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1778 if (ctx
->flags
& FPSCR_PR
) {
1780 if (ctx
->opcode
& 0x0100)
1781 break; /* illegal instruction */
1782 fp
= tcg_temp_new_i64();
1783 gen_load_fpr64(fp
, DREG(B11_8
));
1784 gen_helper_ftrc_DT(cpu_fpul
, cpu_env
, fp
);
1785 tcg_temp_free_i64(fp
);
1788 gen_helper_ftrc_FT(cpu_fpul
, cpu_env
, cpu_fregs
[FREG(B11_8
)]);
1791 case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
1794 gen_helper_fneg_T(cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B11_8
)]);
1797 case 0xf05d: /* fabs FRn/DRn */
1799 if (ctx
->flags
& FPSCR_PR
) {
1800 if (ctx
->opcode
& 0x0100)
1801 break; /* illegal instruction */
1802 TCGv_i64 fp
= tcg_temp_new_i64();
1803 gen_load_fpr64(fp
, DREG(B11_8
));
1804 gen_helper_fabs_DT(fp
, fp
);
1805 gen_store_fpr64(fp
, DREG(B11_8
));
1806 tcg_temp_free_i64(fp
);
1808 gen_helper_fabs_FT(cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B11_8
)]);
1811 case 0xf06d: /* fsqrt FRn */
1813 if (ctx
->flags
& FPSCR_PR
) {
1814 if (ctx
->opcode
& 0x0100)
1815 break; /* illegal instruction */
1816 TCGv_i64 fp
= tcg_temp_new_i64();
1817 gen_load_fpr64(fp
, DREG(B11_8
));
1818 gen_helper_fsqrt_DT(fp
, cpu_env
, fp
);
1819 gen_store_fpr64(fp
, DREG(B11_8
));
1820 tcg_temp_free_i64(fp
);
1822 gen_helper_fsqrt_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1823 cpu_fregs
[FREG(B11_8
)]);
1826 case 0xf07d: /* fsrra FRn */
1829 case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
1831 if (!(ctx
->flags
& FPSCR_PR
)) {
1832 tcg_gen_movi_i32(cpu_fregs
[FREG(B11_8
)], 0);
1835 case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
1837 if (!(ctx
->flags
& FPSCR_PR
)) {
1838 tcg_gen_movi_i32(cpu_fregs
[FREG(B11_8
)], 0x3f800000);
1841 case 0xf0ad: /* fcnvsd FPUL,DRn */
1844 TCGv_i64 fp
= tcg_temp_new_i64();
1845 gen_helper_fcnvsd_FT_DT(fp
, cpu_env
, cpu_fpul
);
1846 gen_store_fpr64(fp
, DREG(B11_8
));
1847 tcg_temp_free_i64(fp
);
1850 case 0xf0bd: /* fcnvds DRn,FPUL */
1853 TCGv_i64 fp
= tcg_temp_new_i64();
1854 gen_load_fpr64(fp
, DREG(B11_8
));
1855 gen_helper_fcnvds_DT_FT(cpu_fpul
, cpu_env
, fp
);
1856 tcg_temp_free_i64(fp
);
1859 case 0xf0ed: /* fipr FVm,FVn */
1861 if ((ctx
->flags
& FPSCR_PR
) == 0) {
1863 m
= tcg_const_i32((ctx
->opcode
>> 8) & 3);
1864 n
= tcg_const_i32((ctx
->opcode
>> 10) & 3);
1865 gen_helper_fipr(cpu_env
, m
, n
);
1871 case 0xf0fd: /* ftrv XMTRX,FVn */
1873 if ((ctx
->opcode
& 0x0300) == 0x0100 &&
1874 (ctx
->flags
& FPSCR_PR
) == 0) {
1876 n
= tcg_const_i32((ctx
->opcode
>> 10) & 3);
1877 gen_helper_ftrv(cpu_env
, n
);
1884 fprintf(stderr
, "unknown instruction 0x%04x at pc 0x%08x\n",
1885 ctx
->opcode
, ctx
->pc
);
1888 tcg_gen_movi_i32(cpu_pc
, ctx
->pc
);
1889 if (ctx
->flags
& (DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
)) {
1890 gen_helper_raise_slot_illegal_instruction(cpu_env
);
1892 gen_helper_raise_illegal_instruction(cpu_env
);
1894 ctx
->bstate
= BS_BRANCH
;
1897 static void decode_opc(DisasContext
* ctx
)
1899 uint32_t old_flags
= ctx
->flags
;
1901 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP
| CPU_LOG_TB_OP_OPT
))) {
1902 tcg_gen_debug_insn_start(ctx
->pc
);
1907 if (old_flags
& (DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
)) {
1908 if (ctx
->flags
& DELAY_SLOT_CLEARME
) {
1911 /* go out of the delay slot */
1912 uint32_t new_flags
= ctx
->flags
;
1913 new_flags
&= ~(DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
);
1914 gen_store_flags(new_flags
);
1917 ctx
->bstate
= BS_BRANCH
;
1918 if (old_flags
& DELAY_SLOT_CONDITIONAL
) {
1919 gen_delayed_conditional_jump(ctx
);
1920 } else if (old_flags
& DELAY_SLOT
) {
1926 /* go into a delay slot */
1927 if (ctx
->flags
& (DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
))
1928 gen_store_flags(ctx
->flags
);
1932 gen_intermediate_code_internal(CPUSH4State
* env
, TranslationBlock
* tb
,
1936 target_ulong pc_start
;
1937 static uint16_t *gen_opc_end
;
1944 gen_opc_end
= tcg_ctx
.gen_opc_buf
+ OPC_MAX_SIZE
;
1946 ctx
.flags
= (uint32_t)tb
->flags
;
1947 ctx
.bstate
= BS_NONE
;
1948 ctx
.memidx
= (ctx
.flags
& SR_MD
) == 0 ? 1 : 0;
1949 /* We don't know if the delayed pc came from a dynamic or static branch,
1950 so assume it is a dynamic branch. */
1951 ctx
.delayed_pc
= -1; /* use delayed pc from env pointer */
1953 ctx
.singlestep_enabled
= env
->singlestep_enabled
;
1954 ctx
.features
= env
->features
;
1955 ctx
.has_movcal
= (ctx
.flags
& TB_FLAG_PENDING_MOVCA
);
1959 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
1961 max_insns
= CF_COUNT_MASK
;
1963 while (ctx
.bstate
== BS_NONE
&& tcg_ctx
.gen_opc_ptr
< gen_opc_end
) {
1964 if (unlikely(!QTAILQ_EMPTY(&env
->breakpoints
))) {
1965 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
1966 if (ctx
.pc
== bp
->pc
) {
1967 /* We have hit a breakpoint - make sure PC is up-to-date */
1968 tcg_gen_movi_i32(cpu_pc
, ctx
.pc
);
1969 gen_helper_debug(cpu_env
);
1970 ctx
.bstate
= BS_BRANCH
;
1976 i
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
1980 tcg_ctx
.gen_opc_instr_start
[ii
++] = 0;
1982 tcg_ctx
.gen_opc_pc
[ii
] = ctx
.pc
;
1983 gen_opc_hflags
[ii
] = ctx
.flags
;
1984 tcg_ctx
.gen_opc_instr_start
[ii
] = 1;
1985 tcg_ctx
.gen_opc_icount
[ii
] = num_insns
;
1987 if (num_insns
+ 1 == max_insns
&& (tb
->cflags
& CF_LAST_IO
))
1990 fprintf(stderr
, "Loading opcode at address 0x%08x\n", ctx
.pc
);
1993 ctx
.opcode
= cpu_lduw_code(env
, ctx
.pc
);
1997 if ((ctx
.pc
& (TARGET_PAGE_SIZE
- 1)) == 0)
1999 if (env
->singlestep_enabled
)
2001 if (num_insns
>= max_insns
)
2006 if (tb
->cflags
& CF_LAST_IO
)
2008 if (env
->singlestep_enabled
) {
2009 tcg_gen_movi_i32(cpu_pc
, ctx
.pc
);
2010 gen_helper_debug(cpu_env
);
2012 switch (ctx
.bstate
) {
2014 /* gen_op_interrupt_restart(); */
2018 gen_store_flags(ctx
.flags
| DELAY_SLOT_CLEARME
);
2020 gen_goto_tb(&ctx
, 0, ctx
.pc
);
2023 /* gen_op_interrupt_restart(); */
2032 gen_icount_end(tb
, num_insns
);
2033 *tcg_ctx
.gen_opc_ptr
= INDEX_op_end
;
2035 i
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
2038 tcg_ctx
.gen_opc_instr_start
[ii
++] = 0;
2040 tb
->size
= ctx
.pc
- pc_start
;
2041 tb
->icount
= num_insns
;
2045 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
2046 qemu_log("IN:\n"); /* , lookup_symbol(pc_start)); */
2047 log_target_disas(env
, pc_start
, ctx
.pc
- pc_start
, 0);
2053 void gen_intermediate_code(CPUSH4State
* env
, struct TranslationBlock
*tb
)
2055 gen_intermediate_code_internal(env
, tb
, 0);
2058 void gen_intermediate_code_pc(CPUSH4State
* env
, struct TranslationBlock
*tb
)
2060 gen_intermediate_code_internal(env
, tb
, 1);
2063 void restore_state_to_opc(CPUSH4State
*env
, TranslationBlock
*tb
, int pc_pos
)
2065 env
->pc
= tcg_ctx
.gen_opc_pc
[pc_pos
];
2066 env
->flags
= gen_opc_hflags
[pc_pos
];