target-i386: Use mulu2 and muls2
[qemu/pbrook.git] / target-sh4 / translate.c
blobd255066e0ab789a7b185e845cceee9d2932df3dc
1 /*
2 * SH4 translation
4 * Copyright (c) 2005 Samuel Tardieu
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #define DEBUG_DISAS
21 //#define SH4_SINGLE_STEP
23 #include "cpu.h"
24 #include "disas/disas.h"
25 #include "tcg-op.h"
27 #include "helper.h"
28 #define GEN_HELPER 1
29 #include "helper.h"
31 typedef struct DisasContext {
32 struct TranslationBlock *tb;
33 target_ulong pc;
34 uint16_t opcode;
35 uint32_t flags;
36 int bstate;
37 int memidx;
38 uint32_t delayed_pc;
39 int singlestep_enabled;
40 uint32_t features;
41 int has_movcal;
42 } DisasContext;
/* In user-only builds everything runs as user code; otherwise user mode
   is the absence of the privileged SR_MD bit in the cached flags. */
#if defined(CONFIG_USER_ONLY)
#define IS_USER(ctx) 1
#else
#define IS_USER(ctx) (!(ctx->flags & SR_MD))
#endif
/* Values for DisasContext::bstate — why translation of a block ended. */
enum {
    BS_NONE   = 0, /* We go out of the TB without reaching a branch or an
                    * exception condition */
    BS_STOP   = 1, /* We want to stop translation for any reason */
    BS_BRANCH = 2, /* We reached a branch condition */
    BS_EXCP   = 3, /* We reached an exception condition */
};
59 /* global register indexes */
60 static TCGv_ptr cpu_env;
61 static TCGv cpu_gregs[24];
62 static TCGv cpu_pc, cpu_sr, cpu_ssr, cpu_spc, cpu_gbr;
63 static TCGv cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl;
64 static TCGv cpu_pr, cpu_fpscr, cpu_fpul, cpu_ldst;
65 static TCGv cpu_fregs[32];
67 /* internal register indexes */
68 static TCGv cpu_flags, cpu_delayed_pc;
70 static uint32_t gen_opc_hflags[OPC_BUF_SIZE];
72 #include "exec/gen-icount.h"
74 void sh4_translate_init(void)
76 int i;
77 static int done_init = 0;
78 static const char * const gregnames[24] = {
79 "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
80 "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
81 "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
82 "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
83 "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
85 static const char * const fregnames[32] = {
86 "FPR0_BANK0", "FPR1_BANK0", "FPR2_BANK0", "FPR3_BANK0",
87 "FPR4_BANK0", "FPR5_BANK0", "FPR6_BANK0", "FPR7_BANK0",
88 "FPR8_BANK0", "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
89 "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
90 "FPR0_BANK1", "FPR1_BANK1", "FPR2_BANK1", "FPR3_BANK1",
91 "FPR4_BANK1", "FPR5_BANK1", "FPR6_BANK1", "FPR7_BANK1",
92 "FPR8_BANK1", "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
93 "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
96 if (done_init)
97 return;
99 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
101 for (i = 0; i < 24; i++)
102 cpu_gregs[i] = tcg_global_mem_new_i32(TCG_AREG0,
103 offsetof(CPUSH4State, gregs[i]),
104 gregnames[i]);
106 cpu_pc = tcg_global_mem_new_i32(TCG_AREG0,
107 offsetof(CPUSH4State, pc), "PC");
108 cpu_sr = tcg_global_mem_new_i32(TCG_AREG0,
109 offsetof(CPUSH4State, sr), "SR");
110 cpu_ssr = tcg_global_mem_new_i32(TCG_AREG0,
111 offsetof(CPUSH4State, ssr), "SSR");
112 cpu_spc = tcg_global_mem_new_i32(TCG_AREG0,
113 offsetof(CPUSH4State, spc), "SPC");
114 cpu_gbr = tcg_global_mem_new_i32(TCG_AREG0,
115 offsetof(CPUSH4State, gbr), "GBR");
116 cpu_vbr = tcg_global_mem_new_i32(TCG_AREG0,
117 offsetof(CPUSH4State, vbr), "VBR");
118 cpu_sgr = tcg_global_mem_new_i32(TCG_AREG0,
119 offsetof(CPUSH4State, sgr), "SGR");
120 cpu_dbr = tcg_global_mem_new_i32(TCG_AREG0,
121 offsetof(CPUSH4State, dbr), "DBR");
122 cpu_mach = tcg_global_mem_new_i32(TCG_AREG0,
123 offsetof(CPUSH4State, mach), "MACH");
124 cpu_macl = tcg_global_mem_new_i32(TCG_AREG0,
125 offsetof(CPUSH4State, macl), "MACL");
126 cpu_pr = tcg_global_mem_new_i32(TCG_AREG0,
127 offsetof(CPUSH4State, pr), "PR");
128 cpu_fpscr = tcg_global_mem_new_i32(TCG_AREG0,
129 offsetof(CPUSH4State, fpscr), "FPSCR");
130 cpu_fpul = tcg_global_mem_new_i32(TCG_AREG0,
131 offsetof(CPUSH4State, fpul), "FPUL");
133 cpu_flags = tcg_global_mem_new_i32(TCG_AREG0,
134 offsetof(CPUSH4State, flags), "_flags_");
135 cpu_delayed_pc = tcg_global_mem_new_i32(TCG_AREG0,
136 offsetof(CPUSH4State, delayed_pc),
137 "_delayed_pc_");
138 cpu_ldst = tcg_global_mem_new_i32(TCG_AREG0,
139 offsetof(CPUSH4State, ldst), "_ldst_");
141 for (i = 0; i < 32; i++)
142 cpu_fregs[i] = tcg_global_mem_new_i32(TCG_AREG0,
143 offsetof(CPUSH4State, fregs[i]),
144 fregnames[i]);
146 /* register helpers */
147 #define GEN_HELPER 2
148 #include "helper.h"
150 done_init = 1;
153 void cpu_dump_state(CPUSH4State * env, FILE * f,
154 int (*cpu_fprintf) (FILE * f, const char *fmt, ...),
155 int flags)
157 int i;
158 cpu_fprintf(f, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
159 env->pc, env->sr, env->pr, env->fpscr);
160 cpu_fprintf(f, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
161 env->spc, env->ssr, env->gbr, env->vbr);
162 cpu_fprintf(f, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
163 env->sgr, env->dbr, env->delayed_pc, env->fpul);
164 for (i = 0; i < 24; i += 4) {
165 cpu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
166 i, env->gregs[i], i + 1, env->gregs[i + 1],
167 i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]);
169 if (env->flags & DELAY_SLOT) {
170 cpu_fprintf(f, "in delay slot (delayed_pc=0x%08x)\n",
171 env->delayed_pc);
172 } else if (env->flags & DELAY_SLOT_CONDITIONAL) {
173 cpu_fprintf(f, "in conditional delay slot (delayed_pc=0x%08x)\n",
174 env->delayed_pc);
/* Description of one supported SH4 CPU model. */
typedef struct {
    const char *name;   /* model name matched by cpu_sh4_find_by_name() */
    int id;             /* SH_CPU_* identifier */
    uint32_t pvr;       /* processor version register */
    uint32_t prr;       /* product register */
    uint32_t cvr;       /* cache version register */
    uint32_t features;  /* SH_FEATURE_* bits */
} sh4_def_t;
187 static sh4_def_t sh4_defs[] = {
189 .name = "SH7750R",
190 .id = SH_CPU_SH7750R,
191 .pvr = 0x00050000,
192 .prr = 0x00000100,
193 .cvr = 0x00110000,
194 .features = SH_FEATURE_BCR3_AND_BCR4,
195 }, {
196 .name = "SH7751R",
197 .id = SH_CPU_SH7751R,
198 .pvr = 0x04050005,
199 .prr = 0x00000113,
200 .cvr = 0x00110000, /* Neutered caches, should be 0x20480000 */
201 .features = SH_FEATURE_BCR3_AND_BCR4,
202 }, {
203 .name = "SH7785",
204 .id = SH_CPU_SH7785,
205 .pvr = 0x10300700,
206 .prr = 0x00000200,
207 .cvr = 0x71440211,
208 .features = SH_FEATURE_SH4A,
212 static const sh4_def_t *cpu_sh4_find_by_name(const char *name)
214 int i;
216 if (strcasecmp(name, "any") == 0)
217 return &sh4_defs[0];
219 for (i = 0; i < ARRAY_SIZE(sh4_defs); i++)
220 if (strcasecmp(name, sh4_defs[i].name) == 0)
221 return &sh4_defs[i];
223 return NULL;
226 void sh4_cpu_list(FILE *f, fprintf_function cpu_fprintf)
228 int i;
230 for (i = 0; i < ARRAY_SIZE(sh4_defs); i++)
231 (*cpu_fprintf)(f, "%s\n", sh4_defs[i].name);
234 static void cpu_register(CPUSH4State *env, const sh4_def_t *def)
236 env->pvr = def->pvr;
237 env->prr = def->prr;
238 env->cvr = def->cvr;
239 env->id = def->id;
242 SuperHCPU *cpu_sh4_init(const char *cpu_model)
244 SuperHCPU *cpu;
245 CPUSH4State *env;
246 const sh4_def_t *def;
248 def = cpu_sh4_find_by_name(cpu_model);
249 if (!def)
250 return NULL;
251 cpu = SUPERH_CPU(object_new(TYPE_SUPERH_CPU));
252 env = &cpu->env;
253 env->features = def->features;
254 env->cpu_model_str = cpu_model;
255 cpu_register(env, def);
257 object_property_set_bool(OBJECT(cpu), true, "realized", NULL);
259 return cpu;
262 static void gen_goto_tb(DisasContext * ctx, int n, target_ulong dest)
264 TranslationBlock *tb;
265 tb = ctx->tb;
267 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) &&
268 !ctx->singlestep_enabled) {
269 /* Use a direct jump if in same page and singlestep not enabled */
270 tcg_gen_goto_tb(n);
271 tcg_gen_movi_i32(cpu_pc, dest);
272 tcg_gen_exit_tb((tcg_target_long)tb + n);
273 } else {
274 tcg_gen_movi_i32(cpu_pc, dest);
275 if (ctx->singlestep_enabled)
276 gen_helper_debug(cpu_env);
277 tcg_gen_exit_tb(0);
281 static void gen_jump(DisasContext * ctx)
283 if (ctx->delayed_pc == (uint32_t) - 1) {
284 /* Target is not statically known, it comes necessarily from a
285 delayed jump as immediate jump are conditinal jumps */
286 tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc);
287 if (ctx->singlestep_enabled)
288 gen_helper_debug(cpu_env);
289 tcg_gen_exit_tb(0);
290 } else {
291 gen_goto_tb(ctx, 0, ctx->delayed_pc);
295 static inline void gen_branch_slot(uint32_t delayed_pc, int t)
297 TCGv sr;
298 int label = gen_new_label();
299 tcg_gen_movi_i32(cpu_delayed_pc, delayed_pc);
300 sr = tcg_temp_new();
301 tcg_gen_andi_i32(sr, cpu_sr, SR_T);
302 tcg_gen_brcondi_i32(t ? TCG_COND_EQ:TCG_COND_NE, sr, 0, label);
303 tcg_gen_ori_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
304 gen_set_label(label);
307 /* Immediate conditional jump (bt or bf) */
308 static void gen_conditional_jump(DisasContext * ctx,
309 target_ulong ift, target_ulong ifnott)
311 int l1;
312 TCGv sr;
314 l1 = gen_new_label();
315 sr = tcg_temp_new();
316 tcg_gen_andi_i32(sr, cpu_sr, SR_T);
317 tcg_gen_brcondi_i32(TCG_COND_NE, sr, 0, l1);
318 gen_goto_tb(ctx, 0, ifnott);
319 gen_set_label(l1);
320 gen_goto_tb(ctx, 1, ift);
323 /* Delayed conditional jump (bt or bf) */
324 static void gen_delayed_conditional_jump(DisasContext * ctx)
326 int l1;
327 TCGv ds;
329 l1 = gen_new_label();
330 ds = tcg_temp_new();
331 tcg_gen_andi_i32(ds, cpu_flags, DELAY_SLOT_TRUE);
332 tcg_gen_brcondi_i32(TCG_COND_NE, ds, 0, l1);
333 gen_goto_tb(ctx, 1, ctx->pc + 2);
334 gen_set_label(l1);
335 tcg_gen_andi_i32(cpu_flags, cpu_flags, ~DELAY_SLOT_TRUE);
336 gen_jump(ctx);
339 static inline void gen_cmp(int cond, TCGv t0, TCGv t1)
341 TCGv t;
343 t = tcg_temp_new();
344 tcg_gen_setcond_i32(cond, t, t1, t0);
345 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
346 tcg_gen_or_i32(cpu_sr, cpu_sr, t);
348 tcg_temp_free(t);
351 static inline void gen_cmp_imm(int cond, TCGv t0, int32_t imm)
353 TCGv t;
355 t = tcg_temp_new();
356 tcg_gen_setcondi_i32(cond, t, t0, imm);
357 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
358 tcg_gen_or_i32(cpu_sr, cpu_sr, t);
360 tcg_temp_free(t);
363 static inline void gen_store_flags(uint32_t flags)
365 tcg_gen_andi_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
366 tcg_gen_ori_i32(cpu_flags, cpu_flags, flags);
369 static inline void gen_copy_bit_i32(TCGv t0, int p0, TCGv t1, int p1)
371 TCGv tmp = tcg_temp_new();
373 p0 &= 0x1f;
374 p1 &= 0x1f;
376 tcg_gen_andi_i32(tmp, t1, (1 << p1));
377 tcg_gen_andi_i32(t0, t0, ~(1 << p0));
378 if (p0 < p1)
379 tcg_gen_shri_i32(tmp, tmp, p1 - p0);
380 else if (p0 > p1)
381 tcg_gen_shli_i32(tmp, tmp, p0 - p1);
382 tcg_gen_or_i32(t0, t0, tmp);
384 tcg_temp_free(tmp);
387 static inline void gen_load_fpr64(TCGv_i64 t, int reg)
389 tcg_gen_concat_i32_i64(t, cpu_fregs[reg + 1], cpu_fregs[reg]);
392 static inline void gen_store_fpr64 (TCGv_i64 t, int reg)
394 TCGv_i32 tmp = tcg_temp_new_i32();
395 tcg_gen_trunc_i64_i32(tmp, t);
396 tcg_gen_mov_i32(cpu_fregs[reg + 1], tmp);
397 tcg_gen_shri_i64(t, t, 32);
398 tcg_gen_trunc_i64_i32(tmp, t);
399 tcg_gen_mov_i32(cpu_fregs[reg], tmp);
400 tcg_temp_free_i32(tmp);
/* Opcode bit-field extractors; all read ctx->opcode.  BN_M selects bits
   N..M; an "s" suffix means the field is sign-extended. */
#define B3_0 (ctx->opcode & 0xf)
#define B6_4 ((ctx->opcode >> 4) & 0x7)
#define B7_4 ((ctx->opcode >> 4) & 0xf)
#define B7_0 (ctx->opcode & 0xff)
#define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff))
#define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
  (ctx->opcode & 0xfff))
#define B11_8 ((ctx->opcode >> 8) & 0xf)
#define B15_12 ((ctx->opcode >> 12) & 0xf)

/* General register access: R0..R7 are banked, selected by SR.MD+SR.RB.
   REG picks the currently active bank, ALTREG the inactive one. */
#define REG(x) ((x) < 8 && (ctx->flags & (SR_MD | SR_RB)) == (SR_MD | SR_RB) \
		? (cpu_gregs[x + 16]) : (cpu_gregs[x]))

#define ALTREG(x) ((x) < 8 && (ctx->flags & (SR_MD | SR_RB)) != (SR_MD | SR_RB)\
		? (cpu_gregs[x + 16]) : (cpu_gregs[x]))

/* FP register access: FPSCR.FR selects the active FP bank; XHACK remaps
   the register index for the paired (XD/DR) addressing used by fmov. */
#define FREG(x) (ctx->flags & FPSCR_FR ? (x) ^ 0x10 : (x))
#define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe))
#define XREG(x) (ctx->flags & FPSCR_FR ? XHACK(x) ^ 0x10 : XHACK(x))
#define DREG(x) FREG(x) /* Assumes lsb of (x) is always 0 */
/* Decoder guard macros.  Each one emits the appropriate exception helper,
   marks the TB as ending in an exception (BS_BRANCH) and returns from the
   decode function early when its condition is violated. */

/* Reject instructions that are illegal inside a delay slot. */
#define CHECK_NOT_DELAY_SLOT \
  if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL))     \
  {                                                           \
      tcg_gen_movi_i32(cpu_pc, ctx->pc);                      \
      gen_helper_raise_slot_illegal_instruction(cpu_env);     \
      ctx->bstate = BS_BRANCH;                                \
      return;                                                 \
  }

/* Reject privileged instructions when executing in user mode. */
#define CHECK_PRIVILEGED                                        \
  if (IS_USER(ctx)) {                                           \
      tcg_gen_movi_i32(cpu_pc, ctx->pc);                        \
      if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
          gen_helper_raise_slot_illegal_instruction(cpu_env);   \
      } else {                                                  \
          gen_helper_raise_illegal_instruction(cpu_env);        \
      }                                                         \
      ctx->bstate = BS_BRANCH;                                  \
      return;                                                   \
  }

/* Reject FPU instructions when SR.FD (FPU disable) is set. */
#define CHECK_FPU_ENABLED                                       \
  if (ctx->flags & SR_FD) {                                     \
      tcg_gen_movi_i32(cpu_pc, ctx->pc);                        \
      if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
          gen_helper_raise_slot_fpu_disable(cpu_env);           \
      } else {                                                  \
          gen_helper_raise_fpu_disable(cpu_env);                \
      }                                                         \
      ctx->bstate = BS_BRANCH;                                  \
      return;                                                   \
  }
457 static void _decode_opc(DisasContext * ctx)
459 /* This code tries to make movcal emulation sufficiently
460 accurate for Linux purposes. This instruction writes
461 memory, and prior to that, always allocates a cache line.
462 It is used in two contexts:
463 - in memcpy, where data is copied in blocks, the first write
464 of to a block uses movca.l for performance.
465 - in arch/sh/mm/cache-sh4.c, movcal.l + ocbi combination is used
466 to flush the cache. Here, the data written by movcal.l is never
467 written to memory, and the data written is just bogus.
469 To simulate this, we simulate movcal.l, we store the value to memory,
470 but we also remember the previous content. If we see ocbi, we check
471 if movcal.l for that address was done previously. If so, the write should
472 not have hit the memory, so we restore the previous content.
473 When we see an instruction that is neither movca.l
474 nor ocbi, the previous content is discarded.
476 To optimize, we only try to flush stores when we're at the start of
477 TB, or if we already saw movca.l in this TB and did not flush stores
478 yet. */
479 if (ctx->has_movcal)
481 int opcode = ctx->opcode & 0xf0ff;
482 if (opcode != 0x0093 /* ocbi */
483 && opcode != 0x00c3 /* movca.l */)
485 gen_helper_discard_movcal_backup(cpu_env);
486 ctx->has_movcal = 0;
490 #if 0
491 fprintf(stderr, "Translating opcode 0x%04x\n", ctx->opcode);
492 #endif
494 switch (ctx->opcode) {
495 case 0x0019: /* div0u */
496 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(SR_M | SR_Q | SR_T));
497 return;
498 case 0x000b: /* rts */
499 CHECK_NOT_DELAY_SLOT
500 tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr);
501 ctx->flags |= DELAY_SLOT;
502 ctx->delayed_pc = (uint32_t) - 1;
503 return;
504 case 0x0028: /* clrmac */
505 tcg_gen_movi_i32(cpu_mach, 0);
506 tcg_gen_movi_i32(cpu_macl, 0);
507 return;
508 case 0x0048: /* clrs */
509 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_S);
510 return;
511 case 0x0008: /* clrt */
512 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
513 return;
514 case 0x0038: /* ldtlb */
515 CHECK_PRIVILEGED
516 gen_helper_ldtlb(cpu_env);
517 return;
518 case 0x002b: /* rte */
519 CHECK_PRIVILEGED
520 CHECK_NOT_DELAY_SLOT
521 tcg_gen_mov_i32(cpu_sr, cpu_ssr);
522 tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc);
523 ctx->flags |= DELAY_SLOT;
524 ctx->delayed_pc = (uint32_t) - 1;
525 return;
526 case 0x0058: /* sets */
527 tcg_gen_ori_i32(cpu_sr, cpu_sr, SR_S);
528 return;
529 case 0x0018: /* sett */
530 tcg_gen_ori_i32(cpu_sr, cpu_sr, SR_T);
531 return;
532 case 0xfbfd: /* frchg */
533 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_FR);
534 ctx->bstate = BS_STOP;
535 return;
536 case 0xf3fd: /* fschg */
537 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_SZ);
538 ctx->bstate = BS_STOP;
539 return;
540 case 0x0009: /* nop */
541 return;
542 case 0x001b: /* sleep */
543 CHECK_PRIVILEGED
544 tcg_gen_movi_i32(cpu_pc, ctx->pc + 2);
545 gen_helper_sleep(cpu_env);
546 return;
549 switch (ctx->opcode & 0xf000) {
550 case 0x1000: /* mov.l Rm,@(disp,Rn) */
552 TCGv addr = tcg_temp_new();
553 tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4);
554 tcg_gen_qemu_st32(REG(B7_4), addr, ctx->memidx);
555 tcg_temp_free(addr);
557 return;
558 case 0x5000: /* mov.l @(disp,Rm),Rn */
560 TCGv addr = tcg_temp_new();
561 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4);
562 tcg_gen_qemu_ld32s(REG(B11_8), addr, ctx->memidx);
563 tcg_temp_free(addr);
565 return;
566 case 0xe000: /* mov #imm,Rn */
567 tcg_gen_movi_i32(REG(B11_8), B7_0s);
568 return;
569 case 0x9000: /* mov.w @(disp,PC),Rn */
571 TCGv addr = tcg_const_i32(ctx->pc + 4 + B7_0 * 2);
572 tcg_gen_qemu_ld16s(REG(B11_8), addr, ctx->memidx);
573 tcg_temp_free(addr);
575 return;
576 case 0xd000: /* mov.l @(disp,PC),Rn */
578 TCGv addr = tcg_const_i32((ctx->pc + 4 + B7_0 * 4) & ~3);
579 tcg_gen_qemu_ld32s(REG(B11_8), addr, ctx->memidx);
580 tcg_temp_free(addr);
582 return;
583 case 0x7000: /* add #imm,Rn */
584 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), B7_0s);
585 return;
586 case 0xa000: /* bra disp */
587 CHECK_NOT_DELAY_SLOT
588 ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
589 tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
590 ctx->flags |= DELAY_SLOT;
591 return;
592 case 0xb000: /* bsr disp */
593 CHECK_NOT_DELAY_SLOT
594 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
595 ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
596 tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
597 ctx->flags |= DELAY_SLOT;
598 return;
601 switch (ctx->opcode & 0xf00f) {
602 case 0x6003: /* mov Rm,Rn */
603 tcg_gen_mov_i32(REG(B11_8), REG(B7_4));
604 return;
605 case 0x2000: /* mov.b Rm,@Rn */
606 tcg_gen_qemu_st8(REG(B7_4), REG(B11_8), ctx->memidx);
607 return;
608 case 0x2001: /* mov.w Rm,@Rn */
609 tcg_gen_qemu_st16(REG(B7_4), REG(B11_8), ctx->memidx);
610 return;
611 case 0x2002: /* mov.l Rm,@Rn */
612 tcg_gen_qemu_st32(REG(B7_4), REG(B11_8), ctx->memidx);
613 return;
614 case 0x6000: /* mov.b @Rm,Rn */
615 tcg_gen_qemu_ld8s(REG(B11_8), REG(B7_4), ctx->memidx);
616 return;
617 case 0x6001: /* mov.w @Rm,Rn */
618 tcg_gen_qemu_ld16s(REG(B11_8), REG(B7_4), ctx->memidx);
619 return;
620 case 0x6002: /* mov.l @Rm,Rn */
621 tcg_gen_qemu_ld32s(REG(B11_8), REG(B7_4), ctx->memidx);
622 return;
623 case 0x2004: /* mov.b Rm,@-Rn */
625 TCGv addr = tcg_temp_new();
626 tcg_gen_subi_i32(addr, REG(B11_8), 1);
627 tcg_gen_qemu_st8(REG(B7_4), addr, ctx->memidx); /* might cause re-execution */
628 tcg_gen_mov_i32(REG(B11_8), addr); /* modify register status */
629 tcg_temp_free(addr);
631 return;
632 case 0x2005: /* mov.w Rm,@-Rn */
634 TCGv addr = tcg_temp_new();
635 tcg_gen_subi_i32(addr, REG(B11_8), 2);
636 tcg_gen_qemu_st16(REG(B7_4), addr, ctx->memidx);
637 tcg_gen_mov_i32(REG(B11_8), addr);
638 tcg_temp_free(addr);
640 return;
641 case 0x2006: /* mov.l Rm,@-Rn */
643 TCGv addr = tcg_temp_new();
644 tcg_gen_subi_i32(addr, REG(B11_8), 4);
645 tcg_gen_qemu_st32(REG(B7_4), addr, ctx->memidx);
646 tcg_gen_mov_i32(REG(B11_8), addr);
648 return;
649 case 0x6004: /* mov.b @Rm+,Rn */
650 tcg_gen_qemu_ld8s(REG(B11_8), REG(B7_4), ctx->memidx);
651 if ( B11_8 != B7_4 )
652 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 1);
653 return;
654 case 0x6005: /* mov.w @Rm+,Rn */
655 tcg_gen_qemu_ld16s(REG(B11_8), REG(B7_4), ctx->memidx);
656 if ( B11_8 != B7_4 )
657 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
658 return;
659 case 0x6006: /* mov.l @Rm+,Rn */
660 tcg_gen_qemu_ld32s(REG(B11_8), REG(B7_4), ctx->memidx);
661 if ( B11_8 != B7_4 )
662 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
663 return;
664 case 0x0004: /* mov.b Rm,@(R0,Rn) */
666 TCGv addr = tcg_temp_new();
667 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
668 tcg_gen_qemu_st8(REG(B7_4), addr, ctx->memidx);
669 tcg_temp_free(addr);
671 return;
672 case 0x0005: /* mov.w Rm,@(R0,Rn) */
674 TCGv addr = tcg_temp_new();
675 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
676 tcg_gen_qemu_st16(REG(B7_4), addr, ctx->memidx);
677 tcg_temp_free(addr);
679 return;
680 case 0x0006: /* mov.l Rm,@(R0,Rn) */
682 TCGv addr = tcg_temp_new();
683 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
684 tcg_gen_qemu_st32(REG(B7_4), addr, ctx->memidx);
685 tcg_temp_free(addr);
687 return;
688 case 0x000c: /* mov.b @(R0,Rm),Rn */
690 TCGv addr = tcg_temp_new();
691 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
692 tcg_gen_qemu_ld8s(REG(B11_8), addr, ctx->memidx);
693 tcg_temp_free(addr);
695 return;
696 case 0x000d: /* mov.w @(R0,Rm),Rn */
698 TCGv addr = tcg_temp_new();
699 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
700 tcg_gen_qemu_ld16s(REG(B11_8), addr, ctx->memidx);
701 tcg_temp_free(addr);
703 return;
704 case 0x000e: /* mov.l @(R0,Rm),Rn */
706 TCGv addr = tcg_temp_new();
707 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
708 tcg_gen_qemu_ld32s(REG(B11_8), addr, ctx->memidx);
709 tcg_temp_free(addr);
711 return;
712 case 0x6008: /* swap.b Rm,Rn */
714 TCGv high, low;
715 high = tcg_temp_new();
716 tcg_gen_andi_i32(high, REG(B7_4), 0xffff0000);
717 low = tcg_temp_new();
718 tcg_gen_ext16u_i32(low, REG(B7_4));
719 tcg_gen_bswap16_i32(low, low);
720 tcg_gen_or_i32(REG(B11_8), high, low);
721 tcg_temp_free(low);
722 tcg_temp_free(high);
724 return;
725 case 0x6009: /* swap.w Rm,Rn */
726 tcg_gen_rotli_i32(REG(B11_8), REG(B7_4), 16);
727 return;
728 case 0x200d: /* xtrct Rm,Rn */
730 TCGv high, low;
731 high = tcg_temp_new();
732 tcg_gen_shli_i32(high, REG(B7_4), 16);
733 low = tcg_temp_new();
734 tcg_gen_shri_i32(low, REG(B11_8), 16);
735 tcg_gen_or_i32(REG(B11_8), high, low);
736 tcg_temp_free(low);
737 tcg_temp_free(high);
739 return;
740 case 0x300c: /* add Rm,Rn */
741 tcg_gen_add_i32(REG(B11_8), REG(B11_8), REG(B7_4));
742 return;
743 case 0x300e: /* addc Rm,Rn */
745 TCGv t0, t1, t2;
746 t0 = tcg_temp_new();
747 tcg_gen_andi_i32(t0, cpu_sr, SR_T);
748 t1 = tcg_temp_new();
749 tcg_gen_add_i32(t1, REG(B7_4), REG(B11_8));
750 tcg_gen_add_i32(t0, t0, t1);
751 t2 = tcg_temp_new();
752 tcg_gen_setcond_i32(TCG_COND_GTU, t2, REG(B11_8), t1);
753 tcg_gen_setcond_i32(TCG_COND_GTU, t1, t1, t0);
754 tcg_gen_or_i32(t1, t1, t2);
755 tcg_temp_free(t2);
756 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
757 tcg_gen_or_i32(cpu_sr, cpu_sr, t1);
758 tcg_temp_free(t1);
759 tcg_gen_mov_i32(REG(B11_8), t0);
760 tcg_temp_free(t0);
762 return;
763 case 0x300f: /* addv Rm,Rn */
765 TCGv t0, t1, t2;
766 t0 = tcg_temp_new();
767 tcg_gen_add_i32(t0, REG(B7_4), REG(B11_8));
768 t1 = tcg_temp_new();
769 tcg_gen_xor_i32(t1, t0, REG(B11_8));
770 t2 = tcg_temp_new();
771 tcg_gen_xor_i32(t2, REG(B7_4), REG(B11_8));
772 tcg_gen_andc_i32(t1, t1, t2);
773 tcg_temp_free(t2);
774 tcg_gen_shri_i32(t1, t1, 31);
775 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
776 tcg_gen_or_i32(cpu_sr, cpu_sr, t1);
777 tcg_temp_free(t1);
778 tcg_gen_mov_i32(REG(B7_4), t0);
779 tcg_temp_free(t0);
781 return;
782 case 0x2009: /* and Rm,Rn */
783 tcg_gen_and_i32(REG(B11_8), REG(B11_8), REG(B7_4));
784 return;
785 case 0x3000: /* cmp/eq Rm,Rn */
786 gen_cmp(TCG_COND_EQ, REG(B7_4), REG(B11_8));
787 return;
788 case 0x3003: /* cmp/ge Rm,Rn */
789 gen_cmp(TCG_COND_GE, REG(B7_4), REG(B11_8));
790 return;
791 case 0x3007: /* cmp/gt Rm,Rn */
792 gen_cmp(TCG_COND_GT, REG(B7_4), REG(B11_8));
793 return;
794 case 0x3006: /* cmp/hi Rm,Rn */
795 gen_cmp(TCG_COND_GTU, REG(B7_4), REG(B11_8));
796 return;
797 case 0x3002: /* cmp/hs Rm,Rn */
798 gen_cmp(TCG_COND_GEU, REG(B7_4), REG(B11_8));
799 return;
800 case 0x200c: /* cmp/str Rm,Rn */
802 TCGv cmp1 = tcg_temp_new();
803 TCGv cmp2 = tcg_temp_new();
804 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
805 tcg_gen_xor_i32(cmp1, REG(B7_4), REG(B11_8));
806 tcg_gen_andi_i32(cmp2, cmp1, 0xff000000);
807 tcg_gen_setcondi_i32(TCG_COND_EQ, cmp2, cmp2, 0);
808 tcg_gen_or_i32(cpu_sr, cpu_sr, cmp2);
809 tcg_gen_andi_i32(cmp2, cmp1, 0x00ff0000);
810 tcg_gen_setcondi_i32(TCG_COND_EQ, cmp2, cmp2, 0);
811 tcg_gen_or_i32(cpu_sr, cpu_sr, cmp2);
812 tcg_gen_andi_i32(cmp2, cmp1, 0x0000ff00);
813 tcg_gen_setcondi_i32(TCG_COND_EQ, cmp2, cmp2, 0);
814 tcg_gen_or_i32(cpu_sr, cpu_sr, cmp2);
815 tcg_gen_andi_i32(cmp2, cmp1, 0x000000ff);
816 tcg_gen_setcondi_i32(TCG_COND_EQ, cmp2, cmp2, 0);
817 tcg_gen_or_i32(cpu_sr, cpu_sr, cmp2);
818 tcg_temp_free(cmp2);
819 tcg_temp_free(cmp1);
821 return;
822 case 0x2007: /* div0s Rm,Rn */
824 gen_copy_bit_i32(cpu_sr, 8, REG(B11_8), 31); /* SR_Q */
825 gen_copy_bit_i32(cpu_sr, 9, REG(B7_4), 31); /* SR_M */
826 TCGv val = tcg_temp_new();
827 tcg_gen_xor_i32(val, REG(B7_4), REG(B11_8));
828 gen_copy_bit_i32(cpu_sr, 0, val, 31); /* SR_T */
829 tcg_temp_free(val);
831 return;
832 case 0x3004: /* div1 Rm,Rn */
833 gen_helper_div1(REG(B11_8), cpu_env, REG(B7_4), REG(B11_8));
834 return;
835 case 0x300d: /* dmuls.l Rm,Rn */
836 tcg_gen_muls2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
837 return;
838 case 0x3005: /* dmulu.l Rm,Rn */
839 tcg_gen_mulu2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
840 return;
841 case 0x600e: /* exts.b Rm,Rn */
842 tcg_gen_ext8s_i32(REG(B11_8), REG(B7_4));
843 return;
844 case 0x600f: /* exts.w Rm,Rn */
845 tcg_gen_ext16s_i32(REG(B11_8), REG(B7_4));
846 return;
847 case 0x600c: /* extu.b Rm,Rn */
848 tcg_gen_ext8u_i32(REG(B11_8), REG(B7_4));
849 return;
850 case 0x600d: /* extu.w Rm,Rn */
851 tcg_gen_ext16u_i32(REG(B11_8), REG(B7_4));
852 return;
853 case 0x000f: /* mac.l @Rm+,@Rn+ */
855 TCGv arg0, arg1;
856 arg0 = tcg_temp_new();
857 tcg_gen_qemu_ld32s(arg0, REG(B7_4), ctx->memidx);
858 arg1 = tcg_temp_new();
859 tcg_gen_qemu_ld32s(arg1, REG(B11_8), ctx->memidx);
860 gen_helper_macl(cpu_env, arg0, arg1);
861 tcg_temp_free(arg1);
862 tcg_temp_free(arg0);
863 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
864 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
866 return;
867 case 0x400f: /* mac.w @Rm+,@Rn+ */
869 TCGv arg0, arg1;
870 arg0 = tcg_temp_new();
871 tcg_gen_qemu_ld32s(arg0, REG(B7_4), ctx->memidx);
872 arg1 = tcg_temp_new();
873 tcg_gen_qemu_ld32s(arg1, REG(B11_8), ctx->memidx);
874 gen_helper_macw(cpu_env, arg0, arg1);
875 tcg_temp_free(arg1);
876 tcg_temp_free(arg0);
877 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2);
878 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
880 return;
881 case 0x0007: /* mul.l Rm,Rn */
882 tcg_gen_mul_i32(cpu_macl, REG(B7_4), REG(B11_8));
883 return;
884 case 0x200f: /* muls.w Rm,Rn */
886 TCGv arg0, arg1;
887 arg0 = tcg_temp_new();
888 tcg_gen_ext16s_i32(arg0, REG(B7_4));
889 arg1 = tcg_temp_new();
890 tcg_gen_ext16s_i32(arg1, REG(B11_8));
891 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
892 tcg_temp_free(arg1);
893 tcg_temp_free(arg0);
895 return;
896 case 0x200e: /* mulu.w Rm,Rn */
898 TCGv arg0, arg1;
899 arg0 = tcg_temp_new();
900 tcg_gen_ext16u_i32(arg0, REG(B7_4));
901 arg1 = tcg_temp_new();
902 tcg_gen_ext16u_i32(arg1, REG(B11_8));
903 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
904 tcg_temp_free(arg1);
905 tcg_temp_free(arg0);
907 return;
908 case 0x600b: /* neg Rm,Rn */
909 tcg_gen_neg_i32(REG(B11_8), REG(B7_4));
910 return;
911 case 0x600a: /* negc Rm,Rn */
913 TCGv t0, t1;
914 t0 = tcg_temp_new();
915 tcg_gen_neg_i32(t0, REG(B7_4));
916 t1 = tcg_temp_new();
917 tcg_gen_andi_i32(t1, cpu_sr, SR_T);
918 tcg_gen_sub_i32(REG(B11_8), t0, t1);
919 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
920 tcg_gen_setcondi_i32(TCG_COND_GTU, t1, t0, 0);
921 tcg_gen_or_i32(cpu_sr, cpu_sr, t1);
922 tcg_gen_setcond_i32(TCG_COND_GTU, t1, REG(B11_8), t0);
923 tcg_gen_or_i32(cpu_sr, cpu_sr, t1);
924 tcg_temp_free(t0);
925 tcg_temp_free(t1);
927 return;
928 case 0x6007: /* not Rm,Rn */
929 tcg_gen_not_i32(REG(B11_8), REG(B7_4));
930 return;
931 case 0x200b: /* or Rm,Rn */
932 tcg_gen_or_i32(REG(B11_8), REG(B11_8), REG(B7_4));
933 return;
934 case 0x400c: /* shad Rm,Rn */
936 int label1 = gen_new_label();
937 int label2 = gen_new_label();
938 int label3 = gen_new_label();
939 int label4 = gen_new_label();
940 TCGv shift;
941 tcg_gen_brcondi_i32(TCG_COND_LT, REG(B7_4), 0, label1);
942 /* Rm positive, shift to the left */
943 shift = tcg_temp_new();
944 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
945 tcg_gen_shl_i32(REG(B11_8), REG(B11_8), shift);
946 tcg_temp_free(shift);
947 tcg_gen_br(label4);
948 /* Rm negative, shift to the right */
949 gen_set_label(label1);
950 shift = tcg_temp_new();
951 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
952 tcg_gen_brcondi_i32(TCG_COND_EQ, shift, 0, label2);
953 tcg_gen_not_i32(shift, REG(B7_4));
954 tcg_gen_andi_i32(shift, shift, 0x1f);
955 tcg_gen_addi_i32(shift, shift, 1);
956 tcg_gen_sar_i32(REG(B11_8), REG(B11_8), shift);
957 tcg_temp_free(shift);
958 tcg_gen_br(label4);
959 /* Rm = -32 */
960 gen_set_label(label2);
961 tcg_gen_brcondi_i32(TCG_COND_LT, REG(B11_8), 0, label3);
962 tcg_gen_movi_i32(REG(B11_8), 0);
963 tcg_gen_br(label4);
964 gen_set_label(label3);
965 tcg_gen_movi_i32(REG(B11_8), 0xffffffff);
966 gen_set_label(label4);
968 return;
969 case 0x400d: /* shld Rm,Rn */
971 int label1 = gen_new_label();
972 int label2 = gen_new_label();
973 int label3 = gen_new_label();
974 TCGv shift;
975 tcg_gen_brcondi_i32(TCG_COND_LT, REG(B7_4), 0, label1);
976 /* Rm positive, shift to the left */
977 shift = tcg_temp_new();
978 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
979 tcg_gen_shl_i32(REG(B11_8), REG(B11_8), shift);
980 tcg_temp_free(shift);
981 tcg_gen_br(label3);
982 /* Rm negative, shift to the right */
983 gen_set_label(label1);
984 shift = tcg_temp_new();
985 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
986 tcg_gen_brcondi_i32(TCG_COND_EQ, shift, 0, label2);
987 tcg_gen_not_i32(shift, REG(B7_4));
988 tcg_gen_andi_i32(shift, shift, 0x1f);
989 tcg_gen_addi_i32(shift, shift, 1);
990 tcg_gen_shr_i32(REG(B11_8), REG(B11_8), shift);
991 tcg_temp_free(shift);
992 tcg_gen_br(label3);
993 /* Rm = -32 */
994 gen_set_label(label2);
995 tcg_gen_movi_i32(REG(B11_8), 0);
996 gen_set_label(label3);
998 return;
999 case 0x3008: /* sub Rm,Rn */
1000 tcg_gen_sub_i32(REG(B11_8), REG(B11_8), REG(B7_4));
1001 return;
1002 case 0x300a: /* subc Rm,Rn */
1004 TCGv t0, t1, t2;
1005 t0 = tcg_temp_new();
1006 tcg_gen_andi_i32(t0, cpu_sr, SR_T);
1007 t1 = tcg_temp_new();
1008 tcg_gen_sub_i32(t1, REG(B11_8), REG(B7_4));
1009 tcg_gen_sub_i32(t0, t1, t0);
1010 t2 = tcg_temp_new();
1011 tcg_gen_setcond_i32(TCG_COND_LTU, t2, REG(B11_8), t1);
1012 tcg_gen_setcond_i32(TCG_COND_LTU, t1, t1, t0);
1013 tcg_gen_or_i32(t1, t1, t2);
1014 tcg_temp_free(t2);
1015 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
1016 tcg_gen_or_i32(cpu_sr, cpu_sr, t1);
1017 tcg_temp_free(t1);
1018 tcg_gen_mov_i32(REG(B11_8), t0);
1019 tcg_temp_free(t0);
1021 return;
1022 case 0x300b: /* subv Rm,Rn */
1024 TCGv t0, t1, t2;
1025 t0 = tcg_temp_new();
1026 tcg_gen_sub_i32(t0, REG(B11_8), REG(B7_4));
1027 t1 = tcg_temp_new();
1028 tcg_gen_xor_i32(t1, t0, REG(B7_4));
1029 t2 = tcg_temp_new();
1030 tcg_gen_xor_i32(t2, REG(B11_8), REG(B7_4));
1031 tcg_gen_and_i32(t1, t1, t2);
1032 tcg_temp_free(t2);
1033 tcg_gen_shri_i32(t1, t1, 31);
1034 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
1035 tcg_gen_or_i32(cpu_sr, cpu_sr, t1);
1036 tcg_temp_free(t1);
1037 tcg_gen_mov_i32(REG(B11_8), t0);
1038 tcg_temp_free(t0);
1040 return;
1041 case 0x2008: /* tst Rm,Rn */
1043 TCGv val = tcg_temp_new();
1044 tcg_gen_and_i32(val, REG(B7_4), REG(B11_8));
1045 gen_cmp_imm(TCG_COND_EQ, val, 0);
1046 tcg_temp_free(val);
1048 return;
1049 case 0x200a: /* xor Rm,Rn */
1050 tcg_gen_xor_i32(REG(B11_8), REG(B11_8), REG(B7_4));
1051 return;
1052 case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
1053 CHECK_FPU_ENABLED
1054 if (ctx->flags & FPSCR_SZ) {
1055 TCGv_i64 fp = tcg_temp_new_i64();
1056 gen_load_fpr64(fp, XREG(B7_4));
1057 gen_store_fpr64(fp, XREG(B11_8));
1058 tcg_temp_free_i64(fp);
1059 } else {
1060 tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1062 return;
1063 case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
1064 CHECK_FPU_ENABLED
1065 if (ctx->flags & FPSCR_SZ) {
1066 TCGv addr_hi = tcg_temp_new();
1067 int fr = XREG(B7_4);
1068 tcg_gen_addi_i32(addr_hi, REG(B11_8), 4);
1069 tcg_gen_qemu_st32(cpu_fregs[fr ], REG(B11_8), ctx->memidx);
1070 tcg_gen_qemu_st32(cpu_fregs[fr+1], addr_hi, ctx->memidx);
1071 tcg_temp_free(addr_hi);
1072 } else {
1073 tcg_gen_qemu_st32(cpu_fregs[FREG(B7_4)], REG(B11_8), ctx->memidx);
1075 return;
1076 case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
1077 CHECK_FPU_ENABLED
1078 if (ctx->flags & FPSCR_SZ) {
1079 TCGv addr_hi = tcg_temp_new();
1080 int fr = XREG(B11_8);
1081 tcg_gen_addi_i32(addr_hi, REG(B7_4), 4);
1082 tcg_gen_qemu_ld32u(cpu_fregs[fr ], REG(B7_4), ctx->memidx);
1083 tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr_hi, ctx->memidx);
1084 tcg_temp_free(addr_hi);
1085 } else {
1086 tcg_gen_qemu_ld32u(cpu_fregs[FREG(B11_8)], REG(B7_4), ctx->memidx);
1088 return;
1089 case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
1090 CHECK_FPU_ENABLED
1091 if (ctx->flags & FPSCR_SZ) {
1092 TCGv addr_hi = tcg_temp_new();
1093 int fr = XREG(B11_8);
1094 tcg_gen_addi_i32(addr_hi, REG(B7_4), 4);
1095 tcg_gen_qemu_ld32u(cpu_fregs[fr ], REG(B7_4), ctx->memidx);
1096 tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr_hi, ctx->memidx);
1097 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8);
1098 tcg_temp_free(addr_hi);
1099 } else {
1100 tcg_gen_qemu_ld32u(cpu_fregs[FREG(B11_8)], REG(B7_4), ctx->memidx);
1101 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
1103 return;
1104 case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
1105 CHECK_FPU_ENABLED
1106 if (ctx->flags & FPSCR_SZ) {
1107 TCGv addr = tcg_temp_new_i32();
1108 int fr = XREG(B7_4);
1109 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1110 tcg_gen_qemu_st32(cpu_fregs[fr+1], addr, ctx->memidx);
1111 tcg_gen_subi_i32(addr, addr, 4);
1112 tcg_gen_qemu_st32(cpu_fregs[fr ], addr, ctx->memidx);
1113 tcg_gen_mov_i32(REG(B11_8), addr);
1114 tcg_temp_free(addr);
1115 } else {
1116 TCGv addr;
1117 addr = tcg_temp_new_i32();
1118 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1119 tcg_gen_qemu_st32(cpu_fregs[FREG(B7_4)], addr, ctx->memidx);
1120 tcg_gen_mov_i32(REG(B11_8), addr);
1121 tcg_temp_free(addr);
1123 return;
1124 case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rm - FPSCR: Nothing */
1125 CHECK_FPU_ENABLED
1127 TCGv addr = tcg_temp_new_i32();
1128 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
1129 if (ctx->flags & FPSCR_SZ) {
1130 int fr = XREG(B11_8);
1131 tcg_gen_qemu_ld32u(cpu_fregs[fr ], addr, ctx->memidx);
1132 tcg_gen_addi_i32(addr, addr, 4);
1133 tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr, ctx->memidx);
1134 } else {
1135 tcg_gen_qemu_ld32u(cpu_fregs[FREG(B11_8)], addr, ctx->memidx);
1137 tcg_temp_free(addr);
1139 return;
1140 case 0xf007: /* fmov {F,D,X}Rn,@(R0,Rn) - FPSCR: Nothing */
1141 CHECK_FPU_ENABLED
1143 TCGv addr = tcg_temp_new();
1144 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
1145 if (ctx->flags & FPSCR_SZ) {
1146 int fr = XREG(B7_4);
1147 tcg_gen_qemu_ld32u(cpu_fregs[fr ], addr, ctx->memidx);
1148 tcg_gen_addi_i32(addr, addr, 4);
1149 tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr, ctx->memidx);
1150 } else {
1151 tcg_gen_qemu_st32(cpu_fregs[FREG(B7_4)], addr, ctx->memidx);
1153 tcg_temp_free(addr);
1155 return;
1156 case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1157 case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1158 case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1159 case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1160 case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1161 case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1163 CHECK_FPU_ENABLED
1164 if (ctx->flags & FPSCR_PR) {
1165 TCGv_i64 fp0, fp1;
1167 if (ctx->opcode & 0x0110)
1168 break; /* illegal instruction */
1169 fp0 = tcg_temp_new_i64();
1170 fp1 = tcg_temp_new_i64();
1171 gen_load_fpr64(fp0, DREG(B11_8));
1172 gen_load_fpr64(fp1, DREG(B7_4));
1173 switch (ctx->opcode & 0xf00f) {
1174 case 0xf000: /* fadd Rm,Rn */
1175 gen_helper_fadd_DT(fp0, cpu_env, fp0, fp1);
1176 break;
1177 case 0xf001: /* fsub Rm,Rn */
1178 gen_helper_fsub_DT(fp0, cpu_env, fp0, fp1);
1179 break;
1180 case 0xf002: /* fmul Rm,Rn */
1181 gen_helper_fmul_DT(fp0, cpu_env, fp0, fp1);
1182 break;
1183 case 0xf003: /* fdiv Rm,Rn */
1184 gen_helper_fdiv_DT(fp0, cpu_env, fp0, fp1);
1185 break;
1186 case 0xf004: /* fcmp/eq Rm,Rn */
1187 gen_helper_fcmp_eq_DT(cpu_env, fp0, fp1);
1188 return;
1189 case 0xf005: /* fcmp/gt Rm,Rn */
1190 gen_helper_fcmp_gt_DT(cpu_env, fp0, fp1);
1191 return;
1193 gen_store_fpr64(fp0, DREG(B11_8));
1194 tcg_temp_free_i64(fp0);
1195 tcg_temp_free_i64(fp1);
1196 } else {
1197 switch (ctx->opcode & 0xf00f) {
1198 case 0xf000: /* fadd Rm,Rn */
1199 gen_helper_fadd_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1200 cpu_fregs[FREG(B11_8)],
1201 cpu_fregs[FREG(B7_4)]);
1202 break;
1203 case 0xf001: /* fsub Rm,Rn */
1204 gen_helper_fsub_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1205 cpu_fregs[FREG(B11_8)],
1206 cpu_fregs[FREG(B7_4)]);
1207 break;
1208 case 0xf002: /* fmul Rm,Rn */
1209 gen_helper_fmul_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1210 cpu_fregs[FREG(B11_8)],
1211 cpu_fregs[FREG(B7_4)]);
1212 break;
1213 case 0xf003: /* fdiv Rm,Rn */
1214 gen_helper_fdiv_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1215 cpu_fregs[FREG(B11_8)],
1216 cpu_fregs[FREG(B7_4)]);
1217 break;
1218 case 0xf004: /* fcmp/eq Rm,Rn */
1219 gen_helper_fcmp_eq_FT(cpu_env, cpu_fregs[FREG(B11_8)],
1220 cpu_fregs[FREG(B7_4)]);
1221 return;
1222 case 0xf005: /* fcmp/gt Rm,Rn */
1223 gen_helper_fcmp_gt_FT(cpu_env, cpu_fregs[FREG(B11_8)],
1224 cpu_fregs[FREG(B7_4)]);
1225 return;
1229 return;
1230 case 0xf00e: /* fmac FR0,RM,Rn */
1232 CHECK_FPU_ENABLED
1233 if (ctx->flags & FPSCR_PR) {
1234 break; /* illegal instruction */
1235 } else {
1236 gen_helper_fmac_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1237 cpu_fregs[FREG(0)], cpu_fregs[FREG(B7_4)],
1238 cpu_fregs[FREG(B11_8)]);
1239 return;
1244 switch (ctx->opcode & 0xff00) {
1245 case 0xc900: /* and #imm,R0 */
1246 tcg_gen_andi_i32(REG(0), REG(0), B7_0);
1247 return;
1248 case 0xcd00: /* and.b #imm,@(R0,GBR) */
1250 TCGv addr, val;
1251 addr = tcg_temp_new();
1252 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1253 val = tcg_temp_new();
1254 tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
1255 tcg_gen_andi_i32(val, val, B7_0);
1256 tcg_gen_qemu_st8(val, addr, ctx->memidx);
1257 tcg_temp_free(val);
1258 tcg_temp_free(addr);
1260 return;
1261 case 0x8b00: /* bf label */
1262 CHECK_NOT_DELAY_SLOT
1263 gen_conditional_jump(ctx, ctx->pc + 2,
1264 ctx->pc + 4 + B7_0s * 2);
1265 ctx->bstate = BS_BRANCH;
1266 return;
1267 case 0x8f00: /* bf/s label */
1268 CHECK_NOT_DELAY_SLOT
1269 gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 0);
1270 ctx->flags |= DELAY_SLOT_CONDITIONAL;
1271 return;
1272 case 0x8900: /* bt label */
1273 CHECK_NOT_DELAY_SLOT
1274 gen_conditional_jump(ctx, ctx->pc + 4 + B7_0s * 2,
1275 ctx->pc + 2);
1276 ctx->bstate = BS_BRANCH;
1277 return;
1278 case 0x8d00: /* bt/s label */
1279 CHECK_NOT_DELAY_SLOT
1280 gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 1);
1281 ctx->flags |= DELAY_SLOT_CONDITIONAL;
1282 return;
1283 case 0x8800: /* cmp/eq #imm,R0 */
1284 gen_cmp_imm(TCG_COND_EQ, REG(0), B7_0s);
1285 return;
1286 case 0xc400: /* mov.b @(disp,GBR),R0 */
1288 TCGv addr = tcg_temp_new();
1289 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1290 tcg_gen_qemu_ld8s(REG(0), addr, ctx->memidx);
1291 tcg_temp_free(addr);
1293 return;
1294 case 0xc500: /* mov.w @(disp,GBR),R0 */
1296 TCGv addr = tcg_temp_new();
1297 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1298 tcg_gen_qemu_ld16s(REG(0), addr, ctx->memidx);
1299 tcg_temp_free(addr);
1301 return;
1302 case 0xc600: /* mov.l @(disp,GBR),R0 */
1304 TCGv addr = tcg_temp_new();
1305 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1306 tcg_gen_qemu_ld32s(REG(0), addr, ctx->memidx);
1307 tcg_temp_free(addr);
1309 return;
1310 case 0xc000: /* mov.b R0,@(disp,GBR) */
1312 TCGv addr = tcg_temp_new();
1313 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1314 tcg_gen_qemu_st8(REG(0), addr, ctx->memidx);
1315 tcg_temp_free(addr);
1317 return;
1318 case 0xc100: /* mov.w R0,@(disp,GBR) */
1320 TCGv addr = tcg_temp_new();
1321 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1322 tcg_gen_qemu_st16(REG(0), addr, ctx->memidx);
1323 tcg_temp_free(addr);
1325 return;
1326 case 0xc200: /* mov.l R0,@(disp,GBR) */
1328 TCGv addr = tcg_temp_new();
1329 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1330 tcg_gen_qemu_st32(REG(0), addr, ctx->memidx);
1331 tcg_temp_free(addr);
1333 return;
1334 case 0x8000: /* mov.b R0,@(disp,Rn) */
1336 TCGv addr = tcg_temp_new();
1337 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1338 tcg_gen_qemu_st8(REG(0), addr, ctx->memidx);
1339 tcg_temp_free(addr);
1341 return;
1342 case 0x8100: /* mov.w R0,@(disp,Rn) */
1344 TCGv addr = tcg_temp_new();
1345 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1346 tcg_gen_qemu_st16(REG(0), addr, ctx->memidx);
1347 tcg_temp_free(addr);
1349 return;
1350 case 0x8400: /* mov.b @(disp,Rn),R0 */
1352 TCGv addr = tcg_temp_new();
1353 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1354 tcg_gen_qemu_ld8s(REG(0), addr, ctx->memidx);
1355 tcg_temp_free(addr);
1357 return;
1358 case 0x8500: /* mov.w @(disp,Rn),R0 */
1360 TCGv addr = tcg_temp_new();
1361 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1362 tcg_gen_qemu_ld16s(REG(0), addr, ctx->memidx);
1363 tcg_temp_free(addr);
1365 return;
1366 case 0xc700: /* mova @(disp,PC),R0 */
1367 tcg_gen_movi_i32(REG(0), ((ctx->pc & 0xfffffffc) + 4 + B7_0 * 4) & ~3);
1368 return;
1369 case 0xcb00: /* or #imm,R0 */
1370 tcg_gen_ori_i32(REG(0), REG(0), B7_0);
1371 return;
1372 case 0xcf00: /* or.b #imm,@(R0,GBR) */
1374 TCGv addr, val;
1375 addr = tcg_temp_new();
1376 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1377 val = tcg_temp_new();
1378 tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
1379 tcg_gen_ori_i32(val, val, B7_0);
1380 tcg_gen_qemu_st8(val, addr, ctx->memidx);
1381 tcg_temp_free(val);
1382 tcg_temp_free(addr);
1384 return;
1385 case 0xc300: /* trapa #imm */
1387 TCGv imm;
1388 CHECK_NOT_DELAY_SLOT
1389 tcg_gen_movi_i32(cpu_pc, ctx->pc);
1390 imm = tcg_const_i32(B7_0);
1391 gen_helper_trapa(cpu_env, imm);
1392 tcg_temp_free(imm);
1393 ctx->bstate = BS_BRANCH;
1395 return;
1396 case 0xc800: /* tst #imm,R0 */
1398 TCGv val = tcg_temp_new();
1399 tcg_gen_andi_i32(val, REG(0), B7_0);
1400 gen_cmp_imm(TCG_COND_EQ, val, 0);
1401 tcg_temp_free(val);
1403 return;
1404 case 0xcc00: /* tst.b #imm,@(R0,GBR) */
1406 TCGv val = tcg_temp_new();
1407 tcg_gen_add_i32(val, REG(0), cpu_gbr);
1408 tcg_gen_qemu_ld8u(val, val, ctx->memidx);
1409 tcg_gen_andi_i32(val, val, B7_0);
1410 gen_cmp_imm(TCG_COND_EQ, val, 0);
1411 tcg_temp_free(val);
1413 return;
1414 case 0xca00: /* xor #imm,R0 */
1415 tcg_gen_xori_i32(REG(0), REG(0), B7_0);
1416 return;
1417 case 0xce00: /* xor.b #imm,@(R0,GBR) */
1419 TCGv addr, val;
1420 addr = tcg_temp_new();
1421 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1422 val = tcg_temp_new();
1423 tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
1424 tcg_gen_xori_i32(val, val, B7_0);
1425 tcg_gen_qemu_st8(val, addr, ctx->memidx);
1426 tcg_temp_free(val);
1427 tcg_temp_free(addr);
1429 return;
1432 switch (ctx->opcode & 0xf08f) {
1433 case 0x408e: /* ldc Rm,Rn_BANK */
1434 CHECK_PRIVILEGED
1435 tcg_gen_mov_i32(ALTREG(B6_4), REG(B11_8));
1436 return;
1437 case 0x4087: /* ldc.l @Rm+,Rn_BANK */
1438 CHECK_PRIVILEGED
1439 tcg_gen_qemu_ld32s(ALTREG(B6_4), REG(B11_8), ctx->memidx);
1440 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1441 return;
1442 case 0x0082: /* stc Rm_BANK,Rn */
1443 CHECK_PRIVILEGED
1444 tcg_gen_mov_i32(REG(B11_8), ALTREG(B6_4));
1445 return;
1446 case 0x4083: /* stc.l Rm_BANK,@-Rn */
1447 CHECK_PRIVILEGED
1449 TCGv addr = tcg_temp_new();
1450 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1451 tcg_gen_qemu_st32(ALTREG(B6_4), addr, ctx->memidx);
1452 tcg_gen_mov_i32(REG(B11_8), addr);
1453 tcg_temp_free(addr);
1455 return;
1458 switch (ctx->opcode & 0xf0ff) {
1459 case 0x0023: /* braf Rn */
1460 CHECK_NOT_DELAY_SLOT
1461 tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->pc + 4);
1462 ctx->flags |= DELAY_SLOT;
1463 ctx->delayed_pc = (uint32_t) - 1;
1464 return;
1465 case 0x0003: /* bsrf Rn */
1466 CHECK_NOT_DELAY_SLOT
1467 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
1468 tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr);
1469 ctx->flags |= DELAY_SLOT;
1470 ctx->delayed_pc = (uint32_t) - 1;
1471 return;
1472 case 0x4015: /* cmp/pl Rn */
1473 gen_cmp_imm(TCG_COND_GT, REG(B11_8), 0);
1474 return;
1475 case 0x4011: /* cmp/pz Rn */
1476 gen_cmp_imm(TCG_COND_GE, REG(B11_8), 0);
1477 return;
1478 case 0x4010: /* dt Rn */
1479 tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 1);
1480 gen_cmp_imm(TCG_COND_EQ, REG(B11_8), 0);
1481 return;
1482 case 0x402b: /* jmp @Rn */
1483 CHECK_NOT_DELAY_SLOT
1484 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1485 ctx->flags |= DELAY_SLOT;
1486 ctx->delayed_pc = (uint32_t) - 1;
1487 return;
1488 case 0x400b: /* jsr @Rn */
1489 CHECK_NOT_DELAY_SLOT
1490 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
1491 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1492 ctx->flags |= DELAY_SLOT;
1493 ctx->delayed_pc = (uint32_t) - 1;
1494 return;
1495 case 0x400e: /* ldc Rm,SR */
1496 CHECK_PRIVILEGED
1497 tcg_gen_andi_i32(cpu_sr, REG(B11_8), 0x700083f3);
1498 ctx->bstate = BS_STOP;
1499 return;
1500 case 0x4007: /* ldc.l @Rm+,SR */
1501 CHECK_PRIVILEGED
1503 TCGv val = tcg_temp_new();
1504 tcg_gen_qemu_ld32s(val, REG(B11_8), ctx->memidx);
1505 tcg_gen_andi_i32(cpu_sr, val, 0x700083f3);
1506 tcg_temp_free(val);
1507 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1508 ctx->bstate = BS_STOP;
1510 return;
1511 case 0x0002: /* stc SR,Rn */
1512 CHECK_PRIVILEGED
1513 tcg_gen_mov_i32(REG(B11_8), cpu_sr);
1514 return;
1515 case 0x4003: /* stc SR,@-Rn */
1516 CHECK_PRIVILEGED
1518 TCGv addr = tcg_temp_new();
1519 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1520 tcg_gen_qemu_st32(cpu_sr, addr, ctx->memidx);
1521 tcg_gen_mov_i32(REG(B11_8), addr);
1522 tcg_temp_free(addr);
1524 return;
1525 #define LD(reg,ldnum,ldpnum,prechk) \
1526 case ldnum: \
1527 prechk \
1528 tcg_gen_mov_i32 (cpu_##reg, REG(B11_8)); \
1529 return; \
1530 case ldpnum: \
1531 prechk \
1532 tcg_gen_qemu_ld32s (cpu_##reg, REG(B11_8), ctx->memidx); \
1533 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); \
1534 return;
1535 #define ST(reg,stnum,stpnum,prechk) \
1536 case stnum: \
1537 prechk \
1538 tcg_gen_mov_i32 (REG(B11_8), cpu_##reg); \
1539 return; \
1540 case stpnum: \
1541 prechk \
1543 TCGv addr = tcg_temp_new(); \
1544 tcg_gen_subi_i32(addr, REG(B11_8), 4); \
1545 tcg_gen_qemu_st32 (cpu_##reg, addr, ctx->memidx); \
1546 tcg_gen_mov_i32(REG(B11_8), addr); \
1547 tcg_temp_free(addr); \
1549 return;
1550 #define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \
1551 LD(reg,ldnum,ldpnum,prechk) \
1552 ST(reg,stnum,stpnum,prechk)
1553 LDST(gbr, 0x401e, 0x4017, 0x0012, 0x4013, {})
1554 LDST(vbr, 0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED)
1555 LDST(ssr, 0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED)
1556 LDST(spc, 0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED)
1557 ST(sgr, 0x003a, 0x4032, CHECK_PRIVILEGED)
1558 LD(sgr, 0x403a, 0x4036, CHECK_PRIVILEGED if (!(ctx->features & SH_FEATURE_SH4A)) break;)
1559 LDST(dbr, 0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED)
1560 LDST(mach, 0x400a, 0x4006, 0x000a, 0x4002, {})
1561 LDST(macl, 0x401a, 0x4016, 0x001a, 0x4012, {})
1562 LDST(pr, 0x402a, 0x4026, 0x002a, 0x4022, {})
1563 LDST(fpul, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED})
1564 case 0x406a: /* lds Rm,FPSCR */
1565 CHECK_FPU_ENABLED
1566 gen_helper_ld_fpscr(cpu_env, REG(B11_8));
1567 ctx->bstate = BS_STOP;
1568 return;
1569 case 0x4066: /* lds.l @Rm+,FPSCR */
1570 CHECK_FPU_ENABLED
1572 TCGv addr = tcg_temp_new();
1573 tcg_gen_qemu_ld32s(addr, REG(B11_8), ctx->memidx);
1574 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1575 gen_helper_ld_fpscr(cpu_env, addr);
1576 tcg_temp_free(addr);
1577 ctx->bstate = BS_STOP;
1579 return;
1580 case 0x006a: /* sts FPSCR,Rn */
1581 CHECK_FPU_ENABLED
1582 tcg_gen_andi_i32(REG(B11_8), cpu_fpscr, 0x003fffff);
1583 return;
1584 case 0x4062: /* sts FPSCR,@-Rn */
1585 CHECK_FPU_ENABLED
1587 TCGv addr, val;
1588 val = tcg_temp_new();
1589 tcg_gen_andi_i32(val, cpu_fpscr, 0x003fffff);
1590 addr = tcg_temp_new();
1591 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1592 tcg_gen_qemu_st32(val, addr, ctx->memidx);
1593 tcg_gen_mov_i32(REG(B11_8), addr);
1594 tcg_temp_free(addr);
1595 tcg_temp_free(val);
1597 return;
1598 case 0x00c3: /* movca.l R0,@Rm */
1600 TCGv val = tcg_temp_new();
1601 tcg_gen_qemu_ld32u(val, REG(B11_8), ctx->memidx);
1602 gen_helper_movcal(cpu_env, REG(B11_8), val);
1603 tcg_gen_qemu_st32(REG(0), REG(B11_8), ctx->memidx);
1605 ctx->has_movcal = 1;
1606 return;
1607 case 0x40a9:
1608 /* MOVUA.L @Rm,R0 (Rm) -> R0
1609 Load non-boundary-aligned data */
1610 tcg_gen_qemu_ld32u(REG(0), REG(B11_8), ctx->memidx);
1611 return;
1612 case 0x40e9:
1613 /* MOVUA.L @Rm+,R0 (Rm) -> R0, Rm + 4 -> Rm
1614 Load non-boundary-aligned data */
1615 tcg_gen_qemu_ld32u(REG(0), REG(B11_8), ctx->memidx);
1616 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1617 return;
1618 case 0x0029: /* movt Rn */
1619 tcg_gen_andi_i32(REG(B11_8), cpu_sr, SR_T);
1620 return;
1621 case 0x0073:
1622 /* MOVCO.L
1623 LDST -> T
1624 If (T == 1) R0 -> (Rn)
1625 0 -> LDST
1627 if (ctx->features & SH_FEATURE_SH4A) {
1628 int label = gen_new_label();
1629 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
1630 tcg_gen_or_i32(cpu_sr, cpu_sr, cpu_ldst);
1631 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ldst, 0, label);
1632 tcg_gen_qemu_st32(REG(0), REG(B11_8), ctx->memidx);
1633 gen_set_label(label);
1634 tcg_gen_movi_i32(cpu_ldst, 0);
1635 return;
1636 } else
1637 break;
1638 case 0x0063:
1639 /* MOVLI.L @Rm,R0
1640 1 -> LDST
1641 (Rm) -> R0
1642 When interrupt/exception
1643 occurred 0 -> LDST
1645 if (ctx->features & SH_FEATURE_SH4A) {
1646 tcg_gen_movi_i32(cpu_ldst, 0);
1647 tcg_gen_qemu_ld32s(REG(0), REG(B11_8), ctx->memidx);
1648 tcg_gen_movi_i32(cpu_ldst, 1);
1649 return;
1650 } else
1651 break;
1652 case 0x0093: /* ocbi @Rn */
1654 gen_helper_ocbi(cpu_env, REG(B11_8));
1656 return;
1657 case 0x00a3: /* ocbp @Rn */
1658 case 0x00b3: /* ocbwb @Rn */
1659 /* These instructions are supposed to do nothing in case of
1660 a cache miss. Given that we only partially emulate caches
1661 it is safe to simply ignore them. */
1662 return;
1663 case 0x0083: /* pref @Rn */
1664 return;
1665 case 0x00d3: /* prefi @Rn */
1666 if (ctx->features & SH_FEATURE_SH4A)
1667 return;
1668 else
1669 break;
1670 case 0x00e3: /* icbi @Rn */
1671 if (ctx->features & SH_FEATURE_SH4A)
1672 return;
1673 else
1674 break;
1675 case 0x00ab: /* synco */
1676 if (ctx->features & SH_FEATURE_SH4A)
1677 return;
1678 else
1679 break;
1680 case 0x4024: /* rotcl Rn */
1682 TCGv tmp = tcg_temp_new();
1683 tcg_gen_mov_i32(tmp, cpu_sr);
1684 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 31);
1685 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1686 gen_copy_bit_i32(REG(B11_8), 0, tmp, 0);
1687 tcg_temp_free(tmp);
1689 return;
1690 case 0x4025: /* rotcr Rn */
1692 TCGv tmp = tcg_temp_new();
1693 tcg_gen_mov_i32(tmp, cpu_sr);
1694 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1695 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1696 gen_copy_bit_i32(REG(B11_8), 31, tmp, 0);
1697 tcg_temp_free(tmp);
1699 return;
1700 case 0x4004: /* rotl Rn */
1701 tcg_gen_rotli_i32(REG(B11_8), REG(B11_8), 1);
1702 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1703 return;
1704 case 0x4005: /* rotr Rn */
1705 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1706 tcg_gen_rotri_i32(REG(B11_8), REG(B11_8), 1);
1707 return;
1708 case 0x4000: /* shll Rn */
1709 case 0x4020: /* shal Rn */
1710 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 31);
1711 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1712 return;
1713 case 0x4021: /* shar Rn */
1714 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1715 tcg_gen_sari_i32(REG(B11_8), REG(B11_8), 1);
1716 return;
1717 case 0x4001: /* shlr Rn */
1718 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1719 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1720 return;
1721 case 0x4008: /* shll2 Rn */
1722 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 2);
1723 return;
1724 case 0x4018: /* shll8 Rn */
1725 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 8);
1726 return;
1727 case 0x4028: /* shll16 Rn */
1728 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 16);
1729 return;
1730 case 0x4009: /* shlr2 Rn */
1731 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 2);
1732 return;
1733 case 0x4019: /* shlr8 Rn */
1734 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 8);
1735 return;
1736 case 0x4029: /* shlr16 Rn */
1737 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 16);
1738 return;
1739 case 0x401b: /* tas.b @Rn */
1741 TCGv addr, val;
1742 addr = tcg_temp_local_new();
1743 tcg_gen_mov_i32(addr, REG(B11_8));
1744 val = tcg_temp_local_new();
1745 tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
1746 gen_cmp_imm(TCG_COND_EQ, val, 0);
1747 tcg_gen_ori_i32(val, val, 0x80);
1748 tcg_gen_qemu_st8(val, addr, ctx->memidx);
1749 tcg_temp_free(val);
1750 tcg_temp_free(addr);
1752 return;
1753 case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
1754 CHECK_FPU_ENABLED
1755 tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fpul);
1756 return;
1757 case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
1758 CHECK_FPU_ENABLED
1759 tcg_gen_mov_i32(cpu_fpul, cpu_fregs[FREG(B11_8)]);
1760 return;
1761 case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
1762 CHECK_FPU_ENABLED
1763 if (ctx->flags & FPSCR_PR) {
1764 TCGv_i64 fp;
1765 if (ctx->opcode & 0x0100)
1766 break; /* illegal instruction */
1767 fp = tcg_temp_new_i64();
1768 gen_helper_float_DT(fp, cpu_env, cpu_fpul);
1769 gen_store_fpr64(fp, DREG(B11_8));
1770 tcg_temp_free_i64(fp);
1772 else {
1773 gen_helper_float_FT(cpu_fregs[FREG(B11_8)], cpu_env, cpu_fpul);
1775 return;
1776 case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1777 CHECK_FPU_ENABLED
1778 if (ctx->flags & FPSCR_PR) {
1779 TCGv_i64 fp;
1780 if (ctx->opcode & 0x0100)
1781 break; /* illegal instruction */
1782 fp = tcg_temp_new_i64();
1783 gen_load_fpr64(fp, DREG(B11_8));
1784 gen_helper_ftrc_DT(cpu_fpul, cpu_env, fp);
1785 tcg_temp_free_i64(fp);
1787 else {
1788 gen_helper_ftrc_FT(cpu_fpul, cpu_env, cpu_fregs[FREG(B11_8)]);
1790 return;
1791 case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
1792 CHECK_FPU_ENABLED
1794 gen_helper_fneg_T(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
1796 return;
1797 case 0xf05d: /* fabs FRn/DRn */
1798 CHECK_FPU_ENABLED
1799 if (ctx->flags & FPSCR_PR) {
1800 if (ctx->opcode & 0x0100)
1801 break; /* illegal instruction */
1802 TCGv_i64 fp = tcg_temp_new_i64();
1803 gen_load_fpr64(fp, DREG(B11_8));
1804 gen_helper_fabs_DT(fp, fp);
1805 gen_store_fpr64(fp, DREG(B11_8));
1806 tcg_temp_free_i64(fp);
1807 } else {
1808 gen_helper_fabs_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
1810 return;
1811 case 0xf06d: /* fsqrt FRn */
1812 CHECK_FPU_ENABLED
1813 if (ctx->flags & FPSCR_PR) {
1814 if (ctx->opcode & 0x0100)
1815 break; /* illegal instruction */
1816 TCGv_i64 fp = tcg_temp_new_i64();
1817 gen_load_fpr64(fp, DREG(B11_8));
1818 gen_helper_fsqrt_DT(fp, cpu_env, fp);
1819 gen_store_fpr64(fp, DREG(B11_8));
1820 tcg_temp_free_i64(fp);
1821 } else {
1822 gen_helper_fsqrt_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1823 cpu_fregs[FREG(B11_8)]);
1825 return;
1826 case 0xf07d: /* fsrra FRn */
1827 CHECK_FPU_ENABLED
1828 break;
1829 case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
1830 CHECK_FPU_ENABLED
1831 if (!(ctx->flags & FPSCR_PR)) {
1832 tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0);
1834 return;
1835 case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
1836 CHECK_FPU_ENABLED
1837 if (!(ctx->flags & FPSCR_PR)) {
1838 tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0x3f800000);
1840 return;
1841 case 0xf0ad: /* fcnvsd FPUL,DRn */
1842 CHECK_FPU_ENABLED
1844 TCGv_i64 fp = tcg_temp_new_i64();
1845 gen_helper_fcnvsd_FT_DT(fp, cpu_env, cpu_fpul);
1846 gen_store_fpr64(fp, DREG(B11_8));
1847 tcg_temp_free_i64(fp);
1849 return;
1850 case 0xf0bd: /* fcnvds DRn,FPUL */
1851 CHECK_FPU_ENABLED
1853 TCGv_i64 fp = tcg_temp_new_i64();
1854 gen_load_fpr64(fp, DREG(B11_8));
1855 gen_helper_fcnvds_DT_FT(cpu_fpul, cpu_env, fp);
1856 tcg_temp_free_i64(fp);
1858 return;
1859 case 0xf0ed: /* fipr FVm,FVn */
1860 CHECK_FPU_ENABLED
1861 if ((ctx->flags & FPSCR_PR) == 0) {
1862 TCGv m, n;
1863 m = tcg_const_i32((ctx->opcode >> 8) & 3);
1864 n = tcg_const_i32((ctx->opcode >> 10) & 3);
1865 gen_helper_fipr(cpu_env, m, n);
1866 tcg_temp_free(m);
1867 tcg_temp_free(n);
1868 return;
1870 break;
1871 case 0xf0fd: /* ftrv XMTRX,FVn */
1872 CHECK_FPU_ENABLED
1873 if ((ctx->opcode & 0x0300) == 0x0100 &&
1874 (ctx->flags & FPSCR_PR) == 0) {
1875 TCGv n;
1876 n = tcg_const_i32((ctx->opcode >> 10) & 3);
1877 gen_helper_ftrv(cpu_env, n);
1878 tcg_temp_free(n);
1879 return;
1881 break;
1883 #if 0
1884 fprintf(stderr, "unknown instruction 0x%04x at pc 0x%08x\n",
1885 ctx->opcode, ctx->pc);
1886 fflush(stderr);
1887 #endif
1888 tcg_gen_movi_i32(cpu_pc, ctx->pc);
1889 if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
1890 gen_helper_raise_slot_illegal_instruction(cpu_env);
1891 } else {
1892 gen_helper_raise_illegal_instruction(cpu_env);
1894 ctx->bstate = BS_BRANCH;
/* Translate one guest instruction and manage SH4 delay-slot state.
 *
 * Captures ctx->flags before dispatching to _decode_opc() so that, after
 * the instruction has been translated, we can tell whether we were *in*
 * a delay slot (old_flags) versus whether the instruction just *opened*
 * one (ctx->flags).  In the former case the pending branch is emitted
 * here; in the latter the flags are stored so the slot survives a TB
 * boundary.  Ends the TB (BS_BRANCH) after executing a delay slot. */
static void decode_opc(DisasContext * ctx)
{
    uint32_t old_flags = ctx->flags;

    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
        tcg_gen_debug_insn_start(ctx->pc);
    }

    _decode_opc(ctx);

    if (old_flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
        /* The instruction just translated was the delay-slot instruction
           of a previously-decoded branch: flush flag state and emit the
           deferred jump now. */
        if (ctx->flags & DELAY_SLOT_CLEARME) {
            gen_store_flags(0);
        } else {
            /* go out of the delay slot */
            uint32_t new_flags = ctx->flags;
            new_flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
            gen_store_flags(new_flags);
        }
        ctx->flags = 0;
        ctx->bstate = BS_BRANCH;
        if (old_flags & DELAY_SLOT_CONDITIONAL) {
            gen_delayed_conditional_jump(ctx);
        } else if (old_flags & DELAY_SLOT) {
            gen_jump(ctx);
        }
    }

    /* go into a delay slot: persist the flags so the slot is handled
       correctly even if translation stops before the next insn. */
    if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL))
        gen_store_flags(ctx->flags);
}
/* Translate a basic block of SH4 code into TCG ops.
 *
 * @env:       CPU state (flags, breakpoints, feature bits).
 * @tb:        translation block being filled in.
 * @search_pc: non-zero when called to rebuild the opcode<->PC mapping
 *             (exception restore path); then per-op bookkeeping arrays
 *             in tcg_ctx are populated instead of tb->size/icount.
 *
 * Loops decoding 2-byte opcodes until a branch/exception ends the block,
 * the op buffer fills, a page boundary is crossed, the icount budget is
 * exhausted, or single-stepping forces a stop. */
static inline void
gen_intermediate_code_internal(CPUSH4State * env, TranslationBlock * tb,
                               int search_pc)
{
    DisasContext ctx;
    target_ulong pc_start;
    static uint16_t *gen_opc_end;
    CPUBreakpoint *bp;
    int i, ii;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
    ctx.pc = pc_start;
    ctx.flags = (uint32_t)tb->flags;
    ctx.bstate = BS_NONE;
    /* memidx 1 = user mode, 0 = privileged (SR_MD set). */
    ctx.memidx = (ctx.flags & SR_MD) == 0 ? 1 : 0;
    /* We don't know if the delayed pc came from a dynamic or static branch,
       so assume it is a dynamic branch.  */
    ctx.delayed_pc = -1; /* use delayed pc from env pointer */
    ctx.tb = tb;
    ctx.singlestep_enabled = env->singlestep_enabled;
    ctx.features = env->features;
    ctx.has_movcal = (ctx.flags & TB_FLAG_PENDING_MOVCA);

    ii = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;
    gen_icount_start();
    while (ctx.bstate == BS_NONE && tcg_ctx.gen_opc_ptr < gen_opc_end) {
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (ctx.pc == bp->pc) {
                    /* We have hit a breakpoint - make sure PC is up-to-date */
                    tcg_gen_movi_i32(cpu_pc, ctx.pc);
                    gen_helper_debug(cpu_env);
                    ctx.bstate = BS_BRANCH;
                    break;
                }
            }
        }
        if (search_pc) {
            /* Record which guest PC each generated op belongs to, zeroing
               the instr_start slots for ops emitted since the last insn. */
            i = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (ii < i) {
                ii++;
                while (ii < i)
                    tcg_ctx.gen_opc_instr_start[ii++] = 0;
            }
            tcg_ctx.gen_opc_pc[ii] = ctx.pc;
            gen_opc_hflags[ii] = ctx.flags;
            tcg_ctx.gen_opc_instr_start[ii] = 1;
            tcg_ctx.gen_opc_icount[ii] = num_insns;
        }
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();
#if 0
        fprintf(stderr, "Loading opcode at address 0x%08x\n", ctx.pc);
        fflush(stderr);
#endif
        ctx.opcode = cpu_lduw_code(env, ctx.pc);
        decode_opc(&ctx);
        num_insns++;
        ctx.pc += 2;
        /* Stop at a page boundary so the TB stays within one guest page. */
        if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0)
            break;
        if (env->singlestep_enabled)
            break;
        if (num_insns >= max_insns)
            break;
        if (singlestep)
            break;
    }
    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    if (env->singlestep_enabled) {
        tcg_gen_movi_i32(cpu_pc, ctx.pc);
        gen_helper_debug(cpu_env);
    } else {
        switch (ctx.bstate) {
        case BS_STOP:
            /* gen_op_interrupt_restart(); */
            /* fall through */
        case BS_NONE:
            if (ctx.flags) {
                gen_store_flags(ctx.flags | DELAY_SLOT_CLEARME);
            }
            gen_goto_tb(&ctx, 0, ctx.pc);
            break;
        case BS_EXCP:
            /* gen_op_interrupt_restart(); */
            tcg_gen_exit_tb(0);
            break;
        case BS_BRANCH:
        default:
            /* branch case already emitted its own exit */
            break;
        }
    }

    gen_icount_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        /* Pad the remaining instr_start slots for trailing ops. */
        i = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        ii++;
        while (ii <= i)
            tcg_ctx.gen_opc_instr_start[ii++] = 0;
    } else {
        tb->size = ctx.pc - pc_start;
        tb->icount = num_insns;
    }

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN:\n"); /* , lookup_symbol(pc_start)); */
        log_target_disas(env, pc_start, ctx.pc - pc_start, 0);
        qemu_log("\n");
    }
#endif
}
/* Normal translation entry point: fill @tb without PC-search bookkeeping. */
void gen_intermediate_code(CPUSH4State * env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}
/* Retranslation entry point used when restoring CPU state after an
   exception: same as gen_intermediate_code() but records the op->PC map. */
void gen_intermediate_code_pc(CPUSH4State * env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
/* Restore guest PC and translation flags from the per-op bookkeeping
   recorded during a search_pc retranslation (see
   gen_intermediate_code_pc); @pc_pos indexes the generated-op arrays. */
void restore_state_to_opc(CPUSH4State *env, TranslationBlock *tb, int pc_pos)
{
    env->pc = tcg_ctx.gen_opc_pc[pc_pos];
    env->flags = gen_opc_hflags[pc_pos];
}