/*
 * Provenance (gitweb scrape artifacts, preserved as a comment):
 *   commit subject: "tcg/arm: Use tcg_out_mov_reg rather than inline equivalent code"
 *   repository path: qemu/pbrook.git / target-sh4 / translate.c
 *   blob: 9d955eb204dd19fb9e680680296efa1ee6883b64
 */
/*
 *  SH4 translation
 *
 *  Copyright (c) 2005 Samuel Tardieu
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
20 #define DEBUG_DISAS
21 //#define SH4_SINGLE_STEP
23 #include "cpu.h"
24 #include "disas.h"
25 #include "tcg-op.h"
27 #include "helper.h"
28 #define GEN_HELPER 1
29 #include "helper.h"
31 typedef struct DisasContext {
32 struct TranslationBlock *tb;
33 target_ulong pc;
34 uint16_t opcode;
35 uint32_t flags;
36 int bstate;
37 int memidx;
38 uint32_t delayed_pc;
39 int singlestep_enabled;
40 uint32_t features;
41 int has_movcal;
42 } DisasContext;
/* True when translating user-mode code.  In the user-only build
   everything is user mode; otherwise check the SR.MD privilege bit. */
#if defined(CONFIG_USER_ONLY)
#define IS_USER(ctx) 1
#else
#define IS_USER(ctx) (!(ctx->flags & SR_MD))
#endif
/* Values for DisasContext.bstate — how the current TB ends. */
enum {
    BS_NONE     = 0, /* We go out of the TB without reaching a branch or an
                      * exception condition */
    BS_STOP     = 1, /* We want to stop translation for any reason */
    BS_BRANCH   = 2, /* We reached a branch condition */
    BS_EXCP     = 3, /* We reached an exception condition */
};
59 /* global register indexes */
60 static TCGv_ptr cpu_env;
61 static TCGv cpu_gregs[24];
62 static TCGv cpu_pc, cpu_sr, cpu_ssr, cpu_spc, cpu_gbr;
63 static TCGv cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl;
64 static TCGv cpu_pr, cpu_fpscr, cpu_fpul, cpu_ldst;
65 static TCGv cpu_fregs[32];
67 /* internal register indexes */
68 static TCGv cpu_flags, cpu_delayed_pc;
70 static uint32_t gen_opc_hflags[OPC_BUF_SIZE];
72 #include "gen-icount.h"
74 static void sh4_translate_init(void)
76 int i;
77 static int done_init = 0;
78 static const char * const gregnames[24] = {
79 "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
80 "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
81 "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
82 "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
83 "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
85 static const char * const fregnames[32] = {
86 "FPR0_BANK0", "FPR1_BANK0", "FPR2_BANK0", "FPR3_BANK0",
87 "FPR4_BANK0", "FPR5_BANK0", "FPR6_BANK0", "FPR7_BANK0",
88 "FPR8_BANK0", "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
89 "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
90 "FPR0_BANK1", "FPR1_BANK1", "FPR2_BANK1", "FPR3_BANK1",
91 "FPR4_BANK1", "FPR5_BANK1", "FPR6_BANK1", "FPR7_BANK1",
92 "FPR8_BANK1", "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
93 "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
96 if (done_init)
97 return;
99 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
101 for (i = 0; i < 24; i++)
102 cpu_gregs[i] = tcg_global_mem_new_i32(TCG_AREG0,
103 offsetof(CPUSH4State, gregs[i]),
104 gregnames[i]);
106 cpu_pc = tcg_global_mem_new_i32(TCG_AREG0,
107 offsetof(CPUSH4State, pc), "PC");
108 cpu_sr = tcg_global_mem_new_i32(TCG_AREG0,
109 offsetof(CPUSH4State, sr), "SR");
110 cpu_ssr = tcg_global_mem_new_i32(TCG_AREG0,
111 offsetof(CPUSH4State, ssr), "SSR");
112 cpu_spc = tcg_global_mem_new_i32(TCG_AREG0,
113 offsetof(CPUSH4State, spc), "SPC");
114 cpu_gbr = tcg_global_mem_new_i32(TCG_AREG0,
115 offsetof(CPUSH4State, gbr), "GBR");
116 cpu_vbr = tcg_global_mem_new_i32(TCG_AREG0,
117 offsetof(CPUSH4State, vbr), "VBR");
118 cpu_sgr = tcg_global_mem_new_i32(TCG_AREG0,
119 offsetof(CPUSH4State, sgr), "SGR");
120 cpu_dbr = tcg_global_mem_new_i32(TCG_AREG0,
121 offsetof(CPUSH4State, dbr), "DBR");
122 cpu_mach = tcg_global_mem_new_i32(TCG_AREG0,
123 offsetof(CPUSH4State, mach), "MACH");
124 cpu_macl = tcg_global_mem_new_i32(TCG_AREG0,
125 offsetof(CPUSH4State, macl), "MACL");
126 cpu_pr = tcg_global_mem_new_i32(TCG_AREG0,
127 offsetof(CPUSH4State, pr), "PR");
128 cpu_fpscr = tcg_global_mem_new_i32(TCG_AREG0,
129 offsetof(CPUSH4State, fpscr), "FPSCR");
130 cpu_fpul = tcg_global_mem_new_i32(TCG_AREG0,
131 offsetof(CPUSH4State, fpul), "FPUL");
133 cpu_flags = tcg_global_mem_new_i32(TCG_AREG0,
134 offsetof(CPUSH4State, flags), "_flags_");
135 cpu_delayed_pc = tcg_global_mem_new_i32(TCG_AREG0,
136 offsetof(CPUSH4State, delayed_pc),
137 "_delayed_pc_");
138 cpu_ldst = tcg_global_mem_new_i32(TCG_AREG0,
139 offsetof(CPUSH4State, ldst), "_ldst_");
141 for (i = 0; i < 32; i++)
142 cpu_fregs[i] = tcg_global_mem_new_i32(TCG_AREG0,
143 offsetof(CPUSH4State, fregs[i]),
144 fregnames[i]);
146 /* register helpers */
147 #define GEN_HELPER 2
148 #include "helper.h"
150 done_init = 1;
153 void cpu_dump_state(CPUSH4State * env, FILE * f,
154 int (*cpu_fprintf) (FILE * f, const char *fmt, ...),
155 int flags)
157 int i;
158 cpu_fprintf(f, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
159 env->pc, env->sr, env->pr, env->fpscr);
160 cpu_fprintf(f, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
161 env->spc, env->ssr, env->gbr, env->vbr);
162 cpu_fprintf(f, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
163 env->sgr, env->dbr, env->delayed_pc, env->fpul);
164 for (i = 0; i < 24; i += 4) {
165 cpu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
166 i, env->gregs[i], i + 1, env->gregs[i + 1],
167 i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]);
169 if (env->flags & DELAY_SLOT) {
170 cpu_fprintf(f, "in delay slot (delayed_pc=0x%08x)\n",
171 env->delayed_pc);
172 } else if (env->flags & DELAY_SLOT_CONDITIONAL) {
173 cpu_fprintf(f, "in conditional delay slot (delayed_pc=0x%08x)\n",
174 env->delayed_pc);
178 typedef struct {
179 const char *name;
180 int id;
181 uint32_t pvr;
182 uint32_t prr;
183 uint32_t cvr;
184 uint32_t features;
185 } sh4_def_t;
187 static sh4_def_t sh4_defs[] = {
189 .name = "SH7750R",
190 .id = SH_CPU_SH7750R,
191 .pvr = 0x00050000,
192 .prr = 0x00000100,
193 .cvr = 0x00110000,
194 .features = SH_FEATURE_BCR3_AND_BCR4,
195 }, {
196 .name = "SH7751R",
197 .id = SH_CPU_SH7751R,
198 .pvr = 0x04050005,
199 .prr = 0x00000113,
200 .cvr = 0x00110000, /* Neutered caches, should be 0x20480000 */
201 .features = SH_FEATURE_BCR3_AND_BCR4,
202 }, {
203 .name = "SH7785",
204 .id = SH_CPU_SH7785,
205 .pvr = 0x10300700,
206 .prr = 0x00000200,
207 .cvr = 0x71440211,
208 .features = SH_FEATURE_SH4A,
212 static const sh4_def_t *cpu_sh4_find_by_name(const char *name)
214 int i;
216 if (strcasecmp(name, "any") == 0)
217 return &sh4_defs[0];
219 for (i = 0; i < ARRAY_SIZE(sh4_defs); i++)
220 if (strcasecmp(name, sh4_defs[i].name) == 0)
221 return &sh4_defs[i];
223 return NULL;
226 void sh4_cpu_list(FILE *f, fprintf_function cpu_fprintf)
228 int i;
230 for (i = 0; i < ARRAY_SIZE(sh4_defs); i++)
231 (*cpu_fprintf)(f, "%s\n", sh4_defs[i].name);
234 static void cpu_register(CPUSH4State *env, const sh4_def_t *def)
236 env->pvr = def->pvr;
237 env->prr = def->prr;
238 env->cvr = def->cvr;
239 env->id = def->id;
242 SuperHCPU *cpu_sh4_init(const char *cpu_model)
244 SuperHCPU *cpu;
245 CPUSH4State *env;
246 const sh4_def_t *def;
248 def = cpu_sh4_find_by_name(cpu_model);
249 if (!def)
250 return NULL;
251 cpu = SUPERH_CPU(object_new(TYPE_SUPERH_CPU));
252 env = &cpu->env;
253 env->features = def->features;
254 sh4_translate_init();
255 env->cpu_model_str = cpu_model;
256 cpu_reset(CPU(cpu));
257 cpu_register(env, def);
258 qemu_init_vcpu(env);
259 return cpu;
262 static void gen_goto_tb(DisasContext * ctx, int n, target_ulong dest)
264 TranslationBlock *tb;
265 tb = ctx->tb;
267 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) &&
268 !ctx->singlestep_enabled) {
269 /* Use a direct jump if in same page and singlestep not enabled */
270 tcg_gen_goto_tb(n);
271 tcg_gen_movi_i32(cpu_pc, dest);
272 tcg_gen_exit_tb((tcg_target_long)tb + n);
273 } else {
274 tcg_gen_movi_i32(cpu_pc, dest);
275 if (ctx->singlestep_enabled)
276 gen_helper_debug(cpu_env);
277 tcg_gen_exit_tb(0);
281 static void gen_jump(DisasContext * ctx)
283 if (ctx->delayed_pc == (uint32_t) - 1) {
284 /* Target is not statically known, it comes necessarily from a
285 delayed jump as immediate jump are conditinal jumps */
286 tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc);
287 if (ctx->singlestep_enabled)
288 gen_helper_debug(cpu_env);
289 tcg_gen_exit_tb(0);
290 } else {
291 gen_goto_tb(ctx, 0, ctx->delayed_pc);
295 static inline void gen_branch_slot(uint32_t delayed_pc, int t)
297 TCGv sr;
298 int label = gen_new_label();
299 tcg_gen_movi_i32(cpu_delayed_pc, delayed_pc);
300 sr = tcg_temp_new();
301 tcg_gen_andi_i32(sr, cpu_sr, SR_T);
302 tcg_gen_brcondi_i32(t ? TCG_COND_EQ:TCG_COND_NE, sr, 0, label);
303 tcg_gen_ori_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
304 gen_set_label(label);
307 /* Immediate conditional jump (bt or bf) */
308 static void gen_conditional_jump(DisasContext * ctx,
309 target_ulong ift, target_ulong ifnott)
311 int l1;
312 TCGv sr;
314 l1 = gen_new_label();
315 sr = tcg_temp_new();
316 tcg_gen_andi_i32(sr, cpu_sr, SR_T);
317 tcg_gen_brcondi_i32(TCG_COND_NE, sr, 0, l1);
318 gen_goto_tb(ctx, 0, ifnott);
319 gen_set_label(l1);
320 gen_goto_tb(ctx, 1, ift);
323 /* Delayed conditional jump (bt or bf) */
324 static void gen_delayed_conditional_jump(DisasContext * ctx)
326 int l1;
327 TCGv ds;
329 l1 = gen_new_label();
330 ds = tcg_temp_new();
331 tcg_gen_andi_i32(ds, cpu_flags, DELAY_SLOT_TRUE);
332 tcg_gen_brcondi_i32(TCG_COND_NE, ds, 0, l1);
333 gen_goto_tb(ctx, 1, ctx->pc + 2);
334 gen_set_label(l1);
335 tcg_gen_andi_i32(cpu_flags, cpu_flags, ~DELAY_SLOT_TRUE);
336 gen_jump(ctx);
339 static inline void gen_cmp(int cond, TCGv t0, TCGv t1)
341 TCGv t;
343 t = tcg_temp_new();
344 tcg_gen_setcond_i32(cond, t, t1, t0);
345 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
346 tcg_gen_or_i32(cpu_sr, cpu_sr, t);
348 tcg_temp_free(t);
351 static inline void gen_cmp_imm(int cond, TCGv t0, int32_t imm)
353 TCGv t;
355 t = tcg_temp_new();
356 tcg_gen_setcondi_i32(cond, t, t0, imm);
357 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
358 tcg_gen_or_i32(cpu_sr, cpu_sr, t);
360 tcg_temp_free(t);
363 static inline void gen_store_flags(uint32_t flags)
365 tcg_gen_andi_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
366 tcg_gen_ori_i32(cpu_flags, cpu_flags, flags);
369 static inline void gen_copy_bit_i32(TCGv t0, int p0, TCGv t1, int p1)
371 TCGv tmp = tcg_temp_new();
373 p0 &= 0x1f;
374 p1 &= 0x1f;
376 tcg_gen_andi_i32(tmp, t1, (1 << p1));
377 tcg_gen_andi_i32(t0, t0, ~(1 << p0));
378 if (p0 < p1)
379 tcg_gen_shri_i32(tmp, tmp, p1 - p0);
380 else if (p0 > p1)
381 tcg_gen_shli_i32(tmp, tmp, p0 - p1);
382 tcg_gen_or_i32(t0, t0, tmp);
384 tcg_temp_free(tmp);
387 static inline void gen_load_fpr64(TCGv_i64 t, int reg)
389 tcg_gen_concat_i32_i64(t, cpu_fregs[reg + 1], cpu_fregs[reg]);
392 static inline void gen_store_fpr64 (TCGv_i64 t, int reg)
394 TCGv_i32 tmp = tcg_temp_new_i32();
395 tcg_gen_trunc_i64_i32(tmp, t);
396 tcg_gen_mov_i32(cpu_fregs[reg + 1], tmp);
397 tcg_gen_shri_i64(t, t, 32);
398 tcg_gen_trunc_i64_i32(tmp, t);
399 tcg_gen_mov_i32(cpu_fregs[reg], tmp);
400 tcg_temp_free_i32(tmp);
/* Opcode field extraction: Bx_y extracts bits x..y of ctx->opcode;
   the 's' suffix means sign-extended. */
#define B3_0 (ctx->opcode & 0xf)
#define B6_4 ((ctx->opcode >> 4) & 0x7)
#define B7_4 ((ctx->opcode >> 4) & 0xf)
#define B7_0 (ctx->opcode & 0xff)
#define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff))
#define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
  (ctx->opcode & 0xfff))
#define B11_8 ((ctx->opcode >> 8) & 0xf)
#define B15_12 ((ctx->opcode >> 12) & 0xf)

/* REG selects the active bank for R0-R7 (bank 1 in privileged mode with
   SR.RB set); ALTREG selects the inactive bank. */
#define REG(x) ((x) < 8 && (ctx->flags & (SR_MD | SR_RB)) == (SR_MD | SR_RB) \
		? (cpu_gregs[x + 16]) : (cpu_gregs[x]))

#define ALTREG(x) ((x) < 8 && (ctx->flags & (SR_MD | SR_RB)) != (SR_MD | SR_RB)\
		? (cpu_gregs[x + 16]) : (cpu_gregs[x]))

/* FP register selectors honoring the FPSCR.FR bank swap; XREG applies
   the XD register pairing hack for double-size moves. */
#define FREG(x) (ctx->flags & FPSCR_FR ? (x) ^ 0x10 : (x))
#define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe))
#define XREG(x) (ctx->flags & FPSCR_FR ? XHACK(x) ^ 0x10 : XHACK(x))
#define DREG(x) FREG(x) /* Assumes lsb of (x) is always 0 */
/* Raise a slot-illegal exception and abort decoding when the current
   instruction sits in a delay slot. */
#define CHECK_NOT_DELAY_SLOT \
  if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL))     \
  {                                                           \
      tcg_gen_movi_i32(cpu_pc, ctx->pc);                      \
      gen_helper_raise_slot_illegal_instruction(cpu_env);     \
      ctx->bstate = BS_BRANCH;                                \
      return;                                                 \
  }

/* Raise an illegal-instruction exception (slot variant inside a delay
   slot) and abort decoding when executing in user mode. */
#define CHECK_PRIVILEGED                                        \
  if (IS_USER(ctx)) {                                           \
      tcg_gen_movi_i32(cpu_pc, ctx->pc);                        \
      if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
          gen_helper_raise_slot_illegal_instruction(cpu_env);   \
      } else {                                                  \
          gen_helper_raise_illegal_instruction(cpu_env);        \
      }                                                         \
      ctx->bstate = BS_BRANCH;                                  \
      return;                                                   \
  }

/* Raise an FPU-disabled exception (slot variant inside a delay slot)
   and abort decoding when SR.FD is set. */
#define CHECK_FPU_ENABLED                                       \
  if (ctx->flags & SR_FD) {                                     \
      tcg_gen_movi_i32(cpu_pc, ctx->pc);                        \
      if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
          gen_helper_raise_slot_fpu_disable(cpu_env);           \
      } else {                                                  \
          gen_helper_raise_fpu_disable(cpu_env);                \
      }                                                         \
      ctx->bstate = BS_BRANCH;                                  \
      return;                                                   \
  }
457 static void _decode_opc(DisasContext * ctx)
459 /* This code tries to make movcal emulation sufficiently
460 accurate for Linux purposes. This instruction writes
461 memory, and prior to that, always allocates a cache line.
462 It is used in two contexts:
463 - in memcpy, where data is copied in blocks, the first write
464 of to a block uses movca.l for performance.
465 - in arch/sh/mm/cache-sh4.c, movcal.l + ocbi combination is used
466 to flush the cache. Here, the data written by movcal.l is never
467 written to memory, and the data written is just bogus.
469 To simulate this, we simulate movcal.l, we store the value to memory,
470 but we also remember the previous content. If we see ocbi, we check
471 if movcal.l for that address was done previously. If so, the write should
472 not have hit the memory, so we restore the previous content.
473 When we see an instruction that is neither movca.l
474 nor ocbi, the previous content is discarded.
476 To optimize, we only try to flush stores when we're at the start of
477 TB, or if we already saw movca.l in this TB and did not flush stores
478 yet. */
479 if (ctx->has_movcal)
481 int opcode = ctx->opcode & 0xf0ff;
482 if (opcode != 0x0093 /* ocbi */
483 && opcode != 0x00c3 /* movca.l */)
485 gen_helper_discard_movcal_backup(cpu_env);
486 ctx->has_movcal = 0;
490 #if 0
491 fprintf(stderr, "Translating opcode 0x%04x\n", ctx->opcode);
492 #endif
494 switch (ctx->opcode) {
495 case 0x0019: /* div0u */
496 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(SR_M | SR_Q | SR_T));
497 return;
498 case 0x000b: /* rts */
499 CHECK_NOT_DELAY_SLOT
500 tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr);
501 ctx->flags |= DELAY_SLOT;
502 ctx->delayed_pc = (uint32_t) - 1;
503 return;
504 case 0x0028: /* clrmac */
505 tcg_gen_movi_i32(cpu_mach, 0);
506 tcg_gen_movi_i32(cpu_macl, 0);
507 return;
508 case 0x0048: /* clrs */
509 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_S);
510 return;
511 case 0x0008: /* clrt */
512 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
513 return;
514 case 0x0038: /* ldtlb */
515 CHECK_PRIVILEGED
516 gen_helper_ldtlb(cpu_env);
517 return;
518 case 0x002b: /* rte */
519 CHECK_PRIVILEGED
520 CHECK_NOT_DELAY_SLOT
521 tcg_gen_mov_i32(cpu_sr, cpu_ssr);
522 tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc);
523 ctx->flags |= DELAY_SLOT;
524 ctx->delayed_pc = (uint32_t) - 1;
525 return;
526 case 0x0058: /* sets */
527 tcg_gen_ori_i32(cpu_sr, cpu_sr, SR_S);
528 return;
529 case 0x0018: /* sett */
530 tcg_gen_ori_i32(cpu_sr, cpu_sr, SR_T);
531 return;
532 case 0xfbfd: /* frchg */
533 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_FR);
534 ctx->bstate = BS_STOP;
535 return;
536 case 0xf3fd: /* fschg */
537 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_SZ);
538 ctx->bstate = BS_STOP;
539 return;
540 case 0x0009: /* nop */
541 return;
542 case 0x001b: /* sleep */
543 CHECK_PRIVILEGED
544 tcg_gen_movi_i32(cpu_pc, ctx->pc + 2);
545 gen_helper_sleep(cpu_env);
546 return;
549 switch (ctx->opcode & 0xf000) {
550 case 0x1000: /* mov.l Rm,@(disp,Rn) */
552 TCGv addr = tcg_temp_new();
553 tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4);
554 tcg_gen_qemu_st32(REG(B7_4), addr, ctx->memidx);
555 tcg_temp_free(addr);
557 return;
558 case 0x5000: /* mov.l @(disp,Rm),Rn */
560 TCGv addr = tcg_temp_new();
561 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4);
562 tcg_gen_qemu_ld32s(REG(B11_8), addr, ctx->memidx);
563 tcg_temp_free(addr);
565 return;
566 case 0xe000: /* mov #imm,Rn */
567 tcg_gen_movi_i32(REG(B11_8), B7_0s);
568 return;
569 case 0x9000: /* mov.w @(disp,PC),Rn */
571 TCGv addr = tcg_const_i32(ctx->pc + 4 + B7_0 * 2);
572 tcg_gen_qemu_ld16s(REG(B11_8), addr, ctx->memidx);
573 tcg_temp_free(addr);
575 return;
576 case 0xd000: /* mov.l @(disp,PC),Rn */
578 TCGv addr = tcg_const_i32((ctx->pc + 4 + B7_0 * 4) & ~3);
579 tcg_gen_qemu_ld32s(REG(B11_8), addr, ctx->memidx);
580 tcg_temp_free(addr);
582 return;
583 case 0x7000: /* add #imm,Rn */
584 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), B7_0s);
585 return;
586 case 0xa000: /* bra disp */
587 CHECK_NOT_DELAY_SLOT
588 ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
589 tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
590 ctx->flags |= DELAY_SLOT;
591 return;
592 case 0xb000: /* bsr disp */
593 CHECK_NOT_DELAY_SLOT
594 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
595 ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
596 tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
597 ctx->flags |= DELAY_SLOT;
598 return;
601 switch (ctx->opcode & 0xf00f) {
602 case 0x6003: /* mov Rm,Rn */
603 tcg_gen_mov_i32(REG(B11_8), REG(B7_4));
604 return;
605 case 0x2000: /* mov.b Rm,@Rn */
606 tcg_gen_qemu_st8(REG(B7_4), REG(B11_8), ctx->memidx);
607 return;
608 case 0x2001: /* mov.w Rm,@Rn */
609 tcg_gen_qemu_st16(REG(B7_4), REG(B11_8), ctx->memidx);
610 return;
611 case 0x2002: /* mov.l Rm,@Rn */
612 tcg_gen_qemu_st32(REG(B7_4), REG(B11_8), ctx->memidx);
613 return;
614 case 0x6000: /* mov.b @Rm,Rn */
615 tcg_gen_qemu_ld8s(REG(B11_8), REG(B7_4), ctx->memidx);
616 return;
617 case 0x6001: /* mov.w @Rm,Rn */
618 tcg_gen_qemu_ld16s(REG(B11_8), REG(B7_4), ctx->memidx);
619 return;
620 case 0x6002: /* mov.l @Rm,Rn */
621 tcg_gen_qemu_ld32s(REG(B11_8), REG(B7_4), ctx->memidx);
622 return;
623 case 0x2004: /* mov.b Rm,@-Rn */
625 TCGv addr = tcg_temp_new();
626 tcg_gen_subi_i32(addr, REG(B11_8), 1);
627 tcg_gen_qemu_st8(REG(B7_4), addr, ctx->memidx); /* might cause re-execution */
628 tcg_gen_mov_i32(REG(B11_8), addr); /* modify register status */
629 tcg_temp_free(addr);
631 return;
632 case 0x2005: /* mov.w Rm,@-Rn */
634 TCGv addr = tcg_temp_new();
635 tcg_gen_subi_i32(addr, REG(B11_8), 2);
636 tcg_gen_qemu_st16(REG(B7_4), addr, ctx->memidx);
637 tcg_gen_mov_i32(REG(B11_8), addr);
638 tcg_temp_free(addr);
640 return;
641 case 0x2006: /* mov.l Rm,@-Rn */
643 TCGv addr = tcg_temp_new();
644 tcg_gen_subi_i32(addr, REG(B11_8), 4);
645 tcg_gen_qemu_st32(REG(B7_4), addr, ctx->memidx);
646 tcg_gen_mov_i32(REG(B11_8), addr);
648 return;
649 case 0x6004: /* mov.b @Rm+,Rn */
650 tcg_gen_qemu_ld8s(REG(B11_8), REG(B7_4), ctx->memidx);
651 if ( B11_8 != B7_4 )
652 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 1);
653 return;
654 case 0x6005: /* mov.w @Rm+,Rn */
655 tcg_gen_qemu_ld16s(REG(B11_8), REG(B7_4), ctx->memidx);
656 if ( B11_8 != B7_4 )
657 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
658 return;
659 case 0x6006: /* mov.l @Rm+,Rn */
660 tcg_gen_qemu_ld32s(REG(B11_8), REG(B7_4), ctx->memidx);
661 if ( B11_8 != B7_4 )
662 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
663 return;
664 case 0x0004: /* mov.b Rm,@(R0,Rn) */
666 TCGv addr = tcg_temp_new();
667 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
668 tcg_gen_qemu_st8(REG(B7_4), addr, ctx->memidx);
669 tcg_temp_free(addr);
671 return;
672 case 0x0005: /* mov.w Rm,@(R0,Rn) */
674 TCGv addr = tcg_temp_new();
675 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
676 tcg_gen_qemu_st16(REG(B7_4), addr, ctx->memidx);
677 tcg_temp_free(addr);
679 return;
680 case 0x0006: /* mov.l Rm,@(R0,Rn) */
682 TCGv addr = tcg_temp_new();
683 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
684 tcg_gen_qemu_st32(REG(B7_4), addr, ctx->memidx);
685 tcg_temp_free(addr);
687 return;
688 case 0x000c: /* mov.b @(R0,Rm),Rn */
690 TCGv addr = tcg_temp_new();
691 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
692 tcg_gen_qemu_ld8s(REG(B11_8), addr, ctx->memidx);
693 tcg_temp_free(addr);
695 return;
696 case 0x000d: /* mov.w @(R0,Rm),Rn */
698 TCGv addr = tcg_temp_new();
699 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
700 tcg_gen_qemu_ld16s(REG(B11_8), addr, ctx->memidx);
701 tcg_temp_free(addr);
703 return;
704 case 0x000e: /* mov.l @(R0,Rm),Rn */
706 TCGv addr = tcg_temp_new();
707 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
708 tcg_gen_qemu_ld32s(REG(B11_8), addr, ctx->memidx);
709 tcg_temp_free(addr);
711 return;
712 case 0x6008: /* swap.b Rm,Rn */
714 TCGv high, low;
715 high = tcg_temp_new();
716 tcg_gen_andi_i32(high, REG(B7_4), 0xffff0000);
717 low = tcg_temp_new();
718 tcg_gen_ext16u_i32(low, REG(B7_4));
719 tcg_gen_bswap16_i32(low, low);
720 tcg_gen_or_i32(REG(B11_8), high, low);
721 tcg_temp_free(low);
722 tcg_temp_free(high);
724 return;
725 case 0x6009: /* swap.w Rm,Rn */
726 tcg_gen_rotli_i32(REG(B11_8), REG(B7_4), 16);
727 return;
728 case 0x200d: /* xtrct Rm,Rn */
730 TCGv high, low;
731 high = tcg_temp_new();
732 tcg_gen_shli_i32(high, REG(B7_4), 16);
733 low = tcg_temp_new();
734 tcg_gen_shri_i32(low, REG(B11_8), 16);
735 tcg_gen_or_i32(REG(B11_8), high, low);
736 tcg_temp_free(low);
737 tcg_temp_free(high);
739 return;
740 case 0x300c: /* add Rm,Rn */
741 tcg_gen_add_i32(REG(B11_8), REG(B11_8), REG(B7_4));
742 return;
743 case 0x300e: /* addc Rm,Rn */
745 TCGv t0, t1, t2;
746 t0 = tcg_temp_new();
747 tcg_gen_andi_i32(t0, cpu_sr, SR_T);
748 t1 = tcg_temp_new();
749 tcg_gen_add_i32(t1, REG(B7_4), REG(B11_8));
750 tcg_gen_add_i32(t0, t0, t1);
751 t2 = tcg_temp_new();
752 tcg_gen_setcond_i32(TCG_COND_GTU, t2, REG(B11_8), t1);
753 tcg_gen_setcond_i32(TCG_COND_GTU, t1, t1, t0);
754 tcg_gen_or_i32(t1, t1, t2);
755 tcg_temp_free(t2);
756 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
757 tcg_gen_or_i32(cpu_sr, cpu_sr, t1);
758 tcg_temp_free(t1);
759 tcg_gen_mov_i32(REG(B11_8), t0);
760 tcg_temp_free(t0);
762 return;
763 case 0x300f: /* addv Rm,Rn */
765 TCGv t0, t1, t2;
766 t0 = tcg_temp_new();
767 tcg_gen_add_i32(t0, REG(B7_4), REG(B11_8));
768 t1 = tcg_temp_new();
769 tcg_gen_xor_i32(t1, t0, REG(B11_8));
770 t2 = tcg_temp_new();
771 tcg_gen_xor_i32(t2, REG(B7_4), REG(B11_8));
772 tcg_gen_andc_i32(t1, t1, t2);
773 tcg_temp_free(t2);
774 tcg_gen_shri_i32(t1, t1, 31);
775 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
776 tcg_gen_or_i32(cpu_sr, cpu_sr, t1);
777 tcg_temp_free(t1);
778 tcg_gen_mov_i32(REG(B7_4), t0);
779 tcg_temp_free(t0);
781 return;
782 case 0x2009: /* and Rm,Rn */
783 tcg_gen_and_i32(REG(B11_8), REG(B11_8), REG(B7_4));
784 return;
785 case 0x3000: /* cmp/eq Rm,Rn */
786 gen_cmp(TCG_COND_EQ, REG(B7_4), REG(B11_8));
787 return;
788 case 0x3003: /* cmp/ge Rm,Rn */
789 gen_cmp(TCG_COND_GE, REG(B7_4), REG(B11_8));
790 return;
791 case 0x3007: /* cmp/gt Rm,Rn */
792 gen_cmp(TCG_COND_GT, REG(B7_4), REG(B11_8));
793 return;
794 case 0x3006: /* cmp/hi Rm,Rn */
795 gen_cmp(TCG_COND_GTU, REG(B7_4), REG(B11_8));
796 return;
797 case 0x3002: /* cmp/hs Rm,Rn */
798 gen_cmp(TCG_COND_GEU, REG(B7_4), REG(B11_8));
799 return;
800 case 0x200c: /* cmp/str Rm,Rn */
802 TCGv cmp1 = tcg_temp_new();
803 TCGv cmp2 = tcg_temp_new();
804 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
805 tcg_gen_xor_i32(cmp1, REG(B7_4), REG(B11_8));
806 tcg_gen_andi_i32(cmp2, cmp1, 0xff000000);
807 tcg_gen_setcondi_i32(TCG_COND_EQ, cmp2, cmp2, 0);
808 tcg_gen_or_i32(cpu_sr, cpu_sr, cmp2);
809 tcg_gen_andi_i32(cmp2, cmp1, 0x00ff0000);
810 tcg_gen_setcondi_i32(TCG_COND_EQ, cmp2, cmp2, 0);
811 tcg_gen_or_i32(cpu_sr, cpu_sr, cmp2);
812 tcg_gen_andi_i32(cmp2, cmp1, 0x0000ff00);
813 tcg_gen_setcondi_i32(TCG_COND_EQ, cmp2, cmp2, 0);
814 tcg_gen_or_i32(cpu_sr, cpu_sr, cmp2);
815 tcg_gen_andi_i32(cmp2, cmp1, 0x000000ff);
816 tcg_gen_setcondi_i32(TCG_COND_EQ, cmp2, cmp2, 0);
817 tcg_gen_or_i32(cpu_sr, cpu_sr, cmp2);
818 tcg_temp_free(cmp2);
819 tcg_temp_free(cmp1);
821 return;
822 case 0x2007: /* div0s Rm,Rn */
824 gen_copy_bit_i32(cpu_sr, 8, REG(B11_8), 31); /* SR_Q */
825 gen_copy_bit_i32(cpu_sr, 9, REG(B7_4), 31); /* SR_M */
826 TCGv val = tcg_temp_new();
827 tcg_gen_xor_i32(val, REG(B7_4), REG(B11_8));
828 gen_copy_bit_i32(cpu_sr, 0, val, 31); /* SR_T */
829 tcg_temp_free(val);
831 return;
832 case 0x3004: /* div1 Rm,Rn */
833 gen_helper_div1(REG(B11_8), cpu_env, REG(B7_4), REG(B11_8));
834 return;
835 case 0x300d: /* dmuls.l Rm,Rn */
837 TCGv_i64 tmp1 = tcg_temp_new_i64();
838 TCGv_i64 tmp2 = tcg_temp_new_i64();
840 tcg_gen_ext_i32_i64(tmp1, REG(B7_4));
841 tcg_gen_ext_i32_i64(tmp2, REG(B11_8));
842 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
843 tcg_gen_trunc_i64_i32(cpu_macl, tmp1);
844 tcg_gen_shri_i64(tmp1, tmp1, 32);
845 tcg_gen_trunc_i64_i32(cpu_mach, tmp1);
847 tcg_temp_free_i64(tmp2);
848 tcg_temp_free_i64(tmp1);
850 return;
851 case 0x3005: /* dmulu.l Rm,Rn */
853 TCGv_i64 tmp1 = tcg_temp_new_i64();
854 TCGv_i64 tmp2 = tcg_temp_new_i64();
856 tcg_gen_extu_i32_i64(tmp1, REG(B7_4));
857 tcg_gen_extu_i32_i64(tmp2, REG(B11_8));
858 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
859 tcg_gen_trunc_i64_i32(cpu_macl, tmp1);
860 tcg_gen_shri_i64(tmp1, tmp1, 32);
861 tcg_gen_trunc_i64_i32(cpu_mach, tmp1);
863 tcg_temp_free_i64(tmp2);
864 tcg_temp_free_i64(tmp1);
866 return;
867 case 0x600e: /* exts.b Rm,Rn */
868 tcg_gen_ext8s_i32(REG(B11_8), REG(B7_4));
869 return;
870 case 0x600f: /* exts.w Rm,Rn */
871 tcg_gen_ext16s_i32(REG(B11_8), REG(B7_4));
872 return;
873 case 0x600c: /* extu.b Rm,Rn */
874 tcg_gen_ext8u_i32(REG(B11_8), REG(B7_4));
875 return;
876 case 0x600d: /* extu.w Rm,Rn */
877 tcg_gen_ext16u_i32(REG(B11_8), REG(B7_4));
878 return;
879 case 0x000f: /* mac.l @Rm+,@Rn+ */
881 TCGv arg0, arg1;
882 arg0 = tcg_temp_new();
883 tcg_gen_qemu_ld32s(arg0, REG(B7_4), ctx->memidx);
884 arg1 = tcg_temp_new();
885 tcg_gen_qemu_ld32s(arg1, REG(B11_8), ctx->memidx);
886 gen_helper_macl(cpu_env, arg0, arg1);
887 tcg_temp_free(arg1);
888 tcg_temp_free(arg0);
889 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
890 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
892 return;
893 case 0x400f: /* mac.w @Rm+,@Rn+ */
895 TCGv arg0, arg1;
896 arg0 = tcg_temp_new();
897 tcg_gen_qemu_ld32s(arg0, REG(B7_4), ctx->memidx);
898 arg1 = tcg_temp_new();
899 tcg_gen_qemu_ld32s(arg1, REG(B11_8), ctx->memidx);
900 gen_helper_macw(cpu_env, arg0, arg1);
901 tcg_temp_free(arg1);
902 tcg_temp_free(arg0);
903 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2);
904 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
906 return;
907 case 0x0007: /* mul.l Rm,Rn */
908 tcg_gen_mul_i32(cpu_macl, REG(B7_4), REG(B11_8));
909 return;
910 case 0x200f: /* muls.w Rm,Rn */
912 TCGv arg0, arg1;
913 arg0 = tcg_temp_new();
914 tcg_gen_ext16s_i32(arg0, REG(B7_4));
915 arg1 = tcg_temp_new();
916 tcg_gen_ext16s_i32(arg1, REG(B11_8));
917 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
918 tcg_temp_free(arg1);
919 tcg_temp_free(arg0);
921 return;
922 case 0x200e: /* mulu.w Rm,Rn */
924 TCGv arg0, arg1;
925 arg0 = tcg_temp_new();
926 tcg_gen_ext16u_i32(arg0, REG(B7_4));
927 arg1 = tcg_temp_new();
928 tcg_gen_ext16u_i32(arg1, REG(B11_8));
929 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
930 tcg_temp_free(arg1);
931 tcg_temp_free(arg0);
933 return;
934 case 0x600b: /* neg Rm,Rn */
935 tcg_gen_neg_i32(REG(B11_8), REG(B7_4));
936 return;
937 case 0x600a: /* negc Rm,Rn */
939 TCGv t0, t1;
940 t0 = tcg_temp_new();
941 tcg_gen_neg_i32(t0, REG(B7_4));
942 t1 = tcg_temp_new();
943 tcg_gen_andi_i32(t1, cpu_sr, SR_T);
944 tcg_gen_sub_i32(REG(B11_8), t0, t1);
945 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
946 tcg_gen_setcondi_i32(TCG_COND_GTU, t1, t0, 0);
947 tcg_gen_or_i32(cpu_sr, cpu_sr, t1);
948 tcg_gen_setcond_i32(TCG_COND_GTU, t1, REG(B11_8), t0);
949 tcg_gen_or_i32(cpu_sr, cpu_sr, t1);
950 tcg_temp_free(t0);
951 tcg_temp_free(t1);
953 return;
954 case 0x6007: /* not Rm,Rn */
955 tcg_gen_not_i32(REG(B11_8), REG(B7_4));
956 return;
957 case 0x200b: /* or Rm,Rn */
958 tcg_gen_or_i32(REG(B11_8), REG(B11_8), REG(B7_4));
959 return;
960 case 0x400c: /* shad Rm,Rn */
962 int label1 = gen_new_label();
963 int label2 = gen_new_label();
964 int label3 = gen_new_label();
965 int label4 = gen_new_label();
966 TCGv shift;
967 tcg_gen_brcondi_i32(TCG_COND_LT, REG(B7_4), 0, label1);
968 /* Rm positive, shift to the left */
969 shift = tcg_temp_new();
970 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
971 tcg_gen_shl_i32(REG(B11_8), REG(B11_8), shift);
972 tcg_temp_free(shift);
973 tcg_gen_br(label4);
974 /* Rm negative, shift to the right */
975 gen_set_label(label1);
976 shift = tcg_temp_new();
977 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
978 tcg_gen_brcondi_i32(TCG_COND_EQ, shift, 0, label2);
979 tcg_gen_not_i32(shift, REG(B7_4));
980 tcg_gen_andi_i32(shift, shift, 0x1f);
981 tcg_gen_addi_i32(shift, shift, 1);
982 tcg_gen_sar_i32(REG(B11_8), REG(B11_8), shift);
983 tcg_temp_free(shift);
984 tcg_gen_br(label4);
985 /* Rm = -32 */
986 gen_set_label(label2);
987 tcg_gen_brcondi_i32(TCG_COND_LT, REG(B11_8), 0, label3);
988 tcg_gen_movi_i32(REG(B11_8), 0);
989 tcg_gen_br(label4);
990 gen_set_label(label3);
991 tcg_gen_movi_i32(REG(B11_8), 0xffffffff);
992 gen_set_label(label4);
994 return;
995 case 0x400d: /* shld Rm,Rn */
997 int label1 = gen_new_label();
998 int label2 = gen_new_label();
999 int label3 = gen_new_label();
1000 TCGv shift;
1001 tcg_gen_brcondi_i32(TCG_COND_LT, REG(B7_4), 0, label1);
1002 /* Rm positive, shift to the left */
1003 shift = tcg_temp_new();
1004 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
1005 tcg_gen_shl_i32(REG(B11_8), REG(B11_8), shift);
1006 tcg_temp_free(shift);
1007 tcg_gen_br(label3);
1008 /* Rm negative, shift to the right */
1009 gen_set_label(label1);
1010 shift = tcg_temp_new();
1011 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
1012 tcg_gen_brcondi_i32(TCG_COND_EQ, shift, 0, label2);
1013 tcg_gen_not_i32(shift, REG(B7_4));
1014 tcg_gen_andi_i32(shift, shift, 0x1f);
1015 tcg_gen_addi_i32(shift, shift, 1);
1016 tcg_gen_shr_i32(REG(B11_8), REG(B11_8), shift);
1017 tcg_temp_free(shift);
1018 tcg_gen_br(label3);
1019 /* Rm = -32 */
1020 gen_set_label(label2);
1021 tcg_gen_movi_i32(REG(B11_8), 0);
1022 gen_set_label(label3);
1024 return;
1025 case 0x3008: /* sub Rm,Rn */
1026 tcg_gen_sub_i32(REG(B11_8), REG(B11_8), REG(B7_4));
1027 return;
1028 case 0x300a: /* subc Rm,Rn */
1030 TCGv t0, t1, t2;
1031 t0 = tcg_temp_new();
1032 tcg_gen_andi_i32(t0, cpu_sr, SR_T);
1033 t1 = tcg_temp_new();
1034 tcg_gen_sub_i32(t1, REG(B11_8), REG(B7_4));
1035 tcg_gen_sub_i32(t0, t1, t0);
1036 t2 = tcg_temp_new();
1037 tcg_gen_setcond_i32(TCG_COND_LTU, t2, REG(B11_8), t1);
1038 tcg_gen_setcond_i32(TCG_COND_LTU, t1, t1, t0);
1039 tcg_gen_or_i32(t1, t1, t2);
1040 tcg_temp_free(t2);
1041 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
1042 tcg_gen_or_i32(cpu_sr, cpu_sr, t1);
1043 tcg_temp_free(t1);
1044 tcg_gen_mov_i32(REG(B11_8), t0);
1045 tcg_temp_free(t0);
1047 return;
1048 case 0x300b: /* subv Rm,Rn */
1050 TCGv t0, t1, t2;
1051 t0 = tcg_temp_new();
1052 tcg_gen_sub_i32(t0, REG(B11_8), REG(B7_4));
1053 t1 = tcg_temp_new();
1054 tcg_gen_xor_i32(t1, t0, REG(B7_4));
1055 t2 = tcg_temp_new();
1056 tcg_gen_xor_i32(t2, REG(B11_8), REG(B7_4));
1057 tcg_gen_and_i32(t1, t1, t2);
1058 tcg_temp_free(t2);
1059 tcg_gen_shri_i32(t1, t1, 31);
1060 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
1061 tcg_gen_or_i32(cpu_sr, cpu_sr, t1);
1062 tcg_temp_free(t1);
1063 tcg_gen_mov_i32(REG(B11_8), t0);
1064 tcg_temp_free(t0);
1066 return;
1067 case 0x2008: /* tst Rm,Rn */
1069 TCGv val = tcg_temp_new();
1070 tcg_gen_and_i32(val, REG(B7_4), REG(B11_8));
1071 gen_cmp_imm(TCG_COND_EQ, val, 0);
1072 tcg_temp_free(val);
1074 return;
1075 case 0x200a: /* xor Rm,Rn */
1076 tcg_gen_xor_i32(REG(B11_8), REG(B11_8), REG(B7_4));
1077 return;
1078 case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
1079 CHECK_FPU_ENABLED
1080 if (ctx->flags & FPSCR_SZ) {
1081 TCGv_i64 fp = tcg_temp_new_i64();
1082 gen_load_fpr64(fp, XREG(B7_4));
1083 gen_store_fpr64(fp, XREG(B11_8));
1084 tcg_temp_free_i64(fp);
1085 } else {
1086 tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1088 return;
1089 case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
1090 CHECK_FPU_ENABLED
1091 if (ctx->flags & FPSCR_SZ) {
1092 TCGv addr_hi = tcg_temp_new();
1093 int fr = XREG(B7_4);
1094 tcg_gen_addi_i32(addr_hi, REG(B11_8), 4);
1095 tcg_gen_qemu_st32(cpu_fregs[fr ], REG(B11_8), ctx->memidx);
1096 tcg_gen_qemu_st32(cpu_fregs[fr+1], addr_hi, ctx->memidx);
1097 tcg_temp_free(addr_hi);
1098 } else {
1099 tcg_gen_qemu_st32(cpu_fregs[FREG(B7_4)], REG(B11_8), ctx->memidx);
1101 return;
1102 case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
1103 CHECK_FPU_ENABLED
1104 if (ctx->flags & FPSCR_SZ) {
1105 TCGv addr_hi = tcg_temp_new();
1106 int fr = XREG(B11_8);
1107 tcg_gen_addi_i32(addr_hi, REG(B7_4), 4);
1108 tcg_gen_qemu_ld32u(cpu_fregs[fr ], REG(B7_4), ctx->memidx);
1109 tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr_hi, ctx->memidx);
1110 tcg_temp_free(addr_hi);
1111 } else {
1112 tcg_gen_qemu_ld32u(cpu_fregs[FREG(B11_8)], REG(B7_4), ctx->memidx);
1114 return;
1115 case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
1116 CHECK_FPU_ENABLED
1117 if (ctx->flags & FPSCR_SZ) {
1118 TCGv addr_hi = tcg_temp_new();
1119 int fr = XREG(B11_8);
1120 tcg_gen_addi_i32(addr_hi, REG(B7_4), 4);
1121 tcg_gen_qemu_ld32u(cpu_fregs[fr ], REG(B7_4), ctx->memidx);
1122 tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr_hi, ctx->memidx);
1123 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8);
1124 tcg_temp_free(addr_hi);
1125 } else {
1126 tcg_gen_qemu_ld32u(cpu_fregs[FREG(B11_8)], REG(B7_4), ctx->memidx);
1127 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
1129 return;
1130 case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
1131 CHECK_FPU_ENABLED
1132 if (ctx->flags & FPSCR_SZ) {
1133 TCGv addr = tcg_temp_new_i32();
1134 int fr = XREG(B7_4);
1135 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1136 tcg_gen_qemu_st32(cpu_fregs[fr+1], addr, ctx->memidx);
1137 tcg_gen_subi_i32(addr, addr, 4);
1138 tcg_gen_qemu_st32(cpu_fregs[fr ], addr, ctx->memidx);
1139 tcg_gen_mov_i32(REG(B11_8), addr);
1140 tcg_temp_free(addr);
1141 } else {
1142 TCGv addr;
1143 addr = tcg_temp_new_i32();
1144 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1145 tcg_gen_qemu_st32(cpu_fregs[FREG(B7_4)], addr, ctx->memidx);
1146 tcg_gen_mov_i32(REG(B11_8), addr);
1147 tcg_temp_free(addr);
1149 return;
1150 case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rm - FPSCR: Nothing */
1151 CHECK_FPU_ENABLED
1153 TCGv addr = tcg_temp_new_i32();
1154 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
1155 if (ctx->flags & FPSCR_SZ) {
1156 int fr = XREG(B11_8);
1157 tcg_gen_qemu_ld32u(cpu_fregs[fr ], addr, ctx->memidx);
1158 tcg_gen_addi_i32(addr, addr, 4);
1159 tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr, ctx->memidx);
1160 } else {
1161 tcg_gen_qemu_ld32u(cpu_fregs[FREG(B11_8)], addr, ctx->memidx);
1163 tcg_temp_free(addr);
1165 return;
1166 case 0xf007: /* fmov {F,D,X}Rn,@(R0,Rn) - FPSCR: Nothing */
1167 CHECK_FPU_ENABLED
1169 TCGv addr = tcg_temp_new();
1170 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
1171 if (ctx->flags & FPSCR_SZ) {
1172 int fr = XREG(B7_4);
1173 tcg_gen_qemu_ld32u(cpu_fregs[fr ], addr, ctx->memidx);
1174 tcg_gen_addi_i32(addr, addr, 4);
1175 tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr, ctx->memidx);
1176 } else {
1177 tcg_gen_qemu_st32(cpu_fregs[FREG(B7_4)], addr, ctx->memidx);
1179 tcg_temp_free(addr);
1181 return;
1182 case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1183 case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1184 case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1185 case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1186 case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1187 case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1189 CHECK_FPU_ENABLED
1190 if (ctx->flags & FPSCR_PR) {
1191 TCGv_i64 fp0, fp1;
1193 if (ctx->opcode & 0x0110)
1194 break; /* illegal instruction */
1195 fp0 = tcg_temp_new_i64();
1196 fp1 = tcg_temp_new_i64();
1197 gen_load_fpr64(fp0, DREG(B11_8));
1198 gen_load_fpr64(fp1, DREG(B7_4));
1199 switch (ctx->opcode & 0xf00f) {
1200 case 0xf000: /* fadd Rm,Rn */
1201 gen_helper_fadd_DT(fp0, cpu_env, fp0, fp1);
1202 break;
1203 case 0xf001: /* fsub Rm,Rn */
1204 gen_helper_fsub_DT(fp0, cpu_env, fp0, fp1);
1205 break;
1206 case 0xf002: /* fmul Rm,Rn */
1207 gen_helper_fmul_DT(fp0, cpu_env, fp0, fp1);
1208 break;
1209 case 0xf003: /* fdiv Rm,Rn */
1210 gen_helper_fdiv_DT(fp0, cpu_env, fp0, fp1);
1211 break;
1212 case 0xf004: /* fcmp/eq Rm,Rn */
1213 gen_helper_fcmp_eq_DT(cpu_env, fp0, fp1);
1214 return;
1215 case 0xf005: /* fcmp/gt Rm,Rn */
1216 gen_helper_fcmp_gt_DT(cpu_env, fp0, fp1);
1217 return;
1219 gen_store_fpr64(fp0, DREG(B11_8));
1220 tcg_temp_free_i64(fp0);
1221 tcg_temp_free_i64(fp1);
1222 } else {
1223 switch (ctx->opcode & 0xf00f) {
1224 case 0xf000: /* fadd Rm,Rn */
1225 gen_helper_fadd_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1226 cpu_fregs[FREG(B11_8)],
1227 cpu_fregs[FREG(B7_4)]);
1228 break;
1229 case 0xf001: /* fsub Rm,Rn */
1230 gen_helper_fsub_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1231 cpu_fregs[FREG(B11_8)],
1232 cpu_fregs[FREG(B7_4)]);
1233 break;
1234 case 0xf002: /* fmul Rm,Rn */
1235 gen_helper_fmul_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1236 cpu_fregs[FREG(B11_8)],
1237 cpu_fregs[FREG(B7_4)]);
1238 break;
1239 case 0xf003: /* fdiv Rm,Rn */
1240 gen_helper_fdiv_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1241 cpu_fregs[FREG(B11_8)],
1242 cpu_fregs[FREG(B7_4)]);
1243 break;
1244 case 0xf004: /* fcmp/eq Rm,Rn */
1245 gen_helper_fcmp_eq_FT(cpu_env, cpu_fregs[FREG(B11_8)],
1246 cpu_fregs[FREG(B7_4)]);
1247 return;
1248 case 0xf005: /* fcmp/gt Rm,Rn */
1249 gen_helper_fcmp_gt_FT(cpu_env, cpu_fregs[FREG(B11_8)],
1250 cpu_fregs[FREG(B7_4)]);
1251 return;
1255 return;
1256 case 0xf00e: /* fmac FR0,RM,Rn */
1258 CHECK_FPU_ENABLED
1259 if (ctx->flags & FPSCR_PR) {
1260 break; /* illegal instruction */
1261 } else {
1262 gen_helper_fmac_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1263 cpu_fregs[FREG(0)], cpu_fregs[FREG(B7_4)],
1264 cpu_fregs[FREG(B11_8)]);
1265 return;
1270 switch (ctx->opcode & 0xff00) {
1271 case 0xc900: /* and #imm,R0 */
1272 tcg_gen_andi_i32(REG(0), REG(0), B7_0);
1273 return;
1274 case 0xcd00: /* and.b #imm,@(R0,GBR) */
1276 TCGv addr, val;
1277 addr = tcg_temp_new();
1278 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1279 val = tcg_temp_new();
1280 tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
1281 tcg_gen_andi_i32(val, val, B7_0);
1282 tcg_gen_qemu_st8(val, addr, ctx->memidx);
1283 tcg_temp_free(val);
1284 tcg_temp_free(addr);
1286 return;
1287 case 0x8b00: /* bf label */
1288 CHECK_NOT_DELAY_SLOT
1289 gen_conditional_jump(ctx, ctx->pc + 2,
1290 ctx->pc + 4 + B7_0s * 2);
1291 ctx->bstate = BS_BRANCH;
1292 return;
1293 case 0x8f00: /* bf/s label */
1294 CHECK_NOT_DELAY_SLOT
1295 gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 0);
1296 ctx->flags |= DELAY_SLOT_CONDITIONAL;
1297 return;
1298 case 0x8900: /* bt label */
1299 CHECK_NOT_DELAY_SLOT
1300 gen_conditional_jump(ctx, ctx->pc + 4 + B7_0s * 2,
1301 ctx->pc + 2);
1302 ctx->bstate = BS_BRANCH;
1303 return;
1304 case 0x8d00: /* bt/s label */
1305 CHECK_NOT_DELAY_SLOT
1306 gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 1);
1307 ctx->flags |= DELAY_SLOT_CONDITIONAL;
1308 return;
1309 case 0x8800: /* cmp/eq #imm,R0 */
1310 gen_cmp_imm(TCG_COND_EQ, REG(0), B7_0s);
1311 return;
1312 case 0xc400: /* mov.b @(disp,GBR),R0 */
1314 TCGv addr = tcg_temp_new();
1315 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1316 tcg_gen_qemu_ld8s(REG(0), addr, ctx->memidx);
1317 tcg_temp_free(addr);
1319 return;
1320 case 0xc500: /* mov.w @(disp,GBR),R0 */
1322 TCGv addr = tcg_temp_new();
1323 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1324 tcg_gen_qemu_ld16s(REG(0), addr, ctx->memidx);
1325 tcg_temp_free(addr);
1327 return;
1328 case 0xc600: /* mov.l @(disp,GBR),R0 */
1330 TCGv addr = tcg_temp_new();
1331 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1332 tcg_gen_qemu_ld32s(REG(0), addr, ctx->memidx);
1333 tcg_temp_free(addr);
1335 return;
1336 case 0xc000: /* mov.b R0,@(disp,GBR) */
1338 TCGv addr = tcg_temp_new();
1339 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1340 tcg_gen_qemu_st8(REG(0), addr, ctx->memidx);
1341 tcg_temp_free(addr);
1343 return;
1344 case 0xc100: /* mov.w R0,@(disp,GBR) */
1346 TCGv addr = tcg_temp_new();
1347 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1348 tcg_gen_qemu_st16(REG(0), addr, ctx->memidx);
1349 tcg_temp_free(addr);
1351 return;
1352 case 0xc200: /* mov.l R0,@(disp,GBR) */
1354 TCGv addr = tcg_temp_new();
1355 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1356 tcg_gen_qemu_st32(REG(0), addr, ctx->memidx);
1357 tcg_temp_free(addr);
1359 return;
1360 case 0x8000: /* mov.b R0,@(disp,Rn) */
1362 TCGv addr = tcg_temp_new();
1363 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1364 tcg_gen_qemu_st8(REG(0), addr, ctx->memidx);
1365 tcg_temp_free(addr);
1367 return;
1368 case 0x8100: /* mov.w R0,@(disp,Rn) */
1370 TCGv addr = tcg_temp_new();
1371 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1372 tcg_gen_qemu_st16(REG(0), addr, ctx->memidx);
1373 tcg_temp_free(addr);
1375 return;
1376 case 0x8400: /* mov.b @(disp,Rn),R0 */
1378 TCGv addr = tcg_temp_new();
1379 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1380 tcg_gen_qemu_ld8s(REG(0), addr, ctx->memidx);
1381 tcg_temp_free(addr);
1383 return;
1384 case 0x8500: /* mov.w @(disp,Rn),R0 */
1386 TCGv addr = tcg_temp_new();
1387 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1388 tcg_gen_qemu_ld16s(REG(0), addr, ctx->memidx);
1389 tcg_temp_free(addr);
1391 return;
1392 case 0xc700: /* mova @(disp,PC),R0 */
1393 tcg_gen_movi_i32(REG(0), ((ctx->pc & 0xfffffffc) + 4 + B7_0 * 4) & ~3);
1394 return;
1395 case 0xcb00: /* or #imm,R0 */
1396 tcg_gen_ori_i32(REG(0), REG(0), B7_0);
1397 return;
1398 case 0xcf00: /* or.b #imm,@(R0,GBR) */
1400 TCGv addr, val;
1401 addr = tcg_temp_new();
1402 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1403 val = tcg_temp_new();
1404 tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
1405 tcg_gen_ori_i32(val, val, B7_0);
1406 tcg_gen_qemu_st8(val, addr, ctx->memidx);
1407 tcg_temp_free(val);
1408 tcg_temp_free(addr);
1410 return;
1411 case 0xc300: /* trapa #imm */
1413 TCGv imm;
1414 CHECK_NOT_DELAY_SLOT
1415 tcg_gen_movi_i32(cpu_pc, ctx->pc);
1416 imm = tcg_const_i32(B7_0);
1417 gen_helper_trapa(cpu_env, imm);
1418 tcg_temp_free(imm);
1419 ctx->bstate = BS_BRANCH;
1421 return;
1422 case 0xc800: /* tst #imm,R0 */
1424 TCGv val = tcg_temp_new();
1425 tcg_gen_andi_i32(val, REG(0), B7_0);
1426 gen_cmp_imm(TCG_COND_EQ, val, 0);
1427 tcg_temp_free(val);
1429 return;
1430 case 0xcc00: /* tst.b #imm,@(R0,GBR) */
1432 TCGv val = tcg_temp_new();
1433 tcg_gen_add_i32(val, REG(0), cpu_gbr);
1434 tcg_gen_qemu_ld8u(val, val, ctx->memidx);
1435 tcg_gen_andi_i32(val, val, B7_0);
1436 gen_cmp_imm(TCG_COND_EQ, val, 0);
1437 tcg_temp_free(val);
1439 return;
1440 case 0xca00: /* xor #imm,R0 */
1441 tcg_gen_xori_i32(REG(0), REG(0), B7_0);
1442 return;
1443 case 0xce00: /* xor.b #imm,@(R0,GBR) */
1445 TCGv addr, val;
1446 addr = tcg_temp_new();
1447 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1448 val = tcg_temp_new();
1449 tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
1450 tcg_gen_xori_i32(val, val, B7_0);
1451 tcg_gen_qemu_st8(val, addr, ctx->memidx);
1452 tcg_temp_free(val);
1453 tcg_temp_free(addr);
1455 return;
1458 switch (ctx->opcode & 0xf08f) {
1459 case 0x408e: /* ldc Rm,Rn_BANK */
1460 CHECK_PRIVILEGED
1461 tcg_gen_mov_i32(ALTREG(B6_4), REG(B11_8));
1462 return;
1463 case 0x4087: /* ldc.l @Rm+,Rn_BANK */
1464 CHECK_PRIVILEGED
1465 tcg_gen_qemu_ld32s(ALTREG(B6_4), REG(B11_8), ctx->memidx);
1466 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1467 return;
1468 case 0x0082: /* stc Rm_BANK,Rn */
1469 CHECK_PRIVILEGED
1470 tcg_gen_mov_i32(REG(B11_8), ALTREG(B6_4));
1471 return;
1472 case 0x4083: /* stc.l Rm_BANK,@-Rn */
1473 CHECK_PRIVILEGED
1475 TCGv addr = tcg_temp_new();
1476 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1477 tcg_gen_qemu_st32(ALTREG(B6_4), addr, ctx->memidx);
1478 tcg_gen_mov_i32(REG(B11_8), addr);
1479 tcg_temp_free(addr);
1481 return;
1484 switch (ctx->opcode & 0xf0ff) {
1485 case 0x0023: /* braf Rn */
1486 CHECK_NOT_DELAY_SLOT
1487 tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->pc + 4);
1488 ctx->flags |= DELAY_SLOT;
1489 ctx->delayed_pc = (uint32_t) - 1;
1490 return;
1491 case 0x0003: /* bsrf Rn */
1492 CHECK_NOT_DELAY_SLOT
1493 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
1494 tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr);
1495 ctx->flags |= DELAY_SLOT;
1496 ctx->delayed_pc = (uint32_t) - 1;
1497 return;
1498 case 0x4015: /* cmp/pl Rn */
1499 gen_cmp_imm(TCG_COND_GT, REG(B11_8), 0);
1500 return;
1501 case 0x4011: /* cmp/pz Rn */
1502 gen_cmp_imm(TCG_COND_GE, REG(B11_8), 0);
1503 return;
1504 case 0x4010: /* dt Rn */
1505 tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 1);
1506 gen_cmp_imm(TCG_COND_EQ, REG(B11_8), 0);
1507 return;
1508 case 0x402b: /* jmp @Rn */
1509 CHECK_NOT_DELAY_SLOT
1510 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1511 ctx->flags |= DELAY_SLOT;
1512 ctx->delayed_pc = (uint32_t) - 1;
1513 return;
1514 case 0x400b: /* jsr @Rn */
1515 CHECK_NOT_DELAY_SLOT
1516 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
1517 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1518 ctx->flags |= DELAY_SLOT;
1519 ctx->delayed_pc = (uint32_t) - 1;
1520 return;
1521 case 0x400e: /* ldc Rm,SR */
1522 CHECK_PRIVILEGED
1523 tcg_gen_andi_i32(cpu_sr, REG(B11_8), 0x700083f3);
1524 ctx->bstate = BS_STOP;
1525 return;
1526 case 0x4007: /* ldc.l @Rm+,SR */
1527 CHECK_PRIVILEGED
1529 TCGv val = tcg_temp_new();
1530 tcg_gen_qemu_ld32s(val, REG(B11_8), ctx->memidx);
1531 tcg_gen_andi_i32(cpu_sr, val, 0x700083f3);
1532 tcg_temp_free(val);
1533 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1534 ctx->bstate = BS_STOP;
1536 return;
1537 case 0x0002: /* stc SR,Rn */
1538 CHECK_PRIVILEGED
1539 tcg_gen_mov_i32(REG(B11_8), cpu_sr);
1540 return;
1541 case 0x4003: /* stc SR,@-Rn */
1542 CHECK_PRIVILEGED
1544 TCGv addr = tcg_temp_new();
1545 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1546 tcg_gen_qemu_st32(cpu_sr, addr, ctx->memidx);
1547 tcg_gen_mov_i32(REG(B11_8), addr);
1548 tcg_temp_free(addr);
1550 return;
1551 #define LD(reg,ldnum,ldpnum,prechk) \
1552 case ldnum: \
1553 prechk \
1554 tcg_gen_mov_i32 (cpu_##reg, REG(B11_8)); \
1555 return; \
1556 case ldpnum: \
1557 prechk \
1558 tcg_gen_qemu_ld32s (cpu_##reg, REG(B11_8), ctx->memidx); \
1559 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); \
1560 return;
1561 #define ST(reg,stnum,stpnum,prechk) \
1562 case stnum: \
1563 prechk \
1564 tcg_gen_mov_i32 (REG(B11_8), cpu_##reg); \
1565 return; \
1566 case stpnum: \
1567 prechk \
1569 TCGv addr = tcg_temp_new(); \
1570 tcg_gen_subi_i32(addr, REG(B11_8), 4); \
1571 tcg_gen_qemu_st32 (cpu_##reg, addr, ctx->memidx); \
1572 tcg_gen_mov_i32(REG(B11_8), addr); \
1573 tcg_temp_free(addr); \
1575 return;
1576 #define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \
1577 LD(reg,ldnum,ldpnum,prechk) \
1578 ST(reg,stnum,stpnum,prechk)
1579 LDST(gbr, 0x401e, 0x4017, 0x0012, 0x4013, {})
1580 LDST(vbr, 0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED)
1581 LDST(ssr, 0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED)
1582 LDST(spc, 0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED)
1583 ST(sgr, 0x003a, 0x4032, CHECK_PRIVILEGED)
1584 LD(sgr, 0x403a, 0x4036, CHECK_PRIVILEGED if (!(ctx->features & SH_FEATURE_SH4A)) break;)
1585 LDST(dbr, 0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED)
1586 LDST(mach, 0x400a, 0x4006, 0x000a, 0x4002, {})
1587 LDST(macl, 0x401a, 0x4016, 0x001a, 0x4012, {})
1588 LDST(pr, 0x402a, 0x4026, 0x002a, 0x4022, {})
1589 LDST(fpul, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED})
1590 case 0x406a: /* lds Rm,FPSCR */
1591 CHECK_FPU_ENABLED
1592 gen_helper_ld_fpscr(cpu_env, REG(B11_8));
1593 ctx->bstate = BS_STOP;
1594 return;
1595 case 0x4066: /* lds.l @Rm+,FPSCR */
1596 CHECK_FPU_ENABLED
1598 TCGv addr = tcg_temp_new();
1599 tcg_gen_qemu_ld32s(addr, REG(B11_8), ctx->memidx);
1600 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1601 gen_helper_ld_fpscr(cpu_env, addr);
1602 tcg_temp_free(addr);
1603 ctx->bstate = BS_STOP;
1605 return;
1606 case 0x006a: /* sts FPSCR,Rn */
1607 CHECK_FPU_ENABLED
1608 tcg_gen_andi_i32(REG(B11_8), cpu_fpscr, 0x003fffff);
1609 return;
1610 case 0x4062: /* sts FPSCR,@-Rn */
1611 CHECK_FPU_ENABLED
1613 TCGv addr, val;
1614 val = tcg_temp_new();
1615 tcg_gen_andi_i32(val, cpu_fpscr, 0x003fffff);
1616 addr = tcg_temp_new();
1617 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1618 tcg_gen_qemu_st32(val, addr, ctx->memidx);
1619 tcg_gen_mov_i32(REG(B11_8), addr);
1620 tcg_temp_free(addr);
1621 tcg_temp_free(val);
1623 return;
1624 case 0x00c3: /* movca.l R0,@Rm */
1626 TCGv val = tcg_temp_new();
1627 tcg_gen_qemu_ld32u(val, REG(B11_8), ctx->memidx);
1628 gen_helper_movcal(cpu_env, REG(B11_8), val);
1629 tcg_gen_qemu_st32(REG(0), REG(B11_8), ctx->memidx);
1631 ctx->has_movcal = 1;
1632 return;
1633 case 0x40a9:
1634 /* MOVUA.L @Rm,R0 (Rm) -> R0
1635 Load non-boundary-aligned data */
1636 tcg_gen_qemu_ld32u(REG(0), REG(B11_8), ctx->memidx);
1637 return;
1638 case 0x40e9:
1639 /* MOVUA.L @Rm+,R0 (Rm) -> R0, Rm + 4 -> Rm
1640 Load non-boundary-aligned data */
1641 tcg_gen_qemu_ld32u(REG(0), REG(B11_8), ctx->memidx);
1642 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1643 return;
1644 case 0x0029: /* movt Rn */
1645 tcg_gen_andi_i32(REG(B11_8), cpu_sr, SR_T);
1646 return;
1647 case 0x0073:
1648 /* MOVCO.L
1649 LDST -> T
1650 If (T == 1) R0 -> (Rn)
1651 0 -> LDST
1653 if (ctx->features & SH_FEATURE_SH4A) {
1654 int label = gen_new_label();
1655 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
1656 tcg_gen_or_i32(cpu_sr, cpu_sr, cpu_ldst);
1657 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ldst, 0, label);
1658 tcg_gen_qemu_st32(REG(0), REG(B11_8), ctx->memidx);
1659 gen_set_label(label);
1660 tcg_gen_movi_i32(cpu_ldst, 0);
1661 return;
1662 } else
1663 break;
1664 case 0x0063:
1665 /* MOVLI.L @Rm,R0
1666 1 -> LDST
1667 (Rm) -> R0
1668 When interrupt/exception
1669 occurred 0 -> LDST
1671 if (ctx->features & SH_FEATURE_SH4A) {
1672 tcg_gen_movi_i32(cpu_ldst, 0);
1673 tcg_gen_qemu_ld32s(REG(0), REG(B11_8), ctx->memidx);
1674 tcg_gen_movi_i32(cpu_ldst, 1);
1675 return;
1676 } else
1677 break;
1678 case 0x0093: /* ocbi @Rn */
1680 gen_helper_ocbi(cpu_env, REG(B11_8));
1682 return;
1683 case 0x00a3: /* ocbp @Rn */
1684 case 0x00b3: /* ocbwb @Rn */
1685 /* These instructions are supposed to do nothing in case of
1686 a cache miss. Given that we only partially emulate caches
1687 it is safe to simply ignore them. */
1688 return;
1689 case 0x0083: /* pref @Rn */
1690 return;
1691 case 0x00d3: /* prefi @Rn */
1692 if (ctx->features & SH_FEATURE_SH4A)
1693 return;
1694 else
1695 break;
1696 case 0x00e3: /* icbi @Rn */
1697 if (ctx->features & SH_FEATURE_SH4A)
1698 return;
1699 else
1700 break;
1701 case 0x00ab: /* synco */
1702 if (ctx->features & SH_FEATURE_SH4A)
1703 return;
1704 else
1705 break;
1706 case 0x4024: /* rotcl Rn */
1708 TCGv tmp = tcg_temp_new();
1709 tcg_gen_mov_i32(tmp, cpu_sr);
1710 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 31);
1711 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1712 gen_copy_bit_i32(REG(B11_8), 0, tmp, 0);
1713 tcg_temp_free(tmp);
1715 return;
1716 case 0x4025: /* rotcr Rn */
1718 TCGv tmp = tcg_temp_new();
1719 tcg_gen_mov_i32(tmp, cpu_sr);
1720 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1721 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1722 gen_copy_bit_i32(REG(B11_8), 31, tmp, 0);
1723 tcg_temp_free(tmp);
1725 return;
1726 case 0x4004: /* rotl Rn */
1727 tcg_gen_rotli_i32(REG(B11_8), REG(B11_8), 1);
1728 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1729 return;
1730 case 0x4005: /* rotr Rn */
1731 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1732 tcg_gen_rotri_i32(REG(B11_8), REG(B11_8), 1);
1733 return;
1734 case 0x4000: /* shll Rn */
1735 case 0x4020: /* shal Rn */
1736 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 31);
1737 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1738 return;
1739 case 0x4021: /* shar Rn */
1740 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1741 tcg_gen_sari_i32(REG(B11_8), REG(B11_8), 1);
1742 return;
1743 case 0x4001: /* shlr Rn */
1744 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1745 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1746 return;
1747 case 0x4008: /* shll2 Rn */
1748 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 2);
1749 return;
1750 case 0x4018: /* shll8 Rn */
1751 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 8);
1752 return;
1753 case 0x4028: /* shll16 Rn */
1754 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 16);
1755 return;
1756 case 0x4009: /* shlr2 Rn */
1757 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 2);
1758 return;
1759 case 0x4019: /* shlr8 Rn */
1760 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 8);
1761 return;
1762 case 0x4029: /* shlr16 Rn */
1763 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 16);
1764 return;
1765 case 0x401b: /* tas.b @Rn */
1767 TCGv addr, val;
1768 addr = tcg_temp_local_new();
1769 tcg_gen_mov_i32(addr, REG(B11_8));
1770 val = tcg_temp_local_new();
1771 tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
1772 gen_cmp_imm(TCG_COND_EQ, val, 0);
1773 tcg_gen_ori_i32(val, val, 0x80);
1774 tcg_gen_qemu_st8(val, addr, ctx->memidx);
1775 tcg_temp_free(val);
1776 tcg_temp_free(addr);
1778 return;
1779 case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
1780 CHECK_FPU_ENABLED
1781 tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fpul);
1782 return;
1783 case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
1784 CHECK_FPU_ENABLED
1785 tcg_gen_mov_i32(cpu_fpul, cpu_fregs[FREG(B11_8)]);
1786 return;
1787 case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
1788 CHECK_FPU_ENABLED
1789 if (ctx->flags & FPSCR_PR) {
1790 TCGv_i64 fp;
1791 if (ctx->opcode & 0x0100)
1792 break; /* illegal instruction */
1793 fp = tcg_temp_new_i64();
1794 gen_helper_float_DT(fp, cpu_env, cpu_fpul);
1795 gen_store_fpr64(fp, DREG(B11_8));
1796 tcg_temp_free_i64(fp);
1798 else {
1799 gen_helper_float_FT(cpu_fregs[FREG(B11_8)], cpu_env, cpu_fpul);
1801 return;
1802 case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1803 CHECK_FPU_ENABLED
1804 if (ctx->flags & FPSCR_PR) {
1805 TCGv_i64 fp;
1806 if (ctx->opcode & 0x0100)
1807 break; /* illegal instruction */
1808 fp = tcg_temp_new_i64();
1809 gen_load_fpr64(fp, DREG(B11_8));
1810 gen_helper_ftrc_DT(cpu_fpul, cpu_env, fp);
1811 tcg_temp_free_i64(fp);
1813 else {
1814 gen_helper_ftrc_FT(cpu_fpul, cpu_env, cpu_fregs[FREG(B11_8)]);
1816 return;
1817 case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
1818 CHECK_FPU_ENABLED
1820 gen_helper_fneg_T(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
1822 return;
1823 case 0xf05d: /* fabs FRn/DRn */
1824 CHECK_FPU_ENABLED
1825 if (ctx->flags & FPSCR_PR) {
1826 if (ctx->opcode & 0x0100)
1827 break; /* illegal instruction */
1828 TCGv_i64 fp = tcg_temp_new_i64();
1829 gen_load_fpr64(fp, DREG(B11_8));
1830 gen_helper_fabs_DT(fp, fp);
1831 gen_store_fpr64(fp, DREG(B11_8));
1832 tcg_temp_free_i64(fp);
1833 } else {
1834 gen_helper_fabs_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
1836 return;
1837 case 0xf06d: /* fsqrt FRn */
1838 CHECK_FPU_ENABLED
1839 if (ctx->flags & FPSCR_PR) {
1840 if (ctx->opcode & 0x0100)
1841 break; /* illegal instruction */
1842 TCGv_i64 fp = tcg_temp_new_i64();
1843 gen_load_fpr64(fp, DREG(B11_8));
1844 gen_helper_fsqrt_DT(fp, cpu_env, fp);
1845 gen_store_fpr64(fp, DREG(B11_8));
1846 tcg_temp_free_i64(fp);
1847 } else {
1848 gen_helper_fsqrt_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1849 cpu_fregs[FREG(B11_8)]);
1851 return;
1852 case 0xf07d: /* fsrra FRn */
1853 CHECK_FPU_ENABLED
1854 break;
1855 case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
1856 CHECK_FPU_ENABLED
1857 if (!(ctx->flags & FPSCR_PR)) {
1858 tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0);
1860 return;
1861 case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
1862 CHECK_FPU_ENABLED
1863 if (!(ctx->flags & FPSCR_PR)) {
1864 tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0x3f800000);
1866 return;
1867 case 0xf0ad: /* fcnvsd FPUL,DRn */
1868 CHECK_FPU_ENABLED
1870 TCGv_i64 fp = tcg_temp_new_i64();
1871 gen_helper_fcnvsd_FT_DT(fp, cpu_env, cpu_fpul);
1872 gen_store_fpr64(fp, DREG(B11_8));
1873 tcg_temp_free_i64(fp);
1875 return;
1876 case 0xf0bd: /* fcnvds DRn,FPUL */
1877 CHECK_FPU_ENABLED
1879 TCGv_i64 fp = tcg_temp_new_i64();
1880 gen_load_fpr64(fp, DREG(B11_8));
1881 gen_helper_fcnvds_DT_FT(cpu_fpul, cpu_env, fp);
1882 tcg_temp_free_i64(fp);
1884 return;
1885 case 0xf0ed: /* fipr FVm,FVn */
1886 CHECK_FPU_ENABLED
1887 if ((ctx->flags & FPSCR_PR) == 0) {
1888 TCGv m, n;
1889 m = tcg_const_i32((ctx->opcode >> 8) & 3);
1890 n = tcg_const_i32((ctx->opcode >> 10) & 3);
1891 gen_helper_fipr(cpu_env, m, n);
1892 tcg_temp_free(m);
1893 tcg_temp_free(n);
1894 return;
1896 break;
1897 case 0xf0fd: /* ftrv XMTRX,FVn */
1898 CHECK_FPU_ENABLED
1899 if ((ctx->opcode & 0x0300) == 0x0100 &&
1900 (ctx->flags & FPSCR_PR) == 0) {
1901 TCGv n;
1902 n = tcg_const_i32((ctx->opcode >> 10) & 3);
1903 gen_helper_ftrv(cpu_env, n);
1904 tcg_temp_free(n);
1905 return;
1907 break;
1909 #if 0
1910 fprintf(stderr, "unknown instruction 0x%04x at pc 0x%08x\n",
1911 ctx->opcode, ctx->pc);
1912 fflush(stderr);
1913 #endif
1914 tcg_gen_movi_i32(cpu_pc, ctx->pc);
1915 if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
1916 gen_helper_raise_slot_illegal_instruction(cpu_env);
1917 } else {
1918 gen_helper_raise_illegal_instruction(cpu_env);
1920 ctx->bstate = BS_BRANCH;
/* Translate one guest instruction and handle SH4 delay-slot sequencing.
 *
 * The flags in effect *before* decoding are captured first: if this
 * instruction sat in a delay slot (DELAY_SLOT / DELAY_SLOT_CONDITIONAL
 * set by the preceding branch), the deferred branch is emitted after
 * the slot instruction has been translated, and the TB is ended
 * (BS_BRANCH).  If instead this instruction *scheduled* a delay slot,
 * the new flags are stored so the slot is resumed correctly if the TB
 * is re-entered there. */
static void decode_opc(DisasContext * ctx)
    /* _decode_opc() may change ctx->flags; remember the flags that were
       live for THIS instruction so we can tell if it was a delay slot. */
    uint32_t old_flags = ctx->flags;

    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
        tcg_gen_debug_insn_start(ctx->pc);

    _decode_opc(ctx);

    if (old_flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
        if (ctx->flags & DELAY_SLOT_CLEARME) {
            gen_store_flags(0);
        } else {
            /* go out of the delay slot */
            uint32_t new_flags = ctx->flags;
            new_flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
            gen_store_flags(new_flags);
        ctx->flags = 0;
        /* The pending branch terminates this translation block. */
        ctx->bstate = BS_BRANCH;
        if (old_flags & DELAY_SLOT_CONDITIONAL) {
            gen_delayed_conditional_jump(ctx);
        } else if (old_flags & DELAY_SLOT) {
            gen_jump(ctx);

    /* go into a delay slot */
    if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL))
        gen_store_flags(ctx->flags);
/* Translate a run of SH4 instructions starting at tb->pc into TCG ops.
 *
 * Translation stops at a branch/exception (ctx.bstate), at a guest page
 * boundary, when max_insns is reached, or on single-stepping.  When
 * search_pc is non-zero, per-op guest PC / flags / insn-count records
 * are additionally written to the gen_opc_* side tables so that a host
 * PC can later be mapped back to a guest PC (see restore_state_to_opc). */
static inline void
gen_intermediate_code_internal(CPUSH4State * env, TranslationBlock * tb,
                               int search_pc)
    DisasContext ctx;
    target_ulong pc_start;
    static uint16_t *gen_opc_end;
    CPUBreakpoint *bp;
    int i, ii;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
    ctx.pc = pc_start;
    ctx.flags = (uint32_t)tb->flags;
    ctx.bstate = BS_NONE;
    /* memidx 1 = user mode (SR.MD clear), 0 = privileged; see IS_USER. */
    ctx.memidx = (ctx.flags & SR_MD) == 0 ? 1 : 0;
    /* We don't know if the delayed pc came from a dynamic or static branch,
       so assume it is a dynamic branch.  */
    ctx.delayed_pc = -1; /* use delayed pc from env pointer */
    ctx.tb = tb;
    ctx.singlestep_enabled = env->singlestep_enabled;
    ctx.features = env->features;
    ctx.has_movcal = (ctx.flags & TB_FLAG_PENDING_MOVCA);

    ii = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;
    gen_icount_start();
    while (ctx.bstate == BS_NONE && gen_opc_ptr < gen_opc_end) {
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (ctx.pc == bp->pc) {
                    /* We have hit a breakpoint - make sure PC is up-to-date */
                    tcg_gen_movi_i32(cpu_pc, ctx.pc);
                    gen_helper_debug(cpu_env);
                    ctx.bstate = BS_BRANCH;
                    break;
        if (search_pc) {
            /* Record guest-state for each TCG op index; gaps between ops
               of the previous instruction are zero-filled. */
            i = gen_opc_ptr - gen_opc_buf;
            if (ii < i) {
                ii++;
                while (ii < i)
                    gen_opc_instr_start[ii++] = 0;
            gen_opc_pc[ii] = ctx.pc;
            gen_opc_hflags[ii] = ctx.flags;
            gen_opc_instr_start[ii] = 1;
            gen_opc_icount[ii] = num_insns;
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();
#if 0
        fprintf(stderr, "Loading opcode at address 0x%08x\n", ctx.pc);
        fflush(stderr);
#endif
        /* SH4 instructions are 16 bits wide. */
        ctx.opcode = cpu_lduw_code(env, ctx.pc);
        decode_opc(&ctx);
        num_insns++;
        ctx.pc += 2;
        /* Stop when the next insn would start on a new guest page. */
        if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0)
            break;
        if (env->singlestep_enabled)
            break;
        if (num_insns >= max_insns)
            break;
        if (singlestep)
            break;
    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    if (env->singlestep_enabled) {
        tcg_gen_movi_i32(cpu_pc, ctx.pc);
        gen_helper_debug(cpu_env);
    } else {
        /* Emit the block epilogue according to why translation stopped. */
        switch (ctx.bstate) {
        case BS_STOP:
            /* gen_op_interrupt_restart(); */
            /* fall through */
        case BS_NONE:
            if (ctx.flags) {
                gen_store_flags(ctx.flags | DELAY_SLOT_CLEARME);
            gen_goto_tb(&ctx, 0, ctx.pc);
            break;
        case BS_EXCP:
            /* gen_op_interrupt_restart(); */
            tcg_gen_exit_tb(0);
            break;
        case BS_BRANCH:
        default:
            /* Branch code was already emitted by decode_opc(). */
            break;

    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        /* Zero-fill the tail of the side tables past the last insn. */
        i = gen_opc_ptr - gen_opc_buf;
        ii++;
        while (ii <= i)
            gen_opc_instr_start[ii++] = 0;
    } else {
        tb->size = ctx.pc - pc_start;
        tb->icount = num_insns;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN:\n");	/* , lookup_symbol(pc_start)); */
        log_target_disas(pc_start, ctx.pc - pc_start, 0);
        qemu_log("\n");
#endif
2079 void gen_intermediate_code(CPUSH4State * env, struct TranslationBlock *tb)
2081 gen_intermediate_code_internal(env, tb, 0);
2084 void gen_intermediate_code_pc(CPUSH4State * env, struct TranslationBlock *tb)
2086 gen_intermediate_code_internal(env, tb, 1);
/* Copy the guest PC and translator flags recorded for TCG op index
   pc_pos (see the search_pc path of gen_intermediate_code_internal)
   back into the CPU state.  NOTE(review): tb is unused in the visible
   body; presumably kept for the common restore_state_to_opc signature
   across targets — confirm against the caller. */
void restore_state_to_opc(CPUSH4State *env, TranslationBlock *tb, int pc_pos)
    env->pc = gen_opc_pc[pc_pos];
    env->flags = gen_opc_hflags[pc_pos];