virtagent: various bits to build QEMU with virtagent
[qemu/mdroth.git] / target-sh4 / translate.c
blobf41813993172b976f3ec5ac3ac60671ee86bb57e
1 /*
2 * SH4 translation
4 * Copyright (c) 2005 Samuel Tardieu
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include <stdarg.h>
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <string.h>
23 #include <inttypes.h>
25 #define DEBUG_DISAS
26 #define SH4_DEBUG_DISAS
27 //#define SH4_SINGLE_STEP
29 #include "cpu.h"
30 #include "exec-all.h"
31 #include "disas.h"
32 #include "tcg-op.h"
33 #include "qemu-common.h"
35 #include "helper.h"
36 #define GEN_HELPER 1
37 #include "helper.h"
39 typedef struct DisasContext {
40 struct TranslationBlock *tb;
41 target_ulong pc;
42 uint32_t sr;
43 uint32_t fpscr;
44 uint16_t opcode;
45 uint32_t flags;
46 int bstate;
47 int memidx;
48 uint32_t delayed_pc;
49 int singlestep_enabled;
50 uint32_t features;
51 int has_movcal;
52 } DisasContext;
/* Privilege test: linux-user always runs unprivileged; system emulation
   derives it from the SR.MD bit captured in the DisasContext. */
#if defined(CONFIG_USER_ONLY)
#define IS_USER(ctx) 1
#else
#define IS_USER(ctx) (!(ctx->sr & SR_MD))
#endif
/* Values for DisasContext::bstate — how the TB translation ends. */
enum {
    BS_NONE = 0,   /* We go out of the TB without reaching a branch or an
                    * exception condition */
    BS_STOP = 1,   /* We want to stop translation for any reason */
    BS_BRANCH = 2, /* We reached a branch condition */
    BS_EXCP = 3,   /* We reached an exception condition */
};
69 /* global register indexes */
70 static TCGv_ptr cpu_env;
71 static TCGv cpu_gregs[24];
72 static TCGv cpu_pc, cpu_sr, cpu_ssr, cpu_spc, cpu_gbr;
73 static TCGv cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl;
74 static TCGv cpu_pr, cpu_fpscr, cpu_fpul, cpu_ldst;
75 static TCGv cpu_fregs[32];
77 /* internal register indexes */
78 static TCGv cpu_flags, cpu_delayed_pc;
80 static uint32_t gen_opc_hflags[OPC_BUF_SIZE];
82 #include "gen-icount.h"
84 static void sh4_translate_init(void)
86 int i;
87 static int done_init = 0;
88 static const char * const gregnames[24] = {
89 "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
90 "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
91 "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
92 "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
93 "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
95 static const char * const fregnames[32] = {
96 "FPR0_BANK0", "FPR1_BANK0", "FPR2_BANK0", "FPR3_BANK0",
97 "FPR4_BANK0", "FPR5_BANK0", "FPR6_BANK0", "FPR7_BANK0",
98 "FPR8_BANK0", "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
99 "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
100 "FPR0_BANK1", "FPR1_BANK1", "FPR2_BANK1", "FPR3_BANK1",
101 "FPR4_BANK1", "FPR5_BANK1", "FPR6_BANK1", "FPR7_BANK1",
102 "FPR8_BANK1", "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
103 "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
106 if (done_init)
107 return;
109 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
111 for (i = 0; i < 24; i++)
112 cpu_gregs[i] = tcg_global_mem_new_i32(TCG_AREG0,
113 offsetof(CPUState, gregs[i]),
114 gregnames[i]);
116 cpu_pc = tcg_global_mem_new_i32(TCG_AREG0,
117 offsetof(CPUState, pc), "PC");
118 cpu_sr = tcg_global_mem_new_i32(TCG_AREG0,
119 offsetof(CPUState, sr), "SR");
120 cpu_ssr = tcg_global_mem_new_i32(TCG_AREG0,
121 offsetof(CPUState, ssr), "SSR");
122 cpu_spc = tcg_global_mem_new_i32(TCG_AREG0,
123 offsetof(CPUState, spc), "SPC");
124 cpu_gbr = tcg_global_mem_new_i32(TCG_AREG0,
125 offsetof(CPUState, gbr), "GBR");
126 cpu_vbr = tcg_global_mem_new_i32(TCG_AREG0,
127 offsetof(CPUState, vbr), "VBR");
128 cpu_sgr = tcg_global_mem_new_i32(TCG_AREG0,
129 offsetof(CPUState, sgr), "SGR");
130 cpu_dbr = tcg_global_mem_new_i32(TCG_AREG0,
131 offsetof(CPUState, dbr), "DBR");
132 cpu_mach = tcg_global_mem_new_i32(TCG_AREG0,
133 offsetof(CPUState, mach), "MACH");
134 cpu_macl = tcg_global_mem_new_i32(TCG_AREG0,
135 offsetof(CPUState, macl), "MACL");
136 cpu_pr = tcg_global_mem_new_i32(TCG_AREG0,
137 offsetof(CPUState, pr), "PR");
138 cpu_fpscr = tcg_global_mem_new_i32(TCG_AREG0,
139 offsetof(CPUState, fpscr), "FPSCR");
140 cpu_fpul = tcg_global_mem_new_i32(TCG_AREG0,
141 offsetof(CPUState, fpul), "FPUL");
143 cpu_flags = tcg_global_mem_new_i32(TCG_AREG0,
144 offsetof(CPUState, flags), "_flags_");
145 cpu_delayed_pc = tcg_global_mem_new_i32(TCG_AREG0,
146 offsetof(CPUState, delayed_pc),
147 "_delayed_pc_");
148 cpu_ldst = tcg_global_mem_new_i32(TCG_AREG0,
149 offsetof(CPUState, ldst), "_ldst_");
151 for (i = 0; i < 32; i++)
152 cpu_fregs[i] = tcg_global_mem_new_i32(TCG_AREG0,
153 offsetof(CPUState, fregs[i]),
154 fregnames[i]);
156 /* register helpers */
157 #define GEN_HELPER 2
158 #include "helper.h"
160 done_init = 1;
163 void cpu_dump_state(CPUState * env, FILE * f,
164 int (*cpu_fprintf) (FILE * f, const char *fmt, ...),
165 int flags)
167 int i;
168 cpu_fprintf(f, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
169 env->pc, env->sr, env->pr, env->fpscr);
170 cpu_fprintf(f, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
171 env->spc, env->ssr, env->gbr, env->vbr);
172 cpu_fprintf(f, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
173 env->sgr, env->dbr, env->delayed_pc, env->fpul);
174 for (i = 0; i < 24; i += 4) {
175 cpu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
176 i, env->gregs[i], i + 1, env->gregs[i + 1],
177 i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]);
179 if (env->flags & DELAY_SLOT) {
180 cpu_fprintf(f, "in delay slot (delayed_pc=0x%08x)\n",
181 env->delayed_pc);
182 } else if (env->flags & DELAY_SLOT_CONDITIONAL) {
183 cpu_fprintf(f, "in conditional delay slot (delayed_pc=0x%08x)\n",
184 env->delayed_pc);
188 static void cpu_sh4_reset(CPUSH4State * env)
190 if (qemu_loglevel_mask(CPU_LOG_RESET)) {
191 qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
192 log_cpu_state(env, 0);
195 #if defined(CONFIG_USER_ONLY)
196 env->sr = 0;
197 #else
198 env->sr = SR_MD | SR_RB | SR_BL | SR_I3 | SR_I2 | SR_I1 | SR_I0;
199 #endif
200 env->vbr = 0;
201 env->pc = 0xA0000000;
202 #if defined(CONFIG_USER_ONLY)
203 env->fpscr = FPSCR_PR; /* value for userspace according to the kernel */
204 set_float_rounding_mode(float_round_nearest_even, &env->fp_status); /* ?! */
205 #else
206 env->fpscr = 0x00040001; /* CPU reset value according to SH4 manual */
207 set_float_rounding_mode(float_round_to_zero, &env->fp_status);
208 #endif
209 env->mmucr = 0;
/* Static description of one supported SH4 CPU model. */
typedef struct {
    const char *name;   /* model name as given on the command line */
    int id;             /* SH_CPU_* identifier */
    uint32_t pvr;       /* processor version register */
    uint32_t prr;       /* product register */
    uint32_t cvr;       /* cache version register */
    uint32_t features;  /* SH_FEATURE_* capability bits */
} sh4_def_t;
221 static sh4_def_t sh4_defs[] = {
223 .name = "SH7750R",
224 .id = SH_CPU_SH7750R,
225 .pvr = 0x00050000,
226 .prr = 0x00000100,
227 .cvr = 0x00110000,
228 .features = SH_FEATURE_BCR3_AND_BCR4,
229 }, {
230 .name = "SH7751R",
231 .id = SH_CPU_SH7751R,
232 .pvr = 0x04050005,
233 .prr = 0x00000113,
234 .cvr = 0x00110000, /* Neutered caches, should be 0x20480000 */
235 .features = SH_FEATURE_BCR3_AND_BCR4,
236 }, {
237 .name = "SH7785",
238 .id = SH_CPU_SH7785,
239 .pvr = 0x10300700,
240 .prr = 0x00000200,
241 .cvr = 0x71440211,
242 .features = SH_FEATURE_SH4A,
246 static const sh4_def_t *cpu_sh4_find_by_name(const char *name)
248 int i;
250 if (strcasecmp(name, "any") == 0)
251 return &sh4_defs[0];
253 for (i = 0; i < ARRAY_SIZE(sh4_defs); i++)
254 if (strcasecmp(name, sh4_defs[i].name) == 0)
255 return &sh4_defs[i];
257 return NULL;
260 void sh4_cpu_list(FILE *f, fprintf_function cpu_fprintf)
262 int i;
264 for (i = 0; i < ARRAY_SIZE(sh4_defs); i++)
265 (*cpu_fprintf)(f, "%s\n", sh4_defs[i].name);
268 static void cpu_sh4_register(CPUSH4State *env, const sh4_def_t *def)
270 env->pvr = def->pvr;
271 env->prr = def->prr;
272 env->cvr = def->cvr;
273 env->id = def->id;
276 CPUSH4State *cpu_sh4_init(const char *cpu_model)
278 CPUSH4State *env;
279 const sh4_def_t *def;
281 def = cpu_sh4_find_by_name(cpu_model);
282 if (!def)
283 return NULL;
284 env = qemu_mallocz(sizeof(CPUSH4State));
285 env->features = def->features;
286 cpu_exec_init(env);
287 env->movcal_backup_tail = &(env->movcal_backup);
288 sh4_translate_init();
289 env->cpu_model_str = cpu_model;
290 cpu_sh4_reset(env);
291 cpu_sh4_register(env, def);
292 tlb_flush(env, 1);
293 qemu_init_vcpu(env);
294 return env;
297 static void gen_goto_tb(DisasContext * ctx, int n, target_ulong dest)
299 TranslationBlock *tb;
300 tb = ctx->tb;
302 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) &&
303 !ctx->singlestep_enabled) {
304 /* Use a direct jump if in same page and singlestep not enabled */
305 tcg_gen_goto_tb(n);
306 tcg_gen_movi_i32(cpu_pc, dest);
307 tcg_gen_exit_tb((long) tb + n);
308 } else {
309 tcg_gen_movi_i32(cpu_pc, dest);
310 if (ctx->singlestep_enabled)
311 gen_helper_debug();
312 tcg_gen_exit_tb(0);
316 static void gen_jump(DisasContext * ctx)
318 if (ctx->delayed_pc == (uint32_t) - 1) {
319 /* Target is not statically known, it comes necessarily from a
320 delayed jump as immediate jump are conditinal jumps */
321 tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc);
322 if (ctx->singlestep_enabled)
323 gen_helper_debug();
324 tcg_gen_exit_tb(0);
325 } else {
326 gen_goto_tb(ctx, 0, ctx->delayed_pc);
330 static inline void gen_branch_slot(uint32_t delayed_pc, int t)
332 TCGv sr;
333 int label = gen_new_label();
334 tcg_gen_movi_i32(cpu_delayed_pc, delayed_pc);
335 sr = tcg_temp_new();
336 tcg_gen_andi_i32(sr, cpu_sr, SR_T);
337 tcg_gen_brcondi_i32(TCG_COND_NE, sr, t ? SR_T : 0, label);
338 tcg_gen_ori_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
339 gen_set_label(label);
342 /* Immediate conditional jump (bt or bf) */
343 static void gen_conditional_jump(DisasContext * ctx,
344 target_ulong ift, target_ulong ifnott)
346 int l1;
347 TCGv sr;
349 l1 = gen_new_label();
350 sr = tcg_temp_new();
351 tcg_gen_andi_i32(sr, cpu_sr, SR_T);
352 tcg_gen_brcondi_i32(TCG_COND_EQ, sr, SR_T, l1);
353 gen_goto_tb(ctx, 0, ifnott);
354 gen_set_label(l1);
355 gen_goto_tb(ctx, 1, ift);
358 /* Delayed conditional jump (bt or bf) */
359 static void gen_delayed_conditional_jump(DisasContext * ctx)
361 int l1;
362 TCGv ds;
364 l1 = gen_new_label();
365 ds = tcg_temp_new();
366 tcg_gen_andi_i32(ds, cpu_flags, DELAY_SLOT_TRUE);
367 tcg_gen_brcondi_i32(TCG_COND_EQ, ds, DELAY_SLOT_TRUE, l1);
368 gen_goto_tb(ctx, 1, ctx->pc + 2);
369 gen_set_label(l1);
370 tcg_gen_andi_i32(cpu_flags, cpu_flags, ~DELAY_SLOT_TRUE);
371 gen_jump(ctx);
374 static inline void gen_set_t(void)
376 tcg_gen_ori_i32(cpu_sr, cpu_sr, SR_T);
379 static inline void gen_clr_t(void)
381 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
384 static inline void gen_cmp(int cond, TCGv t0, TCGv t1)
386 int label1 = gen_new_label();
387 int label2 = gen_new_label();
388 tcg_gen_brcond_i32(cond, t1, t0, label1);
389 gen_clr_t();
390 tcg_gen_br(label2);
391 gen_set_label(label1);
392 gen_set_t();
393 gen_set_label(label2);
396 static inline void gen_cmp_imm(int cond, TCGv t0, int32_t imm)
398 int label1 = gen_new_label();
399 int label2 = gen_new_label();
400 tcg_gen_brcondi_i32(cond, t0, imm, label1);
401 gen_clr_t();
402 tcg_gen_br(label2);
403 gen_set_label(label1);
404 gen_set_t();
405 gen_set_label(label2);
408 static inline void gen_store_flags(uint32_t flags)
410 tcg_gen_andi_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
411 tcg_gen_ori_i32(cpu_flags, cpu_flags, flags);
414 static inline void gen_copy_bit_i32(TCGv t0, int p0, TCGv t1, int p1)
416 TCGv tmp = tcg_temp_new();
418 p0 &= 0x1f;
419 p1 &= 0x1f;
421 tcg_gen_andi_i32(tmp, t1, (1 << p1));
422 tcg_gen_andi_i32(t0, t0, ~(1 << p0));
423 if (p0 < p1)
424 tcg_gen_shri_i32(tmp, tmp, p1 - p0);
425 else if (p0 > p1)
426 tcg_gen_shli_i32(tmp, tmp, p0 - p1);
427 tcg_gen_or_i32(t0, t0, tmp);
429 tcg_temp_free(tmp);
432 static inline void gen_load_fpr64(TCGv_i64 t, int reg)
434 tcg_gen_concat_i32_i64(t, cpu_fregs[reg + 1], cpu_fregs[reg]);
437 static inline void gen_store_fpr64 (TCGv_i64 t, int reg)
439 TCGv_i32 tmp = tcg_temp_new_i32();
440 tcg_gen_trunc_i64_i32(tmp, t);
441 tcg_gen_mov_i32(cpu_fregs[reg + 1], tmp);
442 tcg_gen_shri_i64(t, t, 32);
443 tcg_gen_trunc_i64_i32(tmp, t);
444 tcg_gen_mov_i32(cpu_fregs[reg], tmp);
445 tcg_temp_free_i32(tmp);
/* Opcode bit-field extraction (ctx->opcode is the 16-bit instruction). */
#define B3_0 (ctx->opcode & 0xf)
#define B6_4 ((ctx->opcode >> 4) & 0x7)
#define B7_4 ((ctx->opcode >> 4) & 0xf)
#define B7_0 (ctx->opcode & 0xff)
#define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff))
#define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
  (ctx->opcode & 0xfff))
#define B11_8 ((ctx->opcode >> 8) & 0xf)
#define B15_12 ((ctx->opcode >> 12) & 0xf)

/* General register access: R0-R7 come from bank 1 when both SR.MD and
   SR.RB are set, otherwise from bank 0. */
#define REG(x) ((x) < 8 && (ctx->sr & (SR_MD | SR_RB)) == (SR_MD | SR_RB) ? \
		(cpu_gregs[x + 16]) : (cpu_gregs[x]))

/* The opposite bank of REG() for R0-R7. */
#define ALTREG(x) ((x) < 8 && (ctx->sr & (SR_MD | SR_RB)) != (SR_MD | SR_RB) \
		? (cpu_gregs[x + 16]) : (cpu_gregs[x]))

/* FP register access, honouring the FPSCR.FR bank bit. */
#define FREG(x) (ctx->fpscr & FPSCR_FR ? (x) ^ 0x10 : (x))
#define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe))
#define XREG(x) (ctx->fpscr & FPSCR_FR ? XHACK(x) ^ 0x10 : XHACK(x))
#define DREG(x) FREG(x) /* Assumes lsb of (x) is always 0 */
/* Raise an illegal-slot exception and abort decoding if the current
   instruction sits in a delay slot. */
#define CHECK_NOT_DELAY_SLOT \
  if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL))     \
  {                                                           \
      tcg_gen_movi_i32(cpu_pc, ctx->pc-2);                    \
      gen_helper_raise_slot_illegal_instruction();            \
      ctx->bstate = BS_EXCP;                                  \
      return;                                                 \
  }

/* Raise an illegal-instruction exception and abort decoding if the CPU
   is not in privileged mode. */
#define CHECK_PRIVILEGED                                      \
  if (IS_USER(ctx)) {                                         \
      tcg_gen_movi_i32(cpu_pc, ctx->pc);                      \
      gen_helper_raise_illegal_instruction();                 \
      ctx->bstate = BS_EXCP;                                  \
      return;                                                 \
  }

/* Raise an FPU-disable exception (slot variant inside a delay slot) and
   abort decoding if SR.FD is set. */
#define CHECK_FPU_ENABLED                                     \
  if (ctx->flags & SR_FD) {                                   \
      if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
          tcg_gen_movi_i32(cpu_pc, ctx->pc-2);                \
          gen_helper_raise_slot_fpu_disable();                \
      } else {                                                \
          tcg_gen_movi_i32(cpu_pc, ctx->pc);                  \
          gen_helper_raise_fpu_disable();                     \
      }                                                       \
      ctx->bstate = BS_EXCP;                                  \
      return;                                                 \
  }
499 static void _decode_opc(DisasContext * ctx)
501 /* This code tries to make movcal emulation sufficiently
502 accurate for Linux purposes. This instruction writes
503 memory, and prior to that, always allocates a cache line.
504 It is used in two contexts:
505 - in memcpy, where data is copied in blocks, the first write
506 of to a block uses movca.l for performance.
507 - in arch/sh/mm/cache-sh4.c, movcal.l + ocbi combination is used
508 to flush the cache. Here, the data written by movcal.l is never
509 written to memory, and the data written is just bogus.
511 To simulate this, we simulate movcal.l, we store the value to memory,
512 but we also remember the previous content. If we see ocbi, we check
513 if movcal.l for that address was done previously. If so, the write should
514 not have hit the memory, so we restore the previous content.
515 When we see an instruction that is neither movca.l
516 nor ocbi, the previous content is discarded.
518 To optimize, we only try to flush stores when we're at the start of
519 TB, or if we already saw movca.l in this TB and did not flush stores
520 yet. */
521 if (ctx->has_movcal)
523 int opcode = ctx->opcode & 0xf0ff;
524 if (opcode != 0x0093 /* ocbi */
525 && opcode != 0x00c3 /* movca.l */)
527 gen_helper_discard_movcal_backup ();
528 ctx->has_movcal = 0;
532 #if 0
533 fprintf(stderr, "Translating opcode 0x%04x\n", ctx->opcode);
534 #endif
536 switch (ctx->opcode) {
537 case 0x0019: /* div0u */
538 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(SR_M | SR_Q | SR_T));
539 return;
540 case 0x000b: /* rts */
541 CHECK_NOT_DELAY_SLOT
542 tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr);
543 ctx->flags |= DELAY_SLOT;
544 ctx->delayed_pc = (uint32_t) - 1;
545 return;
546 case 0x0028: /* clrmac */
547 tcg_gen_movi_i32(cpu_mach, 0);
548 tcg_gen_movi_i32(cpu_macl, 0);
549 return;
550 case 0x0048: /* clrs */
551 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_S);
552 return;
553 case 0x0008: /* clrt */
554 gen_clr_t();
555 return;
556 case 0x0038: /* ldtlb */
557 CHECK_PRIVILEGED
558 gen_helper_ldtlb();
559 return;
560 case 0x002b: /* rte */
561 CHECK_PRIVILEGED
562 CHECK_NOT_DELAY_SLOT
563 tcg_gen_mov_i32(cpu_sr, cpu_ssr);
564 tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc);
565 ctx->flags |= DELAY_SLOT;
566 ctx->delayed_pc = (uint32_t) - 1;
567 return;
568 case 0x0058: /* sets */
569 tcg_gen_ori_i32(cpu_sr, cpu_sr, SR_S);
570 return;
571 case 0x0018: /* sett */
572 gen_set_t();
573 return;
574 case 0xfbfd: /* frchg */
575 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_FR);
576 ctx->bstate = BS_STOP;
577 return;
578 case 0xf3fd: /* fschg */
579 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_SZ);
580 ctx->bstate = BS_STOP;
581 return;
582 case 0x0009: /* nop */
583 return;
584 case 0x001b: /* sleep */
585 CHECK_PRIVILEGED
586 gen_helper_sleep(tcg_const_i32(ctx->pc + 2));
587 return;
590 switch (ctx->opcode & 0xf000) {
591 case 0x1000: /* mov.l Rm,@(disp,Rn) */
593 TCGv addr = tcg_temp_new();
594 tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4);
595 tcg_gen_qemu_st32(REG(B7_4), addr, ctx->memidx);
596 tcg_temp_free(addr);
598 return;
599 case 0x5000: /* mov.l @(disp,Rm),Rn */
601 TCGv addr = tcg_temp_new();
602 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4);
603 tcg_gen_qemu_ld32s(REG(B11_8), addr, ctx->memidx);
604 tcg_temp_free(addr);
606 return;
607 case 0xe000: /* mov #imm,Rn */
608 tcg_gen_movi_i32(REG(B11_8), B7_0s);
609 return;
610 case 0x9000: /* mov.w @(disp,PC),Rn */
612 TCGv addr = tcg_const_i32(ctx->pc + 4 + B7_0 * 2);
613 tcg_gen_qemu_ld16s(REG(B11_8), addr, ctx->memidx);
614 tcg_temp_free(addr);
616 return;
617 case 0xd000: /* mov.l @(disp,PC),Rn */
619 TCGv addr = tcg_const_i32((ctx->pc + 4 + B7_0 * 4) & ~3);
620 tcg_gen_qemu_ld32s(REG(B11_8), addr, ctx->memidx);
621 tcg_temp_free(addr);
623 return;
624 case 0x7000: /* add #imm,Rn */
625 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), B7_0s);
626 return;
627 case 0xa000: /* bra disp */
628 CHECK_NOT_DELAY_SLOT
629 ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
630 tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
631 ctx->flags |= DELAY_SLOT;
632 return;
633 case 0xb000: /* bsr disp */
634 CHECK_NOT_DELAY_SLOT
635 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
636 ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
637 tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
638 ctx->flags |= DELAY_SLOT;
639 return;
642 switch (ctx->opcode & 0xf00f) {
643 case 0x6003: /* mov Rm,Rn */
644 tcg_gen_mov_i32(REG(B11_8), REG(B7_4));
645 return;
646 case 0x2000: /* mov.b Rm,@Rn */
647 tcg_gen_qemu_st8(REG(B7_4), REG(B11_8), ctx->memidx);
648 return;
649 case 0x2001: /* mov.w Rm,@Rn */
650 tcg_gen_qemu_st16(REG(B7_4), REG(B11_8), ctx->memidx);
651 return;
652 case 0x2002: /* mov.l Rm,@Rn */
653 tcg_gen_qemu_st32(REG(B7_4), REG(B11_8), ctx->memidx);
654 return;
655 case 0x6000: /* mov.b @Rm,Rn */
656 tcg_gen_qemu_ld8s(REG(B11_8), REG(B7_4), ctx->memidx);
657 return;
658 case 0x6001: /* mov.w @Rm,Rn */
659 tcg_gen_qemu_ld16s(REG(B11_8), REG(B7_4), ctx->memidx);
660 return;
661 case 0x6002: /* mov.l @Rm,Rn */
662 tcg_gen_qemu_ld32s(REG(B11_8), REG(B7_4), ctx->memidx);
663 return;
664 case 0x2004: /* mov.b Rm,@-Rn */
666 TCGv addr = tcg_temp_new();
667 tcg_gen_subi_i32(addr, REG(B11_8), 1);
668 tcg_gen_qemu_st8(REG(B7_4), addr, ctx->memidx); /* might cause re-execution */
669 tcg_gen_mov_i32(REG(B11_8), addr); /* modify register status */
670 tcg_temp_free(addr);
672 return;
673 case 0x2005: /* mov.w Rm,@-Rn */
675 TCGv addr = tcg_temp_new();
676 tcg_gen_subi_i32(addr, REG(B11_8), 2);
677 tcg_gen_qemu_st16(REG(B7_4), addr, ctx->memidx);
678 tcg_gen_mov_i32(REG(B11_8), addr);
679 tcg_temp_free(addr);
681 return;
682 case 0x2006: /* mov.l Rm,@-Rn */
684 TCGv addr = tcg_temp_new();
685 tcg_gen_subi_i32(addr, REG(B11_8), 4);
686 tcg_gen_qemu_st32(REG(B7_4), addr, ctx->memidx);
687 tcg_gen_mov_i32(REG(B11_8), addr);
689 return;
690 case 0x6004: /* mov.b @Rm+,Rn */
691 tcg_gen_qemu_ld8s(REG(B11_8), REG(B7_4), ctx->memidx);
692 if ( B11_8 != B7_4 )
693 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 1);
694 return;
695 case 0x6005: /* mov.w @Rm+,Rn */
696 tcg_gen_qemu_ld16s(REG(B11_8), REG(B7_4), ctx->memidx);
697 if ( B11_8 != B7_4 )
698 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
699 return;
700 case 0x6006: /* mov.l @Rm+,Rn */
701 tcg_gen_qemu_ld32s(REG(B11_8), REG(B7_4), ctx->memidx);
702 if ( B11_8 != B7_4 )
703 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
704 return;
705 case 0x0004: /* mov.b Rm,@(R0,Rn) */
707 TCGv addr = tcg_temp_new();
708 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
709 tcg_gen_qemu_st8(REG(B7_4), addr, ctx->memidx);
710 tcg_temp_free(addr);
712 return;
713 case 0x0005: /* mov.w Rm,@(R0,Rn) */
715 TCGv addr = tcg_temp_new();
716 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
717 tcg_gen_qemu_st16(REG(B7_4), addr, ctx->memidx);
718 tcg_temp_free(addr);
720 return;
721 case 0x0006: /* mov.l Rm,@(R0,Rn) */
723 TCGv addr = tcg_temp_new();
724 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
725 tcg_gen_qemu_st32(REG(B7_4), addr, ctx->memidx);
726 tcg_temp_free(addr);
728 return;
729 case 0x000c: /* mov.b @(R0,Rm),Rn */
731 TCGv addr = tcg_temp_new();
732 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
733 tcg_gen_qemu_ld8s(REG(B11_8), addr, ctx->memidx);
734 tcg_temp_free(addr);
736 return;
737 case 0x000d: /* mov.w @(R0,Rm),Rn */
739 TCGv addr = tcg_temp_new();
740 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
741 tcg_gen_qemu_ld16s(REG(B11_8), addr, ctx->memidx);
742 tcg_temp_free(addr);
744 return;
745 case 0x000e: /* mov.l @(R0,Rm),Rn */
747 TCGv addr = tcg_temp_new();
748 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
749 tcg_gen_qemu_ld32s(REG(B11_8), addr, ctx->memidx);
750 tcg_temp_free(addr);
752 return;
753 case 0x6008: /* swap.b Rm,Rn */
755 TCGv high, low;
756 high = tcg_temp_new();
757 tcg_gen_andi_i32(high, REG(B7_4), 0xffff0000);
758 low = tcg_temp_new();
759 tcg_gen_ext16u_i32(low, REG(B7_4));
760 tcg_gen_bswap16_i32(low, low);
761 tcg_gen_or_i32(REG(B11_8), high, low);
762 tcg_temp_free(low);
763 tcg_temp_free(high);
765 return;
766 case 0x6009: /* swap.w Rm,Rn */
768 TCGv high, low;
769 high = tcg_temp_new();
770 tcg_gen_shli_i32(high, REG(B7_4), 16);
771 low = tcg_temp_new();
772 tcg_gen_shri_i32(low, REG(B7_4), 16);
773 tcg_gen_ext16u_i32(low, low);
774 tcg_gen_or_i32(REG(B11_8), high, low);
775 tcg_temp_free(low);
776 tcg_temp_free(high);
778 return;
779 case 0x200d: /* xtrct Rm,Rn */
781 TCGv high, low;
782 high = tcg_temp_new();
783 tcg_gen_shli_i32(high, REG(B7_4), 16);
784 low = tcg_temp_new();
785 tcg_gen_shri_i32(low, REG(B11_8), 16);
786 tcg_gen_ext16u_i32(low, low);
787 tcg_gen_or_i32(REG(B11_8), high, low);
788 tcg_temp_free(low);
789 tcg_temp_free(high);
791 return;
792 case 0x300c: /* add Rm,Rn */
793 tcg_gen_add_i32(REG(B11_8), REG(B11_8), REG(B7_4));
794 return;
795 case 0x300e: /* addc Rm,Rn */
796 gen_helper_addc(REG(B11_8), REG(B7_4), REG(B11_8));
797 return;
798 case 0x300f: /* addv Rm,Rn */
799 gen_helper_addv(REG(B11_8), REG(B7_4), REG(B11_8));
800 return;
801 case 0x2009: /* and Rm,Rn */
802 tcg_gen_and_i32(REG(B11_8), REG(B11_8), REG(B7_4));
803 return;
804 case 0x3000: /* cmp/eq Rm,Rn */
805 gen_cmp(TCG_COND_EQ, REG(B7_4), REG(B11_8));
806 return;
807 case 0x3003: /* cmp/ge Rm,Rn */
808 gen_cmp(TCG_COND_GE, REG(B7_4), REG(B11_8));
809 return;
810 case 0x3007: /* cmp/gt Rm,Rn */
811 gen_cmp(TCG_COND_GT, REG(B7_4), REG(B11_8));
812 return;
813 case 0x3006: /* cmp/hi Rm,Rn */
814 gen_cmp(TCG_COND_GTU, REG(B7_4), REG(B11_8));
815 return;
816 case 0x3002: /* cmp/hs Rm,Rn */
817 gen_cmp(TCG_COND_GEU, REG(B7_4), REG(B11_8));
818 return;
819 case 0x200c: /* cmp/str Rm,Rn */
821 int label1 = gen_new_label();
822 int label2 = gen_new_label();
823 TCGv cmp1 = tcg_temp_local_new();
824 TCGv cmp2 = tcg_temp_local_new();
825 tcg_gen_xor_i32(cmp1, REG(B7_4), REG(B11_8));
826 tcg_gen_andi_i32(cmp2, cmp1, 0xff000000);
827 tcg_gen_brcondi_i32(TCG_COND_EQ, cmp2, 0, label1);
828 tcg_gen_andi_i32(cmp2, cmp1, 0x00ff0000);
829 tcg_gen_brcondi_i32(TCG_COND_EQ, cmp2, 0, label1);
830 tcg_gen_andi_i32(cmp2, cmp1, 0x0000ff00);
831 tcg_gen_brcondi_i32(TCG_COND_EQ, cmp2, 0, label1);
832 tcg_gen_andi_i32(cmp2, cmp1, 0x000000ff);
833 tcg_gen_brcondi_i32(TCG_COND_EQ, cmp2, 0, label1);
834 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
835 tcg_gen_br(label2);
836 gen_set_label(label1);
837 tcg_gen_ori_i32(cpu_sr, cpu_sr, SR_T);
838 gen_set_label(label2);
839 tcg_temp_free(cmp2);
840 tcg_temp_free(cmp1);
842 return;
843 case 0x2007: /* div0s Rm,Rn */
845 gen_copy_bit_i32(cpu_sr, 8, REG(B11_8), 31); /* SR_Q */
846 gen_copy_bit_i32(cpu_sr, 9, REG(B7_4), 31); /* SR_M */
847 TCGv val = tcg_temp_new();
848 tcg_gen_xor_i32(val, REG(B7_4), REG(B11_8));
849 gen_copy_bit_i32(cpu_sr, 0, val, 31); /* SR_T */
850 tcg_temp_free(val);
852 return;
853 case 0x3004: /* div1 Rm,Rn */
854 gen_helper_div1(REG(B11_8), REG(B7_4), REG(B11_8));
855 return;
856 case 0x300d: /* dmuls.l Rm,Rn */
858 TCGv_i64 tmp1 = tcg_temp_new_i64();
859 TCGv_i64 tmp2 = tcg_temp_new_i64();
861 tcg_gen_ext_i32_i64(tmp1, REG(B7_4));
862 tcg_gen_ext_i32_i64(tmp2, REG(B11_8));
863 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
864 tcg_gen_trunc_i64_i32(cpu_macl, tmp1);
865 tcg_gen_shri_i64(tmp1, tmp1, 32);
866 tcg_gen_trunc_i64_i32(cpu_mach, tmp1);
868 tcg_temp_free_i64(tmp2);
869 tcg_temp_free_i64(tmp1);
871 return;
872 case 0x3005: /* dmulu.l Rm,Rn */
874 TCGv_i64 tmp1 = tcg_temp_new_i64();
875 TCGv_i64 tmp2 = tcg_temp_new_i64();
877 tcg_gen_extu_i32_i64(tmp1, REG(B7_4));
878 tcg_gen_extu_i32_i64(tmp2, REG(B11_8));
879 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
880 tcg_gen_trunc_i64_i32(cpu_macl, tmp1);
881 tcg_gen_shri_i64(tmp1, tmp1, 32);
882 tcg_gen_trunc_i64_i32(cpu_mach, tmp1);
884 tcg_temp_free_i64(tmp2);
885 tcg_temp_free_i64(tmp1);
887 return;
888 case 0x600e: /* exts.b Rm,Rn */
889 tcg_gen_ext8s_i32(REG(B11_8), REG(B7_4));
890 return;
891 case 0x600f: /* exts.w Rm,Rn */
892 tcg_gen_ext16s_i32(REG(B11_8), REG(B7_4));
893 return;
894 case 0x600c: /* extu.b Rm,Rn */
895 tcg_gen_ext8u_i32(REG(B11_8), REG(B7_4));
896 return;
897 case 0x600d: /* extu.w Rm,Rn */
898 tcg_gen_ext16u_i32(REG(B11_8), REG(B7_4));
899 return;
900 case 0x000f: /* mac.l @Rm+,@Rn+ */
902 TCGv arg0, arg1;
903 arg0 = tcg_temp_new();
904 tcg_gen_qemu_ld32s(arg0, REG(B7_4), ctx->memidx);
905 arg1 = tcg_temp_new();
906 tcg_gen_qemu_ld32s(arg1, REG(B11_8), ctx->memidx);
907 gen_helper_macl(arg0, arg1);
908 tcg_temp_free(arg1);
909 tcg_temp_free(arg0);
910 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
911 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
913 return;
914 case 0x400f: /* mac.w @Rm+,@Rn+ */
916 TCGv arg0, arg1;
917 arg0 = tcg_temp_new();
918 tcg_gen_qemu_ld32s(arg0, REG(B7_4), ctx->memidx);
919 arg1 = tcg_temp_new();
920 tcg_gen_qemu_ld32s(arg1, REG(B11_8), ctx->memidx);
921 gen_helper_macw(arg0, arg1);
922 tcg_temp_free(arg1);
923 tcg_temp_free(arg0);
924 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2);
925 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
927 return;
928 case 0x0007: /* mul.l Rm,Rn */
929 tcg_gen_mul_i32(cpu_macl, REG(B7_4), REG(B11_8));
930 return;
931 case 0x200f: /* muls.w Rm,Rn */
933 TCGv arg0, arg1;
934 arg0 = tcg_temp_new();
935 tcg_gen_ext16s_i32(arg0, REG(B7_4));
936 arg1 = tcg_temp_new();
937 tcg_gen_ext16s_i32(arg1, REG(B11_8));
938 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
939 tcg_temp_free(arg1);
940 tcg_temp_free(arg0);
942 return;
943 case 0x200e: /* mulu.w Rm,Rn */
945 TCGv arg0, arg1;
946 arg0 = tcg_temp_new();
947 tcg_gen_ext16u_i32(arg0, REG(B7_4));
948 arg1 = tcg_temp_new();
949 tcg_gen_ext16u_i32(arg1, REG(B11_8));
950 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
951 tcg_temp_free(arg1);
952 tcg_temp_free(arg0);
954 return;
955 case 0x600b: /* neg Rm,Rn */
956 tcg_gen_neg_i32(REG(B11_8), REG(B7_4));
957 return;
958 case 0x600a: /* negc Rm,Rn */
959 gen_helper_negc(REG(B11_8), REG(B7_4));
960 return;
961 case 0x6007: /* not Rm,Rn */
962 tcg_gen_not_i32(REG(B11_8), REG(B7_4));
963 return;
964 case 0x200b: /* or Rm,Rn */
965 tcg_gen_or_i32(REG(B11_8), REG(B11_8), REG(B7_4));
966 return;
967 case 0x400c: /* shad Rm,Rn */
969 int label1 = gen_new_label();
970 int label2 = gen_new_label();
971 int label3 = gen_new_label();
972 int label4 = gen_new_label();
973 TCGv shift;
974 tcg_gen_brcondi_i32(TCG_COND_LT, REG(B7_4), 0, label1);
975 /* Rm positive, shift to the left */
976 shift = tcg_temp_new();
977 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
978 tcg_gen_shl_i32(REG(B11_8), REG(B11_8), shift);
979 tcg_temp_free(shift);
980 tcg_gen_br(label4);
981 /* Rm negative, shift to the right */
982 gen_set_label(label1);
983 shift = tcg_temp_new();
984 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
985 tcg_gen_brcondi_i32(TCG_COND_EQ, shift, 0, label2);
986 tcg_gen_not_i32(shift, REG(B7_4));
987 tcg_gen_andi_i32(shift, shift, 0x1f);
988 tcg_gen_addi_i32(shift, shift, 1);
989 tcg_gen_sar_i32(REG(B11_8), REG(B11_8), shift);
990 tcg_temp_free(shift);
991 tcg_gen_br(label4);
992 /* Rm = -32 */
993 gen_set_label(label2);
994 tcg_gen_brcondi_i32(TCG_COND_LT, REG(B11_8), 0, label3);
995 tcg_gen_movi_i32(REG(B11_8), 0);
996 tcg_gen_br(label4);
997 gen_set_label(label3);
998 tcg_gen_movi_i32(REG(B11_8), 0xffffffff);
999 gen_set_label(label4);
1001 return;
1002 case 0x400d: /* shld Rm,Rn */
1004 int label1 = gen_new_label();
1005 int label2 = gen_new_label();
1006 int label3 = gen_new_label();
1007 TCGv shift;
1008 tcg_gen_brcondi_i32(TCG_COND_LT, REG(B7_4), 0, label1);
1009 /* Rm positive, shift to the left */
1010 shift = tcg_temp_new();
1011 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
1012 tcg_gen_shl_i32(REG(B11_8), REG(B11_8), shift);
1013 tcg_temp_free(shift);
1014 tcg_gen_br(label3);
1015 /* Rm negative, shift to the right */
1016 gen_set_label(label1);
1017 shift = tcg_temp_new();
1018 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
1019 tcg_gen_brcondi_i32(TCG_COND_EQ, shift, 0, label2);
1020 tcg_gen_not_i32(shift, REG(B7_4));
1021 tcg_gen_andi_i32(shift, shift, 0x1f);
1022 tcg_gen_addi_i32(shift, shift, 1);
1023 tcg_gen_shr_i32(REG(B11_8), REG(B11_8), shift);
1024 tcg_temp_free(shift);
1025 tcg_gen_br(label3);
1026 /* Rm = -32 */
1027 gen_set_label(label2);
1028 tcg_gen_movi_i32(REG(B11_8), 0);
1029 gen_set_label(label3);
1031 return;
1032 case 0x3008: /* sub Rm,Rn */
1033 tcg_gen_sub_i32(REG(B11_8), REG(B11_8), REG(B7_4));
1034 return;
1035 case 0x300a: /* subc Rm,Rn */
1036 gen_helper_subc(REG(B11_8), REG(B7_4), REG(B11_8));
1037 return;
1038 case 0x300b: /* subv Rm,Rn */
1039 gen_helper_subv(REG(B11_8), REG(B7_4), REG(B11_8));
1040 return;
1041 case 0x2008: /* tst Rm,Rn */
1043 TCGv val = tcg_temp_new();
1044 tcg_gen_and_i32(val, REG(B7_4), REG(B11_8));
1045 gen_cmp_imm(TCG_COND_EQ, val, 0);
1046 tcg_temp_free(val);
1048 return;
1049 case 0x200a: /* xor Rm,Rn */
1050 tcg_gen_xor_i32(REG(B11_8), REG(B11_8), REG(B7_4));
1051 return;
1052 case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
1053 CHECK_FPU_ENABLED
1054 if (ctx->fpscr & FPSCR_SZ) {
1055 TCGv_i64 fp = tcg_temp_new_i64();
1056 gen_load_fpr64(fp, XREG(B7_4));
1057 gen_store_fpr64(fp, XREG(B11_8));
1058 tcg_temp_free_i64(fp);
1059 } else {
1060 tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1062 return;
1063 case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
1064 CHECK_FPU_ENABLED
1065 if (ctx->fpscr & FPSCR_SZ) {
1066 TCGv addr_hi = tcg_temp_new();
1067 int fr = XREG(B7_4);
1068 tcg_gen_addi_i32(addr_hi, REG(B11_8), 4);
1069 tcg_gen_qemu_st32(cpu_fregs[fr ], REG(B11_8), ctx->memidx);
1070 tcg_gen_qemu_st32(cpu_fregs[fr+1], addr_hi, ctx->memidx);
1071 tcg_temp_free(addr_hi);
1072 } else {
1073 tcg_gen_qemu_st32(cpu_fregs[FREG(B7_4)], REG(B11_8), ctx->memidx);
1075 return;
1076 case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
1077 CHECK_FPU_ENABLED
1078 if (ctx->fpscr & FPSCR_SZ) {
1079 TCGv addr_hi = tcg_temp_new();
1080 int fr = XREG(B11_8);
1081 tcg_gen_addi_i32(addr_hi, REG(B7_4), 4);
1082 tcg_gen_qemu_ld32u(cpu_fregs[fr ], REG(B7_4), ctx->memidx);
1083 tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr_hi, ctx->memidx);
1084 tcg_temp_free(addr_hi);
1085 } else {
1086 tcg_gen_qemu_ld32u(cpu_fregs[FREG(B11_8)], REG(B7_4), ctx->memidx);
1088 return;
1089 case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
1090 CHECK_FPU_ENABLED
1091 if (ctx->fpscr & FPSCR_SZ) {
1092 TCGv addr_hi = tcg_temp_new();
1093 int fr = XREG(B11_8);
1094 tcg_gen_addi_i32(addr_hi, REG(B7_4), 4);
1095 tcg_gen_qemu_ld32u(cpu_fregs[fr ], REG(B7_4), ctx->memidx);
1096 tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr_hi, ctx->memidx);
1097 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8);
1098 tcg_temp_free(addr_hi);
1099 } else {
1100 tcg_gen_qemu_ld32u(cpu_fregs[FREG(B11_8)], REG(B7_4), ctx->memidx);
1101 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
1103 return;
1104 case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
1105 CHECK_FPU_ENABLED
1106 if (ctx->fpscr & FPSCR_SZ) {
1107 TCGv addr = tcg_temp_new_i32();
1108 int fr = XREG(B7_4);
1109 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1110 tcg_gen_qemu_st32(cpu_fregs[fr+1], addr, ctx->memidx);
1111 tcg_gen_subi_i32(addr, addr, 4);
1112 tcg_gen_qemu_st32(cpu_fregs[fr ], addr, ctx->memidx);
1113 tcg_gen_mov_i32(REG(B11_8), addr);
1114 tcg_temp_free(addr);
1115 } else {
1116 TCGv addr;
1117 addr = tcg_temp_new_i32();
1118 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1119 tcg_gen_qemu_st32(cpu_fregs[FREG(B7_4)], addr, ctx->memidx);
1120 tcg_gen_mov_i32(REG(B11_8), addr);
1121 tcg_temp_free(addr);
1123 return;
1124 case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rm - FPSCR: Nothing */
1125 CHECK_FPU_ENABLED
1127 TCGv addr = tcg_temp_new_i32();
1128 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
1129 if (ctx->fpscr & FPSCR_SZ) {
1130 int fr = XREG(B11_8);
1131 tcg_gen_qemu_ld32u(cpu_fregs[fr ], addr, ctx->memidx);
1132 tcg_gen_addi_i32(addr, addr, 4);
1133 tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr, ctx->memidx);
1134 } else {
1135 tcg_gen_qemu_ld32u(cpu_fregs[FREG(B11_8)], addr, ctx->memidx);
1137 tcg_temp_free(addr);
1139 return;
1140 case 0xf007: /* fmov {F,D,X}Rn,@(R0,Rn) - FPSCR: Nothing */
1141 CHECK_FPU_ENABLED
1143 TCGv addr = tcg_temp_new();
1144 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
1145 if (ctx->fpscr & FPSCR_SZ) {
1146 int fr = XREG(B7_4);
1147 tcg_gen_qemu_ld32u(cpu_fregs[fr ], addr, ctx->memidx);
1148 tcg_gen_addi_i32(addr, addr, 4);
1149 tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr, ctx->memidx);
1150 } else {
1151 tcg_gen_qemu_st32(cpu_fregs[FREG(B7_4)], addr, ctx->memidx);
1153 tcg_temp_free(addr);
1155 return;
1156 case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1157 case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1158 case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1159 case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1160 case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1161 case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1163 CHECK_FPU_ENABLED
1164 if (ctx->fpscr & FPSCR_PR) {
1165 TCGv_i64 fp0, fp1;
1167 if (ctx->opcode & 0x0110)
1168 break; /* illegal instruction */
1169 fp0 = tcg_temp_new_i64();
1170 fp1 = tcg_temp_new_i64();
1171 gen_load_fpr64(fp0, DREG(B11_8));
1172 gen_load_fpr64(fp1, DREG(B7_4));
1173 switch (ctx->opcode & 0xf00f) {
1174 case 0xf000: /* fadd Rm,Rn */
1175 gen_helper_fadd_DT(fp0, fp0, fp1);
1176 break;
1177 case 0xf001: /* fsub Rm,Rn */
1178 gen_helper_fsub_DT(fp0, fp0, fp1);
1179 break;
1180 case 0xf002: /* fmul Rm,Rn */
1181 gen_helper_fmul_DT(fp0, fp0, fp1);
1182 break;
1183 case 0xf003: /* fdiv Rm,Rn */
1184 gen_helper_fdiv_DT(fp0, fp0, fp1);
1185 break;
1186 case 0xf004: /* fcmp/eq Rm,Rn */
1187 gen_helper_fcmp_eq_DT(fp0, fp1);
1188 return;
1189 case 0xf005: /* fcmp/gt Rm,Rn */
1190 gen_helper_fcmp_gt_DT(fp0, fp1);
1191 return;
1193 gen_store_fpr64(fp0, DREG(B11_8));
1194 tcg_temp_free_i64(fp0);
1195 tcg_temp_free_i64(fp1);
1196 } else {
1197 switch (ctx->opcode & 0xf00f) {
1198 case 0xf000: /* fadd Rm,Rn */
1199 gen_helper_fadd_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1200 break;
1201 case 0xf001: /* fsub Rm,Rn */
1202 gen_helper_fsub_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1203 break;
1204 case 0xf002: /* fmul Rm,Rn */
1205 gen_helper_fmul_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1206 break;
1207 case 0xf003: /* fdiv Rm,Rn */
1208 gen_helper_fdiv_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1209 break;
1210 case 0xf004: /* fcmp/eq Rm,Rn */
1211 gen_helper_fcmp_eq_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1212 return;
1213 case 0xf005: /* fcmp/gt Rm,Rn */
1214 gen_helper_fcmp_gt_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1215 return;
1219 return;
1220 case 0xf00e: /* fmac FR0,RM,Rn */
1222 CHECK_FPU_ENABLED
1223 if (ctx->fpscr & FPSCR_PR) {
1224 break; /* illegal instruction */
1225 } else {
1226 gen_helper_fmac_FT(cpu_fregs[FREG(B11_8)],
1227 cpu_fregs[FREG(0)], cpu_fregs[FREG(B7_4)], cpu_fregs[FREG(B11_8)]);
1228 return;
1233 switch (ctx->opcode & 0xff00) {
1234 case 0xc900: /* and #imm,R0 */
1235 tcg_gen_andi_i32(REG(0), REG(0), B7_0);
1236 return;
1237 case 0xcd00: /* and.b #imm,@(R0,GBR) */
1239 TCGv addr, val;
1240 addr = tcg_temp_new();
1241 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1242 val = tcg_temp_new();
1243 tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
1244 tcg_gen_andi_i32(val, val, B7_0);
1245 tcg_gen_qemu_st8(val, addr, ctx->memidx);
1246 tcg_temp_free(val);
1247 tcg_temp_free(addr);
1249 return;
1250 case 0x8b00: /* bf label */
1251 CHECK_NOT_DELAY_SLOT
1252 gen_conditional_jump(ctx, ctx->pc + 2,
1253 ctx->pc + 4 + B7_0s * 2);
1254 ctx->bstate = BS_BRANCH;
1255 return;
1256 case 0x8f00: /* bf/s label */
1257 CHECK_NOT_DELAY_SLOT
1258 gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 0);
1259 ctx->flags |= DELAY_SLOT_CONDITIONAL;
1260 return;
1261 case 0x8900: /* bt label */
1262 CHECK_NOT_DELAY_SLOT
1263 gen_conditional_jump(ctx, ctx->pc + 4 + B7_0s * 2,
1264 ctx->pc + 2);
1265 ctx->bstate = BS_BRANCH;
1266 return;
1267 case 0x8d00: /* bt/s label */
1268 CHECK_NOT_DELAY_SLOT
1269 gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 1);
1270 ctx->flags |= DELAY_SLOT_CONDITIONAL;
1271 return;
1272 case 0x8800: /* cmp/eq #imm,R0 */
1273 gen_cmp_imm(TCG_COND_EQ, REG(0), B7_0s);
1274 return;
1275 case 0xc400: /* mov.b @(disp,GBR),R0 */
1277 TCGv addr = tcg_temp_new();
1278 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1279 tcg_gen_qemu_ld8s(REG(0), addr, ctx->memidx);
1280 tcg_temp_free(addr);
1282 return;
1283 case 0xc500: /* mov.w @(disp,GBR),R0 */
1285 TCGv addr = tcg_temp_new();
1286 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1287 tcg_gen_qemu_ld16s(REG(0), addr, ctx->memidx);
1288 tcg_temp_free(addr);
1290 return;
1291 case 0xc600: /* mov.l @(disp,GBR),R0 */
1293 TCGv addr = tcg_temp_new();
1294 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1295 tcg_gen_qemu_ld32s(REG(0), addr, ctx->memidx);
1296 tcg_temp_free(addr);
1298 return;
1299 case 0xc000: /* mov.b R0,@(disp,GBR) */
1301 TCGv addr = tcg_temp_new();
1302 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1303 tcg_gen_qemu_st8(REG(0), addr, ctx->memidx);
1304 tcg_temp_free(addr);
1306 return;
1307 case 0xc100: /* mov.w R0,@(disp,GBR) */
1309 TCGv addr = tcg_temp_new();
1310 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1311 tcg_gen_qemu_st16(REG(0), addr, ctx->memidx);
1312 tcg_temp_free(addr);
1314 return;
1315 case 0xc200: /* mov.l R0,@(disp,GBR) */
1317 TCGv addr = tcg_temp_new();
1318 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1319 tcg_gen_qemu_st32(REG(0), addr, ctx->memidx);
1320 tcg_temp_free(addr);
1322 return;
1323 case 0x8000: /* mov.b R0,@(disp,Rn) */
1325 TCGv addr = tcg_temp_new();
1326 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1327 tcg_gen_qemu_st8(REG(0), addr, ctx->memidx);
1328 tcg_temp_free(addr);
1330 return;
1331 case 0x8100: /* mov.w R0,@(disp,Rn) */
1333 TCGv addr = tcg_temp_new();
1334 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1335 tcg_gen_qemu_st16(REG(0), addr, ctx->memidx);
1336 tcg_temp_free(addr);
1338 return;
1339 case 0x8400: /* mov.b @(disp,Rn),R0 */
1341 TCGv addr = tcg_temp_new();
1342 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1343 tcg_gen_qemu_ld8s(REG(0), addr, ctx->memidx);
1344 tcg_temp_free(addr);
1346 return;
1347 case 0x8500: /* mov.w @(disp,Rn),R0 */
1349 TCGv addr = tcg_temp_new();
1350 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1351 tcg_gen_qemu_ld16s(REG(0), addr, ctx->memidx);
1352 tcg_temp_free(addr);
1354 return;
1355 case 0xc700: /* mova @(disp,PC),R0 */
1356 tcg_gen_movi_i32(REG(0), ((ctx->pc & 0xfffffffc) + 4 + B7_0 * 4) & ~3);
1357 return;
1358 case 0xcb00: /* or #imm,R0 */
1359 tcg_gen_ori_i32(REG(0), REG(0), B7_0);
1360 return;
1361 case 0xcf00: /* or.b #imm,@(R0,GBR) */
1363 TCGv addr, val;
1364 addr = tcg_temp_new();
1365 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1366 val = tcg_temp_new();
1367 tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
1368 tcg_gen_ori_i32(val, val, B7_0);
1369 tcg_gen_qemu_st8(val, addr, ctx->memidx);
1370 tcg_temp_free(val);
1371 tcg_temp_free(addr);
1373 return;
1374 case 0xc300: /* trapa #imm */
1376 TCGv imm;
1377 CHECK_NOT_DELAY_SLOT
1378 tcg_gen_movi_i32(cpu_pc, ctx->pc);
1379 imm = tcg_const_i32(B7_0);
1380 gen_helper_trapa(imm);
1381 tcg_temp_free(imm);
1382 ctx->bstate = BS_BRANCH;
1384 return;
1385 case 0xc800: /* tst #imm,R0 */
1387 TCGv val = tcg_temp_new();
1388 tcg_gen_andi_i32(val, REG(0), B7_0);
1389 gen_cmp_imm(TCG_COND_EQ, val, 0);
1390 tcg_temp_free(val);
1392 return;
1393 case 0xcc00: /* tst.b #imm,@(R0,GBR) */
1395 TCGv val = tcg_temp_new();
1396 tcg_gen_add_i32(val, REG(0), cpu_gbr);
1397 tcg_gen_qemu_ld8u(val, val, ctx->memidx);
1398 tcg_gen_andi_i32(val, val, B7_0);
1399 gen_cmp_imm(TCG_COND_EQ, val, 0);
1400 tcg_temp_free(val);
1402 return;
1403 case 0xca00: /* xor #imm,R0 */
1404 tcg_gen_xori_i32(REG(0), REG(0), B7_0);
1405 return;
1406 case 0xce00: /* xor.b #imm,@(R0,GBR) */
1408 TCGv addr, val;
1409 addr = tcg_temp_new();
1410 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1411 val = tcg_temp_new();
1412 tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
1413 tcg_gen_xori_i32(val, val, B7_0);
1414 tcg_gen_qemu_st8(val, addr, ctx->memidx);
1415 tcg_temp_free(val);
1416 tcg_temp_free(addr);
1418 return;
1421 switch (ctx->opcode & 0xf08f) {
1422 case 0x408e: /* ldc Rm,Rn_BANK */
1423 CHECK_PRIVILEGED
1424 tcg_gen_mov_i32(ALTREG(B6_4), REG(B11_8));
1425 return;
1426 case 0x4087: /* ldc.l @Rm+,Rn_BANK */
1427 CHECK_PRIVILEGED
1428 tcg_gen_qemu_ld32s(ALTREG(B6_4), REG(B11_8), ctx->memidx);
1429 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1430 return;
1431 case 0x0082: /* stc Rm_BANK,Rn */
1432 CHECK_PRIVILEGED
1433 tcg_gen_mov_i32(REG(B11_8), ALTREG(B6_4));
1434 return;
1435 case 0x4083: /* stc.l Rm_BANK,@-Rn */
1436 CHECK_PRIVILEGED
1438 TCGv addr = tcg_temp_new();
1439 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1440 tcg_gen_qemu_st32(ALTREG(B6_4), addr, ctx->memidx);
1441 tcg_gen_mov_i32(REG(B11_8), addr);
1442 tcg_temp_free(addr);
1444 return;
1447 switch (ctx->opcode & 0xf0ff) {
1448 case 0x0023: /* braf Rn */
1449 CHECK_NOT_DELAY_SLOT
1450 tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->pc + 4);
1451 ctx->flags |= DELAY_SLOT;
1452 ctx->delayed_pc = (uint32_t) - 1;
1453 return;
1454 case 0x0003: /* bsrf Rn */
1455 CHECK_NOT_DELAY_SLOT
1456 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
1457 tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr);
1458 ctx->flags |= DELAY_SLOT;
1459 ctx->delayed_pc = (uint32_t) - 1;
1460 return;
1461 case 0x4015: /* cmp/pl Rn */
1462 gen_cmp_imm(TCG_COND_GT, REG(B11_8), 0);
1463 return;
1464 case 0x4011: /* cmp/pz Rn */
1465 gen_cmp_imm(TCG_COND_GE, REG(B11_8), 0);
1466 return;
1467 case 0x4010: /* dt Rn */
1468 tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 1);
1469 gen_cmp_imm(TCG_COND_EQ, REG(B11_8), 0);
1470 return;
1471 case 0x402b: /* jmp @Rn */
1472 CHECK_NOT_DELAY_SLOT
1473 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1474 ctx->flags |= DELAY_SLOT;
1475 ctx->delayed_pc = (uint32_t) - 1;
1476 return;
1477 case 0x400b: /* jsr @Rn */
1478 CHECK_NOT_DELAY_SLOT
1479 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
1480 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1481 ctx->flags |= DELAY_SLOT;
1482 ctx->delayed_pc = (uint32_t) - 1;
1483 return;
1484 case 0x400e: /* ldc Rm,SR */
1485 CHECK_PRIVILEGED
1486 tcg_gen_andi_i32(cpu_sr, REG(B11_8), 0x700083f3);
1487 ctx->bstate = BS_STOP;
1488 return;
1489 case 0x4007: /* ldc.l @Rm+,SR */
1490 CHECK_PRIVILEGED
1492 TCGv val = tcg_temp_new();
1493 tcg_gen_qemu_ld32s(val, REG(B11_8), ctx->memidx);
1494 tcg_gen_andi_i32(cpu_sr, val, 0x700083f3);
1495 tcg_temp_free(val);
1496 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1497 ctx->bstate = BS_STOP;
1499 return;
1500 case 0x0002: /* stc SR,Rn */
1501 CHECK_PRIVILEGED
1502 tcg_gen_mov_i32(REG(B11_8), cpu_sr);
1503 return;
1504 case 0x4003: /* stc SR,@-Rn */
1505 CHECK_PRIVILEGED
1507 TCGv addr = tcg_temp_new();
1508 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1509 tcg_gen_qemu_st32(cpu_sr, addr, ctx->memidx);
1510 tcg_gen_mov_i32(REG(B11_8), addr);
1511 tcg_temp_free(addr);
1513 return;
1514 #define LD(reg,ldnum,ldpnum,prechk) \
1515 case ldnum: \
1516 prechk \
1517 tcg_gen_mov_i32 (cpu_##reg, REG(B11_8)); \
1518 return; \
1519 case ldpnum: \
1520 prechk \
1521 tcg_gen_qemu_ld32s (cpu_##reg, REG(B11_8), ctx->memidx); \
1522 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); \
1523 return;
1524 #define ST(reg,stnum,stpnum,prechk) \
1525 case stnum: \
1526 prechk \
1527 tcg_gen_mov_i32 (REG(B11_8), cpu_##reg); \
1528 return; \
1529 case stpnum: \
1530 prechk \
1532 TCGv addr = tcg_temp_new(); \
1533 tcg_gen_subi_i32(addr, REG(B11_8), 4); \
1534 tcg_gen_qemu_st32 (cpu_##reg, addr, ctx->memidx); \
1535 tcg_gen_mov_i32(REG(B11_8), addr); \
1536 tcg_temp_free(addr); \
1538 return;
1539 #define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \
1540 LD(reg,ldnum,ldpnum,prechk) \
1541 ST(reg,stnum,stpnum,prechk)
1542 LDST(gbr, 0x401e, 0x4017, 0x0012, 0x4013, {})
1543 LDST(vbr, 0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED)
1544 LDST(ssr, 0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED)
1545 LDST(spc, 0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED)
1546 ST(sgr, 0x003a, 0x4032, CHECK_PRIVILEGED)
1547 LD(sgr, 0x403a, 0x4036, CHECK_PRIVILEGED if (!(ctx->features & SH_FEATURE_SH4A)) break;)
1548 LDST(dbr, 0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED)
1549 LDST(mach, 0x400a, 0x4006, 0x000a, 0x4002, {})
1550 LDST(macl, 0x401a, 0x4016, 0x001a, 0x4012, {})
1551 LDST(pr, 0x402a, 0x4026, 0x002a, 0x4022, {})
1552 LDST(fpul, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED})
1553 case 0x406a: /* lds Rm,FPSCR */
1554 CHECK_FPU_ENABLED
1555 gen_helper_ld_fpscr(REG(B11_8));
1556 ctx->bstate = BS_STOP;
1557 return;
1558 case 0x4066: /* lds.l @Rm+,FPSCR */
1559 CHECK_FPU_ENABLED
1561 TCGv addr = tcg_temp_new();
1562 tcg_gen_qemu_ld32s(addr, REG(B11_8), ctx->memidx);
1563 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1564 gen_helper_ld_fpscr(addr);
1565 tcg_temp_free(addr);
1566 ctx->bstate = BS_STOP;
1568 return;
1569 case 0x006a: /* sts FPSCR,Rn */
1570 CHECK_FPU_ENABLED
1571 tcg_gen_andi_i32(REG(B11_8), cpu_fpscr, 0x003fffff);
1572 return;
1573 case 0x4062: /* sts FPSCR,@-Rn */
1574 CHECK_FPU_ENABLED
1576 TCGv addr, val;
1577 val = tcg_temp_new();
1578 tcg_gen_andi_i32(val, cpu_fpscr, 0x003fffff);
1579 addr = tcg_temp_new();
1580 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1581 tcg_gen_qemu_st32(val, addr, ctx->memidx);
1582 tcg_gen_mov_i32(REG(B11_8), addr);
1583 tcg_temp_free(addr);
1584 tcg_temp_free(val);
1586 return;
1587 case 0x00c3: /* movca.l R0,@Rm */
1589 TCGv val = tcg_temp_new();
1590 tcg_gen_qemu_ld32u(val, REG(B11_8), ctx->memidx);
1591 gen_helper_movcal (REG(B11_8), val);
1592 tcg_gen_qemu_st32(REG(0), REG(B11_8), ctx->memidx);
1594 ctx->has_movcal = 1;
1595 return;
1596 case 0x40a9:
1597 /* MOVUA.L @Rm,R0 (Rm) -> R0
1598 Load non-boundary-aligned data */
1599 tcg_gen_qemu_ld32u(REG(0), REG(B11_8), ctx->memidx);
1600 return;
1601 case 0x40e9:
1602 /* MOVUA.L @Rm+,R0 (Rm) -> R0, Rm + 4 -> Rm
1603 Load non-boundary-aligned data */
1604 tcg_gen_qemu_ld32u(REG(0), REG(B11_8), ctx->memidx);
1605 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1606 return;
1607 case 0x0029: /* movt Rn */
1608 tcg_gen_andi_i32(REG(B11_8), cpu_sr, SR_T);
1609 return;
1610 case 0x0073:
1611 /* MOVCO.L
1612 LDST -> T
1613 If (T == 1) R0 -> (Rn)
1614 0 -> LDST
1616 if (ctx->features & SH_FEATURE_SH4A) {
1617 int label = gen_new_label();
1618 gen_clr_t();
1619 tcg_gen_or_i32(cpu_sr, cpu_sr, cpu_ldst);
1620 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ldst, 0, label);
1621 tcg_gen_qemu_st32(REG(0), REG(B11_8), ctx->memidx);
1622 gen_set_label(label);
1623 tcg_gen_movi_i32(cpu_ldst, 0);
1624 return;
1625 } else
1626 break;
1627 case 0x0063:
1628 /* MOVLI.L @Rm,R0
1629 1 -> LDST
1630 (Rm) -> R0
1631 When interrupt/exception
1632 occurred 0 -> LDST
1634 if (ctx->features & SH_FEATURE_SH4A) {
1635 tcg_gen_movi_i32(cpu_ldst, 0);
1636 tcg_gen_qemu_ld32s(REG(0), REG(B11_8), ctx->memidx);
1637 tcg_gen_movi_i32(cpu_ldst, 1);
1638 return;
1639 } else
1640 break;
1641 case 0x0093: /* ocbi @Rn */
1643 gen_helper_ocbi (REG(B11_8));
1645 return;
1646 case 0x00a3: /* ocbp @Rn */
1648 TCGv dummy = tcg_temp_new();
1649 tcg_gen_qemu_ld32s(dummy, REG(B11_8), ctx->memidx);
1650 tcg_temp_free(dummy);
1652 return;
1653 case 0x00b3: /* ocbwb @Rn */
1655 TCGv dummy = tcg_temp_new();
1656 tcg_gen_qemu_ld32s(dummy, REG(B11_8), ctx->memidx);
1657 tcg_temp_free(dummy);
1659 return;
1660 case 0x0083: /* pref @Rn */
1661 return;
1662 case 0x00d3: /* prefi @Rn */
1663 if (ctx->features & SH_FEATURE_SH4A)
1664 return;
1665 else
1666 break;
1667 case 0x00e3: /* icbi @Rn */
1668 if (ctx->features & SH_FEATURE_SH4A)
1669 return;
1670 else
1671 break;
1672 case 0x00ab: /* synco */
1673 if (ctx->features & SH_FEATURE_SH4A)
1674 return;
1675 else
1676 break;
1677 case 0x4024: /* rotcl Rn */
1679 TCGv tmp = tcg_temp_new();
1680 tcg_gen_mov_i32(tmp, cpu_sr);
1681 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 31);
1682 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1683 gen_copy_bit_i32(REG(B11_8), 0, tmp, 0);
1684 tcg_temp_free(tmp);
1686 return;
1687 case 0x4025: /* rotcr Rn */
1689 TCGv tmp = tcg_temp_new();
1690 tcg_gen_mov_i32(tmp, cpu_sr);
1691 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1692 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1693 gen_copy_bit_i32(REG(B11_8), 31, tmp, 0);
1694 tcg_temp_free(tmp);
1696 return;
1697 case 0x4004: /* rotl Rn */
1698 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 31);
1699 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1700 gen_copy_bit_i32(REG(B11_8), 0, cpu_sr, 0);
1701 return;
1702 case 0x4005: /* rotr Rn */
1703 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1704 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1705 gen_copy_bit_i32(REG(B11_8), 31, cpu_sr, 0);
1706 return;
1707 case 0x4000: /* shll Rn */
1708 case 0x4020: /* shal Rn */
1709 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 31);
1710 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1711 return;
1712 case 0x4021: /* shar Rn */
1713 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1714 tcg_gen_sari_i32(REG(B11_8), REG(B11_8), 1);
1715 return;
1716 case 0x4001: /* shlr Rn */
1717 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1718 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1719 return;
1720 case 0x4008: /* shll2 Rn */
1721 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 2);
1722 return;
1723 case 0x4018: /* shll8 Rn */
1724 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 8);
1725 return;
1726 case 0x4028: /* shll16 Rn */
1727 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 16);
1728 return;
1729 case 0x4009: /* shlr2 Rn */
1730 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 2);
1731 return;
1732 case 0x4019: /* shlr8 Rn */
1733 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 8);
1734 return;
1735 case 0x4029: /* shlr16 Rn */
1736 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 16);
1737 return;
1738 case 0x401b: /* tas.b @Rn */
1740 TCGv addr, val;
1741 addr = tcg_temp_local_new();
1742 tcg_gen_mov_i32(addr, REG(B11_8));
1743 val = tcg_temp_local_new();
1744 tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
1745 gen_cmp_imm(TCG_COND_EQ, val, 0);
1746 tcg_gen_ori_i32(val, val, 0x80);
1747 tcg_gen_qemu_st8(val, addr, ctx->memidx);
1748 tcg_temp_free(val);
1749 tcg_temp_free(addr);
1751 return;
1752 case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
1753 CHECK_FPU_ENABLED
1754 tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fpul);
1755 return;
1756 case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
1757 CHECK_FPU_ENABLED
1758 tcg_gen_mov_i32(cpu_fpul, cpu_fregs[FREG(B11_8)]);
1759 return;
1760 case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
1761 CHECK_FPU_ENABLED
1762 if (ctx->fpscr & FPSCR_PR) {
1763 TCGv_i64 fp;
1764 if (ctx->opcode & 0x0100)
1765 break; /* illegal instruction */
1766 fp = tcg_temp_new_i64();
1767 gen_helper_float_DT(fp, cpu_fpul);
1768 gen_store_fpr64(fp, DREG(B11_8));
1769 tcg_temp_free_i64(fp);
1771 else {
1772 gen_helper_float_FT(cpu_fregs[FREG(B11_8)], cpu_fpul);
1774 return;
1775 case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1776 CHECK_FPU_ENABLED
1777 if (ctx->fpscr & FPSCR_PR) {
1778 TCGv_i64 fp;
1779 if (ctx->opcode & 0x0100)
1780 break; /* illegal instruction */
1781 fp = tcg_temp_new_i64();
1782 gen_load_fpr64(fp, DREG(B11_8));
1783 gen_helper_ftrc_DT(cpu_fpul, fp);
1784 tcg_temp_free_i64(fp);
1786 else {
1787 gen_helper_ftrc_FT(cpu_fpul, cpu_fregs[FREG(B11_8)]);
1789 return;
1790 case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
1791 CHECK_FPU_ENABLED
1793 gen_helper_fneg_T(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
1795 return;
1796 case 0xf05d: /* fabs FRn/DRn */
1797 CHECK_FPU_ENABLED
1798 if (ctx->fpscr & FPSCR_PR) {
1799 if (ctx->opcode & 0x0100)
1800 break; /* illegal instruction */
1801 TCGv_i64 fp = tcg_temp_new_i64();
1802 gen_load_fpr64(fp, DREG(B11_8));
1803 gen_helper_fabs_DT(fp, fp);
1804 gen_store_fpr64(fp, DREG(B11_8));
1805 tcg_temp_free_i64(fp);
1806 } else {
1807 gen_helper_fabs_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
1809 return;
1810 case 0xf06d: /* fsqrt FRn */
1811 CHECK_FPU_ENABLED
1812 if (ctx->fpscr & FPSCR_PR) {
1813 if (ctx->opcode & 0x0100)
1814 break; /* illegal instruction */
1815 TCGv_i64 fp = tcg_temp_new_i64();
1816 gen_load_fpr64(fp, DREG(B11_8));
1817 gen_helper_fsqrt_DT(fp, fp);
1818 gen_store_fpr64(fp, DREG(B11_8));
1819 tcg_temp_free_i64(fp);
1820 } else {
1821 gen_helper_fsqrt_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
1823 return;
1824 case 0xf07d: /* fsrra FRn */
1825 CHECK_FPU_ENABLED
1826 break;
1827 case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
1828 CHECK_FPU_ENABLED
1829 if (!(ctx->fpscr & FPSCR_PR)) {
1830 tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0);
1832 return;
1833 case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
1834 CHECK_FPU_ENABLED
1835 if (!(ctx->fpscr & FPSCR_PR)) {
1836 tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0x3f800000);
1838 return;
1839 case 0xf0ad: /* fcnvsd FPUL,DRn */
1840 CHECK_FPU_ENABLED
1842 TCGv_i64 fp = tcg_temp_new_i64();
1843 gen_helper_fcnvsd_FT_DT(fp, cpu_fpul);
1844 gen_store_fpr64(fp, DREG(B11_8));
1845 tcg_temp_free_i64(fp);
1847 return;
1848 case 0xf0bd: /* fcnvds DRn,FPUL */
1849 CHECK_FPU_ENABLED
1851 TCGv_i64 fp = tcg_temp_new_i64();
1852 gen_load_fpr64(fp, DREG(B11_8));
1853 gen_helper_fcnvds_DT_FT(cpu_fpul, fp);
1854 tcg_temp_free_i64(fp);
1856 return;
1858 #if 0
1859 fprintf(stderr, "unknown instruction 0x%04x at pc 0x%08x\n",
1860 ctx->opcode, ctx->pc);
1861 fflush(stderr);
1862 #endif
1863 gen_helper_raise_illegal_instruction();
1864 ctx->bstate = BS_EXCP;
1867 static void decode_opc(DisasContext * ctx)
1869 uint32_t old_flags = ctx->flags;
1871 _decode_opc(ctx);
1873 if (old_flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
1874 if (ctx->flags & DELAY_SLOT_CLEARME) {
1875 gen_store_flags(0);
1876 } else {
1877 /* go out of the delay slot */
1878 uint32_t new_flags = ctx->flags;
1879 new_flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
1880 gen_store_flags(new_flags);
1882 ctx->flags = 0;
1883 ctx->bstate = BS_BRANCH;
1884 if (old_flags & DELAY_SLOT_CONDITIONAL) {
1885 gen_delayed_conditional_jump(ctx);
1886 } else if (old_flags & DELAY_SLOT) {
1887 gen_jump(ctx);
1892 /* go into a delay slot */
1893 if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL))
1894 gen_store_flags(ctx->flags);
1897 static inline void
1898 gen_intermediate_code_internal(CPUState * env, TranslationBlock * tb,
1899 int search_pc)
1901 DisasContext ctx;
1902 target_ulong pc_start;
1903 static uint16_t *gen_opc_end;
1904 CPUBreakpoint *bp;
1905 int i, ii;
1906 int num_insns;
1907 int max_insns;
1909 pc_start = tb->pc;
1910 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
1911 ctx.pc = pc_start;
1912 ctx.flags = (uint32_t)tb->flags;
1913 ctx.bstate = BS_NONE;
1914 ctx.sr = env->sr;
1915 ctx.fpscr = env->fpscr;
1916 ctx.memidx = (env->sr & SR_MD) == 0 ? 1 : 0;
1917 /* We don't know if the delayed pc came from a dynamic or static branch,
1918 so assume it is a dynamic branch. */
1919 ctx.delayed_pc = -1; /* use delayed pc from env pointer */
1920 ctx.tb = tb;
1921 ctx.singlestep_enabled = env->singlestep_enabled;
1922 ctx.features = env->features;
1923 ctx.has_movcal = (tb->flags & TB_FLAG_PENDING_MOVCA);
1925 ii = -1;
1926 num_insns = 0;
1927 max_insns = tb->cflags & CF_COUNT_MASK;
1928 if (max_insns == 0)
1929 max_insns = CF_COUNT_MASK;
1930 gen_icount_start();
1931 while (ctx.bstate == BS_NONE && gen_opc_ptr < gen_opc_end) {
1932 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
1933 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1934 if (ctx.pc == bp->pc) {
1935 /* We have hit a breakpoint - make sure PC is up-to-date */
1936 tcg_gen_movi_i32(cpu_pc, ctx.pc);
1937 gen_helper_debug();
1938 ctx.bstate = BS_EXCP;
1939 break;
1943 if (search_pc) {
1944 i = gen_opc_ptr - gen_opc_buf;
1945 if (ii < i) {
1946 ii++;
1947 while (ii < i)
1948 gen_opc_instr_start[ii++] = 0;
1950 gen_opc_pc[ii] = ctx.pc;
1951 gen_opc_hflags[ii] = ctx.flags;
1952 gen_opc_instr_start[ii] = 1;
1953 gen_opc_icount[ii] = num_insns;
1955 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
1956 gen_io_start();
1957 #if 0
1958 fprintf(stderr, "Loading opcode at address 0x%08x\n", ctx.pc);
1959 fflush(stderr);
1960 #endif
1961 ctx.opcode = lduw_code(ctx.pc);
1962 decode_opc(&ctx);
1963 num_insns++;
1964 ctx.pc += 2;
1965 if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0)
1966 break;
1967 if (env->singlestep_enabled)
1968 break;
1969 if (num_insns >= max_insns)
1970 break;
1971 if (singlestep)
1972 break;
1974 if (tb->cflags & CF_LAST_IO)
1975 gen_io_end();
1976 if (env->singlestep_enabled) {
1977 tcg_gen_movi_i32(cpu_pc, ctx.pc);
1978 gen_helper_debug();
1979 } else {
1980 switch (ctx.bstate) {
1981 case BS_STOP:
1982 /* gen_op_interrupt_restart(); */
1983 /* fall through */
1984 case BS_NONE:
1985 if (ctx.flags) {
1986 gen_store_flags(ctx.flags | DELAY_SLOT_CLEARME);
1988 gen_goto_tb(&ctx, 0, ctx.pc);
1989 break;
1990 case BS_EXCP:
1991 /* gen_op_interrupt_restart(); */
1992 tcg_gen_exit_tb(0);
1993 break;
1994 case BS_BRANCH:
1995 default:
1996 break;
2000 gen_icount_end(tb, num_insns);
2001 *gen_opc_ptr = INDEX_op_end;
2002 if (search_pc) {
2003 i = gen_opc_ptr - gen_opc_buf;
2004 ii++;
2005 while (ii <= i)
2006 gen_opc_instr_start[ii++] = 0;
2007 } else {
2008 tb->size = ctx.pc - pc_start;
2009 tb->icount = num_insns;
2012 #ifdef DEBUG_DISAS
2013 #ifdef SH4_DEBUG_DISAS
2014 qemu_log_mask(CPU_LOG_TB_IN_ASM, "\n");
2015 #endif
2016 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
2017 qemu_log("IN:\n"); /* , lookup_symbol(pc_start)); */
2018 log_target_disas(pc_start, ctx.pc - pc_start, 0);
2019 qemu_log("\n");
2021 #endif
2024 void gen_intermediate_code(CPUState * env, struct TranslationBlock *tb)
2026 gen_intermediate_code_internal(env, tb, 0);
2029 void gen_intermediate_code_pc(CPUState * env, struct TranslationBlock *tb)
2031 gen_intermediate_code_internal(env, tb, 1);
2034 void gen_pc_load(CPUState *env, TranslationBlock *tb,
2035 unsigned long searched_pc, int pc_pos, void *puc)
2037 env->pc = gen_opc_pc[pc_pos];
2038 env->flags = gen_opc_hflags[pc_pos];