4 * Copyright (c) 2019 Yoshinori Sato
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2 or later, as published by the Free Software Foundation.
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * You should have received a copy of the GNU General Public License along with
16 * this program. If not, see <http://www.gnu.org/licenses/>.
19 #include "qemu/osdep.h"
20 #include "qemu/bswap.h"
21 #include "qemu/qemu-print.h"
23 #include "exec/exec-all.h"
24 #include "tcg/tcg-op.h"
25 #include "exec/helper-proto.h"
26 #include "exec/helper-gen.h"
27 #include "exec/translator.h"
30 #define HELPER_H "helper.h"
31 #include "exec/helper-info.c.inc"
35 typedef struct DisasContext
{
36 DisasContextBase base
;
42 typedef struct DisasCompare
{
48 const char *rx_crname(uint8_t cr
)
50 static const char *cr_names
[] = {
51 "psw", "pc", "usp", "fpsw", "", "", "", "",
52 "bpsw", "bpc", "isp", "fintv", "intb", "", "", ""
54 if (cr
>= ARRAY_SIZE(cr_names
)) {
60 /* Target-specific values for dc->base.is_jmp. */
61 #define DISAS_JUMP DISAS_TARGET_0
62 #define DISAS_UPDATE DISAS_TARGET_1
63 #define DISAS_EXIT DISAS_TARGET_2
65 /* global register indexes */
66 static TCGv cpu_regs
[16];
67 static TCGv cpu_psw_o
, cpu_psw_s
, cpu_psw_z
, cpu_psw_c
;
68 static TCGv cpu_psw_i
, cpu_psw_pm
, cpu_psw_u
, cpu_psw_ipl
;
69 static TCGv cpu_usp
, cpu_fpsw
, cpu_bpsw
, cpu_bpc
, cpu_isp
;
70 static TCGv cpu_fintv
, cpu_intb
, cpu_pc
;
71 static TCGv_i64 cpu_acc
;
73 #define cpu_sp cpu_regs[0]
76 static uint32_t decode_load_bytes(DisasContext
*ctx
, uint32_t insn
,
80 uint8_t b
= translator_ldub(ctx
->env
, &ctx
->base
, ctx
->base
.pc_next
++);
81 insn
|= b
<< (32 - i
* 8);
86 static uint32_t li(DisasContext
*ctx
, int sz
)
90 CPURXState
*env
= ctx
->env
;
91 addr
= ctx
->base
.pc_next
;
95 ctx
->base
.pc_next
+= 1;
96 return (int8_t)translator_ldub(env
, &ctx
->base
, addr
);
98 ctx
->base
.pc_next
+= 2;
99 return (int16_t)translator_lduw(env
, &ctx
->base
, addr
);
101 ctx
->base
.pc_next
+= 3;
102 tmp
= (int8_t)translator_ldub(env
, &ctx
->base
, addr
+ 2);
104 tmp
|= translator_lduw(env
, &ctx
->base
, addr
);
107 ctx
->base
.pc_next
+= 4;
108 return translator_ldl(env
, &ctx
->base
, addr
);
110 g_assert_not_reached();
115 static int bdsp_s(DisasContext
*ctx
, int d
)
131 /* Include the auto-generated decoder. */
132 #include "decode-insns.c.inc"
134 void rx_cpu_dump_state(CPUState
*cs
, FILE *f
, int flags
)
136 CPURXState
*env
= cpu_env(cs
);
140 psw
= rx_cpu_pack_psw(env
);
141 qemu_fprintf(f
, "pc=0x%08x psw=0x%08x\n",
143 for (i
= 0; i
< 16; i
+= 4) {
144 qemu_fprintf(f
, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
145 i
, env
->regs
[i
], i
+ 1, env
->regs
[i
+ 1],
146 i
+ 2, env
->regs
[i
+ 2], i
+ 3, env
->regs
[i
+ 3]);
150 static void gen_goto_tb(DisasContext
*dc
, int n
, target_ulong dest
)
152 if (translator_use_goto_tb(&dc
->base
, dest
)) {
154 tcg_gen_movi_i32(cpu_pc
, dest
);
155 tcg_gen_exit_tb(dc
->base
.tb
, n
);
157 tcg_gen_movi_i32(cpu_pc
, dest
);
158 tcg_gen_lookup_and_goto_ptr();
160 dc
->base
.is_jmp
= DISAS_NORETURN
;
163 /* generic load wrapper */
164 static inline void rx_gen_ld(unsigned int size
, TCGv reg
, TCGv mem
)
166 tcg_gen_qemu_ld_i32(reg
, mem
, 0, size
| MO_SIGN
| MO_TE
);
169 /* unsigned load wrapper */
170 static inline void rx_gen_ldu(unsigned int size
, TCGv reg
, TCGv mem
)
172 tcg_gen_qemu_ld_i32(reg
, mem
, 0, size
| MO_TE
);
175 /* generic store wrapper */
176 static inline void rx_gen_st(unsigned int size
, TCGv reg
, TCGv mem
)
178 tcg_gen_qemu_st_i32(reg
, mem
, 0, size
| MO_TE
);
182 static inline void rx_gen_regindex(DisasContext
*ctx
, TCGv mem
,
183 int size
, int ri
, int rb
)
185 tcg_gen_shli_i32(mem
, cpu_regs
[ri
], size
);
186 tcg_gen_add_i32(mem
, mem
, cpu_regs
[rb
]);
190 static inline TCGv
rx_index_addr(DisasContext
*ctx
, TCGv mem
,
191 int ld
, int size
, int reg
)
197 return cpu_regs
[reg
];
199 dsp
= translator_ldub(ctx
->env
, &ctx
->base
, ctx
->base
.pc_next
) << size
;
200 tcg_gen_addi_i32(mem
, cpu_regs
[reg
], dsp
);
201 ctx
->base
.pc_next
+= 1;
204 dsp
= translator_lduw(ctx
->env
, &ctx
->base
, ctx
->base
.pc_next
) << size
;
205 tcg_gen_addi_i32(mem
, cpu_regs
[reg
], dsp
);
206 ctx
->base
.pc_next
+= 2;
209 g_assert_not_reached();
213 static inline MemOp
mi_to_mop(unsigned mi
)
215 static const MemOp mop
[5] = { MO_SB
, MO_SW
, MO_UL
, MO_UW
, MO_UB
};
216 tcg_debug_assert(mi
< 5);
220 /* load source operand */
221 static inline TCGv
rx_load_source(DisasContext
*ctx
, TCGv mem
,
222 int ld
, int mi
, int rs
)
228 addr
= rx_index_addr(ctx
, mem
, ld
, mop
& MO_SIZE
, rs
);
229 tcg_gen_qemu_ld_i32(mem
, addr
, 0, mop
| MO_TE
);
236 /* Processor mode check */
237 static int is_privileged(DisasContext
*ctx
, int is_exception
)
239 if (FIELD_EX32(ctx
->tb_flags
, PSW
, PM
)) {
241 gen_helper_raise_privilege_violation(tcg_env
);
249 /* generate QEMU condition */
250 static void psw_cond(DisasCompare
*dc
, uint32_t cond
)
252 tcg_debug_assert(cond
< 16);
255 dc
->cond
= TCG_COND_EQ
;
256 dc
->value
= cpu_psw_z
;
259 dc
->cond
= TCG_COND_NE
;
260 dc
->value
= cpu_psw_z
;
263 dc
->cond
= TCG_COND_NE
;
264 dc
->value
= cpu_psw_c
;
267 dc
->cond
= TCG_COND_EQ
;
268 dc
->value
= cpu_psw_c
;
270 case 4: /* gtu (C& ~Z) == 1 */
271 case 5: /* leu (C& ~Z) == 0 */
272 tcg_gen_setcondi_i32(TCG_COND_NE
, dc
->temp
, cpu_psw_z
, 0);
273 tcg_gen_and_i32(dc
->temp
, dc
->temp
, cpu_psw_c
);
274 dc
->cond
= (cond
== 4) ? TCG_COND_NE
: TCG_COND_EQ
;
275 dc
->value
= dc
->temp
;
277 case 6: /* pz (S == 0) */
278 dc
->cond
= TCG_COND_GE
;
279 dc
->value
= cpu_psw_s
;
281 case 7: /* n (S == 1) */
282 dc
->cond
= TCG_COND_LT
;
283 dc
->value
= cpu_psw_s
;
285 case 8: /* ge (S^O)==0 */
286 case 9: /* lt (S^O)==1 */
287 tcg_gen_xor_i32(dc
->temp
, cpu_psw_o
, cpu_psw_s
);
288 dc
->cond
= (cond
== 8) ? TCG_COND_GE
: TCG_COND_LT
;
289 dc
->value
= dc
->temp
;
291 case 10: /* gt ((S^O)|Z)==0 */
292 case 11: /* le ((S^O)|Z)==1 */
293 tcg_gen_xor_i32(dc
->temp
, cpu_psw_o
, cpu_psw_s
);
294 tcg_gen_sari_i32(dc
->temp
, dc
->temp
, 31);
295 tcg_gen_andc_i32(dc
->temp
, cpu_psw_z
, dc
->temp
);
296 dc
->cond
= (cond
== 10) ? TCG_COND_NE
: TCG_COND_EQ
;
297 dc
->value
= dc
->temp
;
300 dc
->cond
= TCG_COND_LT
;
301 dc
->value
= cpu_psw_o
;
304 dc
->cond
= TCG_COND_GE
;
305 dc
->value
= cpu_psw_o
;
307 case 14: /* always true */
308 dc
->cond
= TCG_COND_ALWAYS
;
309 dc
->value
= dc
->temp
;
311 case 15: /* always false */
312 dc
->cond
= TCG_COND_NEVER
;
313 dc
->value
= dc
->temp
;
318 static void move_from_cr(DisasContext
*ctx
, TCGv ret
, int cr
, uint32_t pc
)
322 gen_helper_pack_psw(ret
, tcg_env
);
325 tcg_gen_movi_i32(ret
, pc
);
328 if (FIELD_EX32(ctx
->tb_flags
, PSW
, U
)) {
329 tcg_gen_mov_i32(ret
, cpu_sp
);
331 tcg_gen_mov_i32(ret
, cpu_usp
);
335 tcg_gen_mov_i32(ret
, cpu_fpsw
);
338 tcg_gen_mov_i32(ret
, cpu_bpsw
);
341 tcg_gen_mov_i32(ret
, cpu_bpc
);
344 if (FIELD_EX32(ctx
->tb_flags
, PSW
, U
)) {
345 tcg_gen_mov_i32(ret
, cpu_isp
);
347 tcg_gen_mov_i32(ret
, cpu_sp
);
351 tcg_gen_mov_i32(ret
, cpu_fintv
);
354 tcg_gen_mov_i32(ret
, cpu_intb
);
357 qemu_log_mask(LOG_GUEST_ERROR
, "Unimplement control register %d", cr
);
358 /* Unimplement registers return 0 */
359 tcg_gen_movi_i32(ret
, 0);
364 static void move_to_cr(DisasContext
*ctx
, TCGv val
, int cr
)
366 if (cr
>= 8 && !is_privileged(ctx
, 0)) {
367 /* Some control registers can only be written in privileged mode. */
368 qemu_log_mask(LOG_GUEST_ERROR
,
369 "disallow control register write %s", rx_crname(cr
));
374 gen_helper_set_psw(tcg_env
, val
);
375 if (is_privileged(ctx
, 0)) {
376 /* PSW.{I,U} may be updated here. exit TB. */
377 ctx
->base
.is_jmp
= DISAS_UPDATE
;
380 /* case 1: to PC not supported */
382 if (FIELD_EX32(ctx
->tb_flags
, PSW
, U
)) {
383 tcg_gen_mov_i32(cpu_sp
, val
);
385 tcg_gen_mov_i32(cpu_usp
, val
);
389 gen_helper_set_fpsw(tcg_env
, val
);
392 tcg_gen_mov_i32(cpu_bpsw
, val
);
395 tcg_gen_mov_i32(cpu_bpc
, val
);
398 if (FIELD_EX32(ctx
->tb_flags
, PSW
, U
)) {
399 tcg_gen_mov_i32(cpu_isp
, val
);
401 tcg_gen_mov_i32(cpu_sp
, val
);
405 tcg_gen_mov_i32(cpu_fintv
, val
);
408 tcg_gen_mov_i32(cpu_intb
, val
);
411 qemu_log_mask(LOG_GUEST_ERROR
,
412 "Unimplement control register %d", cr
);
417 static void push(TCGv val
)
419 tcg_gen_subi_i32(cpu_sp
, cpu_sp
, 4);
420 rx_gen_st(MO_32
, val
, cpu_sp
);
423 static void pop(TCGv ret
)
425 rx_gen_ld(MO_32
, ret
, cpu_sp
);
426 tcg_gen_addi_i32(cpu_sp
, cpu_sp
, 4);
429 /* mov.<bwl> rs,dsp5[rd] */
430 static bool trans_MOV_rm(DisasContext
*ctx
, arg_MOV_rm
*a
)
433 mem
= tcg_temp_new();
434 tcg_gen_addi_i32(mem
, cpu_regs
[a
->rd
], a
->dsp
<< a
->sz
);
435 rx_gen_st(a
->sz
, cpu_regs
[a
->rs
], mem
);
439 /* mov.<bwl> dsp5[rs],rd */
440 static bool trans_MOV_mr(DisasContext
*ctx
, arg_MOV_mr
*a
)
443 mem
= tcg_temp_new();
444 tcg_gen_addi_i32(mem
, cpu_regs
[a
->rs
], a
->dsp
<< a
->sz
);
445 rx_gen_ld(a
->sz
, cpu_regs
[a
->rd
], mem
);
449 /* mov.l #uimm4,rd */
450 /* mov.l #uimm8,rd */
452 static bool trans_MOV_ir(DisasContext
*ctx
, arg_MOV_ir
*a
)
454 tcg_gen_movi_i32(cpu_regs
[a
->rd
], a
->imm
);
458 /* mov.<bwl> #uimm8,dsp[rd] */
459 /* mov.<bwl> #imm, dsp[rd] */
460 static bool trans_MOV_im(DisasContext
*ctx
, arg_MOV_im
*a
)
463 imm
= tcg_constant_i32(a
->imm
);
464 mem
= tcg_temp_new();
465 tcg_gen_addi_i32(mem
, cpu_regs
[a
->rd
], a
->dsp
<< a
->sz
);
466 rx_gen_st(a
->sz
, imm
, mem
);
470 /* mov.<bwl> [ri,rb],rd */
471 static bool trans_MOV_ar(DisasContext
*ctx
, arg_MOV_ar
*a
)
474 mem
= tcg_temp_new();
475 rx_gen_regindex(ctx
, mem
, a
->sz
, a
->ri
, a
->rb
);
476 rx_gen_ld(a
->sz
, cpu_regs
[a
->rd
], mem
);
480 /* mov.<bwl> rd,[ri,rb] */
481 static bool trans_MOV_ra(DisasContext
*ctx
, arg_MOV_ra
*a
)
484 mem
= tcg_temp_new();
485 rx_gen_regindex(ctx
, mem
, a
->sz
, a
->ri
, a
->rb
);
486 rx_gen_st(a
->sz
, cpu_regs
[a
->rs
], mem
);
490 /* mov.<bwl> dsp[rs],dsp[rd] */
491 /* mov.<bwl> rs,dsp[rd] */
492 /* mov.<bwl> dsp[rs],rd */
493 /* mov.<bwl> rs,rd */
494 static bool trans_MOV_mm(DisasContext
*ctx
, arg_MOV_mm
*a
)
498 if (a
->lds
== 3 && a
->ldd
== 3) {
499 /* mov.<bwl> rs,rd */
500 tcg_gen_ext_i32(cpu_regs
[a
->rd
], cpu_regs
[a
->rs
], a
->sz
| MO_SIGN
);
504 mem
= tcg_temp_new();
506 /* mov.<bwl> rs,dsp[rd] */
507 addr
= rx_index_addr(ctx
, mem
, a
->ldd
, a
->sz
, a
->rs
);
508 rx_gen_st(a
->sz
, cpu_regs
[a
->rd
], addr
);
509 } else if (a
->ldd
== 3) {
510 /* mov.<bwl> dsp[rs],rd */
511 addr
= rx_index_addr(ctx
, mem
, a
->lds
, a
->sz
, a
->rs
);
512 rx_gen_ld(a
->sz
, cpu_regs
[a
->rd
], addr
);
514 /* mov.<bwl> dsp[rs],dsp[rd] */
515 tmp
= tcg_temp_new();
516 addr
= rx_index_addr(ctx
, mem
, a
->lds
, a
->sz
, a
->rs
);
517 rx_gen_ld(a
->sz
, tmp
, addr
);
518 addr
= rx_index_addr(ctx
, mem
, a
->ldd
, a
->sz
, a
->rd
);
519 rx_gen_st(a
->sz
, tmp
, addr
);
524 /* mov.<bwl> rs,[rd+] */
525 /* mov.<bwl> rs,[-rd] */
526 static bool trans_MOV_rp(DisasContext
*ctx
, arg_MOV_rp
*a
)
529 val
= tcg_temp_new();
530 tcg_gen_mov_i32(val
, cpu_regs
[a
->rs
]);
532 tcg_gen_subi_i32(cpu_regs
[a
->rd
], cpu_regs
[a
->rd
], 1 << a
->sz
);
534 rx_gen_st(a
->sz
, val
, cpu_regs
[a
->rd
]);
536 tcg_gen_addi_i32(cpu_regs
[a
->rd
], cpu_regs
[a
->rd
], 1 << a
->sz
);
541 /* mov.<bwl> [rd+],rs */
542 /* mov.<bwl> [-rd],rs */
543 static bool trans_MOV_pr(DisasContext
*ctx
, arg_MOV_pr
*a
)
546 val
= tcg_temp_new();
548 tcg_gen_subi_i32(cpu_regs
[a
->rd
], cpu_regs
[a
->rd
], 1 << a
->sz
);
550 rx_gen_ld(a
->sz
, val
, cpu_regs
[a
->rd
]);
552 tcg_gen_addi_i32(cpu_regs
[a
->rd
], cpu_regs
[a
->rd
], 1 << a
->sz
);
554 tcg_gen_mov_i32(cpu_regs
[a
->rs
], val
);
558 /* movu.<bw> dsp5[rs],rd */
559 /* movu.<bw> dsp[rs],rd */
560 static bool trans_MOVU_mr(DisasContext
*ctx
, arg_MOVU_mr
*a
)
563 mem
= tcg_temp_new();
564 tcg_gen_addi_i32(mem
, cpu_regs
[a
->rs
], a
->dsp
<< a
->sz
);
565 rx_gen_ldu(a
->sz
, cpu_regs
[a
->rd
], mem
);
569 /* movu.<bw> rs,rd */
570 static bool trans_MOVU_rr(DisasContext
*ctx
, arg_MOVU_rr
*a
)
572 tcg_gen_ext_i32(cpu_regs
[a
->rd
], cpu_regs
[a
->rs
], a
->sz
);
576 /* movu.<bw> [ri,rb],rd */
577 static bool trans_MOVU_ar(DisasContext
*ctx
, arg_MOVU_ar
*a
)
580 mem
= tcg_temp_new();
581 rx_gen_regindex(ctx
, mem
, a
->sz
, a
->ri
, a
->rb
);
582 rx_gen_ldu(a
->sz
, cpu_regs
[a
->rd
], mem
);
586 /* movu.<bw> [rd+],rs */
587 /* mov.<bw> [-rd],rs */
588 static bool trans_MOVU_pr(DisasContext
*ctx
, arg_MOVU_pr
*a
)
591 val
= tcg_temp_new();
593 tcg_gen_subi_i32(cpu_regs
[a
->rd
], cpu_regs
[a
->rd
], 1 << a
->sz
);
595 rx_gen_ldu(a
->sz
, val
, cpu_regs
[a
->rd
]);
597 tcg_gen_addi_i32(cpu_regs
[a
->rd
], cpu_regs
[a
->rd
], 1 << a
->sz
);
599 tcg_gen_mov_i32(cpu_regs
[a
->rs
], val
);
605 static bool trans_POP(DisasContext
*ctx
, arg_POP
*a
)
607 /* mov.l [r0+], rd */
613 trans_MOV_pr(ctx
, &mov_a
);
618 static bool trans_POPC(DisasContext
*ctx
, arg_POPC
*a
)
621 val
= tcg_temp_new();
623 move_to_cr(ctx
, val
, a
->cr
);
628 static bool trans_POPM(DisasContext
*ctx
, arg_POPM
*a
)
631 if (a
->rd
== 0 || a
->rd
>= a
->rd2
) {
632 qemu_log_mask(LOG_GUEST_ERROR
,
633 "Invalid register ranges r%d-r%d", a
->rd
, a
->rd2
);
636 while (r
<= a
->rd2
&& r
< 16) {
644 static bool trans_PUSH_r(DisasContext
*ctx
, arg_PUSH_r
*a
)
647 val
= tcg_temp_new();
648 tcg_gen_mov_i32(val
, cpu_regs
[a
->rs
]);
649 tcg_gen_subi_i32(cpu_sp
, cpu_sp
, 4);
650 rx_gen_st(a
->sz
, val
, cpu_sp
);
654 /* push.<bwl> dsp[rs] */
655 static bool trans_PUSH_m(DisasContext
*ctx
, arg_PUSH_m
*a
)
658 mem
= tcg_temp_new();
659 val
= tcg_temp_new();
660 addr
= rx_index_addr(ctx
, mem
, a
->ld
, a
->sz
, a
->rs
);
661 rx_gen_ld(a
->sz
, val
, addr
);
662 tcg_gen_subi_i32(cpu_sp
, cpu_sp
, 4);
663 rx_gen_st(a
->sz
, val
, cpu_sp
);
668 static bool trans_PUSHC(DisasContext
*ctx
, arg_PUSHC
*a
)
671 val
= tcg_temp_new();
672 move_from_cr(ctx
, val
, a
->cr
, ctx
->pc
);
678 static bool trans_PUSHM(DisasContext
*ctx
, arg_PUSHM
*a
)
682 if (a
->rs
== 0 || a
->rs
>= a
->rs2
) {
683 qemu_log_mask(LOG_GUEST_ERROR
,
684 "Invalid register ranges r%d-r%d", a
->rs
, a
->rs2
);
687 while (r
>= a
->rs
&& r
>= 0) {
694 static bool trans_XCHG_rr(DisasContext
*ctx
, arg_XCHG_rr
*a
)
697 tmp
= tcg_temp_new();
698 tcg_gen_mov_i32(tmp
, cpu_regs
[a
->rs
]);
699 tcg_gen_mov_i32(cpu_regs
[a
->rs
], cpu_regs
[a
->rd
]);
700 tcg_gen_mov_i32(cpu_regs
[a
->rd
], tmp
);
704 /* xchg dsp[rs].<mi>,rd */
705 static bool trans_XCHG_mr(DisasContext
*ctx
, arg_XCHG_mr
*a
)
708 mem
= tcg_temp_new();
710 case 0: /* dsp[rs].b */
711 case 1: /* dsp[rs].w */
712 case 2: /* dsp[rs].l */
713 addr
= rx_index_addr(ctx
, mem
, a
->ld
, a
->mi
, a
->rs
);
715 case 3: /* dsp[rs].uw */
716 case 4: /* dsp[rs].ub */
717 addr
= rx_index_addr(ctx
, mem
, a
->ld
, 4 - a
->mi
, a
->rs
);
720 g_assert_not_reached();
722 tcg_gen_atomic_xchg_i32(cpu_regs
[a
->rd
], addr
, cpu_regs
[a
->rd
],
723 0, mi_to_mop(a
->mi
));
727 static inline void stcond(TCGCond cond
, int rd
, int imm
)
731 z
= tcg_constant_i32(0);
732 _imm
= tcg_constant_i32(imm
);
733 tcg_gen_movcond_i32(cond
, cpu_regs
[rd
], cpu_psw_z
, z
,
738 static bool trans_STZ(DisasContext
*ctx
, arg_STZ
*a
)
740 stcond(TCG_COND_EQ
, a
->rd
, a
->imm
);
745 static bool trans_STNZ(DisasContext
*ctx
, arg_STNZ
*a
)
747 stcond(TCG_COND_NE
, a
->rd
, a
->imm
);
752 /* sccnd.<bwl> dsp:[rd] */
753 static bool trans_SCCnd(DisasContext
*ctx
, arg_SCCnd
*a
)
757 dc
.temp
= tcg_temp_new();
758 psw_cond(&dc
, a
->cd
);
760 val
= tcg_temp_new();
761 mem
= tcg_temp_new();
762 tcg_gen_setcondi_i32(dc
.cond
, val
, dc
.value
, 0);
763 addr
= rx_index_addr(ctx
, mem
, a
->sz
, a
->ld
, a
->rd
);
764 rx_gen_st(a
->sz
, val
, addr
);
766 tcg_gen_setcondi_i32(dc
.cond
, cpu_regs
[a
->rd
], dc
.value
, 0);
772 static bool trans_RTSD_i(DisasContext
*ctx
, arg_RTSD_i
*a
)
774 tcg_gen_addi_i32(cpu_sp
, cpu_sp
, a
->imm
<< 2);
776 ctx
->base
.is_jmp
= DISAS_JUMP
;
780 /* rtsd #imm, rd-rd2 */
781 static bool trans_RTSD_irr(DisasContext
*ctx
, arg_RTSD_irr
*a
)
786 if (a
->rd2
>= a
->rd
) {
787 adj
= a
->imm
- (a
->rd2
- a
->rd
+ 1);
789 adj
= a
->imm
- (15 - a
->rd
+ 1);
792 tcg_gen_addi_i32(cpu_sp
, cpu_sp
, adj
<< 2);
794 while (dst
<= a
->rd2
&& dst
< 16) {
795 pop(cpu_regs
[dst
++]);
798 ctx
->base
.is_jmp
= DISAS_JUMP
;
802 typedef void (*op2fn
)(TCGv ret
, TCGv arg1
);
803 typedef void (*op3fn
)(TCGv ret
, TCGv arg1
, TCGv arg2
);
805 static inline void rx_gen_op_rr(op2fn opr
, int dst
, int src
)
807 opr(cpu_regs
[dst
], cpu_regs
[src
]);
810 static inline void rx_gen_op_rrr(op3fn opr
, int dst
, int src
, int src2
)
812 opr(cpu_regs
[dst
], cpu_regs
[src
], cpu_regs
[src2
]);
815 static inline void rx_gen_op_irr(op3fn opr
, int dst
, int src
, uint32_t src2
)
817 TCGv imm
= tcg_constant_i32(src2
);
818 opr(cpu_regs
[dst
], cpu_regs
[src
], imm
);
821 static inline void rx_gen_op_mr(op3fn opr
, DisasContext
*ctx
,
822 int dst
, int src
, int ld
, int mi
)
825 mem
= tcg_temp_new();
826 val
= rx_load_source(ctx
, mem
, ld
, mi
, src
);
827 opr(cpu_regs
[dst
], cpu_regs
[dst
], val
);
830 static void rx_and(TCGv ret
, TCGv arg1
, TCGv arg2
)
832 tcg_gen_and_i32(cpu_psw_s
, arg1
, arg2
);
833 tcg_gen_mov_i32(cpu_psw_z
, cpu_psw_s
);
834 tcg_gen_mov_i32(ret
, cpu_psw_s
);
837 /* and #uimm:4, rd */
839 static bool trans_AND_ir(DisasContext
*ctx
, arg_AND_ir
*a
)
841 rx_gen_op_irr(rx_and
, a
->rd
, a
->rd
, a
->imm
);
845 /* and dsp[rs], rd */
847 static bool trans_AND_mr(DisasContext
*ctx
, arg_AND_mr
*a
)
849 rx_gen_op_mr(rx_and
, ctx
, a
->rd
, a
->rs
, a
->ld
, a
->mi
);
854 static bool trans_AND_rrr(DisasContext
*ctx
, arg_AND_rrr
*a
)
856 rx_gen_op_rrr(rx_and
, a
->rd
, a
->rs
, a
->rs2
);
860 static void rx_or(TCGv ret
, TCGv arg1
, TCGv arg2
)
862 tcg_gen_or_i32(cpu_psw_s
, arg1
, arg2
);
863 tcg_gen_mov_i32(cpu_psw_z
, cpu_psw_s
);
864 tcg_gen_mov_i32(ret
, cpu_psw_s
);
869 static bool trans_OR_ir(DisasContext
*ctx
, arg_OR_ir
*a
)
871 rx_gen_op_irr(rx_or
, a
->rd
, a
->rd
, a
->imm
);
877 static bool trans_OR_mr(DisasContext
*ctx
, arg_OR_mr
*a
)
879 rx_gen_op_mr(rx_or
, ctx
, a
->rd
, a
->rs
, a
->ld
, a
->mi
);
884 static bool trans_OR_rrr(DisasContext
*ctx
, arg_OR_rrr
*a
)
886 rx_gen_op_rrr(rx_or
, a
->rd
, a
->rs
, a
->rs2
);
890 static void rx_xor(TCGv ret
, TCGv arg1
, TCGv arg2
)
892 tcg_gen_xor_i32(cpu_psw_s
, arg1
, arg2
);
893 tcg_gen_mov_i32(cpu_psw_z
, cpu_psw_s
);
894 tcg_gen_mov_i32(ret
, cpu_psw_s
);
898 static bool trans_XOR_ir(DisasContext
*ctx
, arg_XOR_ir
*a
)
900 rx_gen_op_irr(rx_xor
, a
->rd
, a
->rd
, a
->imm
);
904 /* xor dsp[rs], rd */
906 static bool trans_XOR_mr(DisasContext
*ctx
, arg_XOR_mr
*a
)
908 rx_gen_op_mr(rx_xor
, ctx
, a
->rd
, a
->rs
, a
->ld
, a
->mi
);
912 static void rx_tst(TCGv ret
, TCGv arg1
, TCGv arg2
)
914 tcg_gen_and_i32(cpu_psw_s
, arg1
, arg2
);
915 tcg_gen_mov_i32(cpu_psw_z
, cpu_psw_s
);
919 static bool trans_TST_ir(DisasContext
*ctx
, arg_TST_ir
*a
)
921 rx_gen_op_irr(rx_tst
, a
->rd
, a
->rd
, a
->imm
);
925 /* tst dsp[rs], rd */
927 static bool trans_TST_mr(DisasContext
*ctx
, arg_TST_mr
*a
)
929 rx_gen_op_mr(rx_tst
, ctx
, a
->rd
, a
->rs
, a
->ld
, a
->mi
);
933 static void rx_not(TCGv ret
, TCGv arg1
)
935 tcg_gen_not_i32(ret
, arg1
);
936 tcg_gen_mov_i32(cpu_psw_z
, ret
);
937 tcg_gen_mov_i32(cpu_psw_s
, ret
);
942 static bool trans_NOT_rr(DisasContext
*ctx
, arg_NOT_rr
*a
)
944 rx_gen_op_rr(rx_not
, a
->rd
, a
->rs
);
948 static void rx_neg(TCGv ret
, TCGv arg1
)
950 tcg_gen_setcondi_i32(TCG_COND_EQ
, cpu_psw_o
, arg1
, 0x80000000);
951 tcg_gen_neg_i32(ret
, arg1
);
952 tcg_gen_setcondi_i32(TCG_COND_EQ
, cpu_psw_c
, ret
, 0);
953 tcg_gen_mov_i32(cpu_psw_z
, ret
);
954 tcg_gen_mov_i32(cpu_psw_s
, ret
);
960 static bool trans_NEG_rr(DisasContext
*ctx
, arg_NEG_rr
*a
)
962 rx_gen_op_rr(rx_neg
, a
->rd
, a
->rs
);
966 /* ret = arg1 + arg2 + psw_c */
967 static void rx_adc(TCGv ret
, TCGv arg1
, TCGv arg2
)
969 TCGv z
= tcg_constant_i32(0);
970 tcg_gen_add2_i32(cpu_psw_s
, cpu_psw_c
, arg1
, z
, cpu_psw_c
, z
);
971 tcg_gen_add2_i32(cpu_psw_s
, cpu_psw_c
, cpu_psw_s
, cpu_psw_c
, arg2
, z
);
972 tcg_gen_xor_i32(cpu_psw_o
, cpu_psw_s
, arg1
);
973 tcg_gen_xor_i32(cpu_psw_z
, arg1
, arg2
);
974 tcg_gen_andc_i32(cpu_psw_o
, cpu_psw_o
, cpu_psw_z
);
975 tcg_gen_mov_i32(cpu_psw_z
, cpu_psw_s
);
976 tcg_gen_mov_i32(ret
, cpu_psw_s
);
980 static bool trans_ADC_ir(DisasContext
*ctx
, arg_ADC_ir
*a
)
982 rx_gen_op_irr(rx_adc
, a
->rd
, a
->rd
, a
->imm
);
987 static bool trans_ADC_rr(DisasContext
*ctx
, arg_ADC_rr
*a
)
989 rx_gen_op_rrr(rx_adc
, a
->rd
, a
->rd
, a
->rs
);
993 /* adc dsp[rs], rd */
994 static bool trans_ADC_mr(DisasContext
*ctx
, arg_ADC_mr
*a
)
1000 rx_gen_op_mr(rx_adc
, ctx
, a
->rd
, a
->rs
, a
->ld
, a
->mi
);
1004 /* ret = arg1 + arg2 */
1005 static void rx_add(TCGv ret
, TCGv arg1
, TCGv arg2
)
1007 TCGv z
= tcg_constant_i32(0);
1008 tcg_gen_add2_i32(cpu_psw_s
, cpu_psw_c
, arg1
, z
, arg2
, z
);
1009 tcg_gen_xor_i32(cpu_psw_o
, cpu_psw_s
, arg1
);
1010 tcg_gen_xor_i32(cpu_psw_z
, arg1
, arg2
);
1011 tcg_gen_andc_i32(cpu_psw_o
, cpu_psw_o
, cpu_psw_z
);
1012 tcg_gen_mov_i32(cpu_psw_z
, cpu_psw_s
);
1013 tcg_gen_mov_i32(ret
, cpu_psw_s
);
1016 /* add #uimm4, rd */
1017 /* add #imm, rs, rd */
1018 static bool trans_ADD_irr(DisasContext
*ctx
, arg_ADD_irr
*a
)
1020 rx_gen_op_irr(rx_add
, a
->rd
, a
->rs2
, a
->imm
);
1025 /* add dsp[rs], rd */
1026 static bool trans_ADD_mr(DisasContext
*ctx
, arg_ADD_mr
*a
)
1028 rx_gen_op_mr(rx_add
, ctx
, a
->rd
, a
->rs
, a
->ld
, a
->mi
);
1032 /* add rs, rs2, rd */
1033 static bool trans_ADD_rrr(DisasContext
*ctx
, arg_ADD_rrr
*a
)
1035 rx_gen_op_rrr(rx_add
, a
->rd
, a
->rs
, a
->rs2
);
1039 /* ret = arg1 - arg2 */
1040 static void rx_sub(TCGv ret
, TCGv arg1
, TCGv arg2
)
1042 tcg_gen_sub_i32(cpu_psw_s
, arg1
, arg2
);
1043 tcg_gen_setcond_i32(TCG_COND_GEU
, cpu_psw_c
, arg1
, arg2
);
1044 tcg_gen_xor_i32(cpu_psw_o
, cpu_psw_s
, arg1
);
1045 tcg_gen_xor_i32(cpu_psw_z
, arg1
, arg2
);
1046 tcg_gen_and_i32(cpu_psw_o
, cpu_psw_o
, cpu_psw_z
);
1047 tcg_gen_mov_i32(cpu_psw_z
, cpu_psw_s
);
1048 /* CMP not required return */
1050 tcg_gen_mov_i32(ret
, cpu_psw_s
);
1054 static void rx_cmp(TCGv dummy
, TCGv arg1
, TCGv arg2
)
1056 rx_sub(NULL
, arg1
, arg2
);
1059 /* ret = arg1 - arg2 - !psw_c */
1060 /* -> ret = arg1 + ~arg2 + psw_c */
1061 static void rx_sbb(TCGv ret
, TCGv arg1
, TCGv arg2
)
1064 temp
= tcg_temp_new();
1065 tcg_gen_not_i32(temp
, arg2
);
1066 rx_adc(ret
, arg1
, temp
);
1069 /* cmp #imm4, rs2 */
1070 /* cmp #imm8, rs2 */
1072 static bool trans_CMP_ir(DisasContext
*ctx
, arg_CMP_ir
*a
)
1074 rx_gen_op_irr(rx_cmp
, 0, a
->rs2
, a
->imm
);
1079 /* cmp dsp[rs], rs2 */
1080 static bool trans_CMP_mr(DisasContext
*ctx
, arg_CMP_mr
*a
)
1082 rx_gen_op_mr(rx_cmp
, ctx
, a
->rd
, a
->rs
, a
->ld
, a
->mi
);
1087 static bool trans_SUB_ir(DisasContext
*ctx
, arg_SUB_ir
*a
)
1089 rx_gen_op_irr(rx_sub
, a
->rd
, a
->rd
, a
->imm
);
1094 /* sub dsp[rs], rd */
1095 static bool trans_SUB_mr(DisasContext
*ctx
, arg_SUB_mr
*a
)
1097 rx_gen_op_mr(rx_sub
, ctx
, a
->rd
, a
->rs
, a
->ld
, a
->mi
);
1101 /* sub rs2, rs, rd */
1102 static bool trans_SUB_rrr(DisasContext
*ctx
, arg_SUB_rrr
*a
)
1104 rx_gen_op_rrr(rx_sub
, a
->rd
, a
->rs2
, a
->rs
);
1109 static bool trans_SBB_rr(DisasContext
*ctx
, arg_SBB_rr
*a
)
1111 rx_gen_op_rrr(rx_sbb
, a
->rd
, a
->rd
, a
->rs
);
1115 /* sbb dsp[rs], rd */
1116 static bool trans_SBB_mr(DisasContext
*ctx
, arg_SBB_mr
*a
)
1122 rx_gen_op_mr(rx_sbb
, ctx
, a
->rd
, a
->rs
, a
->ld
, a
->mi
);
1128 static bool trans_ABS_rr(DisasContext
*ctx
, arg_ABS_rr
*a
)
1130 rx_gen_op_rr(tcg_gen_abs_i32
, a
->rd
, a
->rs
);
1135 static bool trans_MAX_ir(DisasContext
*ctx
, arg_MAX_ir
*a
)
1137 rx_gen_op_irr(tcg_gen_smax_i32
, a
->rd
, a
->rd
, a
->imm
);
1142 /* max dsp[rs], rd */
1143 static bool trans_MAX_mr(DisasContext
*ctx
, arg_MAX_mr
*a
)
1145 rx_gen_op_mr(tcg_gen_smax_i32
, ctx
, a
->rd
, a
->rs
, a
->ld
, a
->mi
);
1150 static bool trans_MIN_ir(DisasContext
*ctx
, arg_MIN_ir
*a
)
1152 rx_gen_op_irr(tcg_gen_smin_i32
, a
->rd
, a
->rd
, a
->imm
);
1157 /* min dsp[rs], rd */
1158 static bool trans_MIN_mr(DisasContext
*ctx
, arg_MIN_mr
*a
)
1160 rx_gen_op_mr(tcg_gen_smin_i32
, ctx
, a
->rd
, a
->rs
, a
->ld
, a
->mi
);
1164 /* mul #uimm4, rd */
1166 static bool trans_MUL_ir(DisasContext
*ctx
, arg_MUL_ir
*a
)
1168 rx_gen_op_irr(tcg_gen_mul_i32
, a
->rd
, a
->rd
, a
->imm
);
1173 /* mul dsp[rs], rd */
1174 static bool trans_MUL_mr(DisasContext
*ctx
, arg_MUL_mr
*a
)
1176 rx_gen_op_mr(tcg_gen_mul_i32
, ctx
, a
->rd
, a
->rs
, a
->ld
, a
->mi
);
1180 /* mul rs, rs2, rd */
1181 static bool trans_MUL_rrr(DisasContext
*ctx
, arg_MUL_rrr
*a
)
1183 rx_gen_op_rrr(tcg_gen_mul_i32
, a
->rd
, a
->rs
, a
->rs2
);
1188 static bool trans_EMUL_ir(DisasContext
*ctx
, arg_EMUL_ir
*a
)
1190 TCGv imm
= tcg_constant_i32(a
->imm
);
1192 qemu_log_mask(LOG_GUEST_ERROR
, "rd too large %d", a
->rd
);
1194 tcg_gen_muls2_i32(cpu_regs
[a
->rd
], cpu_regs
[(a
->rd
+ 1) & 15],
1195 cpu_regs
[a
->rd
], imm
);
1200 /* emul dsp[rs], rd */
1201 static bool trans_EMUL_mr(DisasContext
*ctx
, arg_EMUL_mr
*a
)
1205 qemu_log_mask(LOG_GUEST_ERROR
, "rd too large %d", a
->rd
);
1207 mem
= tcg_temp_new();
1208 val
= rx_load_source(ctx
, mem
, a
->ld
, a
->mi
, a
->rs
);
1209 tcg_gen_muls2_i32(cpu_regs
[a
->rd
], cpu_regs
[(a
->rd
+ 1) & 15],
1210 cpu_regs
[a
->rd
], val
);
1214 /* emulu #imm, rd */
1215 static bool trans_EMULU_ir(DisasContext
*ctx
, arg_EMULU_ir
*a
)
1217 TCGv imm
= tcg_constant_i32(a
->imm
);
1219 qemu_log_mask(LOG_GUEST_ERROR
, "rd too large %d", a
->rd
);
1221 tcg_gen_mulu2_i32(cpu_regs
[a
->rd
], cpu_regs
[(a
->rd
+ 1) & 15],
1222 cpu_regs
[a
->rd
], imm
);
1227 /* emulu dsp[rs], rd */
1228 static bool trans_EMULU_mr(DisasContext
*ctx
, arg_EMULU_mr
*a
)
1232 qemu_log_mask(LOG_GUEST_ERROR
, "rd too large %d", a
->rd
);
1234 mem
= tcg_temp_new();
1235 val
= rx_load_source(ctx
, mem
, a
->ld
, a
->mi
, a
->rs
);
1236 tcg_gen_mulu2_i32(cpu_regs
[a
->rd
], cpu_regs
[(a
->rd
+ 1) & 15],
1237 cpu_regs
[a
->rd
], val
);
1241 static void rx_div(TCGv ret
, TCGv arg1
, TCGv arg2
)
1243 gen_helper_div(ret
, tcg_env
, arg1
, arg2
);
1246 static void rx_divu(TCGv ret
, TCGv arg1
, TCGv arg2
)
1248 gen_helper_divu(ret
, tcg_env
, arg1
, arg2
);
1252 static bool trans_DIV_ir(DisasContext
*ctx
, arg_DIV_ir
*a
)
1254 rx_gen_op_irr(rx_div
, a
->rd
, a
->rd
, a
->imm
);
1259 /* div dsp[rs], rd */
1260 static bool trans_DIV_mr(DisasContext
*ctx
, arg_DIV_mr
*a
)
1262 rx_gen_op_mr(rx_div
, ctx
, a
->rd
, a
->rs
, a
->ld
, a
->mi
);
1267 static bool trans_DIVU_ir(DisasContext
*ctx
, arg_DIVU_ir
*a
)
1269 rx_gen_op_irr(rx_divu
, a
->rd
, a
->rd
, a
->imm
);
1274 /* divu dsp[rs], rd */
1275 static bool trans_DIVU_mr(DisasContext
*ctx
, arg_DIVU_mr
*a
)
1277 rx_gen_op_mr(rx_divu
, ctx
, a
->rd
, a
->rs
, a
->ld
, a
->mi
);
1282 /* shll #imm:5, rd */
1283 /* shll #imm:5, rs2, rd */
1284 static bool trans_SHLL_irr(DisasContext
*ctx
, arg_SHLL_irr
*a
)
1287 tmp
= tcg_temp_new();
1289 tcg_gen_sari_i32(cpu_psw_c
, cpu_regs
[a
->rs2
], 32 - a
->imm
);
1290 tcg_gen_shli_i32(cpu_regs
[a
->rd
], cpu_regs
[a
->rs2
], a
->imm
);
1291 tcg_gen_setcondi_i32(TCG_COND_EQ
, cpu_psw_o
, cpu_psw_c
, 0);
1292 tcg_gen_setcondi_i32(TCG_COND_EQ
, tmp
, cpu_psw_c
, 0xffffffff);
1293 tcg_gen_or_i32(cpu_psw_o
, cpu_psw_o
, tmp
);
1294 tcg_gen_setcondi_i32(TCG_COND_NE
, cpu_psw_c
, cpu_psw_c
, 0);
1296 tcg_gen_mov_i32(cpu_regs
[a
->rd
], cpu_regs
[a
->rs2
]);
1297 tcg_gen_movi_i32(cpu_psw_c
, 0);
1298 tcg_gen_movi_i32(cpu_psw_o
, 0);
1300 tcg_gen_mov_i32(cpu_psw_z
, cpu_regs
[a
->rd
]);
1301 tcg_gen_mov_i32(cpu_psw_s
, cpu_regs
[a
->rd
]);
1306 static bool trans_SHLL_rr(DisasContext
*ctx
, arg_SHLL_rr
*a
)
1308 TCGLabel
*noshift
, *done
;
1311 noshift
= gen_new_label();
1312 done
= gen_new_label();
1313 /* if (cpu_regs[a->rs]) { */
1314 tcg_gen_brcondi_i32(TCG_COND_EQ
, cpu_regs
[a
->rs
], 0, noshift
);
1315 count
= tcg_temp_new();
1316 tmp
= tcg_temp_new();
1317 tcg_gen_andi_i32(tmp
, cpu_regs
[a
->rs
], 31);
1318 tcg_gen_sub_i32(count
, tcg_constant_i32(32), tmp
);
1319 tcg_gen_sar_i32(cpu_psw_c
, cpu_regs
[a
->rd
], count
);
1320 tcg_gen_shl_i32(cpu_regs
[a
->rd
], cpu_regs
[a
->rd
], tmp
);
1321 tcg_gen_setcondi_i32(TCG_COND_EQ
, cpu_psw_o
, cpu_psw_c
, 0);
1322 tcg_gen_setcondi_i32(TCG_COND_EQ
, tmp
, cpu_psw_c
, 0xffffffff);
1323 tcg_gen_or_i32(cpu_psw_o
, cpu_psw_o
, tmp
);
1324 tcg_gen_setcondi_i32(TCG_COND_NE
, cpu_psw_c
, cpu_psw_c
, 0);
1327 gen_set_label(noshift
);
1328 tcg_gen_movi_i32(cpu_psw_c
, 0);
1329 tcg_gen_movi_i32(cpu_psw_o
, 0);
1331 gen_set_label(done
);
1332 tcg_gen_mov_i32(cpu_psw_z
, cpu_regs
[a
->rd
]);
1333 tcg_gen_mov_i32(cpu_psw_s
, cpu_regs
[a
->rd
]);
1337 static inline void shiftr_imm(uint32_t rd
, uint32_t rs
, uint32_t imm
,
1340 static void (* const gen_sXri
[])(TCGv ret
, TCGv arg1
, int arg2
) = {
1341 tcg_gen_shri_i32
, tcg_gen_sari_i32
,
1343 tcg_debug_assert(alith
< 2);
1345 gen_sXri
[alith
](cpu_regs
[rd
], cpu_regs
[rs
], imm
- 1);
1346 tcg_gen_andi_i32(cpu_psw_c
, cpu_regs
[rd
], 0x00000001);
1347 gen_sXri
[alith
](cpu_regs
[rd
], cpu_regs
[rd
], 1);
1349 tcg_gen_mov_i32(cpu_regs
[rd
], cpu_regs
[rs
]);
1350 tcg_gen_movi_i32(cpu_psw_c
, 0);
1352 tcg_gen_movi_i32(cpu_psw_o
, 0);
1353 tcg_gen_mov_i32(cpu_psw_z
, cpu_regs
[rd
]);
1354 tcg_gen_mov_i32(cpu_psw_s
, cpu_regs
[rd
]);
1357 static inline void shiftr_reg(uint32_t rd
, uint32_t rs
, unsigned int alith
)
1359 TCGLabel
*noshift
, *done
;
1361 static void (* const gen_sXri
[])(TCGv ret
, TCGv arg1
, int arg2
) = {
1362 tcg_gen_shri_i32
, tcg_gen_sari_i32
,
1364 static void (* const gen_sXr
[])(TCGv ret
, TCGv arg1
, TCGv arg2
) = {
1365 tcg_gen_shr_i32
, tcg_gen_sar_i32
,
1367 tcg_debug_assert(alith
< 2);
1368 noshift
= gen_new_label();
1369 done
= gen_new_label();
1370 count
= tcg_temp_new();
1371 /* if (cpu_regs[rs]) { */
1372 tcg_gen_brcondi_i32(TCG_COND_EQ
, cpu_regs
[rs
], 0, noshift
);
1373 tcg_gen_andi_i32(count
, cpu_regs
[rs
], 31);
1374 tcg_gen_subi_i32(count
, count
, 1);
1375 gen_sXr
[alith
](cpu_regs
[rd
], cpu_regs
[rd
], count
);
1376 tcg_gen_andi_i32(cpu_psw_c
, cpu_regs
[rd
], 0x00000001);
1377 gen_sXri
[alith
](cpu_regs
[rd
], cpu_regs
[rd
], 1);
1380 gen_set_label(noshift
);
1381 tcg_gen_movi_i32(cpu_psw_c
, 0);
1383 gen_set_label(done
);
1384 tcg_gen_movi_i32(cpu_psw_o
, 0);
1385 tcg_gen_mov_i32(cpu_psw_z
, cpu_regs
[rd
]);
1386 tcg_gen_mov_i32(cpu_psw_s
, cpu_regs
[rd
]);
1389 /* shar #imm:5, rd */
1390 /* shar #imm:5, rs2, rd */
1391 static bool trans_SHAR_irr(DisasContext
*ctx
, arg_SHAR_irr
*a
)
1393 shiftr_imm(a
->rd
, a
->rs2
, a
->imm
, 1);
1398 static bool trans_SHAR_rr(DisasContext
*ctx
, arg_SHAR_rr
*a
)
1400 shiftr_reg(a
->rd
, a
->rs
, 1);
1404 /* shlr #imm:5, rd */
1405 /* shlr #imm:5, rs2, rd */
1406 static bool trans_SHLR_irr(DisasContext
*ctx
, arg_SHLR_irr
*a
)
1408 shiftr_imm(a
->rd
, a
->rs2
, a
->imm
, 0);
1413 static bool trans_SHLR_rr(DisasContext
*ctx
, arg_SHLR_rr
*a
)
1415 shiftr_reg(a
->rd
, a
->rs
, 0);
1420 static bool trans_ROLC(DisasContext
*ctx
, arg_ROLC
*a
)
1423 tmp
= tcg_temp_new();
1424 tcg_gen_shri_i32(tmp
, cpu_regs
[a
->rd
], 31);
1425 tcg_gen_shli_i32(cpu_regs
[a
->rd
], cpu_regs
[a
->rd
], 1);
1426 tcg_gen_or_i32(cpu_regs
[a
->rd
], cpu_regs
[a
->rd
], cpu_psw_c
);
1427 tcg_gen_mov_i32(cpu_psw_c
, tmp
);
1428 tcg_gen_mov_i32(cpu_psw_z
, cpu_regs
[a
->rd
]);
1429 tcg_gen_mov_i32(cpu_psw_s
, cpu_regs
[a
->rd
]);
/* rorc rd */
/* Rotate right one bit through the carry flag: old bit0 -> C, old C -> bit31. */
static bool trans_RORC(DisasContext *ctx, arg_RORC *a)
{
    TCGv tmp;
    tmp = tcg_temp_new();
    /* Save the outgoing low bit before the shift. */
    tcg_gen_andi_i32(tmp, cpu_regs[a->rd], 0x00000001);
    tcg_gen_shri_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1);
    /* Move the incoming carry (0 or 1) into bit31 and merge it. */
    tcg_gen_shli_i32(cpu_psw_c, cpu_psw_c, 31);
    tcg_gen_or_i32(cpu_regs[a->rd], cpu_regs[a->rd], cpu_psw_c);
    tcg_gen_mov_i32(cpu_psw_c, tmp);
    /* Z and S are lazily derived from the result value. */
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[a->rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[a->rd]);
    return true;
}
/* Rotate direction and operand-kind selectors shared by the ROTL/ROTR fronts. */
enum {ROTR = 0, ROTL = 1};
enum {ROT_IMM = 0, ROT_REG = 1};

/*
 * Common body for rotl/rotr.
 * ir:  ROT_IMM (src is an immediate count) or ROT_REG (src is a register index)
 * dir: ROTL or ROTR
 * After the rotate, C receives the bit that wrapped around
 * (bit0 of the result for ROTL, bit31 for ROTR); Z/S are lazily
 * derived from the result value.
 */
static inline void rx_rot(int ir, int dir, int rd, int src)
{
    switch (dir) {
    case ROTL:
        if (ir == ROT_IMM) {
            tcg_gen_rotli_i32(cpu_regs[rd], cpu_regs[rd], src);
        } else {
            tcg_gen_rotl_i32(cpu_regs[rd], cpu_regs[rd], cpu_regs[src]);
        }
        tcg_gen_andi_i32(cpu_psw_c, cpu_regs[rd], 0x00000001);
        break;
    case ROTR:
        if (ir == ROT_IMM) {
            tcg_gen_rotri_i32(cpu_regs[rd], cpu_regs[rd], src);
        } else {
            tcg_gen_rotr_i32(cpu_regs[rd], cpu_regs[rd], cpu_regs[src]);
        }
        tcg_gen_shri_i32(cpu_psw_c, cpu_regs[rd], 31);
        break;
    }
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[rd]);
}
/* rotl #imm, rd */
static bool trans_ROTL_ir(DisasContext *ctx, arg_ROTL_ir *a)
{
    rx_rot(ROT_IMM, ROTL, a->rd, a->imm);
    return true;
}
/* rotl rs, rd */
static bool trans_ROTL_rr(DisasContext *ctx, arg_ROTL_rr *a)
{
    rx_rot(ROT_REG, ROTL, a->rd, a->rs);
    return true;
}
/* rotr #imm, rd */
static bool trans_ROTR_ir(DisasContext *ctx, arg_ROTR_ir *a)
{
    rx_rot(ROT_IMM, ROTR, a->rd, a->imm);
    return true;
}
/* rotr rs, rd */
static bool trans_ROTR_rr(DisasContext *ctx, arg_ROTR_rr *a)
{
    rx_rot(ROT_REG, ROTR, a->rd, a->rs);
    return true;
}
/* revl rs, rd */
/* Full 32-bit byte swap (endianness reversal). */
static bool trans_REVL(DisasContext *ctx, arg_REVL *a)
{
    tcg_gen_bswap32_i32(cpu_regs[a->rd], cpu_regs[a->rs]);
    return true;
}
/* revw rs, rd */
/* Byte swap within each 16-bit half: ABCD -> BADC. */
static bool trans_REVW(DisasContext *ctx, arg_REVW *a)
{
    TCGv tmp;
    tmp = tcg_temp_new();
    /* Low bytes of each half-word, moved up one byte... */
    tcg_gen_andi_i32(tmp, cpu_regs[a->rs], 0x00ff00ff);
    tcg_gen_shli_i32(tmp, tmp, 8);
    /* ...merged with the high bytes moved down one byte. */
    tcg_gen_shri_i32(cpu_regs[a->rd], cpu_regs[a->rs], 8);
    tcg_gen_andi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 0x00ff00ff);
    tcg_gen_or_i32(cpu_regs[a->rd], cpu_regs[a->rd], tmp);
    return true;
}
/* conditional branch helper */
/*
 * Emit a conditional branch to ctx->pc + dst for condition code cd.
 * cd 0..13 are real conditions (evaluated via psw_cond); cd 14 is
 * "always taken" and cd 15 is "never taken" (emits nothing).
 * NOTE(review): switch-case skeleton reconstructed from the visible
 * statements — verify against upstream before relying on exact labels.
 */
static void rx_bcnd_main(DisasContext *ctx, int cd, int dst)
{
    DisasCompare dc;
    TCGLabel *t, *done;

    switch (cd) {
    case 0 ... 13:
        dc.temp = tcg_temp_new();
        psw_cond(&dc, cd);
        t = gen_new_label();
        done = gen_new_label();
        /* Condition false -> fall through to the next insn. */
        tcg_gen_brcondi_i32(dc.cond, dc.value, 0, t);
        gen_goto_tb(ctx, 0, ctx->base.pc_next);
        tcg_gen_br(done);
        gen_set_label(t);
        /* Condition true -> branch target is PC-relative. */
        gen_goto_tb(ctx, 1, ctx->pc + dst);
        gen_set_label(done);
        break;
    case 14:
        /* always true case */
        gen_goto_tb(ctx, 0, ctx->pc + dst);
        break;
    case 15:
        /* always false case */
        /* Nothing to do. */
        break;
    }
}
/* beq dsp:3 / bne dsp:3 */
/* beq dsp:8 / bne dsp:8 */
/* bc dsp:8 / bnc dsp:8 */
/* bgtu dsp:8 / bleu dsp:8 */
/* bpz dsp:8 / bn dsp:8 */
/* bge dsp:8 / blt dsp:8 */
/* bgt dsp:8 / ble dsp:8 */
/* bo dsp:8 / bno dsp:8 */
/* beq dsp:16 / bne dsp:16 */
/* All conditional branch encodings funnel into rx_bcnd_main. */
static bool trans_BCnd(DisasContext *ctx, arg_BCnd *a)
{
    rx_bcnd_main(ctx, a->cd, a->dsp);
    return true;
}
/* bra dsp:3 / bra dsp:8 / bra dsp:16 / bra dsp:24 */
/* Unconditional branch: condition code 14 is "always true". */
static bool trans_BRA(DisasContext *ctx, arg_BRA *a)
{
    rx_bcnd_main(ctx, 14, a->dsp);
    return true;
}
/* bra rs */
/* Register-relative branch: target = rd + current insn PC; dynamic jump. */
static bool trans_BRA_l(DisasContext *ctx, arg_BRA_l *a)
{
    tcg_gen_addi_i32(cpu_pc, cpu_regs[a->rd], ctx->pc);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}
/*
 * Push the return address (address of the next instruction) onto the stack.
 * NOTE(review): the push() call was lost in extraction and is reconstructed
 * from the surrounding call/return helpers — confirm against upstream.
 */
static inline void rx_save_pc(DisasContext *ctx)
{
    TCGv pc = tcg_constant_i32(ctx->base.pc_next);
    push(pc);
}
/* jmp rs */
/* Indirect jump through a register. */
static bool trans_JMP(DisasContext *ctx, arg_JMP *a)
{
    tcg_gen_mov_i32(cpu_pc, cpu_regs[a->rs]);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}
/* jsr rs */
/* Indirect call: save the return address, then jump through rs. */
static bool trans_JSR(DisasContext *ctx, arg_JSR *a)
{
    rx_save_pc(ctx);
    tcg_gen_mov_i32(cpu_pc, cpu_regs[a->rs]);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}
/* bsr dsp:16 / bsr dsp:24 */
/* PC-relative call: save the return address, then branch unconditionally. */
static bool trans_BSR(DisasContext *ctx, arg_BSR *a)
{
    rx_save_pc(ctx);
    rx_bcnd_main(ctx, 14, a->dsp);
    return true;
}
/* bsr rs */
/* Register-relative call: save return address, target = rd + insn PC. */
static bool trans_BSR_l(DisasContext *ctx, arg_BSR_l *a)
{
    rx_save_pc(ctx);
    tcg_gen_addi_i32(cpu_pc, cpu_regs[a->rd], ctx->pc);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}
/* rts */
/* Return from subroutine: pop the return address into PC.
 * NOTE(review): pop(cpu_pc) reconstructed from context — confirm upstream. */
static bool trans_RTS(DisasContext *ctx, arg_RTS *a)
{
    pop(cpu_pc);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}
/* nop */
static bool trans_NOP(DisasContext *ctx, arg_NOP *a)
{
    return true;
}
/* scmpu */
/* String compare; implemented entirely in the scmpu helper. */
static bool trans_SCMPU(DisasContext *ctx, arg_SCMPU *a)
{
    gen_helper_scmpu(tcg_env);
    return true;
}
/* smovu */
/* String move; implemented entirely in the smovu helper. */
static bool trans_SMOVU(DisasContext *ctx, arg_SMOVU *a)
{
    gen_helper_smovu(tcg_env);
    return true;
}
/* smovf */
/* String move (forward variant); implemented in the smovf helper. */
static bool trans_SMOVF(DisasContext *ctx, arg_SMOVF *a)
{
    gen_helper_smovf(tcg_env);
    return true;
}
/* smovb */
/* String move (backward variant); implemented in the smovb helper. */
static bool trans_SMOVB(DisasContext *ctx, arg_SMOVB *a)
{
    gen_helper_smovb(tcg_env);
    return true;
}
/* Emit a sized string-op helper call; expects `a->sz` in the enclosing scope. */
#define STRING(op)                              \
    do {                                        \
        TCGv size = tcg_constant_i32(a->sz);    \
        gen_helper_##op(tcg_env, size);         \
    } while (0)
/* suntil.<bwl> */
static bool trans_SUNTIL(DisasContext *ctx, arg_SUNTIL *a)
{
    STRING(suntil);
    return true;
}
/* swhile.<bwl> */
static bool trans_SWHILE(DisasContext *ctx, arg_SWHILE *a)
{
    STRING(swhile);
    return true;
}
/* sstr.<bwl> */
static bool trans_SSTR(DisasContext *ctx, arg_SSTR *a)
{
    STRING(sstr);
    return true;
}
/* rmpa.<bwl> */
static bool trans_RMPA(DisasContext *ctx, arg_RMPA *a)
{
    STRING(rmpa);
    return true;
}
/*
 * Multiply the upper 16-bit halves of two registers (sign-extended)
 * and place the 48-bit-aligned product into ret: (rs>>16)*(rs2>>16) << 16.
 * Used by MULHI/MACHI for the accumulator.
 */
static void rx_mul64hi(TCGv_i64 ret, int rs, int rs2)
{
    TCGv_i64 tmp0, tmp1;
    tmp0 = tcg_temp_new_i64();
    tmp1 = tcg_temp_new_i64();
    tcg_gen_ext_i32_i64(tmp0, cpu_regs[rs]);
    /* Arithmetic shift keeps the sign of the upper half. */
    tcg_gen_sari_i64(tmp0, tmp0, 16);
    tcg_gen_ext_i32_i64(tmp1, cpu_regs[rs2]);
    tcg_gen_sari_i64(tmp1, tmp1, 16);
    tcg_gen_mul_i64(ret, tmp0, tmp1);
    /* Product is positioned 16 bits up within the 64-bit accumulator. */
    tcg_gen_shli_i64(ret, ret, 16);
}
/*
 * Multiply the lower 16-bit halves of two registers (sign-extended)
 * and place the product, shifted left 16, into ret.
 * Used by MULLO/MACLO for the accumulator.
 */
static void rx_mul64lo(TCGv_i64 ret, int rs, int rs2)
{
    TCGv_i64 tmp0, tmp1;
    tmp0 = tcg_temp_new_i64();
    tmp1 = tcg_temp_new_i64();
    tcg_gen_ext_i32_i64(tmp0, cpu_regs[rs]);
    /* Keep only the sign-extended low 16 bits. */
    tcg_gen_ext16s_i64(tmp0, tmp0);
    tcg_gen_ext_i32_i64(tmp1, cpu_regs[rs2]);
    tcg_gen_ext16s_i64(tmp1, tmp1);
    tcg_gen_mul_i64(ret, tmp0, tmp1);
    /* Product is positioned 16 bits up within the 64-bit accumulator. */
    tcg_gen_shli_i64(ret, ret, 16);
}
/* mulhi rs, rs2 */
/* ACC = product of the upper halves of rs and rs2. */
static bool trans_MULHI(DisasContext *ctx, arg_MULHI *a)
{
    rx_mul64hi(cpu_acc, a->rs, a->rs2);
    return true;
}
/* mullo rs, rs2 */
/* ACC = product of the lower halves of rs and rs2. */
static bool trans_MULLO(DisasContext *ctx, arg_MULLO *a)
{
    rx_mul64lo(cpu_acc, a->rs, a->rs2);
    return true;
}
/* machi rs, rs2 */
/* ACC += product of the upper halves of rs and rs2. */
static bool trans_MACHI(DisasContext *ctx, arg_MACHI *a)
{
    TCGv_i64 tmp;
    tmp = tcg_temp_new_i64();
    rx_mul64hi(tmp, a->rs, a->rs2);
    tcg_gen_add_i64(cpu_acc, cpu_acc, tmp);
    return true;
}
/* maclo rs, rs2 */
/* ACC += product of the lower halves of rs and rs2. */
static bool trans_MACLO(DisasContext *ctx, arg_MACLO *a)
{
    TCGv_i64 tmp;
    tmp = tcg_temp_new_i64();
    rx_mul64lo(tmp, a->rs, a->rs2);
    tcg_gen_add_i64(cpu_acc, cpu_acc, tmp);
    return true;
}
/* mvfachi rd */
/* rd = upper 32 bits of the 64-bit accumulator. */
static bool trans_MVFACHI(DisasContext *ctx, arg_MVFACHI *a)
{
    tcg_gen_extrh_i64_i32(cpu_regs[a->rd], cpu_acc);
    return true;
}
/* mvfacmi rd */
/* rd = middle 32 bits (bits 16..47) of the 64-bit accumulator. */
static bool trans_MVFACMI(DisasContext *ctx, arg_MVFACMI *a)
{
    TCGv_i64 rd64;
    rd64 = tcg_temp_new_i64();
    tcg_gen_extract_i64(rd64, cpu_acc, 16, 32);
    tcg_gen_extrl_i64_i32(cpu_regs[a->rd], rd64);
    return true;
}
/* mvtachi rs */
/* Upper 32 bits of the accumulator = rs; lower half preserved. */
static bool trans_MVTACHI(DisasContext *ctx, arg_MVTACHI *a)
{
    TCGv_i64 rs64;
    rs64 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(rs64, cpu_regs[a->rs]);
    tcg_gen_deposit_i64(cpu_acc, cpu_acc, rs64, 32, 32);
    return true;
}
/* mvtaclo rs */
/* Lower 32 bits of the accumulator = rs; upper half preserved. */
static bool trans_MVTACLO(DisasContext *ctx, arg_MVTACLO *a)
{
    TCGv_i64 rs64;
    rs64 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(rs64, cpu_regs[a->rs]);
    tcg_gen_deposit_i64(cpu_acc, cpu_acc, rs64, 0, 32);
    return true;
}
/* racw #imm */
/* Round the accumulator word; the helper takes the shift count (imm + 1). */
static bool trans_RACW(DisasContext *ctx, arg_RACW *a)
{
    TCGv imm = tcg_constant_i32(a->imm + 1);
    gen_helper_racw(tcg_env, imm);
    return true;
}
/* sat rd */
/* On overflow (O set, i.e. lazily < 0), clamp rd to INT32_MAX or INT32_MIN
 * depending on the sign flag; otherwise leave rd unchanged. */
static bool trans_SAT(DisasContext *ctx, arg_SAT *a)
{
    TCGv tmp, z;
    tmp = tcg_temp_new();
    z = tcg_constant_i32(0);
    /* S == 1 -> 0xffffffff / S == 0 -> 0x00000000 */
    tcg_gen_sari_i32(tmp, cpu_psw_s, 31);
    /* S == 1 -> 0x7fffffff / S == 0 -> 0x80000000 */
    tcg_gen_xori_i32(tmp, tmp, 0x80000000);
    /* rd = (O < 0) ? saturated-value : rd */
    tcg_gen_movcond_i32(TCG_COND_LT, cpu_regs[a->rd],
                        cpu_psw_o, z, tmp, cpu_regs[a->rd]);
    return true;
}
/* satr */
/* Saturate the 64-bit accumulator; implemented in the satr helper. */
static bool trans_SATR(DisasContext *ctx, arg_SATR *a)
{
    gen_helper_satr(tcg_env);
    return true;
}
/* Token-pasting helper used to build trans_/arg_ identifiers below. */
#define cat3(a, b, c) a##b##c

/*
 * Generate the two forms of a binary FP op:
 *   _ir: op #imm, rd   (immediate literal fetched via li())
 *   _mr: op rs/dsp[rs], rd  (register or memory source)
 * The actual arithmetic lives in the gen_helper_<op> call.
 */
#define FOP(name, op)                                                   \
    static bool cat3(trans_, name, _ir)(DisasContext *ctx,              \
                                        cat3(arg_, name, _ir) * a)      \
    {                                                                   \
        TCGv imm = tcg_constant_i32(li(ctx, 0));                        \
        gen_helper_##op(cpu_regs[a->rd], tcg_env,                       \
                        cpu_regs[a->rd], imm);                          \
        return true;                                                    \
    }                                                                   \
    static bool cat3(trans_, name, _mr)(DisasContext *ctx,              \
                                        cat3(arg_, name, _mr) * a)      \
    {                                                                   \
        TCGv val, mem;                                                  \
        mem = tcg_temp_new();                                           \
        val = rx_load_source(ctx, mem, a->ld, MO_32, a->rs);            \
        gen_helper_##op(cpu_regs[a->rd], tcg_env,                       \
                        cpu_regs[a->rd], val);                          \
        return true;                                                    \
    }

/* Generate a unary FP conversion: op rs/dsp[rs], rd via gen_helper_<op>. */
#define FCONVOP(name, op)                                       \
    static bool trans_##name(DisasContext *ctx, arg_##name * a) \
    {                                                           \
        TCGv val, mem;                                          \
        mem = tcg_temp_new();                                   \
        val = rx_load_source(ctx, mem, a->ld, MO_32, a->rs);    \
        gen_helper_##op(cpu_regs[a->rd], tcg_env, val);         \
        return true;                                            \
    }
/* fcmp #imm, rd */
/* FP compare with an immediate; flags are set inside the fcmp helper. */
static bool trans_FCMP_ir(DisasContext *ctx, arg_FCMP_ir * a)
{
    TCGv imm = tcg_constant_i32(li(ctx, 0));
    gen_helper_fcmp(tcg_env, cpu_regs[a->rd], imm);
    return true;
}
/* fcmp dsp[rs], rd */
/* fcmp rs, rd */
/* FP compare with a register/memory source; flags set in the fcmp helper. */
static bool trans_FCMP_mr(DisasContext *ctx, arg_FCMP_mr *a)
{
    TCGv val, mem;
    mem = tcg_temp_new();
    val = rx_load_source(ctx, mem, a->ld, MO_32, a->rs);
    gen_helper_fcmp(tcg_env, cpu_regs[a->rd], val);
    return true;
}
/* round rs/dsp[rs], rd — FP-to-int rounding via the round helper. */
FCONVOP(ROUND, round)
/* itof rs, rd */
/* itof dsp[rs], rd */
/* Integer-to-float conversion; source size comes from the mi field. */
static bool trans_ITOF(DisasContext *ctx, arg_ITOF * a)
{
    TCGv val, mem;
    mem = tcg_temp_new();
    val = rx_load_source(ctx, mem, a->ld, a->mi, a->rs);
    gen_helper_itof(cpu_regs[a->rd], tcg_env, val);
    return true;
}
/* Read-modify-write: set the mask bits in the byte at mem. */
static void rx_bsetm(TCGv mem, TCGv mask)
{
    TCGv val;
    val = tcg_temp_new();
    rx_gen_ld(MO_8, val, mem);
    tcg_gen_or_i32(val, val, mask);
    rx_gen_st(MO_8, val, mem);
}
/* Read-modify-write: clear the mask bits in the byte at mem. */
static void rx_bclrm(TCGv mem, TCGv mask)
{
    TCGv val;
    val = tcg_temp_new();
    rx_gen_ld(MO_8, val, mem);
    tcg_gen_andc_i32(val, val, mask);
    rx_gen_st(MO_8, val, mem);
}
/* Test the mask bits in the byte at mem; C = Z = (bit set), no write-back. */
static void rx_btstm(TCGv mem, TCGv mask)
{
    TCGv val;
    val = tcg_temp_new();
    rx_gen_ld(MO_8, val, mem);
    tcg_gen_and_i32(val, val, mask);
    tcg_gen_setcondi_i32(TCG_COND_NE, cpu_psw_c, val, 0);
    /* Z mirrors C here (lazy flags hold the same value). */
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_c);
}
/* Read-modify-write: toggle the mask bits in the byte at mem. */
static void rx_bnotm(TCGv mem, TCGv mask)
{
    TCGv val;
    val = tcg_temp_new();
    rx_gen_ld(MO_8, val, mem);
    tcg_gen_xor_i32(val, val, mask);
    rx_gen_st(MO_8, val, mem);
}
/* Register form: set the mask bits in reg. */
static void rx_bsetr(TCGv reg, TCGv mask)
{
    tcg_gen_or_i32(reg, reg, mask);
}
/* Register form: clear the mask bits in reg. */
static void rx_bclrr(TCGv reg, TCGv mask)
{
    tcg_gen_andc_i32(reg, reg, mask);
}
/* Register form: test the mask bits in reg; C = Z = (bit set). */
static inline void rx_btstr(TCGv reg, TCGv mask)
{
    TCGv t0;
    t0 = tcg_temp_new();
    tcg_gen_and_i32(t0, reg, mask);
    tcg_gen_setcondi_i32(TCG_COND_NE, cpu_psw_c, t0, 0);
    /* Z mirrors C here (lazy flags hold the same value). */
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_c);
}
/* Register form: toggle the mask bits in reg. */
static inline void rx_bnotr(TCGv reg, TCGv mask)
{
    tcg_gen_xor_i32(reg, reg, mask);
}
/*
 * Generate the four addressing forms of a bit operation:
 *   _im: #imm bit in a memory byte     _ir: #imm bit in a register
 *   _rr: reg-selected bit in a register  _rm: reg-selected bit in memory
 * Bit index is masked to 31 for registers and 7 for memory bytes.
 */
#define BITOP(name, op)                                                 \
    static bool cat3(trans_, name, _im)(DisasContext *ctx,              \
                                        cat3(arg_, name, _im) * a)      \
    {                                                                   \
        TCGv mask, mem, addr;                                           \
        mem = tcg_temp_new();                                           \
        mask = tcg_constant_i32(1 << a->imm);                           \
        addr = rx_index_addr(ctx, mem, a->ld, MO_8, a->rs);             \
        cat3(rx_, op, m)(addr, mask);                                   \
        return true;                                                    \
    }                                                                   \
    static bool cat3(trans_, name, _ir)(DisasContext *ctx,              \
                                        cat3(arg_, name, _ir) * a)      \
    {                                                                   \
        TCGv mask;                                                      \
        mask = tcg_constant_i32(1 << a->imm);                           \
        cat3(rx_, op, r)(cpu_regs[a->rd], mask);                        \
        return true;                                                    \
    }                                                                   \
    static bool cat3(trans_, name, _rr)(DisasContext *ctx,              \
                                        cat3(arg_, name, _rr) * a)      \
    {                                                                   \
        TCGv mask, b;                                                   \
        mask = tcg_temp_new();                                          \
        b = tcg_temp_new();                                             \
        tcg_gen_andi_i32(b, cpu_regs[a->rs], 31);                       \
        tcg_gen_shl_i32(mask, tcg_constant_i32(1), b);                  \
        cat3(rx_, op, r)(cpu_regs[a->rd], mask);                        \
        return true;                                                    \
    }                                                                   \
    static bool cat3(trans_, name, _rm)(DisasContext *ctx,              \
                                        cat3(arg_, name, _rm) * a)      \
    {                                                                   \
        TCGv mask, mem, addr, b;                                        \
        mask = tcg_temp_new();                                          \
        b = tcg_temp_new();                                             \
        tcg_gen_andi_i32(b, cpu_regs[a->rd], 7);                        \
        tcg_gen_shl_i32(mask, tcg_constant_i32(1), b);                  \
        mem = tcg_temp_new();                                           \
        addr = rx_index_addr(ctx, mem, a->ld, MO_8, a->rs);             \
        cat3(rx_, op, m)(addr, mask);                                   \
        return true;                                                    \
    }
/*
 * Deposit the truth value of condition `cond` into bit `pos` of val:
 * clear the bit, evaluate the condition via psw_cond, then deposit 0/1.
 */
static inline void bmcnd_op(TCGv val, TCGCond cond, int pos)
{
    TCGv bit;
    DisasCompare dc;
    dc.temp = tcg_temp_new();
    bit = tcg_temp_new();
    psw_cond(&dc, cond);
    tcg_gen_andi_i32(val, val, ~(1 << pos));
    tcg_gen_setcondi_i32(dc.cond, bit, dc.value, 0);
    tcg_gen_deposit_i32(val, val, bit, pos, 1);
}
/* bmcnd #imm, dsp[rd] */
/* Load the target byte, set bit `imm` to the condition result, store back. */
static bool trans_BMCnd_im(DisasContext *ctx, arg_BMCnd_im *a)
{
    TCGv val, mem, addr;
    val = tcg_temp_new();
    mem = tcg_temp_new();
    addr = rx_index_addr(ctx, mem, a->ld, MO_8, a->rd);
    rx_gen_ld(MO_8, val, addr);
    bmcnd_op(val, a->cd, a->imm);
    rx_gen_st(MO_8, val, addr);
    return true;
}
/* bmcond #imm, rd */
/* Set bit `imm` of register rd to the condition result. */
static bool trans_BMCnd_ir(DisasContext *ctx, arg_BMCnd_ir *a)
{
    bmcnd_op(cpu_regs[a->rd], a->cd, a->imm);
    return true;
}
/*
 * Common body for clrpsw/setpsw: write val (0 or 1) to the PSW bit
 * selected by cb. Bits below 8 (C/Z/S/O) are always writable; I and U
 * require supervisor mode (is_privileged). Writing U swaps the active
 * stack pointer between ISP and USP.
 * NOTE(review): case labels reconstructed from the visible statements —
 * confirm the PSW_* constant names against the target headers.
 */
static inline void clrsetpsw(DisasContext *ctx, int cb, int val)
{
    if (cb < 8) {
        switch (cb) {
        case PSW_C:
            tcg_gen_movi_i32(cpu_psw_c, val);
            break;
        case PSW_Z:
            /* Lazy Z: zero value means "Z set", so store the inverse. */
            tcg_gen_movi_i32(cpu_psw_z, val == 0);
            break;
        case PSW_S:
            /* Lazy S: negative value means "S set". */
            tcg_gen_movi_i32(cpu_psw_s, val ? -1 : 0);
            break;
        case PSW_O:
            /* Lazy O is kept in bit 31. */
            tcg_gen_movi_i32(cpu_psw_o, val << 31);
            break;
        default:
            qemu_log_mask(LOG_GUEST_ERROR, "Invalid destination %d", cb);
            break;
        }
    } else if (is_privileged(ctx, 0)) {
        switch (cb) {
        case PSW_I:
            tcg_gen_movi_i32(cpu_psw_i, val);
            /* Interrupt enable changed: end the TB so it takes effect. */
            ctx->base.is_jmp = DISAS_UPDATE;
            break;
        case PSW_U:
            if (FIELD_EX32(ctx->tb_flags, PSW, U) != val) {
                ctx->tb_flags = FIELD_DP32(ctx->tb_flags, PSW, U, val);
                tcg_gen_movi_i32(cpu_psw_u, val);
                /* Swap SP with the bank that is being deactivated. */
                tcg_gen_mov_i32(val ? cpu_isp : cpu_usp, cpu_sp);
                tcg_gen_mov_i32(cpu_sp, val ? cpu_usp : cpu_isp);
            }
            break;
        default:
            qemu_log_mask(LOG_GUEST_ERROR, "Invalid destination %d", cb);
            break;
        }
    }
}
/* clrpsw psw */
static bool trans_CLRPSW(DisasContext *ctx, arg_CLRPSW *a)
{
    clrsetpsw(ctx, a->cb, 0);
    return true;
}
/* setpsw psw */
static bool trans_SETPSW(DisasContext *ctx, arg_SETPSW *a)
{
    clrsetpsw(ctx, a->cb, 1);
    return true;
}
/* mvtipl #imm */
/* Set the interrupt priority level; privileged only (silently ignored
 * otherwise). Ends the TB so the new IPL is observed. */
static bool trans_MVTIPL(DisasContext *ctx, arg_MVTIPL *a)
{
    if (is_privileged(ctx, 1)) {
        tcg_gen_movi_i32(cpu_psw_ipl, a->imm);
        ctx->base.is_jmp = DISAS_UPDATE;
    }
    return true;
}
/* mvtc #imm, rd */
/* Move an immediate into the control register selected by a->cr. */
static bool trans_MVTC_i(DisasContext *ctx, arg_MVTC_i *a)
{
    TCGv imm;

    imm = tcg_constant_i32(a->imm);
    move_to_cr(ctx, imm, a->cr);
    return true;
}
/* mvtc rs, rd */
/* Move a general register into the control register selected by a->cr. */
static bool trans_MVTC_r(DisasContext *ctx, arg_MVTC_r *a)
{
    move_to_cr(ctx, cpu_regs[a->rs], a->cr);
    return true;
}
/* mvfc rs, rd */
/* Move a control register into rd; ctx->pc is passed so reads of the
 * PC control register can return the current instruction address. */
static bool trans_MVFC(DisasContext *ctx, arg_MVFC *a)
{
    move_from_cr(ctx, cpu_regs[a->rd], a->cr, ctx->pc);
    return true;
}
/* rtfi */
/* Return from fast interrupt: restore PC/PSW from the BPC/BPSW backup
 * registers; privileged only (silently ignored otherwise). */
static bool trans_RTFI(DisasContext *ctx, arg_RTFI *a)
{
    TCGv psw;
    if (is_privileged(ctx, 1)) {
        psw = tcg_temp_new();
        tcg_gen_mov_i32(cpu_pc, cpu_bpc);
        tcg_gen_mov_i32(psw, cpu_bpsw);
        gen_helper_set_psw_rte(tcg_env, psw);
        /* PSW change may affect translation state: full exit. */
        ctx->base.is_jmp = DISAS_EXIT;
    }
    return true;
}
/* rte */
/* Return from exception: restore PC and PSW from the stack; privileged
 * only. NOTE(review): the two pop() calls were lost in extraction and are
 * reconstructed — confirm against upstream. */
static bool trans_RTE(DisasContext *ctx, arg_RTE *a)
{
    TCGv psw;
    if (is_privileged(ctx, 1)) {
        psw = tcg_temp_new();
        pop(cpu_pc);
        pop(psw);
        gen_helper_set_psw_rte(tcg_env, psw);
        /* PSW change may affect translation state: full exit. */
        ctx->base.is_jmp = DISAS_EXIT;
    }
    return true;
}
/* brk */
/* Software break: sync PC, then raise the break exception in the helper.
 * The helper does not return to translated code. */
static bool trans_BRK(DisasContext *ctx, arg_BRK *a)
{
    tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
    gen_helper_rxbrk(tcg_env);
    ctx->base.is_jmp = DISAS_NORETURN;
    return true;
}
/* int #imm */
/* Software interrupt: vector number must fit in a byte; sync PC and
 * raise the interrupt in the helper (does not return). */
static bool trans_INT(DisasContext *ctx, arg_INT *a)
{
    TCGv vec;

    tcg_debug_assert(a->imm < 0x100);
    vec = tcg_constant_i32(a->imm);
    tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
    gen_helper_rxint(tcg_env, vec);
    ctx->base.is_jmp = DISAS_NORETURN;
    return true;
}
/* wait */
/* Halt until interrupt; privileged only (silently ignored otherwise). */
static bool trans_WAIT(DisasContext *ctx, arg_WAIT *a)
{
    if (is_privileged(ctx, 1)) {
        tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
        gen_helper_wait(tcg_env);
    }
    return true;
}
/* TranslatorOps: per-TB setup — cache the CPU env and TB flags. */
static void rx_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    ctx->env = cpu_env(cs);
    ctx->tb_flags = ctx->base.tb->flags;
}
/* TranslatorOps: nothing to do at TB start for RX. */
static void rx_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
{
}
/* TranslatorOps: record the guest PC for this instruction. */
static void rx_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(ctx->base.pc_next);
}
/* TranslatorOps: decode and translate one instruction; raise an illegal
 * instruction exception if the decoder rejects it. */
static void rx_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    uint32_t insn;

    /* ctx->pc is the address of this insn (pc_next advances during decode). */
    ctx->pc = ctx->base.pc_next;
    insn = decode_load(ctx);
    if (!decode(ctx, insn)) {
        gen_helper_raise_illegal_instruction(tcg_env);
    }
}
/*
 * TranslatorOps: close the TB according to how translation ended.
 * NOTE(review): case labels reconstructed from the visible statements —
 * confirm the exact label set (DISAS_NEXT grouping, fallthrough) upstream.
 */
static void rx_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    switch (ctx->base.is_jmp) {
    case DISAS_NEXT:
    case DISAS_TOO_MANY:
        gen_goto_tb(ctx, 0, dcbase->pc_next);
        break;
    case DISAS_JUMP:
        /* PC already written: chase it through the TB lookup. */
        tcg_gen_lookup_and_goto_ptr();
        break;
    case DISAS_UPDATE:
        tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
        /* fall through */
    case DISAS_EXIT:
        tcg_gen_exit_tb(NULL, 0);
        break;
    case DISAS_NORETURN:
        break;
    default:
        g_assert_not_reached();
    }
}
/* Hook table consumed by translator_loop(). */
static const TranslatorOps rx_tr_ops = {
    .init_disas_context = rx_tr_init_disas_context,
    .tb_start           = rx_tr_tb_start,
    .insn_start         = rx_tr_insn_start,
    .translate_insn     = rx_tr_translate_insn,
    .tb_stop            = rx_tr_tb_stop,
};
/* Entry point: translate one guest TB into TCG ops via the generic loop. */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           vaddr pc, void *host_pc)
{
    DisasContext dc;

    translator_loop(cs, tb, max_insns, pc, host_pc, &rx_tr_ops, &dc.base);
}
/* Bind a cpu_<sym> TCG global to the matching CPURXState field. */
#define ALLOC_REGISTER(sym, name) \
    cpu_##sym = tcg_global_mem_new_i32(tcg_env, \
                                       offsetof(CPURXState, sym), name)
2272 void rx_translate_init(void)
2274 static const char * const regnames
[NUM_REGS
] = {
2275 "R0", "R1", "R2", "R3", "R4", "R5", "R6", "R7",
2276 "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15"
2280 for (i
= 0; i
< NUM_REGS
; i
++) {
2281 cpu_regs
[i
] = tcg_global_mem_new_i32(tcg_env
,
2282 offsetof(CPURXState
, regs
[i
]),
2285 ALLOC_REGISTER(pc
, "PC");
2286 ALLOC_REGISTER(psw_o
, "PSW(O)");
2287 ALLOC_REGISTER(psw_s
, "PSW(S)");
2288 ALLOC_REGISTER(psw_z
, "PSW(Z)");
2289 ALLOC_REGISTER(psw_c
, "PSW(C)");
2290 ALLOC_REGISTER(psw_u
, "PSW(U)");
2291 ALLOC_REGISTER(psw_i
, "PSW(I)");
2292 ALLOC_REGISTER(psw_pm
, "PSW(PM)");
2293 ALLOC_REGISTER(psw_ipl
, "PSW(IPL)");
2294 ALLOC_REGISTER(usp
, "USP");
2295 ALLOC_REGISTER(fpsw
, "FPSW");
2296 ALLOC_REGISTER(bpsw
, "BPSW");
2297 ALLOC_REGISTER(bpc
, "BPC");
2298 ALLOC_REGISTER(isp
, "ISP");
2299 ALLOC_REGISTER(fintv
, "FINTV");
2300 ALLOC_REGISTER(intb
, "INTB");
2301 cpu_acc
= tcg_global_mem_new_i64(tcg_env
,
2302 offsetof(CPURXState
, acc
), "ACC");