Merge tag 'pull-loongarch-20241016' of https://gitlab.com/gaosong/qemu into staging
[qemu/armbru.git] / target / rx / translate.c
blob9aade2b6e5c0020986db8783d98213946de19fa2
1 /*
2 * RX translation
4 * Copyright (c) 2019 Yoshinori Sato
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2 or later, as published by the Free Software Foundation.
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
15 * You should have received a copy of the GNU General Public License along with
16 * this program. If not, see <http://www.gnu.org/licenses/>.
19 #include "qemu/osdep.h"
20 #include "qemu/bswap.h"
21 #include "qemu/qemu-print.h"
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "tcg/tcg-op.h"
25 #include "exec/helper-proto.h"
26 #include "exec/helper-gen.h"
27 #include "exec/translator.h"
28 #include "exec/log.h"
30 #define HELPER_H "helper.h"
31 #include "exec/helper-info.c.inc"
32 #undef HELPER_H
35 typedef struct DisasContext {
36 DisasContextBase base;
37 CPURXState *env;
38 uint32_t pc;
39 uint32_t tb_flags;
40 } DisasContext;
42 typedef struct DisasCompare {
43 TCGv value;
44 TCGv temp;
45 TCGCond cond;
46 } DisasCompare;
/*
 * Return the printable name of control register 'cr',
 * or "illegal" when the index is out of range.  Reserved slots
 * within range map to the empty string.
 */
const char *rx_crname(uint8_t cr)
{
    static const char * const cr_names[] = {
        "psw", "pc", "usp", "fpsw", "", "", "", "",
        "bpsw", "bpc", "isp", "fintv", "intb", "", "", ""
    };

    return (cr < ARRAY_SIZE(cr_names)) ? cr_names[cr] : "illegal";
}
60 /* Target-specific values for dc->base.is_jmp. */
61 #define DISAS_JUMP DISAS_TARGET_0
62 #define DISAS_UPDATE DISAS_TARGET_1
63 #define DISAS_EXIT DISAS_TARGET_2
65 /* global register indexes */
66 static TCGv cpu_regs[16];
67 static TCGv cpu_psw_o, cpu_psw_s, cpu_psw_z, cpu_psw_c;
68 static TCGv cpu_psw_i, cpu_psw_pm, cpu_psw_u, cpu_psw_ipl;
69 static TCGv cpu_usp, cpu_fpsw, cpu_bpsw, cpu_bpc, cpu_isp;
70 static TCGv cpu_fintv, cpu_intb, cpu_pc;
71 static TCGv_i64 cpu_acc;
73 #define cpu_sp cpu_regs[0]
75 /* decoder helper */
76 static uint32_t decode_load_bytes(DisasContext *ctx, uint32_t insn,
77 int i, int n)
79 while (++i <= n) {
80 uint8_t b = translator_ldub(ctx->env, &ctx->base, ctx->base.pc_next++);
81 insn |= b << (32 - i * 8);
83 return insn;
86 static uint32_t li(DisasContext *ctx, int sz)
88 target_ulong addr;
89 uint32_t tmp;
90 CPURXState *env = ctx->env;
91 addr = ctx->base.pc_next;
93 switch (sz) {
94 case 1:
95 ctx->base.pc_next += 1;
96 return (int8_t)translator_ldub(env, &ctx->base, addr);
97 case 2:
98 ctx->base.pc_next += 2;
99 return (int16_t)translator_lduw(env, &ctx->base, addr);
100 case 3:
101 ctx->base.pc_next += 3;
102 tmp = (int8_t)translator_ldub(env, &ctx->base, addr + 2);
103 tmp <<= 16;
104 tmp |= translator_lduw(env, &ctx->base, addr);
105 return tmp;
106 case 0:
107 ctx->base.pc_next += 4;
108 return translator_ldl(env, &ctx->base, addr);
109 default:
110 g_assert_not_reached();
112 return 0;
115 static int bdsp_s(DisasContext *ctx, int d)
118 * 0 -> 8
119 * 1 -> 9
120 * 2 -> 10
121 * 3 -> 3
123 * 7 -> 7
125 if (d < 3) {
126 d += 8;
128 return d;
131 /* Include the auto-generated decoder. */
132 #include "decode-insns.c.inc"
134 void rx_cpu_dump_state(CPUState *cs, FILE *f, int flags)
136 CPURXState *env = cpu_env(cs);
137 int i;
138 uint32_t psw;
140 psw = rx_cpu_pack_psw(env);
141 qemu_fprintf(f, "pc=0x%08x psw=0x%08x\n",
142 env->pc, psw);
143 for (i = 0; i < 16; i += 4) {
144 qemu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
145 i, env->regs[i], i + 1, env->regs[i + 1],
146 i + 2, env->regs[i + 2], i + 3, env->regs[i + 3]);
150 static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
152 if (translator_use_goto_tb(&dc->base, dest)) {
153 tcg_gen_goto_tb(n);
154 tcg_gen_movi_i32(cpu_pc, dest);
155 tcg_gen_exit_tb(dc->base.tb, n);
156 } else {
157 tcg_gen_movi_i32(cpu_pc, dest);
158 tcg_gen_lookup_and_goto_ptr();
160 dc->base.is_jmp = DISAS_NORETURN;
163 /* generic load wrapper */
164 static inline void rx_gen_ld(unsigned int size, TCGv reg, TCGv mem)
166 tcg_gen_qemu_ld_i32(reg, mem, 0, size | MO_SIGN | MO_TE);
169 /* unsigned load wrapper */
170 static inline void rx_gen_ldu(unsigned int size, TCGv reg, TCGv mem)
172 tcg_gen_qemu_ld_i32(reg, mem, 0, size | MO_TE);
175 /* generic store wrapper */
176 static inline void rx_gen_st(unsigned int size, TCGv reg, TCGv mem)
178 tcg_gen_qemu_st_i32(reg, mem, 0, size | MO_TE);
181 /* [ri, rb] */
182 static inline void rx_gen_regindex(DisasContext *ctx, TCGv mem,
183 int size, int ri, int rb)
185 tcg_gen_shli_i32(mem, cpu_regs[ri], size);
186 tcg_gen_add_i32(mem, mem, cpu_regs[rb]);
189 /* dsp[reg] */
190 static inline TCGv rx_index_addr(DisasContext *ctx, TCGv mem,
191 int ld, int size, int reg)
193 uint32_t dsp;
195 switch (ld) {
196 case 0:
197 return cpu_regs[reg];
198 case 1:
199 dsp = translator_ldub(ctx->env, &ctx->base, ctx->base.pc_next) << size;
200 tcg_gen_addi_i32(mem, cpu_regs[reg], dsp);
201 ctx->base.pc_next += 1;
202 return mem;
203 case 2:
204 dsp = translator_lduw(ctx->env, &ctx->base, ctx->base.pc_next) << size;
205 tcg_gen_addi_i32(mem, cpu_regs[reg], dsp);
206 ctx->base.pc_next += 2;
207 return mem;
208 default:
209 g_assert_not_reached();
213 static inline MemOp mi_to_mop(unsigned mi)
215 static const MemOp mop[5] = { MO_SB, MO_SW, MO_UL, MO_UW, MO_UB };
216 tcg_debug_assert(mi < 5);
217 return mop[mi];
220 /* load source operand */
221 static inline TCGv rx_load_source(DisasContext *ctx, TCGv mem,
222 int ld, int mi, int rs)
224 TCGv addr;
225 MemOp mop;
226 if (ld < 3) {
227 mop = mi_to_mop(mi);
228 addr = rx_index_addr(ctx, mem, ld, mop & MO_SIZE, rs);
229 tcg_gen_qemu_ld_i32(mem, addr, 0, mop | MO_TE);
230 return mem;
231 } else {
232 return cpu_regs[rs];
236 /* Processor mode check */
237 static int is_privileged(DisasContext *ctx, int is_exception)
239 if (FIELD_EX32(ctx->tb_flags, PSW, PM)) {
240 if (is_exception) {
241 gen_helper_raise_privilege_violation(tcg_env);
243 return 0;
244 } else {
245 return 1;
249 /* generate QEMU condition */
250 static void psw_cond(DisasCompare *dc, uint32_t cond)
252 tcg_debug_assert(cond < 16);
253 switch (cond) {
254 case 0: /* z */
255 dc->cond = TCG_COND_EQ;
256 dc->value = cpu_psw_z;
257 break;
258 case 1: /* nz */
259 dc->cond = TCG_COND_NE;
260 dc->value = cpu_psw_z;
261 break;
262 case 2: /* c */
263 dc->cond = TCG_COND_NE;
264 dc->value = cpu_psw_c;
265 break;
266 case 3: /* nc */
267 dc->cond = TCG_COND_EQ;
268 dc->value = cpu_psw_c;
269 break;
270 case 4: /* gtu (C& ~Z) == 1 */
271 case 5: /* leu (C& ~Z) == 0 */
272 tcg_gen_setcondi_i32(TCG_COND_NE, dc->temp, cpu_psw_z, 0);
273 tcg_gen_and_i32(dc->temp, dc->temp, cpu_psw_c);
274 dc->cond = (cond == 4) ? TCG_COND_NE : TCG_COND_EQ;
275 dc->value = dc->temp;
276 break;
277 case 6: /* pz (S == 0) */
278 dc->cond = TCG_COND_GE;
279 dc->value = cpu_psw_s;
280 break;
281 case 7: /* n (S == 1) */
282 dc->cond = TCG_COND_LT;
283 dc->value = cpu_psw_s;
284 break;
285 case 8: /* ge (S^O)==0 */
286 case 9: /* lt (S^O)==1 */
287 tcg_gen_xor_i32(dc->temp, cpu_psw_o, cpu_psw_s);
288 dc->cond = (cond == 8) ? TCG_COND_GE : TCG_COND_LT;
289 dc->value = dc->temp;
290 break;
291 case 10: /* gt ((S^O)|Z)==0 */
292 case 11: /* le ((S^O)|Z)==1 */
293 tcg_gen_xor_i32(dc->temp, cpu_psw_o, cpu_psw_s);
294 tcg_gen_sari_i32(dc->temp, dc->temp, 31);
295 tcg_gen_andc_i32(dc->temp, cpu_psw_z, dc->temp);
296 dc->cond = (cond == 10) ? TCG_COND_NE : TCG_COND_EQ;
297 dc->value = dc->temp;
298 break;
299 case 12: /* o */
300 dc->cond = TCG_COND_LT;
301 dc->value = cpu_psw_o;
302 break;
303 case 13: /* no */
304 dc->cond = TCG_COND_GE;
305 dc->value = cpu_psw_o;
306 break;
307 case 14: /* always true */
308 dc->cond = TCG_COND_ALWAYS;
309 dc->value = dc->temp;
310 break;
311 case 15: /* always false */
312 dc->cond = TCG_COND_NEVER;
313 dc->value = dc->temp;
314 break;
318 static void move_from_cr(DisasContext *ctx, TCGv ret, int cr, uint32_t pc)
320 switch (cr) {
321 case 0: /* PSW */
322 gen_helper_pack_psw(ret, tcg_env);
323 break;
324 case 1: /* PC */
325 tcg_gen_movi_i32(ret, pc);
326 break;
327 case 2: /* USP */
328 if (FIELD_EX32(ctx->tb_flags, PSW, U)) {
329 tcg_gen_mov_i32(ret, cpu_sp);
330 } else {
331 tcg_gen_mov_i32(ret, cpu_usp);
333 break;
334 case 3: /* FPSW */
335 tcg_gen_mov_i32(ret, cpu_fpsw);
336 break;
337 case 8: /* BPSW */
338 tcg_gen_mov_i32(ret, cpu_bpsw);
339 break;
340 case 9: /* BPC */
341 tcg_gen_mov_i32(ret, cpu_bpc);
342 break;
343 case 10: /* ISP */
344 if (FIELD_EX32(ctx->tb_flags, PSW, U)) {
345 tcg_gen_mov_i32(ret, cpu_isp);
346 } else {
347 tcg_gen_mov_i32(ret, cpu_sp);
349 break;
350 case 11: /* FINTV */
351 tcg_gen_mov_i32(ret, cpu_fintv);
352 break;
353 case 12: /* INTB */
354 tcg_gen_mov_i32(ret, cpu_intb);
355 break;
356 default:
357 qemu_log_mask(LOG_GUEST_ERROR, "Unimplement control register %d", cr);
358 /* Unimplement registers return 0 */
359 tcg_gen_movi_i32(ret, 0);
360 break;
364 static void move_to_cr(DisasContext *ctx, TCGv val, int cr)
366 if (cr >= 8 && !is_privileged(ctx, 0)) {
367 /* Some control registers can only be written in privileged mode. */
368 qemu_log_mask(LOG_GUEST_ERROR,
369 "disallow control register write %s", rx_crname(cr));
370 return;
372 switch (cr) {
373 case 0: /* PSW */
374 gen_helper_set_psw(tcg_env, val);
375 if (is_privileged(ctx, 0)) {
376 /* PSW.{I,U} may be updated here. exit TB. */
377 ctx->base.is_jmp = DISAS_UPDATE;
379 break;
380 /* case 1: to PC not supported */
381 case 2: /* USP */
382 if (FIELD_EX32(ctx->tb_flags, PSW, U)) {
383 tcg_gen_mov_i32(cpu_sp, val);
384 } else {
385 tcg_gen_mov_i32(cpu_usp, val);
387 break;
388 case 3: /* FPSW */
389 gen_helper_set_fpsw(tcg_env, val);
390 break;
391 case 8: /* BPSW */
392 tcg_gen_mov_i32(cpu_bpsw, val);
393 break;
394 case 9: /* BPC */
395 tcg_gen_mov_i32(cpu_bpc, val);
396 break;
397 case 10: /* ISP */
398 if (FIELD_EX32(ctx->tb_flags, PSW, U)) {
399 tcg_gen_mov_i32(cpu_isp, val);
400 } else {
401 tcg_gen_mov_i32(cpu_sp, val);
403 break;
404 case 11: /* FINTV */
405 tcg_gen_mov_i32(cpu_fintv, val);
406 break;
407 case 12: /* INTB */
408 tcg_gen_mov_i32(cpu_intb, val);
409 break;
410 default:
411 qemu_log_mask(LOG_GUEST_ERROR,
412 "Unimplement control register %d", cr);
413 break;
417 static void push(TCGv val)
419 tcg_gen_subi_i32(cpu_sp, cpu_sp, 4);
420 rx_gen_st(MO_32, val, cpu_sp);
423 static void pop(TCGv ret)
425 rx_gen_ld(MO_32, ret, cpu_sp);
426 tcg_gen_addi_i32(cpu_sp, cpu_sp, 4);
429 /* mov.<bwl> rs,dsp5[rd] */
430 static bool trans_MOV_rm(DisasContext *ctx, arg_MOV_rm *a)
432 TCGv mem;
433 mem = tcg_temp_new();
434 tcg_gen_addi_i32(mem, cpu_regs[a->rd], a->dsp << a->sz);
435 rx_gen_st(a->sz, cpu_regs[a->rs], mem);
436 return true;
439 /* mov.<bwl> dsp5[rs],rd */
440 static bool trans_MOV_mr(DisasContext *ctx, arg_MOV_mr *a)
442 TCGv mem;
443 mem = tcg_temp_new();
444 tcg_gen_addi_i32(mem, cpu_regs[a->rs], a->dsp << a->sz);
445 rx_gen_ld(a->sz, cpu_regs[a->rd], mem);
446 return true;
449 /* mov.l #uimm4,rd */
450 /* mov.l #uimm8,rd */
451 /* mov.l #imm,rd */
452 static bool trans_MOV_ir(DisasContext *ctx, arg_MOV_ir *a)
454 tcg_gen_movi_i32(cpu_regs[a->rd], a->imm);
455 return true;
458 /* mov.<bwl> #uimm8,dsp[rd] */
459 /* mov.<bwl> #imm, dsp[rd] */
460 static bool trans_MOV_im(DisasContext *ctx, arg_MOV_im *a)
462 TCGv imm, mem;
463 imm = tcg_constant_i32(a->imm);
464 mem = tcg_temp_new();
465 tcg_gen_addi_i32(mem, cpu_regs[a->rd], a->dsp << a->sz);
466 rx_gen_st(a->sz, imm, mem);
467 return true;
470 /* mov.<bwl> [ri,rb],rd */
471 static bool trans_MOV_ar(DisasContext *ctx, arg_MOV_ar *a)
473 TCGv mem;
474 mem = tcg_temp_new();
475 rx_gen_regindex(ctx, mem, a->sz, a->ri, a->rb);
476 rx_gen_ld(a->sz, cpu_regs[a->rd], mem);
477 return true;
480 /* mov.<bwl> rd,[ri,rb] */
481 static bool trans_MOV_ra(DisasContext *ctx, arg_MOV_ra *a)
483 TCGv mem;
484 mem = tcg_temp_new();
485 rx_gen_regindex(ctx, mem, a->sz, a->ri, a->rb);
486 rx_gen_st(a->sz, cpu_regs[a->rs], mem);
487 return true;
490 /* mov.<bwl> dsp[rs],dsp[rd] */
491 /* mov.<bwl> rs,dsp[rd] */
492 /* mov.<bwl> dsp[rs],rd */
493 /* mov.<bwl> rs,rd */
494 static bool trans_MOV_mm(DisasContext *ctx, arg_MOV_mm *a)
496 TCGv tmp, mem, addr;
498 if (a->lds == 3 && a->ldd == 3) {
499 /* mov.<bwl> rs,rd */
500 tcg_gen_ext_i32(cpu_regs[a->rd], cpu_regs[a->rs], a->sz | MO_SIGN);
501 return true;
504 mem = tcg_temp_new();
505 if (a->lds == 3) {
506 /* mov.<bwl> rs,dsp[rd] */
507 addr = rx_index_addr(ctx, mem, a->ldd, a->sz, a->rs);
508 rx_gen_st(a->sz, cpu_regs[a->rd], addr);
509 } else if (a->ldd == 3) {
510 /* mov.<bwl> dsp[rs],rd */
511 addr = rx_index_addr(ctx, mem, a->lds, a->sz, a->rs);
512 rx_gen_ld(a->sz, cpu_regs[a->rd], addr);
513 } else {
514 /* mov.<bwl> dsp[rs],dsp[rd] */
515 tmp = tcg_temp_new();
516 addr = rx_index_addr(ctx, mem, a->lds, a->sz, a->rs);
517 rx_gen_ld(a->sz, tmp, addr);
518 addr = rx_index_addr(ctx, mem, a->ldd, a->sz, a->rd);
519 rx_gen_st(a->sz, tmp, addr);
521 return true;
524 /* mov.<bwl> rs,[rd+] */
525 /* mov.<bwl> rs,[-rd] */
526 static bool trans_MOV_rp(DisasContext *ctx, arg_MOV_rp *a)
528 TCGv val;
529 val = tcg_temp_new();
530 tcg_gen_mov_i32(val, cpu_regs[a->rs]);
531 if (a->ad == 1) {
532 tcg_gen_subi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
534 rx_gen_st(a->sz, val, cpu_regs[a->rd]);
535 if (a->ad == 0) {
536 tcg_gen_addi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
538 return true;
541 /* mov.<bwl> [rd+],rs */
542 /* mov.<bwl> [-rd],rs */
543 static bool trans_MOV_pr(DisasContext *ctx, arg_MOV_pr *a)
545 TCGv val;
546 val = tcg_temp_new();
547 if (a->ad == 1) {
548 tcg_gen_subi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
550 rx_gen_ld(a->sz, val, cpu_regs[a->rd]);
551 if (a->ad == 0) {
552 tcg_gen_addi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
554 tcg_gen_mov_i32(cpu_regs[a->rs], val);
555 return true;
558 /* movu.<bw> dsp5[rs],rd */
559 /* movu.<bw> dsp[rs],rd */
560 static bool trans_MOVU_mr(DisasContext *ctx, arg_MOVU_mr *a)
562 TCGv mem;
563 mem = tcg_temp_new();
564 tcg_gen_addi_i32(mem, cpu_regs[a->rs], a->dsp << a->sz);
565 rx_gen_ldu(a->sz, cpu_regs[a->rd], mem);
566 return true;
569 /* movu.<bw> rs,rd */
570 static bool trans_MOVU_rr(DisasContext *ctx, arg_MOVU_rr *a)
572 tcg_gen_ext_i32(cpu_regs[a->rd], cpu_regs[a->rs], a->sz);
573 return true;
576 /* movu.<bw> [ri,rb],rd */
577 static bool trans_MOVU_ar(DisasContext *ctx, arg_MOVU_ar *a)
579 TCGv mem;
580 mem = tcg_temp_new();
581 rx_gen_regindex(ctx, mem, a->sz, a->ri, a->rb);
582 rx_gen_ldu(a->sz, cpu_regs[a->rd], mem);
583 return true;
586 /* movu.<bw> [rd+],rs */
587 /* mov.<bw> [-rd],rs */
588 static bool trans_MOVU_pr(DisasContext *ctx, arg_MOVU_pr *a)
590 TCGv val;
591 val = tcg_temp_new();
592 if (a->ad == 1) {
593 tcg_gen_subi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
595 rx_gen_ldu(a->sz, val, cpu_regs[a->rd]);
596 if (a->ad == 0) {
597 tcg_gen_addi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
599 tcg_gen_mov_i32(cpu_regs[a->rs], val);
600 return true;
604 /* pop rd */
605 static bool trans_POP(DisasContext *ctx, arg_POP *a)
607 /* mov.l [r0+], rd */
608 arg_MOV_rp mov_a;
609 mov_a.rd = 0;
610 mov_a.rs = a->rd;
611 mov_a.ad = 0;
612 mov_a.sz = MO_32;
613 trans_MOV_pr(ctx, &mov_a);
614 return true;
617 /* popc cr */
618 static bool trans_POPC(DisasContext *ctx, arg_POPC *a)
620 TCGv val;
621 val = tcg_temp_new();
622 pop(val);
623 move_to_cr(ctx, val, a->cr);
624 return true;
627 /* popm rd-rd2 */
628 static bool trans_POPM(DisasContext *ctx, arg_POPM *a)
630 int r;
631 if (a->rd == 0 || a->rd >= a->rd2) {
632 qemu_log_mask(LOG_GUEST_ERROR,
633 "Invalid register ranges r%d-r%d", a->rd, a->rd2);
635 r = a->rd;
636 while (r <= a->rd2 && r < 16) {
637 pop(cpu_regs[r++]);
639 return true;
643 /* push.<bwl> rs */
644 static bool trans_PUSH_r(DisasContext *ctx, arg_PUSH_r *a)
646 TCGv val;
647 val = tcg_temp_new();
648 tcg_gen_mov_i32(val, cpu_regs[a->rs]);
649 tcg_gen_subi_i32(cpu_sp, cpu_sp, 4);
650 rx_gen_st(a->sz, val, cpu_sp);
651 return true;
654 /* push.<bwl> dsp[rs] */
655 static bool trans_PUSH_m(DisasContext *ctx, arg_PUSH_m *a)
657 TCGv mem, val, addr;
658 mem = tcg_temp_new();
659 val = tcg_temp_new();
660 addr = rx_index_addr(ctx, mem, a->ld, a->sz, a->rs);
661 rx_gen_ld(a->sz, val, addr);
662 tcg_gen_subi_i32(cpu_sp, cpu_sp, 4);
663 rx_gen_st(a->sz, val, cpu_sp);
664 return true;
667 /* pushc rx */
668 static bool trans_PUSHC(DisasContext *ctx, arg_PUSHC *a)
670 TCGv val;
671 val = tcg_temp_new();
672 move_from_cr(ctx, val, a->cr, ctx->pc);
673 push(val);
674 return true;
677 /* pushm rs-rs2 */
678 static bool trans_PUSHM(DisasContext *ctx, arg_PUSHM *a)
680 int r;
682 if (a->rs == 0 || a->rs >= a->rs2) {
683 qemu_log_mask(LOG_GUEST_ERROR,
684 "Invalid register ranges r%d-r%d", a->rs, a->rs2);
686 r = a->rs2;
687 while (r >= a->rs && r >= 0) {
688 push(cpu_regs[r--]);
690 return true;
693 /* xchg rs,rd */
694 static bool trans_XCHG_rr(DisasContext *ctx, arg_XCHG_rr *a)
696 TCGv tmp;
697 tmp = tcg_temp_new();
698 tcg_gen_mov_i32(tmp, cpu_regs[a->rs]);
699 tcg_gen_mov_i32(cpu_regs[a->rs], cpu_regs[a->rd]);
700 tcg_gen_mov_i32(cpu_regs[a->rd], tmp);
701 return true;
704 /* xchg dsp[rs].<mi>,rd */
705 static bool trans_XCHG_mr(DisasContext *ctx, arg_XCHG_mr *a)
707 TCGv mem, addr;
708 mem = tcg_temp_new();
709 switch (a->mi) {
710 case 0: /* dsp[rs].b */
711 case 1: /* dsp[rs].w */
712 case 2: /* dsp[rs].l */
713 addr = rx_index_addr(ctx, mem, a->ld, a->mi, a->rs);
714 break;
715 case 3: /* dsp[rs].uw */
716 case 4: /* dsp[rs].ub */
717 addr = rx_index_addr(ctx, mem, a->ld, 4 - a->mi, a->rs);
718 break;
719 default:
720 g_assert_not_reached();
722 tcg_gen_atomic_xchg_i32(cpu_regs[a->rd], addr, cpu_regs[a->rd],
723 0, mi_to_mop(a->mi));
724 return true;
727 static inline void stcond(TCGCond cond, int rd, int imm)
729 TCGv z;
730 TCGv _imm;
731 z = tcg_constant_i32(0);
732 _imm = tcg_constant_i32(imm);
733 tcg_gen_movcond_i32(cond, cpu_regs[rd], cpu_psw_z, z,
734 _imm, cpu_regs[rd]);
737 /* stz #imm,rd */
738 static bool trans_STZ(DisasContext *ctx, arg_STZ *a)
740 stcond(TCG_COND_EQ, a->rd, a->imm);
741 return true;
744 /* stnz #imm,rd */
745 static bool trans_STNZ(DisasContext *ctx, arg_STNZ *a)
747 stcond(TCG_COND_NE, a->rd, a->imm);
748 return true;
751 /* sccnd.<bwl> rd */
752 /* sccnd.<bwl> dsp:[rd] */
753 static bool trans_SCCnd(DisasContext *ctx, arg_SCCnd *a)
755 DisasCompare dc;
756 TCGv val, mem, addr;
757 dc.temp = tcg_temp_new();
758 psw_cond(&dc, a->cd);
759 if (a->ld < 3) {
760 val = tcg_temp_new();
761 mem = tcg_temp_new();
762 tcg_gen_setcondi_i32(dc.cond, val, dc.value, 0);
763 addr = rx_index_addr(ctx, mem, a->sz, a->ld, a->rd);
764 rx_gen_st(a->sz, val, addr);
765 } else {
766 tcg_gen_setcondi_i32(dc.cond, cpu_regs[a->rd], dc.value, 0);
768 return true;
771 /* rtsd #imm */
772 static bool trans_RTSD_i(DisasContext *ctx, arg_RTSD_i *a)
774 tcg_gen_addi_i32(cpu_sp, cpu_sp, a->imm << 2);
775 pop(cpu_pc);
776 ctx->base.is_jmp = DISAS_JUMP;
777 return true;
780 /* rtsd #imm, rd-rd2 */
781 static bool trans_RTSD_irr(DisasContext *ctx, arg_RTSD_irr *a)
783 int dst;
784 int adj;
786 if (a->rd2 >= a->rd) {
787 adj = a->imm - (a->rd2 - a->rd + 1);
788 } else {
789 adj = a->imm - (15 - a->rd + 1);
792 tcg_gen_addi_i32(cpu_sp, cpu_sp, adj << 2);
793 dst = a->rd;
794 while (dst <= a->rd2 && dst < 16) {
795 pop(cpu_regs[dst++]);
797 pop(cpu_pc);
798 ctx->base.is_jmp = DISAS_JUMP;
799 return true;
802 typedef void (*op2fn)(TCGv ret, TCGv arg1);
803 typedef void (*op3fn)(TCGv ret, TCGv arg1, TCGv arg2);
805 static inline void rx_gen_op_rr(op2fn opr, int dst, int src)
807 opr(cpu_regs[dst], cpu_regs[src]);
810 static inline void rx_gen_op_rrr(op3fn opr, int dst, int src, int src2)
812 opr(cpu_regs[dst], cpu_regs[src], cpu_regs[src2]);
815 static inline void rx_gen_op_irr(op3fn opr, int dst, int src, uint32_t src2)
817 TCGv imm = tcg_constant_i32(src2);
818 opr(cpu_regs[dst], cpu_regs[src], imm);
821 static inline void rx_gen_op_mr(op3fn opr, DisasContext *ctx,
822 int dst, int src, int ld, int mi)
824 TCGv val, mem;
825 mem = tcg_temp_new();
826 val = rx_load_source(ctx, mem, ld, mi, src);
827 opr(cpu_regs[dst], cpu_regs[dst], val);
830 static void rx_and(TCGv ret, TCGv arg1, TCGv arg2)
832 tcg_gen_and_i32(cpu_psw_s, arg1, arg2);
833 tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
834 tcg_gen_mov_i32(ret, cpu_psw_s);
837 /* and #uimm:4, rd */
838 /* and #imm, rd */
839 static bool trans_AND_ir(DisasContext *ctx, arg_AND_ir *a)
841 rx_gen_op_irr(rx_and, a->rd, a->rd, a->imm);
842 return true;
845 /* and dsp[rs], rd */
846 /* and rs,rd */
847 static bool trans_AND_mr(DisasContext *ctx, arg_AND_mr *a)
849 rx_gen_op_mr(rx_and, ctx, a->rd, a->rs, a->ld, a->mi);
850 return true;
853 /* and rs,rs2,rd */
854 static bool trans_AND_rrr(DisasContext *ctx, arg_AND_rrr *a)
856 rx_gen_op_rrr(rx_and, a->rd, a->rs, a->rs2);
857 return true;
860 static void rx_or(TCGv ret, TCGv arg1, TCGv arg2)
862 tcg_gen_or_i32(cpu_psw_s, arg1, arg2);
863 tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
864 tcg_gen_mov_i32(ret, cpu_psw_s);
867 /* or #uimm:4, rd */
868 /* or #imm, rd */
869 static bool trans_OR_ir(DisasContext *ctx, arg_OR_ir *a)
871 rx_gen_op_irr(rx_or, a->rd, a->rd, a->imm);
872 return true;
875 /* or dsp[rs], rd */
876 /* or rs,rd */
877 static bool trans_OR_mr(DisasContext *ctx, arg_OR_mr *a)
879 rx_gen_op_mr(rx_or, ctx, a->rd, a->rs, a->ld, a->mi);
880 return true;
883 /* or rs,rs2,rd */
884 static bool trans_OR_rrr(DisasContext *ctx, arg_OR_rrr *a)
886 rx_gen_op_rrr(rx_or, a->rd, a->rs, a->rs2);
887 return true;
890 static void rx_xor(TCGv ret, TCGv arg1, TCGv arg2)
892 tcg_gen_xor_i32(cpu_psw_s, arg1, arg2);
893 tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
894 tcg_gen_mov_i32(ret, cpu_psw_s);
897 /* xor #imm, rd */
898 static bool trans_XOR_ir(DisasContext *ctx, arg_XOR_ir *a)
900 rx_gen_op_irr(rx_xor, a->rd, a->rd, a->imm);
901 return true;
904 /* xor dsp[rs], rd */
905 /* xor rs,rd */
906 static bool trans_XOR_mr(DisasContext *ctx, arg_XOR_mr *a)
908 rx_gen_op_mr(rx_xor, ctx, a->rd, a->rs, a->ld, a->mi);
909 return true;
912 static void rx_tst(TCGv ret, TCGv arg1, TCGv arg2)
914 tcg_gen_and_i32(cpu_psw_s, arg1, arg2);
915 tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
918 /* tst #imm, rd */
919 static bool trans_TST_ir(DisasContext *ctx, arg_TST_ir *a)
921 rx_gen_op_irr(rx_tst, a->rd, a->rd, a->imm);
922 return true;
925 /* tst dsp[rs], rd */
926 /* tst rs, rd */
927 static bool trans_TST_mr(DisasContext *ctx, arg_TST_mr *a)
929 rx_gen_op_mr(rx_tst, ctx, a->rd, a->rs, a->ld, a->mi);
930 return true;
933 static void rx_not(TCGv ret, TCGv arg1)
935 tcg_gen_not_i32(ret, arg1);
936 tcg_gen_mov_i32(cpu_psw_z, ret);
937 tcg_gen_mov_i32(cpu_psw_s, ret);
940 /* not rd */
941 /* not rs, rd */
942 static bool trans_NOT_rr(DisasContext *ctx, arg_NOT_rr *a)
944 rx_gen_op_rr(rx_not, a->rd, a->rs);
945 return true;
948 static void rx_neg(TCGv ret, TCGv arg1)
950 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_psw_o, arg1, 0x80000000);
951 tcg_gen_neg_i32(ret, arg1);
952 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_psw_c, ret, 0);
953 tcg_gen_mov_i32(cpu_psw_z, ret);
954 tcg_gen_mov_i32(cpu_psw_s, ret);
958 /* neg rd */
959 /* neg rs, rd */
960 static bool trans_NEG_rr(DisasContext *ctx, arg_NEG_rr *a)
962 rx_gen_op_rr(rx_neg, a->rd, a->rs);
963 return true;
966 /* ret = arg1 + arg2 + psw_c */
967 static void rx_adc(TCGv ret, TCGv arg1, TCGv arg2)
969 TCGv z = tcg_constant_i32(0);
970 tcg_gen_add2_i32(cpu_psw_s, cpu_psw_c, arg1, z, cpu_psw_c, z);
971 tcg_gen_add2_i32(cpu_psw_s, cpu_psw_c, cpu_psw_s, cpu_psw_c, arg2, z);
972 tcg_gen_xor_i32(cpu_psw_o, cpu_psw_s, arg1);
973 tcg_gen_xor_i32(cpu_psw_z, arg1, arg2);
974 tcg_gen_andc_i32(cpu_psw_o, cpu_psw_o, cpu_psw_z);
975 tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
976 tcg_gen_mov_i32(ret, cpu_psw_s);
979 /* adc #imm, rd */
980 static bool trans_ADC_ir(DisasContext *ctx, arg_ADC_ir *a)
982 rx_gen_op_irr(rx_adc, a->rd, a->rd, a->imm);
983 return true;
986 /* adc rs, rd */
987 static bool trans_ADC_rr(DisasContext *ctx, arg_ADC_rr *a)
989 rx_gen_op_rrr(rx_adc, a->rd, a->rd, a->rs);
990 return true;
993 /* adc dsp[rs], rd */
994 static bool trans_ADC_mr(DisasContext *ctx, arg_ADC_mr *a)
996 /* mi only 2 */
997 if (a->mi != 2) {
998 return false;
1000 rx_gen_op_mr(rx_adc, ctx, a->rd, a->rs, a->ld, a->mi);
1001 return true;
1004 /* ret = arg1 + arg2 */
1005 static void rx_add(TCGv ret, TCGv arg1, TCGv arg2)
1007 TCGv z = tcg_constant_i32(0);
1008 tcg_gen_add2_i32(cpu_psw_s, cpu_psw_c, arg1, z, arg2, z);
1009 tcg_gen_xor_i32(cpu_psw_o, cpu_psw_s, arg1);
1010 tcg_gen_xor_i32(cpu_psw_z, arg1, arg2);
1011 tcg_gen_andc_i32(cpu_psw_o, cpu_psw_o, cpu_psw_z);
1012 tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
1013 tcg_gen_mov_i32(ret, cpu_psw_s);
1016 /* add #uimm4, rd */
1017 /* add #imm, rs, rd */
1018 static bool trans_ADD_irr(DisasContext *ctx, arg_ADD_irr *a)
1020 rx_gen_op_irr(rx_add, a->rd, a->rs2, a->imm);
1021 return true;
1024 /* add rs, rd */
1025 /* add dsp[rs], rd */
1026 static bool trans_ADD_mr(DisasContext *ctx, arg_ADD_mr *a)
1028 rx_gen_op_mr(rx_add, ctx, a->rd, a->rs, a->ld, a->mi);
1029 return true;
1032 /* add rs, rs2, rd */
1033 static bool trans_ADD_rrr(DisasContext *ctx, arg_ADD_rrr *a)
1035 rx_gen_op_rrr(rx_add, a->rd, a->rs, a->rs2);
1036 return true;
1039 /* ret = arg1 - arg2 */
1040 static void rx_sub(TCGv ret, TCGv arg1, TCGv arg2)
1042 tcg_gen_sub_i32(cpu_psw_s, arg1, arg2);
1043 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_psw_c, arg1, arg2);
1044 tcg_gen_xor_i32(cpu_psw_o, cpu_psw_s, arg1);
1045 tcg_gen_xor_i32(cpu_psw_z, arg1, arg2);
1046 tcg_gen_and_i32(cpu_psw_o, cpu_psw_o, cpu_psw_z);
1047 tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
1048 /* CMP not required return */
1049 if (ret) {
1050 tcg_gen_mov_i32(ret, cpu_psw_s);
1054 static void rx_cmp(TCGv dummy, TCGv arg1, TCGv arg2)
1056 rx_sub(NULL, arg1, arg2);
1059 /* ret = arg1 - arg2 - !psw_c */
1060 /* -> ret = arg1 + ~arg2 + psw_c */
1061 static void rx_sbb(TCGv ret, TCGv arg1, TCGv arg2)
1063 TCGv temp;
1064 temp = tcg_temp_new();
1065 tcg_gen_not_i32(temp, arg2);
1066 rx_adc(ret, arg1, temp);
1069 /* cmp #imm4, rs2 */
1070 /* cmp #imm8, rs2 */
1071 /* cmp #imm, rs2 */
1072 static bool trans_CMP_ir(DisasContext *ctx, arg_CMP_ir *a)
1074 rx_gen_op_irr(rx_cmp, 0, a->rs2, a->imm);
1075 return true;
1078 /* cmp rs, rs2 */
1079 /* cmp dsp[rs], rs2 */
1080 static bool trans_CMP_mr(DisasContext *ctx, arg_CMP_mr *a)
1082 rx_gen_op_mr(rx_cmp, ctx, a->rd, a->rs, a->ld, a->mi);
1083 return true;
1086 /* sub #imm4, rd */
1087 static bool trans_SUB_ir(DisasContext *ctx, arg_SUB_ir *a)
1089 rx_gen_op_irr(rx_sub, a->rd, a->rd, a->imm);
1090 return true;
1093 /* sub rs, rd */
1094 /* sub dsp[rs], rd */
1095 static bool trans_SUB_mr(DisasContext *ctx, arg_SUB_mr *a)
1097 rx_gen_op_mr(rx_sub, ctx, a->rd, a->rs, a->ld, a->mi);
1098 return true;
1101 /* sub rs2, rs, rd */
1102 static bool trans_SUB_rrr(DisasContext *ctx, arg_SUB_rrr *a)
1104 rx_gen_op_rrr(rx_sub, a->rd, a->rs2, a->rs);
1105 return true;
1108 /* sbb rs, rd */
1109 static bool trans_SBB_rr(DisasContext *ctx, arg_SBB_rr *a)
1111 rx_gen_op_rrr(rx_sbb, a->rd, a->rd, a->rs);
1112 return true;
1115 /* sbb dsp[rs], rd */
1116 static bool trans_SBB_mr(DisasContext *ctx, arg_SBB_mr *a)
1118 /* mi only 2 */
1119 if (a->mi != 2) {
1120 return false;
1122 rx_gen_op_mr(rx_sbb, ctx, a->rd, a->rs, a->ld, a->mi);
1123 return true;
1126 /* abs rd */
1127 /* abs rs, rd */
1128 static bool trans_ABS_rr(DisasContext *ctx, arg_ABS_rr *a)
1130 rx_gen_op_rr(tcg_gen_abs_i32, a->rd, a->rs);
1131 return true;
1134 /* max #imm, rd */
1135 static bool trans_MAX_ir(DisasContext *ctx, arg_MAX_ir *a)
1137 rx_gen_op_irr(tcg_gen_smax_i32, a->rd, a->rd, a->imm);
1138 return true;
1141 /* max rs, rd */
1142 /* max dsp[rs], rd */
1143 static bool trans_MAX_mr(DisasContext *ctx, arg_MAX_mr *a)
1145 rx_gen_op_mr(tcg_gen_smax_i32, ctx, a->rd, a->rs, a->ld, a->mi);
1146 return true;
1149 /* min #imm, rd */
1150 static bool trans_MIN_ir(DisasContext *ctx, arg_MIN_ir *a)
1152 rx_gen_op_irr(tcg_gen_smin_i32, a->rd, a->rd, a->imm);
1153 return true;
1156 /* min rs, rd */
1157 /* min dsp[rs], rd */
1158 static bool trans_MIN_mr(DisasContext *ctx, arg_MIN_mr *a)
1160 rx_gen_op_mr(tcg_gen_smin_i32, ctx, a->rd, a->rs, a->ld, a->mi);
1161 return true;
1164 /* mul #uimm4, rd */
1165 /* mul #imm, rd */
1166 static bool trans_MUL_ir(DisasContext *ctx, arg_MUL_ir *a)
1168 rx_gen_op_irr(tcg_gen_mul_i32, a->rd, a->rd, a->imm);
1169 return true;
1172 /* mul rs, rd */
1173 /* mul dsp[rs], rd */
1174 static bool trans_MUL_mr(DisasContext *ctx, arg_MUL_mr *a)
1176 rx_gen_op_mr(tcg_gen_mul_i32, ctx, a->rd, a->rs, a->ld, a->mi);
1177 return true;
1180 /* mul rs, rs2, rd */
1181 static bool trans_MUL_rrr(DisasContext *ctx, arg_MUL_rrr *a)
1183 rx_gen_op_rrr(tcg_gen_mul_i32, a->rd, a->rs, a->rs2);
1184 return true;
1187 /* emul #imm, rd */
1188 static bool trans_EMUL_ir(DisasContext *ctx, arg_EMUL_ir *a)
1190 TCGv imm = tcg_constant_i32(a->imm);
1191 if (a->rd > 14) {
1192 qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd);
1194 tcg_gen_muls2_i32(cpu_regs[a->rd], cpu_regs[(a->rd + 1) & 15],
1195 cpu_regs[a->rd], imm);
1196 return true;
1199 /* emul rs, rd */
1200 /* emul dsp[rs], rd */
1201 static bool trans_EMUL_mr(DisasContext *ctx, arg_EMUL_mr *a)
1203 TCGv val, mem;
1204 if (a->rd > 14) {
1205 qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd);
1207 mem = tcg_temp_new();
1208 val = rx_load_source(ctx, mem, a->ld, a->mi, a->rs);
1209 tcg_gen_muls2_i32(cpu_regs[a->rd], cpu_regs[(a->rd + 1) & 15],
1210 cpu_regs[a->rd], val);
1211 return true;
1214 /* emulu #imm, rd */
1215 static bool trans_EMULU_ir(DisasContext *ctx, arg_EMULU_ir *a)
1217 TCGv imm = tcg_constant_i32(a->imm);
1218 if (a->rd > 14) {
1219 qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd);
1221 tcg_gen_mulu2_i32(cpu_regs[a->rd], cpu_regs[(a->rd + 1) & 15],
1222 cpu_regs[a->rd], imm);
1223 return true;
1226 /* emulu rs, rd */
1227 /* emulu dsp[rs], rd */
1228 static bool trans_EMULU_mr(DisasContext *ctx, arg_EMULU_mr *a)
1230 TCGv val, mem;
1231 if (a->rd > 14) {
1232 qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd);
1234 mem = tcg_temp_new();
1235 val = rx_load_source(ctx, mem, a->ld, a->mi, a->rs);
1236 tcg_gen_mulu2_i32(cpu_regs[a->rd], cpu_regs[(a->rd + 1) & 15],
1237 cpu_regs[a->rd], val);
1238 return true;
1241 static void rx_div(TCGv ret, TCGv arg1, TCGv arg2)
1243 gen_helper_div(ret, tcg_env, arg1, arg2);
1246 static void rx_divu(TCGv ret, TCGv arg1, TCGv arg2)
1248 gen_helper_divu(ret, tcg_env, arg1, arg2);
1251 /* div #imm, rd */
1252 static bool trans_DIV_ir(DisasContext *ctx, arg_DIV_ir *a)
1254 rx_gen_op_irr(rx_div, a->rd, a->rd, a->imm);
1255 return true;
1258 /* div rs, rd */
1259 /* div dsp[rs], rd */
1260 static bool trans_DIV_mr(DisasContext *ctx, arg_DIV_mr *a)
1262 rx_gen_op_mr(rx_div, ctx, a->rd, a->rs, a->ld, a->mi);
1263 return true;
1266 /* divu #imm, rd */
1267 static bool trans_DIVU_ir(DisasContext *ctx, arg_DIVU_ir *a)
1269 rx_gen_op_irr(rx_divu, a->rd, a->rd, a->imm);
1270 return true;
1273 /* divu rs, rd */
1274 /* divu dsp[rs], rd */
1275 static bool trans_DIVU_mr(DisasContext *ctx, arg_DIVU_mr *a)
1277 rx_gen_op_mr(rx_divu, ctx, a->rd, a->rs, a->ld, a->mi);
1278 return true;
/* shll #imm:5, rd */
/* shll #imm:5, rs2, rd */
static bool trans_SHLL_irr(DisasContext *ctx, arg_SHLL_irr *a)
{
    TCGv tmp;
    tmp = tcg_temp_new();
    if (a->imm) {
        /*
         * Arithmetic shift right by 32 - imm keeps exactly the bits
         * that the left shift will push out (sign-extended downward).
         */
        tcg_gen_sari_i32(cpu_psw_c, cpu_regs[a->rs2], 32 - a->imm);
        tcg_gen_shli_i32(cpu_regs[a->rd], cpu_regs[a->rs2], a->imm);
        /*
         * cpu_psw_o ends up nonzero iff the shifted-out bits were
         * homogeneous (all zero or all one).
         */
        tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_psw_o, cpu_psw_c, 0);
        tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_psw_c, 0xffffffff);
        tcg_gen_or_i32(cpu_psw_o, cpu_psw_o, tmp);
        /* Reduce C to 0/1: nonzero iff any shifted-out bit was set. */
        tcg_gen_setcondi_i32(TCG_COND_NE, cpu_psw_c, cpu_psw_c, 0);
    } else {
        /* Zero shift count: plain move, C and O cleared. */
        tcg_gen_mov_i32(cpu_regs[a->rd], cpu_regs[a->rs2]);
        tcg_gen_movi_i32(cpu_psw_c, 0);
        tcg_gen_movi_i32(cpu_psw_o, 0);
    }
    /* Z and S lazily track the result value (decoded by psw_cond). */
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[a->rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[a->rd]);
    return true;
}
/* shll rs, rd */
/*
 * Register-count shift left: same flag computation as the immediate
 * form, but the count (rs & 31) is only known at run time, so the
 * zero-count case is handled with generated branches.
 */
static bool trans_SHLL_rr(DisasContext *ctx, arg_SHLL_rr *a)
{
    TCGLabel *noshift, *done;
    TCGv count, tmp;

    noshift = gen_new_label();
    done = gen_new_label();
    /* if (cpu_regs[a->rs]) { */
    tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_regs[a->rs], 0, noshift);
    count = tcg_temp_new();
    tmp = tcg_temp_new();
    tcg_gen_andi_i32(tmp, cpu_regs[a->rs], 31);
    tcg_gen_sub_i32(count, tcg_constant_i32(32), tmp);
    /* Capture the bits that will be shifted out (sign-extended). */
    tcg_gen_sar_i32(cpu_psw_c, cpu_regs[a->rd], count);
    tcg_gen_shl_i32(cpu_regs[a->rd], cpu_regs[a->rd], tmp);
    /* O: nonzero iff the shifted-out bits were homogeneous. */
    tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_psw_o, cpu_psw_c, 0);
    tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_psw_c, 0xffffffff);
    tcg_gen_or_i32(cpu_psw_o, cpu_psw_o, tmp);
    /* C reduced to 0/1: any shifted-out bit set. */
    tcg_gen_setcondi_i32(TCG_COND_NE, cpu_psw_c, cpu_psw_c, 0);
    tcg_gen_br(done);
    /* } else { */
    gen_set_label(noshift);
    tcg_gen_movi_i32(cpu_psw_c, 0);
    tcg_gen_movi_i32(cpu_psw_o, 0);
    /* } */
    gen_set_label(done);
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[a->rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[a->rd]);
    return true;
}
/*
 * Common immediate-count right shift (shlr/shar); alith selects
 * logical (0) or arithmetic (1).  The value is shifted imm - 1 bits,
 * C takes the then-lowest bit (the last bit to be shifted out), and
 * one final bit is shifted.  imm == 0 just copies rs and clears C.
 */
static inline void shiftr_imm(uint32_t rd, uint32_t rs, uint32_t imm,
                              unsigned int alith)
{
    static void (* const gen_sXri[])(TCGv ret, TCGv arg1, int arg2) = {
        tcg_gen_shri_i32, tcg_gen_sari_i32,
    };
    tcg_debug_assert(alith < 2);
    if (imm) {
        gen_sXri[alith](cpu_regs[rd], cpu_regs[rs], imm - 1);
        /* C = last bit shifted out. */
        tcg_gen_andi_i32(cpu_psw_c, cpu_regs[rd], 0x00000001);
        gen_sXri[alith](cpu_regs[rd], cpu_regs[rd], 1);
    } else {
        tcg_gen_mov_i32(cpu_regs[rd], cpu_regs[rs]);
        tcg_gen_movi_i32(cpu_psw_c, 0);
    }
    /* Right shifts never overflow; Z and S track the result. */
    tcg_gen_movi_i32(cpu_psw_o, 0);
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[rd]);
}
/*
 * Common register-count right shift (shlr/shar); the count is rs & 31
 * and alith selects logical (0) or arithmetic (1).  Same count-1 /
 * capture-C / final-bit scheme as shiftr_imm.  A zero rs leaves rd
 * untouched and clears C.
 *
 * NOTE(review): when rs != 0 but (rs & 31) == 0, count becomes -1 and
 * is fed to a TCG shift — confirm such counts cannot reach here.
 */
static inline void shiftr_reg(uint32_t rd, uint32_t rs, unsigned int alith)
{
    TCGLabel *noshift, *done;
    TCGv count;
    static void (* const gen_sXri[])(TCGv ret, TCGv arg1, int arg2) = {
        tcg_gen_shri_i32, tcg_gen_sari_i32,
    };
    static void (* const gen_sXr[])(TCGv ret, TCGv arg1, TCGv arg2) = {
        tcg_gen_shr_i32, tcg_gen_sar_i32,
    };
    tcg_debug_assert(alith < 2);
    noshift = gen_new_label();
    done = gen_new_label();
    count = tcg_temp_new();
    /* if (cpu_regs[rs]) { */
    tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_regs[rs], 0, noshift);
    tcg_gen_andi_i32(count, cpu_regs[rs], 31);
    tcg_gen_subi_i32(count, count, 1);
    gen_sXr[alith](cpu_regs[rd], cpu_regs[rd], count);
    /* C = last bit to be shifted out. */
    tcg_gen_andi_i32(cpu_psw_c, cpu_regs[rd], 0x00000001);
    gen_sXri[alith](cpu_regs[rd], cpu_regs[rd], 1);
    tcg_gen_br(done);
    /* } else { */
    gen_set_label(noshift);
    tcg_gen_movi_i32(cpu_psw_c, 0);
    /* } */
    gen_set_label(done);
    tcg_gen_movi_i32(cpu_psw_o, 0);
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[rd]);
}
/* shar #imm:5, rd */
/* shar #imm:5, rs2, rd */
static bool trans_SHAR_irr(DisasContext *ctx, arg_SHAR_irr *a)
{
    shiftr_imm(a->rd, a->rs2, a->imm, 1);   /* 1 = arithmetic */
    return true;
}

/* shar rs, rd */
static bool trans_SHAR_rr(DisasContext *ctx, arg_SHAR_rr *a)
{
    shiftr_reg(a->rd, a->rs, 1);            /* 1 = arithmetic */
    return true;
}

/* shlr #imm:5, rd */
/* shlr #imm:5, rs2, rd */
static bool trans_SHLR_irr(DisasContext *ctx, arg_SHLR_irr *a)
{
    shiftr_imm(a->rd, a->rs2, a->imm, 0);   /* 0 = logical */
    return true;
}

/* shlr rs, rd */
static bool trans_SHLR_rr(DisasContext *ctx, arg_SHLR_rr *a)
{
    shiftr_reg(a->rd, a->rs, 0);            /* 0 = logical */
    return true;
}
/* rolc rd */
/* Rotate left one bit through the carry flag. */
static bool trans_ROLC(DisasContext *ctx, arg_ROLC *a)
{
    TCGv tmp;
    tmp = tcg_temp_new();
    /* Save the outgoing top bit before the shift discards it. */
    tcg_gen_shri_i32(tmp, cpu_regs[a->rd], 31);
    tcg_gen_shli_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1);
    /* Old carry becomes bit 0 (assumes cpu_psw_c holds 0/1 here). */
    tcg_gen_or_i32(cpu_regs[a->rd], cpu_regs[a->rd], cpu_psw_c);
    tcg_gen_mov_i32(cpu_psw_c, tmp);
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[a->rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[a->rd]);
    return true;
}

/* rorc rd */
/* Rotate right one bit through the carry flag. */
static bool trans_RORC(DisasContext *ctx, arg_RORC *a)
{
    TCGv tmp;
    tmp = tcg_temp_new();
    /* Save the outgoing bit 0. */
    tcg_gen_andi_i32(tmp, cpu_regs[a->rd], 0x00000001);
    tcg_gen_shri_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1);
    /* Old carry enters at bit 31; cpu_psw_c is clobbered in passing. */
    tcg_gen_shli_i32(cpu_psw_c, cpu_psw_c, 31);
    tcg_gen_or_i32(cpu_regs[a->rd], cpu_regs[a->rd], cpu_psw_c);
    tcg_gen_mov_i32(cpu_psw_c, tmp);
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[a->rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[a->rd]);
    return true;
}
enum {ROTR = 0, ROTL = 1};
enum {ROT_IMM = 0, ROT_REG = 1};

/*
 * Common body for rotl/rotr.  'ir' selects whether 'src' is an
 * immediate count or a source register number; 'dir' selects the
 * direction.  C receives the bit that wrapped around (bit 0 after a
 * left rotate, bit 31 after a right rotate); Z and S track the result.
 */
static inline void rx_rot(int ir, int dir, int rd, int src)
{
    switch (dir) {
    case ROTL:
        if (ir == ROT_IMM) {
            tcg_gen_rotli_i32(cpu_regs[rd], cpu_regs[rd], src);
        } else {
            tcg_gen_rotl_i32(cpu_regs[rd], cpu_regs[rd], cpu_regs[src]);
        }
        tcg_gen_andi_i32(cpu_psw_c, cpu_regs[rd], 0x00000001);
        break;
    case ROTR:
        if (ir == ROT_IMM) {
            tcg_gen_rotri_i32(cpu_regs[rd], cpu_regs[rd], src);
        } else {
            tcg_gen_rotr_i32(cpu_regs[rd], cpu_regs[rd], cpu_regs[src]);
        }
        tcg_gen_shri_i32(cpu_psw_c, cpu_regs[rd], 31);
        break;
    }
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[rd]);
}
/* rotl #imm, rd */
static bool trans_ROTL_ir(DisasContext *ctx, arg_ROTL_ir *a)
{
    rx_rot(ROT_IMM, ROTL, a->rd, a->imm);
    return true;
}

/* rotl rs, rd */
static bool trans_ROTL_rr(DisasContext *ctx, arg_ROTL_rr *a)
{
    rx_rot(ROT_REG, ROTL, a->rd, a->rs);
    return true;
}

/* rotr #imm, rd */
static bool trans_ROTR_ir(DisasContext *ctx, arg_ROTR_ir *a)
{
    rx_rot(ROT_IMM, ROTR, a->rd, a->imm);
    return true;
}

/* rotr rs, rd */
static bool trans_ROTR_rr(DisasContext *ctx, arg_ROTR_rr *a)
{
    rx_rot(ROT_REG, ROTR, a->rd, a->rs);
    return true;
}
/* revl rs, rd */
/* Byte-reverse the whole 32-bit word. */
static bool trans_REVL(DisasContext *ctx, arg_REVL *a)
{
    tcg_gen_bswap32_i32(cpu_regs[a->rd], cpu_regs[a->rs]);
    return true;
}

/* revw rs, rd */
/* Swap the bytes within each 16-bit half independently. */
static bool trans_REVW(DisasContext *ctx, arg_REVW *a)
{
    TCGv tmp;
    tmp = tcg_temp_new();
    /* Low byte of each half, moved up ... */
    tcg_gen_andi_i32(tmp, cpu_regs[a->rs], 0x00ff00ff);
    tcg_gen_shli_i32(tmp, tmp, 8);
    /* ... high byte of each half, moved down ... */
    tcg_gen_shri_i32(cpu_regs[a->rd], cpu_regs[a->rs], 8);
    tcg_gen_andi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 0x00ff00ff);
    /* ... then recombined. */
    tcg_gen_or_i32(cpu_regs[a->rd], cpu_regs[a->rd], tmp);
    return true;
}
/* conditional branch helper */
/*
 * Emit a two-way goto_tb: fall through to pc_next when condition cd
 * fails, jump to ctx->pc + dst when it holds.  cd 14 and 15 are the
 * constant true/false conditions.
 */
static void rx_bcnd_main(DisasContext *ctx, int cd, int dst)
{
    DisasCompare dc;
    TCGLabel *t, *done;

    switch (cd) {
    case 0 ... 13:
        dc.temp = tcg_temp_new();
        psw_cond(&dc, cd);
        t = gen_new_label();
        done = gen_new_label();
        tcg_gen_brcondi_i32(dc.cond, dc.value, 0, t);
        /* Not taken: continue with the next instruction. */
        gen_goto_tb(ctx, 0, ctx->base.pc_next);
        tcg_gen_br(done);
        gen_set_label(t);
        /* Taken: dst is relative to the insn address (ctx->pc). */
        gen_goto_tb(ctx, 1, ctx->pc + dst);
        gen_set_label(done);
        break;
    case 14:
        /* always true case */
        gen_goto_tb(ctx, 0, ctx->pc + dst);
        break;
    case 15:
        /* always false case */
        /* Nothing to do */
        break;
    }
}
/* beq dsp:3 / bne dsp:3 */
/* beq dsp:8 / bne dsp:8 */
/* bc dsp:8 / bnc dsp:8 */
/* bgtu dsp:8 / bleu dsp:8 */
/* bpz dsp:8 / bn dsp:8 */
/* bge dsp:8 / blt dsp:8 */
/* bgt dsp:8 / ble dsp:8 */
/* bo dsp:8 / bno dsp:8 */
/* beq dsp:16 / bne dsp:16 */
static bool trans_BCnd(DisasContext *ctx, arg_BCnd *a)
{
    rx_bcnd_main(ctx, a->cd, a->dsp);
    return true;
}

/* bra dsp:3 */
/* bra dsp:8 */
/* bra dsp:16 */
/* bra dsp:24 */
static bool trans_BRA(DisasContext *ctx, arg_BRA *a)
{
    /* cd == 14 is the always-taken condition. */
    rx_bcnd_main(ctx, 14, a->dsp);
    return true;
}

/* bra rs */
static bool trans_BRA_l(DisasContext *ctx, arg_BRA_l *a)
{
    /* Register-relative branch: target = insn address + rd. */
    tcg_gen_addi_i32(cpu_pc, cpu_regs[a->rd], ctx->pc);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}
/* Push the address of the next instruction (the return address). */
static inline void rx_save_pc(DisasContext *ctx)
{
    TCGv pc = tcg_constant_i32(ctx->base.pc_next);
    push(pc);
}

/* jmp rs */
static bool trans_JMP(DisasContext *ctx, arg_JMP *a)
{
    tcg_gen_mov_i32(cpu_pc, cpu_regs[a->rs]);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}

/* jsr rs */
static bool trans_JSR(DisasContext *ctx, arg_JSR *a)
{
    rx_save_pc(ctx);
    tcg_gen_mov_i32(cpu_pc, cpu_regs[a->rs]);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}

/* bsr dsp:16 */
/* bsr dsp:24 */
static bool trans_BSR(DisasContext *ctx, arg_BSR *a)
{
    rx_save_pc(ctx);
    /* Unconditional (cd == 14) pc-relative branch. */
    rx_bcnd_main(ctx, 14, a->dsp);
    return true;
}

/* bsr rs */
static bool trans_BSR_l(DisasContext *ctx, arg_BSR_l *a)
{
    rx_save_pc(ctx);
    /* Register-relative call: target = insn address + rd. */
    tcg_gen_addi_i32(cpu_pc, cpu_regs[a->rd], ctx->pc);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}

/* rts */
static bool trans_RTS(DisasContext *ctx, arg_RTS *a)
{
    /* Return address comes back off the stack. */
    pop(cpu_pc);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}
/* nop */
static bool trans_NOP(DisasContext *ctx, arg_NOP *a)
{
    return true;
}

/* scmpu */
/* The string instructions take all operands implicitly from env. */
static bool trans_SCMPU(DisasContext *ctx, arg_SCMPU *a)
{
    gen_helper_scmpu(tcg_env);
    return true;
}

/* smovu */
static bool trans_SMOVU(DisasContext *ctx, arg_SMOVU *a)
{
    gen_helper_smovu(tcg_env);
    return true;
}

/* smovf */
static bool trans_SMOVF(DisasContext *ctx, arg_SMOVF *a)
{
    gen_helper_smovf(tcg_env);
    return true;
}

/* smovb */
static bool trans_SMOVB(DisasContext *ctx, arg_SMOVB *a)
{
    gen_helper_smovb(tcg_env);
    return true;
}

/* Forward the element-size field of a sized string insn to its helper. */
#define STRING(op) \
    do { \
        TCGv size = tcg_constant_i32(a->sz); \
        gen_helper_##op(tcg_env, size); \
    } while (0)

/* suntil.<bwl> */
static bool trans_SUNTIL(DisasContext *ctx, arg_SUNTIL *a)
{
    STRING(suntil);
    return true;
}

/* swhile.<bwl> */
static bool trans_SWHILE(DisasContext *ctx, arg_SWHILE *a)
{
    STRING(swhile);
    return true;
}

/* sstr.<bwl> */
static bool trans_SSTR(DisasContext *ctx, arg_SSTR *a)
{
    STRING(sstr);
    return true;
}

/* rmpa.<bwl> */
static bool trans_RMPA(DisasContext *ctx, arg_RMPA *a)
{
    STRING(rmpa);
    return true;
}
1701 static void rx_mul64hi(TCGv_i64 ret, int rs, int rs2)
1703 TCGv_i64 tmp0, tmp1;
1704 tmp0 = tcg_temp_new_i64();
1705 tmp1 = tcg_temp_new_i64();
1706 tcg_gen_ext_i32_i64(tmp0, cpu_regs[rs]);
1707 tcg_gen_sari_i64(tmp0, tmp0, 16);
1708 tcg_gen_ext_i32_i64(tmp1, cpu_regs[rs2]);
1709 tcg_gen_sari_i64(tmp1, tmp1, 16);
1710 tcg_gen_mul_i64(ret, tmp0, tmp1);
1711 tcg_gen_shli_i64(ret, ret, 16);
1714 static void rx_mul64lo(TCGv_i64 ret, int rs, int rs2)
1716 TCGv_i64 tmp0, tmp1;
1717 tmp0 = tcg_temp_new_i64();
1718 tmp1 = tcg_temp_new_i64();
1719 tcg_gen_ext_i32_i64(tmp0, cpu_regs[rs]);
1720 tcg_gen_ext16s_i64(tmp0, tmp0);
1721 tcg_gen_ext_i32_i64(tmp1, cpu_regs[rs2]);
1722 tcg_gen_ext16s_i64(tmp1, tmp1);
1723 tcg_gen_mul_i64(ret, tmp0, tmp1);
1724 tcg_gen_shli_i64(ret, ret, 16);
1727 /* mulhi rs,rs2 */
1728 static bool trans_MULHI(DisasContext *ctx, arg_MULHI *a)
1730 rx_mul64hi(cpu_acc, a->rs, a->rs2);
1731 return true;
1734 /* mullo rs,rs2 */
1735 static bool trans_MULLO(DisasContext *ctx, arg_MULLO *a)
1737 rx_mul64lo(cpu_acc, a->rs, a->rs2);
1738 return true;
1741 /* machi rs,rs2 */
1742 static bool trans_MACHI(DisasContext *ctx, arg_MACHI *a)
1744 TCGv_i64 tmp;
1745 tmp = tcg_temp_new_i64();
1746 rx_mul64hi(tmp, a->rs, a->rs2);
1747 tcg_gen_add_i64(cpu_acc, cpu_acc, tmp);
1748 return true;
1751 /* maclo rs,rs2 */
1752 static bool trans_MACLO(DisasContext *ctx, arg_MACLO *a)
1754 TCGv_i64 tmp;
1755 tmp = tcg_temp_new_i64();
1756 rx_mul64lo(tmp, a->rs, a->rs2);
1757 tcg_gen_add_i64(cpu_acc, cpu_acc, tmp);
1758 return true;
/* mvfachi rd */
static bool trans_MVFACHI(DisasContext *ctx, arg_MVFACHI *a)
{
    /* rd = ACC[63:32] */
    tcg_gen_extrh_i64_i32(cpu_regs[a->rd], cpu_acc);
    return true;
}

/* mvfacmi rd */
static bool trans_MVFACMI(DisasContext *ctx, arg_MVFACMI *a)
{
    TCGv_i64 rd64;
    rd64 = tcg_temp_new_i64();
    /* rd = ACC[47:16], the middle word of the accumulator. */
    tcg_gen_extract_i64(rd64, cpu_acc, 16, 32);
    tcg_gen_extrl_i64_i32(cpu_regs[a->rd], rd64);
    return true;
}

/* mvtachi rs */
static bool trans_MVTACHI(DisasContext *ctx, arg_MVTACHI *a)
{
    TCGv_i64 rs64;
    rs64 = tcg_temp_new_i64();
    /* ACC[63:32] = rs; the low half is preserved. */
    tcg_gen_extu_i32_i64(rs64, cpu_regs[a->rs]);
    tcg_gen_deposit_i64(cpu_acc, cpu_acc, rs64, 32, 32);
    return true;
}

/* mvtaclo rs */
static bool trans_MVTACLO(DisasContext *ctx, arg_MVTACLO *a)
{
    TCGv_i64 rs64;
    rs64 = tcg_temp_new_i64();
    /* ACC[31:0] = rs; the high half is preserved. */
    tcg_gen_extu_i32_i64(rs64, cpu_regs[a->rs]);
    tcg_gen_deposit_i64(cpu_acc, cpu_acc, rs64, 0, 32);
    return true;
}
/* racw #imm */
static bool trans_RACW(DisasContext *ctx, arg_RACW *a)
{
    /* Round ACC; the helper takes the encoded value plus one. */
    TCGv imm = tcg_constant_i32(a->imm + 1);
    gen_helper_racw(tcg_env, imm);
    return true;
}

/* sat rd */
/*
 * If the O flag is set (bit 31 of cpu_psw_o), clamp rd to INT32_MAX
 * or INT32_MIN according to the S flag; otherwise leave rd alone.
 */
static bool trans_SAT(DisasContext *ctx, arg_SAT *a)
{
    TCGv tmp, z;
    tmp = tcg_temp_new();
    z = tcg_constant_i32(0);
    /* S == 1 -> 0xffffffff / S == 0 -> 0x00000000 */
    tcg_gen_sari_i32(tmp, cpu_psw_s, 31);
    /* S == 1 -> 0x7fffffff / S == 0 -> 0x80000000 */
    tcg_gen_xori_i32(tmp, tmp, 0x80000000);
    /* O set <=> cpu_psw_o < 0; select the saturated value then. */
    tcg_gen_movcond_i32(TCG_COND_LT, cpu_regs[a->rd],
                        cpu_psw_o, z, tmp, cpu_regs[a->rd]);
    return true;
}

/* satr */
/* 64-bit saturation of the RMPA result; done entirely in the helper. */
static bool trans_SATR(DisasContext *ctx, arg_SATR *a)
{
    gen_helper_satr(tcg_env);
    return true;
}
#define cat3(a, b, c) a##b##c

/*
 * FOP expands to the "#imm, rd" and "rs / dsp[rs], rd" translators for
 * a two-operand FPU instruction; the arithmetic (and any FP state
 * update) lives in the gen_helper_##op helper.  li() reads the 32-bit
 * literal that follows the instruction (defined earlier in this file).
 */
#define FOP(name, op) \
    static bool cat3(trans_, name, _ir)(DisasContext *ctx, \
                                        cat3(arg_, name, _ir) * a) \
    { \
        TCGv imm = tcg_constant_i32(li(ctx, 0)); \
        gen_helper_##op(cpu_regs[a->rd], tcg_env, \
                        cpu_regs[a->rd], imm); \
        return true; \
    } \
    static bool cat3(trans_, name, _mr)(DisasContext *ctx, \
                                        cat3(arg_, name, _mr) * a) \
    { \
        TCGv val, mem; \
        mem = tcg_temp_new(); \
        val = rx_load_source(ctx, mem, a->ld, MO_32, a->rs); \
        gen_helper_##op(cpu_regs[a->rd], tcg_env, \
                        cpu_regs[a->rd], val); \
        return true; \
    }

/* One-operand FP conversion: rd = op(rs / dsp[rs]). */
#define FCONVOP(name, op) \
    static bool trans_##name(DisasContext *ctx, arg_##name * a) \
    { \
        TCGv val, mem; \
        mem = tcg_temp_new(); \
        val = rx_load_source(ctx, mem, a->ld, MO_32, a->rs); \
        gen_helper_##op(cpu_regs[a->rd], tcg_env, val); \
        return true; \
    }

FOP(FADD, fadd)
FOP(FSUB, fsub)
FOP(FMUL, fmul)
FOP(FDIV, fdiv)

/* fcmp #imm, rd */
/* Compare only sets flags, so there is no destination write here. */
static bool trans_FCMP_ir(DisasContext *ctx, arg_FCMP_ir * a)
{
    TCGv imm = tcg_constant_i32(li(ctx, 0));
    gen_helper_fcmp(tcg_env, cpu_regs[a->rd], imm);
    return true;
}

/* fcmp dsp[rs], rd */
/* fcmp rs, rd */
static bool trans_FCMP_mr(DisasContext *ctx, arg_FCMP_mr *a)
{
    TCGv val, mem;
    mem = tcg_temp_new();
    val = rx_load_source(ctx, mem, a->ld, MO_32, a->rs);
    gen_helper_fcmp(tcg_env, cpu_regs[a->rd], val);
    return true;
}

FCONVOP(FTOI, ftoi)
FCONVOP(ROUND, round)

/* itof rs, rd */
/* itof dsp[rs], rd */
static bool trans_ITOF(DisasContext *ctx, arg_ITOF * a)
{
    TCGv val, mem;
    mem = tcg_temp_new();
    /* Unlike the MO_32-only FP ops above, this honours the mi size field. */
    val = rx_load_source(ctx, mem, a->ld, a->mi, a->rs);
    gen_helper_itof(cpu_regs[a->rd], tcg_env, val);
    return true;
}
/*
 * Read-modify-write byte helpers for the memory forms of
 * bset/bclr/bnot, and the flag-only btst.  'mask' has exactly the
 * target bit set.
 */
static void rx_bsetm(TCGv mem, TCGv mask)
{
    TCGv val;
    val = tcg_temp_new();
    rx_gen_ld(MO_8, val, mem);
    tcg_gen_or_i32(val, val, mask);
    rx_gen_st(MO_8, val, mem);
}

static void rx_bclrm(TCGv mem, TCGv mask)
{
    TCGv val;
    val = tcg_temp_new();
    rx_gen_ld(MO_8, val, mem);
    tcg_gen_andc_i32(val, val, mask);
    rx_gen_st(MO_8, val, mem);
}

static void rx_btstm(TCGv mem, TCGv mask)
{
    TCGv val;
    val = tcg_temp_new();
    rx_gen_ld(MO_8, val, mem);
    tcg_gen_and_i32(val, val, mask);
    /* C = tested bit; Z mirrors it (Z is set when cpu_psw_z == 0). */
    tcg_gen_setcondi_i32(TCG_COND_NE, cpu_psw_c, val, 0);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_c);
}

static void rx_bnotm(TCGv mem, TCGv mask)
{
    TCGv val;
    val = tcg_temp_new();
    rx_gen_ld(MO_8, val, mem);
    tcg_gen_xor_i32(val, val, mask);
    rx_gen_st(MO_8, val, mem);
}

/* Register forms of the same four bit operations. */
static void rx_bsetr(TCGv reg, TCGv mask)
{
    tcg_gen_or_i32(reg, reg, mask);
}

static void rx_bclrr(TCGv reg, TCGv mask)
{
    tcg_gen_andc_i32(reg, reg, mask);
}

static inline void rx_btstr(TCGv reg, TCGv mask)
{
    TCGv t0;
    t0 = tcg_temp_new();
    tcg_gen_and_i32(t0, reg, mask);
    tcg_gen_setcondi_i32(TCG_COND_NE, cpu_psw_c, t0, 0);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_c);
}

static inline void rx_bnotr(TCGv reg, TCGv mask)
{
    tcg_gen_xor_i32(reg, reg, mask);
}
/*
 * BITOP expands the four addressing forms of a bit instruction:
 *   _im: immediate bit number, memory operand
 *   _ir: immediate bit number, register operand
 *   _rr: bit number from a register (5 bits), register operand
 *   _rm: bit number from a register (3 bits, byte memory), memory operand
 * The rx_<op>{m,r} helpers above do the actual work.
 */
#define BITOP(name, op) \
    static bool cat3(trans_, name, _im)(DisasContext *ctx, \
                                        cat3(arg_, name, _im) * a) \
    { \
        TCGv mask, mem, addr; \
        mem = tcg_temp_new(); \
        mask = tcg_constant_i32(1 << a->imm); \
        addr = rx_index_addr(ctx, mem, a->ld, MO_8, a->rs); \
        cat3(rx_, op, m)(addr, mask); \
        return true; \
    } \
    static bool cat3(trans_, name, _ir)(DisasContext *ctx, \
                                        cat3(arg_, name, _ir) * a) \
    { \
        TCGv mask; \
        mask = tcg_constant_i32(1 << a->imm); \
        cat3(rx_, op, r)(cpu_regs[a->rd], mask); \
        return true; \
    } \
    static bool cat3(trans_, name, _rr)(DisasContext *ctx, \
                                        cat3(arg_, name, _rr) * a) \
    { \
        TCGv mask, b; \
        mask = tcg_temp_new(); \
        b = tcg_temp_new(); \
        tcg_gen_andi_i32(b, cpu_regs[a->rs], 31); \
        tcg_gen_shl_i32(mask, tcg_constant_i32(1), b); \
        cat3(rx_, op, r)(cpu_regs[a->rd], mask); \
        return true; \
    } \
    static bool cat3(trans_, name, _rm)(DisasContext *ctx, \
                                        cat3(arg_, name, _rm) * a) \
    { \
        TCGv mask, mem, addr, b; \
        mask = tcg_temp_new(); \
        b = tcg_temp_new(); \
        tcg_gen_andi_i32(b, cpu_regs[a->rd], 7); \
        tcg_gen_shl_i32(mask, tcg_constant_i32(1), b); \
        mem = tcg_temp_new(); \
        addr = rx_index_addr(ctx, mem, a->ld, MO_8, a->rs); \
        cat3(rx_, op, m)(addr, mask); \
        return true; \
    }

BITOP(BSET, bset)
BITOP(BCLR, bclr)
BITOP(BTST, btst)
BITOP(BNOT, bnot)
/*
 * Evaluate PSW condition 'cond' and deposit the resulting 0/1 into
 * bit 'pos' of val.
 *
 * NOTE(review): the andi clearing bit pos looks redundant — the
 * deposit overwrites that bit field anyway.  Also 'cond' is really a
 * condition-code number, not a TCGCond; confirm the type is benign.
 */
static inline void bmcnd_op(TCGv val, TCGCond cond, int pos)
{
    TCGv bit;
    DisasCompare dc;
    dc.temp = tcg_temp_new();
    bit = tcg_temp_new();
    psw_cond(&dc, cond);
    tcg_gen_andi_i32(val, val, ~(1 << pos));
    tcg_gen_setcondi_i32(dc.cond, bit, dc.value, 0);
    tcg_gen_deposit_i32(val, val, bit, pos, 1);
}

/* bmcnd #imm, dsp[rd] */
static bool trans_BMCnd_im(DisasContext *ctx, arg_BMCnd_im *a)
{
    TCGv val, mem, addr;
    val = tcg_temp_new();
    mem = tcg_temp_new();
    /* Byte read-modify-write at dsp[rd]. */
    addr = rx_index_addr(ctx, mem, a->ld, MO_8, a->rd);
    rx_gen_ld(MO_8, val, addr);
    bmcnd_op(val, a->cd, a->imm);
    rx_gen_st(MO_8, val, addr);
    return true;
}

/* bmcond #imm, rd */
static bool trans_BMCnd_ir(DisasContext *ctx, arg_BMCnd_ir *a)
{
    bmcnd_op(cpu_regs[a->rd], a->cd, a->imm);
    return true;
}
/* PSW bit numbers accepted by clrpsw/setpsw's cb field. */
enum {
    PSW_C = 0,
    PSW_Z = 1,
    PSW_S = 2,
    PSW_O = 3,
    PSW_I = 8,
    PSW_U = 9,
};

/*
 * Common body for clrpsw/setpsw; val is 0 or 1.
 *
 * The flags are kept in computation-friendly form rather than as
 * single bits: Z is set iff cpu_psw_z == 0, S is the sign of
 * cpu_psw_s, and O lives in bit 31 of cpu_psw_o — hence the
 * translations below.  I and U are privileged; is_privileged is
 * called with 0, which presumably suppresses the exception, so an
 * unprivileged write is silently dropped — TODO confirm.
 */
static inline void clrsetpsw(DisasContext *ctx, int cb, int val)
{
    if (cb < 8) {
        switch (cb) {
        case PSW_C:
            tcg_gen_movi_i32(cpu_psw_c, val);
            break;
        case PSW_Z:
            /* Z set <=> cpu_psw_z == 0. */
            tcg_gen_movi_i32(cpu_psw_z, val == 0);
            break;
        case PSW_S:
            /* S is the sign of cpu_psw_s. */
            tcg_gen_movi_i32(cpu_psw_s, val ? -1 : 0);
            break;
        case PSW_O:
            /* O is bit 31 of cpu_psw_o. */
            tcg_gen_movi_i32(cpu_psw_o, val << 31);
            break;
        default:
            qemu_log_mask(LOG_GUEST_ERROR, "Invalid destination %d", cb);
            break;
        }
    } else if (is_privileged(ctx, 0)) {
        switch (cb) {
        case PSW_I:
            tcg_gen_movi_i32(cpu_psw_i, val);
            /* Interrupt enable changed: end the TB (see rx_tr_tb_stop). */
            ctx->base.is_jmp = DISAS_UPDATE;
            break;
        case PSW_U:
            /*
             * Only when U actually changes: save SP into the bank being
             * left and load it from the bank being entered.
             */
            if (FIELD_EX32(ctx->tb_flags, PSW, U) != val) {
                ctx->tb_flags = FIELD_DP32(ctx->tb_flags, PSW, U, val);
                tcg_gen_movi_i32(cpu_psw_u, val);
                tcg_gen_mov_i32(val ? cpu_isp : cpu_usp, cpu_sp);
                tcg_gen_mov_i32(cpu_sp, val ? cpu_usp : cpu_isp);
            }
            break;
        default:
            qemu_log_mask(LOG_GUEST_ERROR, "Invalid destination %d", cb);
            break;
        }
    }
}

/* clrpsw psw */
static bool trans_CLRPSW(DisasContext *ctx, arg_CLRPSW *a)
{
    clrsetpsw(ctx, a->cb, 0);
    return true;
}

/* setpsw psw */
static bool trans_SETPSW(DisasContext *ctx, arg_SETPSW *a)
{
    clrsetpsw(ctx, a->cb, 1);
    return true;
}
/* mvtipl #imm */
static bool trans_MVTIPL(DisasContext *ctx, arg_MVTIPL *a)
{
    /* Privileged; is_privileged(ctx, 1) handles the failure path. */
    if (is_privileged(ctx, 1)) {
        tcg_gen_movi_i32(cpu_psw_ipl, a->imm);
        /* End the TB so the new interrupt level takes effect. */
        ctx->base.is_jmp = DISAS_UPDATE;
    }
    return true;
}

/* mvtc #imm, rd */
static bool trans_MVTC_i(DisasContext *ctx, arg_MVTC_i *a)
{
    TCGv imm;

    imm = tcg_constant_i32(a->imm);
    move_to_cr(ctx, imm, a->cr);
    return true;
}

/* mvtc rs, rd */
static bool trans_MVTC_r(DisasContext *ctx, arg_MVTC_r *a)
{
    move_to_cr(ctx, cpu_regs[a->rs], a->cr);
    return true;
}

/* mvfc rs, rd */
static bool trans_MVFC(DisasContext *ctx, arg_MVFC *a)
{
    /* ctx->pc is passed so reading the PC control register works. */
    move_from_cr(ctx, cpu_regs[a->rd], a->cr, ctx->pc);
    return true;
}
/* rtfi */
/* Return from fast interrupt: PC and PSW come from BPC/BPSW. */
static bool trans_RTFI(DisasContext *ctx, arg_RTFI *a)
{
    TCGv psw;
    if (is_privileged(ctx, 1)) {
        psw = tcg_temp_new();
        tcg_gen_mov_i32(cpu_pc, cpu_bpc);
        tcg_gen_mov_i32(psw, cpu_bpsw);
        /* The helper unpacks the saved PSW into the live flag state. */
        gen_helper_set_psw_rte(tcg_env, psw);
        ctx->base.is_jmp = DISAS_EXIT;
    }
    return true;
}

/* rte */
/* Return from exception: pop PC, then PSW, from the stack. */
static bool trans_RTE(DisasContext *ctx, arg_RTE *a)
{
    TCGv psw;
    if (is_privileged(ctx, 1)) {
        psw = tcg_temp_new();
        pop(cpu_pc);
        pop(psw);
        gen_helper_set_psw_rte(tcg_env, psw);
        ctx->base.is_jmp = DISAS_EXIT;
    }
    return true;
}

/* brk */
static bool trans_BRK(DisasContext *ctx, arg_BRK *a)
{
    /* The helper raises an exception, so translation cannot continue. */
    tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
    gen_helper_rxbrk(tcg_env);
    ctx->base.is_jmp = DISAS_NORETURN;
    return true;
}

/* int #imm */
static bool trans_INT(DisasContext *ctx, arg_INT *a)
{
    TCGv vec;

    /* Sanity check: the vector number must fit in a byte. */
    tcg_debug_assert(a->imm < 0x100);
    vec = tcg_constant_i32(a->imm);
    tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
    gen_helper_rxint(tcg_env, vec);
    ctx->base.is_jmp = DISAS_NORETURN;
    return true;
}

/* wait */
static bool trans_WAIT(DisasContext *ctx, arg_WAIT *a)
{
    if (is_privileged(ctx, 1)) {
        /* Store the resume address before entering the helper. */
        tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
        gen_helper_wait(tcg_env);
    }
    return true;
}
/* Cache env and the TB flags for use while translating. */
static void rx_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    ctx->env = cpu_env(cs);
    ctx->tb_flags = ctx->base.tb->flags;
}

/* No per-TB setup is needed. */
static void rx_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
{
}

/* Record the insn boundary so state can be restored on exceptions. */
static void rx_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(ctx->base.pc_next);
}

/* Decode one instruction; unknown encodings raise illegal-instruction. */
static void rx_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    uint32_t insn;

    /* ctx->pc is the address of this insn; branch displacements use it. */
    ctx->pc = ctx->base.pc_next;
    insn = decode_load(ctx);
    if (!decode(ctx, insn)) {
        gen_helper_raise_illegal_instruction(tcg_env);
    }
}

/* Emit the TB epilogue appropriate to how translation ended. */
static void rx_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    switch (ctx->base.is_jmp) {
    case DISAS_NEXT:
    case DISAS_TOO_MANY:
        /* Static fall-through to the next instruction. */
        gen_goto_tb(ctx, 0, dcbase->pc_next);
        break;
    case DISAS_JUMP:
        /* cpu_pc was already written by the branch translator. */
        tcg_gen_lookup_and_goto_ptr();
        break;
    case DISAS_UPDATE:
        tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
        /* fall through */
    case DISAS_EXIT:
        /* Full exit so changed CPU state is re-examined. */
        tcg_gen_exit_tb(NULL, 0);
        break;
    case DISAS_NORETURN:
        break;
    default:
        g_assert_not_reached();
    }
}
static const TranslatorOps rx_tr_ops = {
    .init_disas_context = rx_tr_init_disas_context,
    .tb_start = rx_tr_tb_start,
    .insn_start = rx_tr_insn_start,
    .translate_insn = rx_tr_translate_insn,
    .tb_stop = rx_tr_tb_stop,
};

/* Entry point: translate one TB using the common translator loop. */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           vaddr pc, void *host_pc)
{
    DisasContext dc;

    translator_loop(cs, tb, max_insns, pc, host_pc, &rx_tr_ops, &dc.base);
}

/* Allocate a 32-bit TCG global backed by CPURXState.sym. */
#define ALLOC_REGISTER(sym, name) \
    cpu_##sym = tcg_global_mem_new_i32(tcg_env, \
                                       offsetof(CPURXState, sym), name)
/* Allocate the TCG globals that back the RX architectural state. */
void rx_translate_init(void)
{
    static const char * const regnames[NUM_REGS] = {
        "R0", "R1", "R2", "R3", "R4", "R5", "R6", "R7",
        "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15"
    };
    int i;

    for (i = 0; i < NUM_REGS; i++) {
        cpu_regs[i] = tcg_global_mem_new_i32(tcg_env,
                                             offsetof(CPURXState, regs[i]),
                                             regnames[i]);
    }
    ALLOC_REGISTER(pc, "PC");
    ALLOC_REGISTER(psw_o, "PSW(O)");
    ALLOC_REGISTER(psw_s, "PSW(S)");
    ALLOC_REGISTER(psw_z, "PSW(Z)");
    ALLOC_REGISTER(psw_c, "PSW(C)");
    ALLOC_REGISTER(psw_u, "PSW(U)");
    ALLOC_REGISTER(psw_i, "PSW(I)");
    ALLOC_REGISTER(psw_pm, "PSW(PM)");
    ALLOC_REGISTER(psw_ipl, "PSW(IPL)");
    ALLOC_REGISTER(usp, "USP");
    ALLOC_REGISTER(fpsw, "FPSW");
    ALLOC_REGISTER(bpsw, "BPSW");
    ALLOC_REGISTER(bpc, "BPC");
    ALLOC_REGISTER(isp, "ISP");
    ALLOC_REGISTER(fintv, "FINTV");
    ALLOC_REGISTER(intb, "INTB");
    /* The accumulator is the one 64-bit piece of state. */
    cpu_acc = tcg_global_mem_new_i64(tcg_env,
                                     offsetof(CPURXState, acc), "ACC");
}