/*
 *  CRIS emulation for qemu: main translation routines.
 *
 *  Copyright (c) 2008 AXIS Communications AB
 *  Written by Edgar E. Iglesias.
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/* The condition code translation is in need of attention.  */
#include "crisv32-decode.h"
#include "qemu-common.h"

#if DISAS_CRIS
#  define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DIS(...) do { } while (0)
#endif
#define BUG() (gen_BUG(dc, __FILE__, __LINE__))
#define BUG_ON(x) ({if (x) BUG();})

/* Used by the decoder.  */
#define EXTRACT_FIELD(src, start, end) \
            (((src) >> start) & ((1 << (end - start + 1)) - 1))

#define CC_MASK_NZ 0xc
#define CC_MASK_NZV 0xe
#define CC_MASK_NZVC 0xf
#define CC_MASK_RNZV 0x10e
static TCGv_ptr cpu_env;
static TCGv cpu_R[16];
static TCGv cpu_PR[16];
static TCGv cc_x;
static TCGv cc_src;
static TCGv cc_dest;
static TCGv cc_result;
static TCGv cc_size;
static TCGv cc_mask;
static TCGv cc_op;

static TCGv env_btaken;
static TCGv env_btarget;
static TCGv env_pc;

#include "gen-icount.h"
/* This is the state at translation time.  */
typedef struct DisasContext {
    unsigned int zsize, zzsize;

    int cc_size_uptodate; /* -1 invalid or last written value.  */

    int cc_x_uptodate; /* 1 - ccs, 2 - known | X_FLAG. 0 not uptodate.  */
    int flags_uptodate; /* Whether or not $ccs is uptodate.  */
    int flagx_known; /* Whether or not flags_x has the x flag known at
                        translation time.  */
    int clear_x; /* Clear x after this insn?  */
    int cpustate_changed;
    unsigned int tb_flags; /* tb dependent flags.  */

#define JMP_NOJMP    0
#define JMP_DIRECT   1
#define JMP_INDIRECT 2
    int jmp; /* 0=nojmp, 1=direct, 2=indirect.  */

    struct TranslationBlock *tb;
    int singlestep_enabled;
} DisasContext;
static void gen_BUG(DisasContext *dc, const char *file, int line)
{
    printf("BUG: pc=%x %s %d\n", dc->pc, file, line);
    qemu_log("BUG: pc=%x %s %d\n", dc->pc, file, line);
    cpu_abort(dc->env, "%s:%d\n", file, line);
}
static const char *regnames[] =
{
    "$r0", "$r1", "$r2", "$r3",
    "$r4", "$r5", "$r6", "$r7",
    "$r8", "$r9", "$r10", "$r11",
    "$r12", "$r13", "$sp", "$acr",
};

static const char *pregnames[] =
{
    "$bz", "$vr", "$pid", "$srs",
    "$wz", "$exs", "$eda", "$mof",
    "$dz", "$ebp", "$erp", "$srp",
    "$nrp", "$ccs", "$usp", "$spc",
};

/* We need this table to handle preg-moves with implicit width.  */
static int preg_sizes[] = {
#define t_gen_mov_TN_env(tn, member) \
    _t_gen_mov_TN_env((tn), offsetof(CPUState, member))
#define t_gen_mov_env_TN(member, tn) \
    _t_gen_mov_env_TN(offsetof(CPUState, member), (tn))

static inline void t_gen_mov_TN_reg(TCGv tn, int r)
{
    if (r < 0 || r > 15)
        fprintf(stderr, "wrong register read $r%d\n", r);
    tcg_gen_mov_tl(tn, cpu_R[r]);
}

static inline void t_gen_mov_reg_TN(int r, TCGv tn)
{
    if (r < 0 || r > 15)
        fprintf(stderr, "wrong register write $r%d\n", r);
    tcg_gen_mov_tl(cpu_R[r], tn);
}

static inline void _t_gen_mov_TN_env(TCGv tn, int offset)
{
    if (offset > sizeof (CPUState))
        fprintf(stderr, "wrong load from env from off=%d\n", offset);
    tcg_gen_ld_tl(tn, cpu_env, offset);
}

static inline void _t_gen_mov_env_TN(int offset, TCGv tn)
{
    if (offset > sizeof (CPUState))
        fprintf(stderr, "wrong store to env at off=%d\n", offset);
    tcg_gen_st_tl(tn, cpu_env, offset);
}
static inline void t_gen_mov_TN_preg(TCGv tn, int r)
{
    if (r < 0 || r > 15)
        fprintf(stderr, "wrong register read $p%d\n", r);
    if (r == PR_BZ || r == PR_WZ || r == PR_DZ)
        tcg_gen_mov_tl(tn, tcg_const_tl(0));
    else if (r == PR_VR)
        tcg_gen_mov_tl(tn, tcg_const_tl(32));
    else if (r == PR_EDA) {
        printf("read from EDA!\n");
        tcg_gen_mov_tl(tn, cpu_PR[r]);
    } else
        tcg_gen_mov_tl(tn, cpu_PR[r]);
}

static inline void t_gen_mov_preg_TN(DisasContext *dc, int r, TCGv tn)
{
    if (r < 0 || r > 15)
        fprintf(stderr, "wrong register write $p%d\n", r);
    if (r == PR_BZ || r == PR_WZ || r == PR_DZ)
        return;
    else if (r == PR_SRS)
        tcg_gen_andi_tl(cpu_PR[r], tn, 3);
    else {
        if (r == PR_PID)
            gen_helper_tlb_flush_pid(tn);
        if (dc->tb_flags & S_FLAG && r == PR_SPC)
            gen_helper_spc_write(tn);
        else if (r == PR_CCS)
            dc->cpustate_changed = 1;
        tcg_gen_mov_tl(cpu_PR[r], tn);
    }
}

static inline void t_gen_raise_exception(uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);
    gen_helper_raise_exception(tmp);
    tcg_temp_free_i32(tmp);
}
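/* The shift helpers below emulate CRIS shift semantics for counts >= 32:
   (31 - b) >> 31 yields an all-ones mask only when b > 31, which is then
   used to force the result to zero (lsl/lsr) or to the sign bits (asr)
   without taking a branch.  */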
static void t_gen_lsl(TCGv d, TCGv a, TCGv b)
{
    TCGv t0, t_31;

    t0 = tcg_temp_new();
    t_31 = tcg_const_tl(31);
    tcg_gen_shl_tl(d, a, b);

    tcg_gen_sub_tl(t0, t_31, b);
    tcg_gen_sar_tl(t0, t0, t_31);
    tcg_gen_and_tl(t0, t0, d);
    tcg_gen_xor_tl(d, d, t0);
    tcg_temp_free(t0);
    tcg_temp_free(t_31);
}

static void t_gen_lsr(TCGv d, TCGv a, TCGv b)
{
    TCGv t0, t_31;

    t0 = tcg_temp_new();
    t_31 = tcg_temp_new();
    tcg_gen_shr_tl(d, a, b);

    tcg_gen_movi_tl(t_31, 31);
    tcg_gen_sub_tl(t0, t_31, b);
    tcg_gen_sar_tl(t0, t0, t_31);
    tcg_gen_and_tl(t0, t0, d);
    tcg_gen_xor_tl(d, d, t0);
    tcg_temp_free(t0);
    tcg_temp_free(t_31);
}

static void t_gen_asr(TCGv d, TCGv a, TCGv b)
{
    TCGv t0, t_31;

    t0 = tcg_temp_new();
    t_31 = tcg_temp_new();
    tcg_gen_sar_tl(d, a, b);

    tcg_gen_movi_tl(t_31, 31);
    tcg_gen_sub_tl(t0, t_31, b);
    tcg_gen_sar_tl(t0, t0, t_31);
    tcg_gen_or_tl(d, d, t0);
    tcg_temp_free(t0);
    tcg_temp_free(t_31);
}
/* 64-bit signed mul, lower result in d and upper in d2.  */
static void t_gen_muls(TCGv d, TCGv d2, TCGv a, TCGv b)
{
    TCGv_i64 t0, t1;

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(t0, a);
    tcg_gen_ext_i32_i64(t1, b);
    tcg_gen_mul_i64(t0, t0, t1);

    tcg_gen_trunc_i64_i32(d, t0);
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_trunc_i64_i32(d2, t0);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}

/* 64-bit unsigned muls, lower result in d and upper in d2.  */
static void t_gen_mulu(TCGv d, TCGv d2, TCGv a, TCGv b)
{
    TCGv_i64 t0, t1;

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(t0, a);
    tcg_gen_extu_i32_i64(t1, b);
    tcg_gen_mul_i64(t0, t0, t1);

    tcg_gen_trunc_i64_i32(d, t0);
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_trunc_i64_i32(d2, t0);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}
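/* dstep is the CRIS division step: d = (a << 1); if (d >= b) d -= b;
   implemented with a single conditional branch over the subtract.  */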
static void t_gen_cris_dstep(TCGv d, TCGv a, TCGv b)
{
    int l1;

    l1 = gen_new_label();

    tcg_gen_shli_tl(d, a, 1);
    tcg_gen_brcond_tl(TCG_COND_LTU, d, b, l1);
    tcg_gen_sub_tl(d, d, b);
    gen_set_label(l1);
}
/* Extended arithmetics on CRIS.  */
static inline void t_gen_add_flag(TCGv d, int flag)
{
    TCGv c;

    c = tcg_temp_new();
    t_gen_mov_TN_preg(c, PR_CCS);
    /* Propagate carry into d.  */
    tcg_gen_andi_tl(c, c, 1 << flag);
    tcg_gen_shri_tl(c, c, flag);
    tcg_gen_add_tl(d, d, c);
    tcg_temp_free(c);
}
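/* addx/subx only propagate the carry when the X flag is set.  When the
   translator knows X at translation time (flagx_known) it can emit the
   plain C-flag add/sub; otherwise it masks C with X at runtime.  */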
static inline void t_gen_addx_carry(DisasContext *dc, TCGv d)
{
    if (dc->flagx_known) {
        if (dc->flags_x) {
            TCGv c;
            c = tcg_temp_new();
            t_gen_mov_TN_preg(c, PR_CCS);
            /* C flag is already at bit 0.  */
            tcg_gen_andi_tl(c, c, C_FLAG);
            tcg_gen_add_tl(d, d, c);
            tcg_temp_free(c);
        }
    } else {
        TCGv x, c;
        x = tcg_temp_new();
        c = tcg_temp_new();
        t_gen_mov_TN_preg(x, PR_CCS);
        tcg_gen_mov_tl(c, x);

        /* Propagate carry into d if X is set. Branch free.  */
        tcg_gen_andi_tl(c, c, C_FLAG);
        tcg_gen_andi_tl(x, x, X_FLAG);
        tcg_gen_shri_tl(x, x, 4);

        tcg_gen_and_tl(x, x, c);
        tcg_gen_add_tl(d, d, x);
        tcg_temp_free(x);
        tcg_temp_free(c);
    }
}

static inline void t_gen_subx_carry(DisasContext *dc, TCGv d)
{
    if (dc->flagx_known) {
        if (dc->flags_x) {
            TCGv c;
            c = tcg_temp_new();
            t_gen_mov_TN_preg(c, PR_CCS);
            /* C flag is already at bit 0.  */
            tcg_gen_andi_tl(c, c, C_FLAG);
            tcg_gen_sub_tl(d, d, c);
            tcg_temp_free(c);
        }
    } else {
        TCGv x, c;
        x = tcg_temp_new();
        c = tcg_temp_new();
        t_gen_mov_TN_preg(x, PR_CCS);
        tcg_gen_mov_tl(c, x);

        /* Propagate carry into d if X is set. Branch free.  */
        tcg_gen_andi_tl(c, c, C_FLAG);
        tcg_gen_andi_tl(x, x, X_FLAG);
        tcg_gen_shri_tl(x, x, 4);

        tcg_gen_and_tl(x, x, c);
        tcg_gen_sub_tl(d, d, x);
        tcg_temp_free(x);
        tcg_temp_free(c);
    }
}
/* Swap the two bytes within each half word of the s operand.
   T0 = ((T0 << 8) & 0xff00ff00) | ((T0 >> 8) & 0x00ff00ff)  */
static inline void t_gen_swapb(TCGv d, TCGv s)
{
    TCGv t, org_s;

    t = tcg_temp_new();
    org_s = tcg_temp_new();

    /* d and s may refer to the same object.  */
    tcg_gen_mov_tl(org_s, s);
    tcg_gen_shli_tl(t, org_s, 8);
    tcg_gen_andi_tl(d, t, 0xff00ff00);
    tcg_gen_shri_tl(t, org_s, 8);
    tcg_gen_andi_tl(t, t, 0x00ff00ff);
    tcg_gen_or_tl(d, d, t);
    tcg_temp_free(t);
    tcg_temp_free(org_s);
}
/* Swap the halfwords of the s operand.  */
static inline void t_gen_swapw(TCGv d, TCGv s)
{
    TCGv t;

    /* d and s may refer to the same object.  */
    t = tcg_temp_new();
    tcg_gen_mov_tl(t, s);
    tcg_gen_shli_tl(d, t, 16);
    tcg_gen_shri_tl(t, t, 16);
    tcg_gen_or_tl(d, d, t);
    tcg_temp_free(t);
}
/* Reverse the bits within each byte.
   T0 = (((T0 << 7) & 0x80808080) |
         ((T0 << 5) & 0x40404040) |
         ((T0 << 3) & 0x20202020) |
         ((T0 << 1) & 0x10101010) |
         ((T0 >> 1) & 0x08080808) |
         ((T0 >> 3) & 0x04040404) |
         ((T0 >> 5) & 0x02020202) |
         ((T0 >> 7) & 0x01010101));
 */
static inline void t_gen_swapr(TCGv d, TCGv s)
{
    struct {
        int shift; /* LSL when positive, LSR when negative.  */
        uint32_t mask;
    } bitrev[] = {
        {7, 0x80808080},
        {5, 0x40404040},
        {3, 0x20202020},
        {1, 0x10101010},
        {-1, 0x08080808},
        {-3, 0x04040404},
        {-5, 0x02020202},
        {-7, 0x01010101}
    };
    int i;
    TCGv t, org_s;

    /* d and s may refer to the same object.  */
    t = tcg_temp_new();
    org_s = tcg_temp_new();
    tcg_gen_mov_tl(org_s, s);

    tcg_gen_shli_tl(t, org_s, bitrev[0].shift);
    tcg_gen_andi_tl(d, t, bitrev[0].mask);
    for (i = 1; i < ARRAY_SIZE(bitrev); i++) {
        if (bitrev[i].shift >= 0) {
            tcg_gen_shli_tl(t, org_s, bitrev[i].shift);
        } else {
            tcg_gen_shri_tl(t, org_s, -bitrev[i].shift);
        }
        tcg_gen_andi_tl(t, t, bitrev[i].mask);
        tcg_gen_or_tl(d, d, t);
    }
    tcg_temp_free(t);
    tcg_temp_free(org_s);
}
static void t_gen_cc_jmp(TCGv pc_true, TCGv pc_false)
{
    TCGv btaken;
    int l1;

    l1 = gen_new_label();
    btaken = tcg_temp_new();

    /* Conditional jmp.  */
    tcg_gen_mov_tl(btaken, env_btaken);
    tcg_gen_mov_tl(env_pc, pc_false);
    tcg_gen_brcondi_tl(TCG_COND_EQ, btaken, 0, l1);
    tcg_gen_mov_tl(env_pc, pc_true);
    gen_set_label(l1);

    tcg_temp_free(btaken);
}
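/* Direct jumps within the same guest page may be chained to the next TB;
   anything that could cross a page boundary must exit to the main loop.  */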
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    TranslationBlock *tb;

    tb = dc->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_tl(env_pc, dest);
        tcg_gen_exit_tb((long)tb + n);
    } else {
        tcg_gen_movi_tl(env_pc, dest);
        tcg_gen_exit_tb(0);
    }
}
/* Sign extend at translation time.  */
static int sign_extend(unsigned int val, unsigned int width)

static inline void cris_clear_x_flag(DisasContext *dc)
{
    if (dc->flagx_known && dc->flags_x)
        dc->flags_uptodate = 0;

    dc->flagx_known = 1;
    dc->flags_x = 0;
}

static void cris_flush_cc_state(DisasContext *dc)
{
    if (dc->cc_size_uptodate != dc->cc_size) {
        tcg_gen_movi_tl(cc_size, dc->cc_size);
        dc->cc_size_uptodate = dc->cc_size;
    }
    tcg_gen_movi_tl(cc_op, dc->cc_op);
    tcg_gen_movi_tl(cc_mask, dc->cc_mask);
}
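/* Condition codes are evaluated lazily: ALU ops only record cc_op, cc_src,
   cc_dest and cc_result, and this function picks the evaluate_flags helper
   matching dc->cc_op/cc_size to fold them into $ccs when the flags are
   actually needed.  */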
static void cris_evaluate_flags(DisasContext *dc)
{
    if (dc->flags_uptodate)
        return;

    cris_flush_cc_state(dc);

    gen_helper_evaluate_flags_mcp(cpu_PR[PR_CCS],
                                  cpu_PR[PR_CCS], cc_src,
                                  cc_dest, cc_result);
    gen_helper_evaluate_flags_muls(cpu_PR[PR_CCS],
                                   cpu_PR[PR_CCS], cc_result,
                                   cpu_PR[PR_MOF]);
    gen_helper_evaluate_flags_mulu(cpu_PR[PR_CCS],
                                   cpu_PR[PR_CCS], cc_result,
                                   cpu_PR[PR_MOF]);
    gen_helper_evaluate_flags_move_4(cpu_PR[PR_CCS],
                                     cpu_PR[PR_CCS], cc_result);
    gen_helper_evaluate_flags_move_2(cpu_PR[PR_CCS],
                                     cpu_PR[PR_CCS], cc_result);
    gen_helper_evaluate_flags();

    if (dc->cc_size == 4)
        gen_helper_evaluate_flags_sub_4(cpu_PR[PR_CCS],
                cpu_PR[PR_CCS], cc_src, cc_dest, cc_result);
    else
        gen_helper_evaluate_flags();

    gen_helper_evaluate_flags_alu_4(cpu_PR[PR_CCS],
            cpu_PR[PR_CCS], cc_src, cc_dest, cc_result);
    gen_helper_evaluate_flags();

    if (dc->flagx_known) {
        if (dc->flags_x)
            tcg_gen_ori_tl(cpu_PR[PR_CCS],
                           cpu_PR[PR_CCS], X_FLAG);
        else
            tcg_gen_andi_tl(cpu_PR[PR_CCS],
                            cpu_PR[PR_CCS], ~X_FLAG);
    }
    dc->flags_uptodate = 1;
}
static void cris_cc_mask(DisasContext *dc, unsigned int mask)
{
    uint32_t ovl;

    /* Check if we need to evaluate the condition codes due to
       CC overlaying.  */
    ovl = (dc->cc_mask ^ mask) & ~mask;
    if (ovl) {
        /* TODO: optimize this case. It trigs all the time.  */
        cris_evaluate_flags (dc);
    }
    dc->cc_mask = mask;
}

static void cris_update_cc_op(DisasContext *dc, int op, int size)
{
    dc->cc_op = op;
    dc->cc_size = size;
    dc->flags_uptodate = 0;
}

static inline void cris_update_cc_x(DisasContext *dc)
{
    /* Save the x flag state at the time of the cc snapshot.  */
    if (dc->flagx_known) {
        if (dc->cc_x_uptodate == (2 | dc->flags_x))
            return;
        tcg_gen_movi_tl(cc_x, dc->flags_x);
        dc->cc_x_uptodate = 2 | dc->flags_x;
    } else {
        tcg_gen_andi_tl(cc_x, cpu_PR[PR_CCS], X_FLAG);
        dc->cc_x_uptodate = 1;
    }
}
/* Update cc prior to executing ALU op. Needs source operands untouched.  */
static void cris_pre_alu_update_cc(DisasContext *dc, int op,
                                   TCGv dst, TCGv src, int size)
{
    cris_update_cc_op(dc, op, size);
    tcg_gen_mov_tl(cc_src, src);
    tcg_gen_mov_tl(cc_dest, dst);
    cris_update_cc_x(dc);
}

/* Update cc after executing ALU op. needs the result.  */
static inline void cris_update_result(DisasContext *dc, TCGv res)
{
    tcg_gen_mov_tl(cc_result, res);
}

/* Returns one if the write back stage should execute.  */
static void cris_alu_op_exec(DisasContext *dc, int op,
                             TCGv dst, TCGv a, TCGv b, int size)
{
    /* Emit the ALU insns.  */
    switch (op) {
    case CC_OP_ADD:
        tcg_gen_add_tl(dst, a, b);
        /* Extended arithmetics.  */
        t_gen_addx_carry(dc, dst);
        break;
    case CC_OP_ADDC:
        tcg_gen_add_tl(dst, a, b);
        t_gen_add_flag(dst, 0); /* C_FLAG.  */
        break;
    case CC_OP_MCP:
        tcg_gen_add_tl(dst, a, b);
        t_gen_add_flag(dst, 8); /* R_FLAG.  */
        break;
    case CC_OP_SUB:
        tcg_gen_sub_tl(dst, a, b);
        /* Extended arithmetics.  */
        t_gen_subx_carry(dc, dst);
        break;
    case CC_OP_MOVE:
        tcg_gen_mov_tl(dst, b);
        break;
    case CC_OP_OR:
        tcg_gen_or_tl(dst, a, b);
        break;
    case CC_OP_AND:
        tcg_gen_and_tl(dst, a, b);
        break;
    case CC_OP_XOR:
        tcg_gen_xor_tl(dst, a, b);
        break;
    case CC_OP_LSL:
        t_gen_lsl(dst, a, b);
        break;
    case CC_OP_LSR:
        t_gen_lsr(dst, a, b);
        break;
    case CC_OP_ASR:
        t_gen_asr(dst, a, b);
        break;
    case CC_OP_NEG:
        tcg_gen_neg_tl(dst, b);
        /* Extended arithmetics.  */
        t_gen_subx_carry(dc, dst);
        break;
    case CC_OP_LZ:
        gen_helper_lz(dst, b);
        break;
    case CC_OP_MULS:
        t_gen_muls(dst, cpu_PR[PR_MOF], a, b);
        break;
    case CC_OP_MULU:
        t_gen_mulu(dst, cpu_PR[PR_MOF], a, b);
        break;
    case CC_OP_DSTEP:
        t_gen_cris_dstep(dst, a, b);
        break;
    case CC_OP_BOUND:
    {
        int l1;
        l1 = gen_new_label();
        tcg_gen_mov_tl(dst, a);
        tcg_gen_brcond_tl(TCG_COND_LEU, a, b, l1);
        tcg_gen_mov_tl(dst, b);
        gen_set_label(l1);
    }
        break;
    case CC_OP_CMP:
        tcg_gen_sub_tl(dst, a, b);
        /* Extended arithmetics.  */
        t_gen_subx_carry(dc, dst);
        break;
    default:
        qemu_log("illegal ALU op.\n");
        BUG();
        break;
    }

    if (size == 1)
        tcg_gen_andi_tl(dst, dst, 0xff);
    else if (size == 2)
        tcg_gen_andi_tl(dst, dst, 0xffff);
}
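/* cris_alu wraps cris_alu_op_exec: for byte and word sized operations the
   result is merged into the low bits of the destination register, leaving
   the upper bits untouched, as CRIS register writeback requires.  */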
static void cris_alu(DisasContext *dc, int op,
                     TCGv d, TCGv op_a, TCGv op_b, int size)
{
    TCGv tmp;
    int writeback;

    writeback = 1;

    if (op == CC_OP_CMP) {
        tmp = tcg_temp_new();
        writeback = 0;
    } else if (size == 4) {
        tmp = d;
        writeback = 0;
    } else
        tmp = tcg_temp_new();

    cris_pre_alu_update_cc(dc, op, op_a, op_b, size);
    cris_alu_op_exec(dc, op, tmp, op_a, op_b, size);
    cris_update_result(dc, tmp);

    /* Writeback.  */
    if (writeback) {
        if (size == 1)
            tcg_gen_andi_tl(d, d, ~0xff);
        else
            tcg_gen_andi_tl(d, d, ~0xffff);
        tcg_gen_or_tl(d, d, tmp);
    }
    if (!TCGV_EQUAL(tmp, d))
        tcg_temp_free(tmp);
}
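/* arith_cc returns non-zero when the pending cc_op is an arithmetic or
   logic operation whose N/Z flags can be derived directly from cc_result,
   letting gen_tst_cc skip a full flags evaluation.  */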
static int arith_cc(DisasContext *dc)
{
    switch (dc->cc_op) {
    case CC_OP_ADDC: return 1;
    case CC_OP_ADD: return 1;
    case CC_OP_SUB: return 1;
    case CC_OP_DSTEP: return 1;
    case CC_OP_LSL: return 1;
    case CC_OP_LSR: return 1;
    case CC_OP_ASR: return 1;
    case CC_OP_CMP: return 1;
    case CC_OP_NEG: return 1;
    case CC_OP_OR: return 1;
    case CC_OP_AND: return 1;
    case CC_OP_XOR: return 1;
    case CC_OP_MULU: return 1;
    case CC_OP_MULS: return 1;
    default: return 0;
    }
}
static void gen_tst_cc (DisasContext *dc, TCGv cc, int cond)
{
    int arith_opt, move_opt;

    /* TODO: optimize more condition codes.  */

    /*
     * If the flags are live, we've gotta look into the bits of CCS.
     * Otherwise, if we just did an arithmetic operation we try to
     * evaluate the condition code faster.
     *
     * When this function is done, T0 should be non-zero if the condition
     * code is true.
     */
    arith_opt = arith_cc(dc) && !dc->flags_uptodate;
    move_opt = (dc->cc_op == CC_OP_MOVE);

    /* eq  */
    if (arith_opt || move_opt) {
        /* If cc_result is zero, T0 should be
           non-zero otherwise T0 should be zero.  */
        int l1;
        l1 = gen_new_label();
        tcg_gen_movi_tl(cc, 0);
        tcg_gen_brcondi_tl(TCG_COND_NE, cc_result,
                           0, l1);
        tcg_gen_movi_tl(cc, 1);
        gen_set_label(l1);
    } else {
        cris_evaluate_flags(dc);
        tcg_gen_andi_tl(cc,
                        cpu_PR[PR_CCS], Z_FLAG);
    }

    /* ne  */
    if (arith_opt || move_opt)
        tcg_gen_mov_tl(cc, cc_result);
    else {
        cris_evaluate_flags(dc);
        tcg_gen_xori_tl(cc, cpu_PR[PR_CCS],
                        Z_FLAG);
        tcg_gen_andi_tl(cc, cc, Z_FLAG);
    }

    /* cs  */
    cris_evaluate_flags(dc);
    tcg_gen_andi_tl(cc, cpu_PR[PR_CCS], C_FLAG);

    /* cc  */
    cris_evaluate_flags(dc);
    tcg_gen_xori_tl(cc, cpu_PR[PR_CCS], C_FLAG);
    tcg_gen_andi_tl(cc, cc, C_FLAG);

    /* vs  */
    cris_evaluate_flags(dc);
    tcg_gen_andi_tl(cc, cpu_PR[PR_CCS], V_FLAG);

    /* vc  */
    cris_evaluate_flags(dc);
    tcg_gen_xori_tl(cc, cpu_PR[PR_CCS],
                    V_FLAG);
    tcg_gen_andi_tl(cc, cc, V_FLAG);

    /* pl  */
    if (arith_opt || move_opt) {
        if (dc->cc_size == 1)
            bits = 7;
        else if (dc->cc_size == 2)
            bits = 15;
        tcg_gen_shri_tl(cc, cc_result, bits);
        tcg_gen_xori_tl(cc, cc, 1);
    } else {
        cris_evaluate_flags(dc);
        tcg_gen_xori_tl(cc, cpu_PR[PR_CCS],
                        N_FLAG);
        tcg_gen_andi_tl(cc, cc, N_FLAG);
    }

    /* mi  */
    if (arith_opt || move_opt) {
        if (dc->cc_size == 1)
            bits = 7;
        else if (dc->cc_size == 2)
            bits = 15;
        tcg_gen_shri_tl(cc, cc_result, 31);
    } else {
        cris_evaluate_flags(dc);
        tcg_gen_andi_tl(cc, cpu_PR[PR_CCS],
                        N_FLAG);
    }

    /* ls  */
    cris_evaluate_flags(dc);
    tcg_gen_andi_tl(cc, cpu_PR[PR_CCS],
                    C_FLAG | Z_FLAG);

    /* hi  */
    cris_evaluate_flags(dc);
    tmp = tcg_temp_new();
    tcg_gen_xori_tl(tmp, cpu_PR[PR_CCS],
                    C_FLAG | Z_FLAG);
    /* Overlay the C flag on top of the Z.  */
    tcg_gen_shli_tl(cc, tmp, 2);
    tcg_gen_and_tl(cc, tmp, cc);
    tcg_gen_andi_tl(cc, cc, Z_FLAG);

    /* ge  */
    cris_evaluate_flags(dc);
    /* Overlay the V flag on top of the N.  */
    tcg_gen_shli_tl(cc, cpu_PR[PR_CCS], 2);
    tcg_gen_andi_tl(cc, cc, N_FLAG);
    tcg_gen_xori_tl(cc, cc, N_FLAG);

    /* lt  */
    cris_evaluate_flags(dc);
    /* Overlay the V flag on top of the N.  */
    tcg_gen_shli_tl(cc, cpu_PR[PR_CCS], 2);
    tcg_gen_xor_tl(cc,
                   cpu_PR[PR_CCS], cc);
    tcg_gen_andi_tl(cc, cc, N_FLAG);

    /* gt  */
    cris_evaluate_flags(dc);
    /* To avoid a shift we overlay everything on
       the V flag.  */
    tcg_gen_shri_tl(n, cpu_PR[PR_CCS], 2);
    tcg_gen_shri_tl(z, cpu_PR[PR_CCS], 1);
    tcg_gen_xori_tl(z, z, 2);
    tcg_gen_xor_tl(n, n, cpu_PR[PR_CCS]);
    tcg_gen_xori_tl(n, n, 2);
    tcg_gen_and_tl(cc, z, n);
    tcg_gen_andi_tl(cc, cc, 2);

    /* le  */
    cris_evaluate_flags(dc);
    /* To avoid a shift we overlay everything on
       the V flag.  */
    tcg_gen_shri_tl(n, cpu_PR[PR_CCS], 2);
    tcg_gen_shri_tl(z, cpu_PR[PR_CCS], 1);
    tcg_gen_xor_tl(n, n, cpu_PR[PR_CCS]);
    tcg_gen_or_tl(cc, z, n);
    tcg_gen_andi_tl(cc, cc, 2);

    /* p  */
    cris_evaluate_flags(dc);
    tcg_gen_andi_tl(cc, cpu_PR[PR_CCS], P_FLAG);

    /* a  */
    tcg_gen_movi_tl(cc, 1);
}
static void cris_store_direct_jmp(DisasContext *dc)
{
    /* Store the direct jmp state into the cpu-state.  */
    if (dc->jmp == JMP_DIRECT) {
        tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
        tcg_gen_movi_tl(env_btaken, 1);
    }
}

static void cris_prepare_cc_branch (DisasContext *dc,
                                    int offset, int cond)
{
    /* This helps us re-schedule the micro-code to insns in delay-slots
       before the actual jump.  */
    dc->delayed_branch = 2;
    dc->jmp_pc = dc->pc + offset;

    if (cond != CC_A) {
        dc->jmp = JMP_INDIRECT;
        gen_tst_cc (dc, env_btaken, cond);
        tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
    } else {
        /* Allow chaining.  */
        dc->jmp = JMP_DIRECT;
    }
}
/* jumps, when the dest is in a live reg for example. Direct should be set
   when the dest addr is constant to allow tb chaining.  */
static inline void cris_prepare_jmp (DisasContext *dc, unsigned int type)
{
    /* This helps us re-schedule the micro-code to insns in delay-slots
       before the actual jump.  */
    dc->delayed_branch = 2;
    dc->jmp = type;
    if (type == JMP_INDIRECT)
        tcg_gen_movi_tl(env_btaken, 1);
}

static void gen_load64(DisasContext *dc, TCGv_i64 dst, TCGv addr)
{
    int mem_index = cpu_mmu_index(dc->env);

    /* If we get a fault on a delayslot we must keep the jmp state in
       the cpu-state to be able to re-execute the jmp.  */
    if (dc->delayed_branch == 1)
        cris_store_direct_jmp(dc);

    tcg_gen_qemu_ld64(dst, addr, mem_index);
}

static void gen_load(DisasContext *dc, TCGv dst, TCGv addr,
                     unsigned int size, int sign)
{
    int mem_index = cpu_mmu_index(dc->env);

    /* If we get a fault on a delayslot we must keep the jmp state in
       the cpu-state to be able to re-execute the jmp.  */
    if (dc->delayed_branch == 1)
        cris_store_direct_jmp(dc);

    if (size == 1) {
        if (sign)
            tcg_gen_qemu_ld8s(dst, addr, mem_index);
        else
            tcg_gen_qemu_ld8u(dst, addr, mem_index);
    } else if (size == 2) {
        if (sign)
            tcg_gen_qemu_ld16s(dst, addr, mem_index);
        else
            tcg_gen_qemu_ld16u(dst, addr, mem_index);
    } else if (size == 4) {
        tcg_gen_qemu_ld32u(dst, addr, mem_index);
    }
}
static void gen_store (DisasContext *dc, TCGv addr, TCGv val,
                       unsigned int size)
{
    int mem_index = cpu_mmu_index(dc->env);

    /* If we get a fault on a delayslot we must keep the jmp state in
       the cpu-state to be able to re-execute the jmp.  */
    if (dc->delayed_branch == 1)
        cris_store_direct_jmp(dc);

    /* Conditional writes. We only support the kind where X and P are known
       at translation time.  */
    if (dc->flagx_known && dc->flags_x && (dc->tb_flags & P_FLAG)) {
        cris_evaluate_flags(dc);
        tcg_gen_ori_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], C_FLAG);
        return;
    }

    if (size == 1)
        tcg_gen_qemu_st8(val, addr, mem_index);
    else if (size == 2)
        tcg_gen_qemu_st16(val, addr, mem_index);
    else
        tcg_gen_qemu_st32(val, addr, mem_index);

    if (dc->flagx_known && dc->flags_x) {
        cris_evaluate_flags(dc);
        tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~C_FLAG);
    }
}

static inline void t_gen_sext(TCGv d, TCGv s, int size)
{
    if (size == 1)
        tcg_gen_ext8s_i32(d, s);
    else if (size == 2)
        tcg_gen_ext16s_i32(d, s);
    else if(!TCGV_EQUAL(d, s))
        tcg_gen_mov_tl(d, s);
}

static inline void t_gen_zext(TCGv d, TCGv s, int size)
{
    if (size == 1)
        tcg_gen_ext8u_i32(d, s);
    else if (size == 2)
        tcg_gen_ext16u_i32(d, s);
    else if (!TCGV_EQUAL(d, s))
        tcg_gen_mov_tl(d, s);
}

static char memsize_char(int size)
{
    switch (size) {
    case 1: return 'b'; break;
    case 2: return 'w'; break;
    case 4: return 'd'; break;
    }
}
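/* Operand sizes are encoded in the zsize/zzsize instruction fields:
   memsize_z maps 0/1 to byte/word, while memsize_zz also allows dword.  */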
static inline unsigned int memsize_z(DisasContext *dc)
{
    return dc->zsize + 1;
}

static inline unsigned int memsize_zz(DisasContext *dc)

static inline void do_postinc (DisasContext *dc, int size)
{
    if (dc->postinc)
        tcg_gen_addi_tl(cpu_R[dc->op1], cpu_R[dc->op1], size);
}

static inline void dec_prep_move_r(DisasContext *dc, int rs, int rd,
                                   int size, int s_ext, TCGv dst)
{
    if (s_ext)
        t_gen_sext(dst, cpu_R[rs], size);
    else
        t_gen_zext(dst, cpu_R[rs], size);
}

/* Prepare T0 and T1 for a register alu operation.
   s_ext decides if the operand1 should be sign-extended or zero-extended when
   needed.  */
static void dec_prep_alu_r(DisasContext *dc, int rs, int rd,
                           int size, int s_ext, TCGv dst, TCGv src)
{
    dec_prep_move_r(dc, rs, rd, size, s_ext, src);

    if (s_ext)
        t_gen_sext(dst, cpu_R[rd], size);
    else
        t_gen_zext(dst, cpu_R[rd], size);
}

static int dec_prep_move_m(DisasContext *dc, int s_ext, int memsize,
                           TCGv dst)
{
    unsigned int rs, rd;

    is_imm = rs == 15 && dc->postinc;

    /* Load [$rs] onto T1.  */
    if (is_imm) {
        insn_len = 2 + memsize;

        imm = ldsb_code(dc->pc + 2);
        imm = ldsw_code(dc->pc + 2);
        imm = ldub_code(dc->pc + 2);
        imm = lduw_code(dc->pc + 2);
        imm = ldl_code(dc->pc + 2);
        tcg_gen_movi_tl(dst, imm);
    } else {
        cris_flush_cc_state(dc);
        gen_load(dc, dst, cpu_R[rs], memsize, 0);
        if (s_ext)
            t_gen_sext(dst, dst, memsize);
        else
            t_gen_zext(dst, dst, memsize);
    }
    return insn_len;
}

/* Prepare T0 and T1 for a memory + alu operation.
   s_ext decides if the operand1 should be sign-extended or zero-extended when
   needed.  */
static int dec_prep_alu_m(DisasContext *dc, int s_ext, int memsize,
                          TCGv dst, TCGv src)
{
    insn_len = dec_prep_move_m(dc, s_ext, memsize, src);
    tcg_gen_mov_tl(dst, cpu_R[dc->op2]);
    return insn_len;
}

static const char *cc_name(int cc)
{
    static const char *cc_names[16] = {
        "cc", "cs", "ne", "eq", "vc", "vs", "pl", "mi",
        "ls", "hi", "ge", "lt", "gt", "le", "a", "p"
    };
    return cc_names[cc];
}

/* Start of insn decoders.  */
*dc
)
1337 uint32_t cond
= dc
->op2
;
1340 offset
= EXTRACT_FIELD (dc
->ir
, 1, 7);
1341 sign
= EXTRACT_FIELD(dc
->ir
, 0, 0);
1344 offset
|= sign
<< 8;
1346 offset
= sign_extend(offset
, 8);
1348 LOG_DIS("b%s %x\n", cc_name(cond
), dc
->pc
+ offset
);
1350 /* op2 holds the condition-code. */
1351 cris_cc_mask(dc
, 0);
1352 cris_prepare_cc_branch (dc
, offset
, cond
);
1355 static unsigned int dec_addoq(DisasContext
*dc
)
1359 dc
->op1
= EXTRACT_FIELD(dc
->ir
, 0, 7);
1360 imm
= sign_extend(dc
->op1
, 7);
1362 LOG_DIS("addoq %d, $r%u\n", imm
, dc
->op2
);
1363 cris_cc_mask(dc
, 0);
1364 /* Fetch register operand, */
1365 tcg_gen_addi_tl(cpu_R
[R_ACR
], cpu_R
[dc
->op2
], imm
);
1369 static unsigned int dec_addq(DisasContext
*dc
)
1371 LOG_DIS("addq %u, $r%u\n", dc
->op1
, dc
->op2
);
1373 dc
->op1
= EXTRACT_FIELD(dc
->ir
, 0, 5);
1375 cris_cc_mask(dc
, CC_MASK_NZVC
);
1377 cris_alu(dc
, CC_OP_ADD
,
1378 cpu_R
[dc
->op2
], cpu_R
[dc
->op2
], tcg_const_tl(dc
->op1
), 4);
1381 static unsigned int dec_moveq(DisasContext
*dc
)
1385 dc
->op1
= EXTRACT_FIELD(dc
->ir
, 0, 5);
1386 imm
= sign_extend(dc
->op1
, 5);
1387 LOG_DIS("moveq %d, $r%u\n", imm
, dc
->op2
);
1389 tcg_gen_mov_tl(cpu_R
[dc
->op2
], tcg_const_tl(imm
));
static unsigned int dec_subq(DisasContext *dc)
{
    dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);

    LOG_DIS("subq %u, $r%u\n", dc->op1, dc->op2);

    cris_cc_mask(dc, CC_MASK_NZVC);
    cris_alu(dc, CC_OP_SUB,
             cpu_R[dc->op2], cpu_R[dc->op2], tcg_const_tl(dc->op1), 4);
}

static unsigned int dec_cmpq(DisasContext *dc)
{
    dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
    imm = sign_extend(dc->op1, 5);

    LOG_DIS("cmpq %d, $r%d\n", imm, dc->op2);
    cris_cc_mask(dc, CC_MASK_NZVC);

    cris_alu(dc, CC_OP_CMP,
             cpu_R[dc->op2], cpu_R[dc->op2], tcg_const_tl(imm), 4);
}

static unsigned int dec_andq(DisasContext *dc)
{
    dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
    imm = sign_extend(dc->op1, 5);

    LOG_DIS("andq %d, $r%d\n", imm, dc->op2);
    cris_cc_mask(dc, CC_MASK_NZ);

    cris_alu(dc, CC_OP_AND,
             cpu_R[dc->op2], cpu_R[dc->op2], tcg_const_tl(imm), 4);
}

static unsigned int dec_orq(DisasContext *dc)
{
    dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
    imm = sign_extend(dc->op1, 5);
    LOG_DIS("orq %d, $r%d\n", imm, dc->op2);
    cris_cc_mask(dc, CC_MASK_NZ);

    cris_alu(dc, CC_OP_OR,
             cpu_R[dc->op2], cpu_R[dc->op2], tcg_const_tl(imm), 4);
}
static unsigned int dec_btstq(DisasContext *dc)
{
    dc->op1 = EXTRACT_FIELD(dc->ir, 0, 4);
    LOG_DIS("btstq %u, $r%d\n", dc->op1, dc->op2);

    cris_cc_mask(dc, CC_MASK_NZ);
    cris_evaluate_flags(dc);
    gen_helper_btst(cpu_PR[PR_CCS], cpu_R[dc->op2],
                    tcg_const_tl(dc->op1), cpu_PR[PR_CCS]);
    cris_alu(dc, CC_OP_MOVE,
             cpu_R[dc->op2], cpu_R[dc->op2], cpu_R[dc->op2], 4);
    cris_update_cc_op(dc, CC_OP_FLAGS, 4);
    dc->flags_uptodate = 1;
}

static unsigned int dec_asrq(DisasContext *dc)
{
    dc->op1 = EXTRACT_FIELD(dc->ir, 0, 4);
    LOG_DIS("asrq %u, $r%d\n", dc->op1, dc->op2);
    cris_cc_mask(dc, CC_MASK_NZ);

    tcg_gen_sari_tl(cpu_R[dc->op2], cpu_R[dc->op2], dc->op1);
    cris_alu(dc, CC_OP_MOVE,
             cpu_R[dc->op2],
             cpu_R[dc->op2], cpu_R[dc->op2], 4);
}

static unsigned int dec_lslq(DisasContext *dc)
{
    dc->op1 = EXTRACT_FIELD(dc->ir, 0, 4);
    LOG_DIS("lslq %u, $r%d\n", dc->op1, dc->op2);

    cris_cc_mask(dc, CC_MASK_NZ);

    tcg_gen_shli_tl(cpu_R[dc->op2], cpu_R[dc->op2], dc->op1);

    cris_alu(dc, CC_OP_MOVE,
             cpu_R[dc->op2],
             cpu_R[dc->op2], cpu_R[dc->op2], 4);
}

static unsigned int dec_lsrq(DisasContext *dc)
{
    dc->op1 = EXTRACT_FIELD(dc->ir, 0, 4);
    LOG_DIS("lsrq %u, $r%d\n", dc->op1, dc->op2);

    cris_cc_mask(dc, CC_MASK_NZ);

    tcg_gen_shri_tl(cpu_R[dc->op2], cpu_R[dc->op2], dc->op1);
    cris_alu(dc, CC_OP_MOVE,
             cpu_R[dc->op2],
             cpu_R[dc->op2], cpu_R[dc->op2], 4);
}
static unsigned int dec_move_r(DisasContext *dc)
{
    int size = memsize_zz(dc);

    LOG_DIS("move.%c $r%u, $r%u\n",
            memsize_char(size), dc->op1, dc->op2);

    cris_cc_mask(dc, CC_MASK_NZ);
    if (size == 4) {
        dec_prep_move_r(dc, dc->op1, dc->op2, size, 0, cpu_R[dc->op2]);
        cris_cc_mask(dc, CC_MASK_NZ);
        cris_update_cc_op(dc, CC_OP_MOVE, 4);
        cris_update_cc_x(dc);
        cris_update_result(dc, cpu_R[dc->op2]);
    } else {
        t0 = tcg_temp_new();
        dec_prep_move_r(dc, dc->op1, dc->op2, size, 0, t0);
        cris_alu(dc, CC_OP_MOVE,
                 cpu_R[dc->op2],
                 cpu_R[dc->op2], t0, size);
    }
}
static unsigned int dec_scc_r(DisasContext *dc)
{
    LOG_DIS("s%s $r%u\n",
            cc_name(cond), dc->op1);

    if (cond != CC_A) {
        int l1;
        gen_tst_cc (dc, cpu_R[dc->op1], cond);
        l1 = gen_new_label();
        tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_R[dc->op1], 0, l1);
        tcg_gen_movi_tl(cpu_R[dc->op1], 1);
        gen_set_label(l1);
    } else
        tcg_gen_movi_tl(cpu_R[dc->op1], 1);

    cris_cc_mask(dc, 0);
}

static inline void cris_alu_alloc_temps(DisasContext *dc, int size, TCGv *t)
{
    if (size == 4) {
        t[0] = cpu_R[dc->op2];
        t[1] = cpu_R[dc->op1];
    } else {
        t[0] = tcg_temp_new();
        t[1] = tcg_temp_new();
    }
}

static inline void cris_alu_free_temps(DisasContext *dc, int size, TCGv *t)
{
    if (size != 4) {
        tcg_temp_free(t[0]);
        tcg_temp_free(t[1]);
    }
}
static unsigned int dec_and_r(DisasContext *dc)
{
    int size = memsize_zz(dc);

    LOG_DIS("and.%c $r%u, $r%u\n",
            memsize_char(size), dc->op1, dc->op2);

    cris_cc_mask(dc, CC_MASK_NZ);

    cris_alu_alloc_temps(dc, size, t);
    dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
    cris_alu(dc, CC_OP_AND, cpu_R[dc->op2], t[0], t[1], size);
    cris_alu_free_temps(dc, size, t);
}

static unsigned int dec_lz_r(DisasContext *dc)
{
    LOG_DIS("lz $r%u, $r%u\n",
            dc->op1, dc->op2);
    cris_cc_mask(dc, CC_MASK_NZ);
    t0 = tcg_temp_new();
    dec_prep_alu_r(dc, dc->op1, dc->op2, 4, 0, cpu_R[dc->op2], t0);
    cris_alu(dc, CC_OP_LZ, cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
}

static unsigned int dec_lsl_r(DisasContext *dc)
{
    int size = memsize_zz(dc);

    LOG_DIS("lsl.%c $r%u, $r%u\n",
            memsize_char(size), dc->op1, dc->op2);

    cris_cc_mask(dc, CC_MASK_NZ);
    cris_alu_alloc_temps(dc, size, t);
    dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
    tcg_gen_andi_tl(t[1], t[1], 63);
    cris_alu(dc, CC_OP_LSL, cpu_R[dc->op2], t[0], t[1], size);
    cris_alu_free_temps(dc, size, t);
}
static unsigned int dec_lsr_r(DisasContext *dc)
{
    int size = memsize_zz(dc);

    LOG_DIS("lsr.%c $r%u, $r%u\n",
            memsize_char(size), dc->op1, dc->op2);

    cris_cc_mask(dc, CC_MASK_NZ);
    cris_alu_alloc_temps(dc, size, t);
    dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
    tcg_gen_andi_tl(t[1], t[1], 63);
    cris_alu(dc, CC_OP_LSR, cpu_R[dc->op2], t[0], t[1], size);
    cris_alu_free_temps(dc, size, t);
}

static unsigned int dec_asr_r(DisasContext *dc)
{
    int size = memsize_zz(dc);

    LOG_DIS("asr.%c $r%u, $r%u\n",
            memsize_char(size), dc->op1, dc->op2);

    cris_cc_mask(dc, CC_MASK_NZ);
    cris_alu_alloc_temps(dc, size, t);
    dec_prep_alu_r(dc, dc->op1, dc->op2, size, 1, t[0], t[1]);
    tcg_gen_andi_tl(t[1], t[1], 63);
    cris_alu(dc, CC_OP_ASR, cpu_R[dc->op2], t[0], t[1], size);
    cris_alu_free_temps(dc, size, t);
}

static unsigned int dec_muls_r(DisasContext *dc)
{
    int size = memsize_zz(dc);

    LOG_DIS("muls.%c $r%u, $r%u\n",
            memsize_char(size), dc->op1, dc->op2);
    cris_cc_mask(dc, CC_MASK_NZV);
    cris_alu_alloc_temps(dc, size, t);
    dec_prep_alu_r(dc, dc->op1, dc->op2, size, 1, t[0], t[1]);

    cris_alu(dc, CC_OP_MULS, cpu_R[dc->op2], t[0], t[1], 4);
    cris_alu_free_temps(dc, size, t);
}
static unsigned int dec_mulu_r(DisasContext *dc)
{
    int size = memsize_zz(dc);

    LOG_DIS("mulu.%c $r%u, $r%u\n",
            memsize_char(size), dc->op1, dc->op2);
    cris_cc_mask(dc, CC_MASK_NZV);
    cris_alu_alloc_temps(dc, size, t);
    dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);

    cris_alu(dc, CC_OP_MULU, cpu_R[dc->op2], t[0], t[1], 4);
    cris_alu_free_temps(dc, size, t);
}

static unsigned int dec_dstep_r(DisasContext *dc)
{
    LOG_DIS("dstep $r%u, $r%u\n", dc->op1, dc->op2);
    cris_cc_mask(dc, CC_MASK_NZ);
    cris_alu(dc, CC_OP_DSTEP,
             cpu_R[dc->op2], cpu_R[dc->op2], cpu_R[dc->op1], 4);
}

static unsigned int dec_xor_r(DisasContext *dc)
{
    int size = memsize_zz(dc);
    LOG_DIS("xor.%c $r%u, $r%u\n",
            memsize_char(size), dc->op1, dc->op2);
    BUG_ON(size != 4); /* xor is dword.  */
    cris_cc_mask(dc, CC_MASK_NZ);
    cris_alu_alloc_temps(dc, size, t);
    dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);

    cris_alu(dc, CC_OP_XOR, cpu_R[dc->op2], t[0], t[1], 4);
    cris_alu_free_temps(dc, size, t);
}
static unsigned int dec_bound_r(DisasContext *dc)
{
    int size = memsize_zz(dc);
    LOG_DIS("bound.%c $r%u, $r%u\n",
            memsize_char(size), dc->op1, dc->op2);
    cris_cc_mask(dc, CC_MASK_NZ);
    l0 = tcg_temp_local_new();
    dec_prep_move_r(dc, dc->op1, dc->op2, size, 0, l0);
    cris_alu(dc, CC_OP_BOUND, cpu_R[dc->op2], cpu_R[dc->op2], l0, 4);
}

static unsigned int dec_cmp_r(DisasContext *dc)
{
    int size = memsize_zz(dc);
    LOG_DIS("cmp.%c $r%u, $r%u\n",
            memsize_char(size), dc->op1, dc->op2);
    cris_cc_mask(dc, CC_MASK_NZVC);
    cris_alu_alloc_temps(dc, size, t);
    dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);

    cris_alu(dc, CC_OP_CMP, cpu_R[dc->op2], t[0], t[1], size);
    cris_alu_free_temps(dc, size, t);
}

static unsigned int dec_abs_r(DisasContext *dc)
{
    LOG_DIS("abs $r%u, $r%u\n",
            dc->op1, dc->op2);
    cris_cc_mask(dc, CC_MASK_NZ);

    t0 = tcg_temp_new();
    tcg_gen_sari_tl(t0, cpu_R[dc->op1], 31);
    tcg_gen_xor_tl(cpu_R[dc->op2], cpu_R[dc->op1], t0);
    tcg_gen_sub_tl(cpu_R[dc->op2], cpu_R[dc->op2], t0);

    cris_alu(dc, CC_OP_MOVE,
             cpu_R[dc->op2], cpu_R[dc->op2], cpu_R[dc->op2], 4);
}
static unsigned int dec_add_r(DisasContext *dc)
{
    int size = memsize_zz(dc);
    LOG_DIS("add.%c $r%u, $r%u\n",
            memsize_char(size), dc->op1, dc->op2);
    cris_cc_mask(dc, CC_MASK_NZVC);
    cris_alu_alloc_temps(dc, size, t);
    dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);

    cris_alu(dc, CC_OP_ADD, cpu_R[dc->op2], t[0], t[1], size);
    cris_alu_free_temps(dc, size, t);
}

static unsigned int dec_addc_r(DisasContext *dc)
{
    LOG_DIS("addc $r%u, $r%u\n",
            dc->op1, dc->op2);
    cris_evaluate_flags(dc);
    /* Set for this insn.  */
    dc->flagx_known = 1;
    dc->flags_x = X_FLAG;

    cris_cc_mask(dc, CC_MASK_NZVC);
    cris_alu(dc, CC_OP_ADDC,
             cpu_R[dc->op2], cpu_R[dc->op2], cpu_R[dc->op1], 4);
}

static unsigned int dec_mcp_r(DisasContext *dc)
{
    LOG_DIS("mcp $p%u, $r%u\n",
            dc->op2, dc->op1);
    cris_evaluate_flags(dc);
    cris_cc_mask(dc, CC_MASK_RNZV);
    cris_alu(dc, CC_OP_MCP,
             cpu_R[dc->op1], cpu_R[dc->op1], cpu_PR[dc->op2], 4);
}

static char * swapmode_name(int mode, char *modename) {
    int i = 0;
    if (mode & 8)
        modename[i++] = 'n';
    if (mode & 4)
        modename[i++] = 'w';
    if (mode & 2)
        modename[i++] = 'b';
    if (mode & 1)
        modename[i++] = 'r';
    return modename;
}
static unsigned int dec_swap_r(DisasContext *dc)
{
    LOG_DIS("swap%s $r%u\n",
            swapmode_name(dc->op2, modename), dc->op1);

    cris_cc_mask(dc, CC_MASK_NZ);
    t0 = tcg_temp_new();
    t_gen_mov_TN_reg(t0, dc->op1);
    if (dc->op2 & 8)
        tcg_gen_not_tl(t0, t0);
    if (dc->op2 & 4)
        t_gen_swapw(t0, t0);
    if (dc->op2 & 2)
        t_gen_swapb(t0, t0);
    if (dc->op2 & 1)
        t_gen_swapr(t0, t0);
    cris_alu(dc, CC_OP_MOVE,
             cpu_R[dc->op1], cpu_R[dc->op1], t0, 4);
}

static unsigned int dec_or_r(DisasContext *dc)
{
    int size = memsize_zz(dc);
    LOG_DIS("or.%c $r%u, $r%u\n",
            memsize_char(size), dc->op1, dc->op2);
    cris_cc_mask(dc, CC_MASK_NZ);
    cris_alu_alloc_temps(dc, size, t);
    dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
    cris_alu(dc, CC_OP_OR, cpu_R[dc->op2], t[0], t[1], size);
    cris_alu_free_temps(dc, size, t);
}

static unsigned int dec_addi_r(DisasContext *dc)
{
    LOG_DIS("addi.%c $r%u, $r%u\n",
            memsize_char(memsize_zz(dc)), dc->op2, dc->op1);
    cris_cc_mask(dc, 0);
    t0 = tcg_temp_new();
    tcg_gen_shl_tl(t0, cpu_R[dc->op2], tcg_const_tl(dc->zzsize));
    tcg_gen_add_tl(cpu_R[dc->op1], cpu_R[dc->op1], t0);
}
static unsigned int dec_addi_acr(DisasContext *dc)
{
    LOG_DIS("addi.%c $r%u, $r%u, $acr\n",
            memsize_char(memsize_zz(dc)), dc->op2, dc->op1);
    cris_cc_mask(dc, 0);
    t0 = tcg_temp_new();
    tcg_gen_shl_tl(t0, cpu_R[dc->op2], tcg_const_tl(dc->zzsize));
    tcg_gen_add_tl(cpu_R[R_ACR], cpu_R[dc->op1], t0);
}

static unsigned int dec_neg_r(DisasContext *dc)
{
    int size = memsize_zz(dc);
    LOG_DIS("neg.%c $r%u, $r%u\n",
            memsize_char(size), dc->op1, dc->op2);
    cris_cc_mask(dc, CC_MASK_NZVC);
    cris_alu_alloc_temps(dc, size, t);
    dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);

    cris_alu(dc, CC_OP_NEG, cpu_R[dc->op2], t[0], t[1], size);
    cris_alu_free_temps(dc, size, t);
}

static unsigned int dec_btst_r(DisasContext *dc)
{
    LOG_DIS("btst $r%u, $r%u\n",
            dc->op1, dc->op2);
    cris_cc_mask(dc, CC_MASK_NZ);
    cris_evaluate_flags(dc);
    gen_helper_btst(cpu_PR[PR_CCS], cpu_R[dc->op2],
                    cpu_R[dc->op1], cpu_PR[PR_CCS]);
    cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op2],
             cpu_R[dc->op2], cpu_R[dc->op2], 4);
    cris_update_cc_op(dc, CC_OP_FLAGS, 4);
    dc->flags_uptodate = 1;
}
static unsigned int dec_sub_r(DisasContext *dc)
{
    int size = memsize_zz(dc);
    LOG_DIS("sub.%c $r%u, $r%u\n",
            memsize_char(size), dc->op1, dc->op2);
    cris_cc_mask(dc, CC_MASK_NZVC);
    cris_alu_alloc_temps(dc, size, t);
    dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
    cris_alu(dc, CC_OP_SUB, cpu_R[dc->op2], t[0], t[1], size);
    cris_alu_free_temps(dc, size, t);
}

/* Zero extension. From size to dword.  */
static unsigned int dec_movu_r(DisasContext *dc)
{
    int size = memsize_z(dc);
    LOG_DIS("movu.%c $r%u, $r%u\n",
            memsize_char(size), dc->op1, dc->op2);

    cris_cc_mask(dc, CC_MASK_NZ);
    t0 = tcg_temp_new();
    dec_prep_move_r(dc, dc->op1, dc->op2, size, 0, t0);
    cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
}

/* Sign extension. From size to dword.  */
static unsigned int dec_movs_r(DisasContext *dc)
{
    int size = memsize_z(dc);
    LOG_DIS("movs.%c $r%u, $r%u\n",
            memsize_char(size), dc->op1, dc->op2);

    cris_cc_mask(dc, CC_MASK_NZ);
    t0 = tcg_temp_new();
    /* Size can only be qi or hi.  */
    t_gen_sext(t0, cpu_R[dc->op1], size);
    cris_alu(dc, CC_OP_MOVE,
             cpu_R[dc->op2], cpu_R[dc->op1], t0, 4);
}
/* zero extension. From size to dword.  */
static unsigned int dec_addu_r(DisasContext *dc)
{
    int size = memsize_z(dc);
    LOG_DIS("addu.%c $r%u, $r%u\n",
            memsize_char(size), dc->op1, dc->op2);

    cris_cc_mask(dc, CC_MASK_NZVC);
    t0 = tcg_temp_new();
    /* Size can only be qi or hi.  */
    t_gen_zext(t0, cpu_R[dc->op1], size);
    cris_alu(dc, CC_OP_ADD,
             cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
}

/* Sign extension. From size to dword.  */
static unsigned int dec_adds_r(DisasContext *dc)
{
    int size = memsize_z(dc);
    LOG_DIS("adds.%c $r%u, $r%u\n",
            memsize_char(size), dc->op1, dc->op2);

    cris_cc_mask(dc, CC_MASK_NZVC);
    t0 = tcg_temp_new();
    /* Size can only be qi or hi.  */
    t_gen_sext(t0, cpu_R[dc->op1], size);
    cris_alu(dc, CC_OP_ADD,
             cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
}

/* Zero extension. From size to dword.  */
static unsigned int dec_subu_r(DisasContext *dc)
{
    int size = memsize_z(dc);
    LOG_DIS("subu.%c $r%u, $r%u\n",
            memsize_char(size), dc->op1, dc->op2);

    cris_cc_mask(dc, CC_MASK_NZVC);
    t0 = tcg_temp_new();
    /* Size can only be qi or hi.  */
    t_gen_zext(t0, cpu_R[dc->op1], size);
    cris_alu(dc, CC_OP_SUB,
             cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
}

/* Sign extension. From size to dword.  */
static unsigned int dec_subs_r(DisasContext *dc)
{
    int size = memsize_z(dc);
    LOG_DIS("subs.%c $r%u, $r%u\n",
            memsize_char(size), dc->op1, dc->op2);

    cris_cc_mask(dc, CC_MASK_NZVC);
    t0 = tcg_temp_new();
    /* Size can only be qi or hi.  */
    t_gen_sext(t0, cpu_R[dc->op1], size);
    cris_alu(dc, CC_OP_SUB,
             cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
}
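/* setf/clearf update individual $ccs flag bits; changes to the P or S
   flags also end the current translation block, since they change the
   translator's view of the cpu state.  */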
2033 static unsigned int dec_setclrf(DisasContext
*dc
)
2036 int set
= (~dc
->opcode
>> 2) & 1;
2039 flags
= (EXTRACT_FIELD(dc
->ir
, 12, 15) << 4)
2040 | EXTRACT_FIELD(dc
->ir
, 0, 3);
2041 if (set
&& flags
== 0) {
2044 } else if (!set
&& (flags
& 0x20)) {
2049 set
? "set" : "clr",
2053 /* User space is not allowed to touch these. Silently ignore. */
2054 if (dc
->tb_flags
& U_FLAG
) {
2055 flags
&= ~(S_FLAG
| I_FLAG
| U_FLAG
);
2058 if (flags
& X_FLAG
) {
2059 dc
->flagx_known
= 1;
2061 dc
->flags_x
= X_FLAG
;
2066 /* Break the TB if the P flag changes. */
2067 if (flags
& P_FLAG
) {
2068 if ((set
&& !(dc
->tb_flags
& P_FLAG
))
2069 || (!set
&& (dc
->tb_flags
& P_FLAG
))) {
2070 tcg_gen_movi_tl(env_pc
, dc
->pc
+ 2);
2071 dc
->is_jmp
= DISAS_UPDATE
;
2072 dc
->cpustate_changed
= 1;
2075 if (flags
& S_FLAG
) {
2076 dc
->cpustate_changed
= 1;
2080 /* Simply decode the flags. */
2081 cris_evaluate_flags (dc
);
2082 cris_update_cc_op(dc
, CC_OP_FLAGS
, 4);
2083 cris_update_cc_x(dc
);
2084 tcg_gen_movi_tl(cc_op
, dc
->cc_op
);
2087 if (!(dc
->tb_flags
& U_FLAG
) && (flags
& U_FLAG
)) {
2088 /* Enter user mode. */
2089 t_gen_mov_env_TN(ksp
, cpu_R
[R_SP
]);
2090 tcg_gen_mov_tl(cpu_R
[R_SP
], cpu_PR
[PR_USP
]);
2091 dc
->cpustate_changed
= 1;
2093 tcg_gen_ori_tl(cpu_PR
[PR_CCS
], cpu_PR
[PR_CCS
], flags
);
2096 tcg_gen_andi_tl(cpu_PR
[PR_CCS
], cpu_PR
[PR_CCS
], ~flags
);
2098 dc
->flags_uptodate
= 1;
2103 static unsigned int dec_move_rs(DisasContext
*dc
)
2105 LOG_DIS("move $r%u, $s%u\n", dc
->op1
, dc
->op2
);
2106 cris_cc_mask(dc
, 0);
2107 gen_helper_movl_sreg_reg(tcg_const_tl(dc
->op2
), tcg_const_tl(dc
->op1
));
2110 static unsigned int dec_move_sr(DisasContext
*dc
)
2112 LOG_DIS("move $s%u, $r%u\n", dc
->op2
, dc
->op1
);
2113 cris_cc_mask(dc
, 0);
2114 gen_helper_movl_reg_sreg(tcg_const_tl(dc
->op1
), tcg_const_tl(dc
->op2
));
2118 static unsigned int dec_move_rp(DisasContext
*dc
)
2121 LOG_DIS("move $r%u, $p%u\n", dc
->op1
, dc
->op2
);
2122 cris_cc_mask(dc
, 0);
2124 t
[0] = tcg_temp_new();
2125 if (dc
->op2
== PR_CCS
) {
2126 cris_evaluate_flags(dc
);
2127 t_gen_mov_TN_reg(t
[0], dc
->op1
);
2128 if (dc
->tb_flags
& U_FLAG
) {
2129 t
[1] = tcg_temp_new();
2130 /* User space is not allowed to touch all flags. */
2131 tcg_gen_andi_tl(t
[0], t
[0], 0x39f);
2132 tcg_gen_andi_tl(t
[1], cpu_PR
[PR_CCS
], ~0x39f);
2133 tcg_gen_or_tl(t
[0], t
[1], t
[0]);
2134 tcg_temp_free(t
[1]);
2138 t_gen_mov_TN_reg(t
[0], dc
->op1
);
2140 t_gen_mov_preg_TN(dc
, dc
->op2
, t
[0]);
2141 if (dc
->op2
== PR_CCS
) {
2142 cris_update_cc_op(dc
, CC_OP_FLAGS
, 4);
2143 dc
->flags_uptodate
= 1;
2145 tcg_temp_free(t
[0]);
2148 static unsigned int dec_move_pr(DisasContext
*dc
)
2151 LOG_DIS("move $p%u, $r%u\n", dc
->op1
, dc
->op2
);
2152 cris_cc_mask(dc
, 0);
2154 if (dc
->op2
== PR_CCS
)
2155 cris_evaluate_flags(dc
);
2157 t0
= tcg_temp_new();
2158 t_gen_mov_TN_preg(t0
, dc
->op2
);
2159 cris_alu(dc
, CC_OP_MOVE
,
2160 cpu_R
[dc
->op1
], cpu_R
[dc
->op1
], t0
, preg_sizes
[dc
->op2
]);
2165 static unsigned int dec_move_mr(DisasContext
*dc
)
2167 int memsize
= memsize_zz(dc
);
2169 LOG_DIS("move.%c [$r%u%s, $r%u\n",
2170 memsize_char(memsize
),
2171 dc
->op1
, dc
->postinc
? "+]" : "]",
2175 insn_len
= dec_prep_move_m(dc
, 0, 4, cpu_R
[dc
->op2
]);
2176 cris_cc_mask(dc
, CC_MASK_NZ
);
2177 cris_update_cc_op(dc
, CC_OP_MOVE
, 4);
2178 cris_update_cc_x(dc
);
2179 cris_update_result(dc
, cpu_R
[dc
->op2
]);
2184 t0
= tcg_temp_new();
2185 insn_len
= dec_prep_move_m(dc
, 0, memsize
, t0
);
2186 cris_cc_mask(dc
, CC_MASK_NZ
);
2187 cris_alu(dc
, CC_OP_MOVE
,
2188 cpu_R
[dc
->op2
], cpu_R
[dc
->op2
], t0
, memsize
);
2191 do_postinc(dc
, memsize
);
2195 static inline void cris_alu_m_alloc_temps(TCGv
*t
)
2197 t
[0] = tcg_temp_new();
2198 t
[1] = tcg_temp_new();
2201 static inline void cris_alu_m_free_temps(TCGv
*t
)
2203 tcg_temp_free(t
[0]);
2204 tcg_temp_free(t
[1]);
2207 static unsigned int dec_movs_m(DisasContext
*dc
)
2210 int memsize
= memsize_z(dc
);
2212 LOG_DIS("movs.%c [$r%u%s, $r%u\n",
2213 memsize_char(memsize
),
2214 dc
->op1
, dc
->postinc
? "+]" : "]",
2217 cris_alu_m_alloc_temps(t
);
2219 insn_len
= dec_prep_alu_m(dc
, 1, memsize
, t
[0], t
[1]);
2220 cris_cc_mask(dc
, CC_MASK_NZ
);
2221 cris_alu(dc
, CC_OP_MOVE
,
2222 cpu_R
[dc
->op2
], cpu_R
[dc
->op2
], t
[1], 4);
2223 do_postinc(dc
, memsize
);
2224 cris_alu_m_free_temps(t
);
2228 static unsigned int dec_addu_m(DisasContext
*dc
)
2231 int memsize
= memsize_z(dc
);
2233 LOG_DIS("addu.%c [$r%u%s, $r%u\n",
2234 memsize_char(memsize
),
2235 dc
->op1
, dc
->postinc
? "+]" : "]",
2238 cris_alu_m_alloc_temps(t
);
2240 insn_len
= dec_prep_alu_m(dc
, 0, memsize
, t
[0], t
[1]);
2241 cris_cc_mask(dc
, CC_MASK_NZVC
);
2242 cris_alu(dc
, CC_OP_ADD
,
2243 cpu_R
[dc
->op2
], cpu_R
[dc
->op2
], t
[1], 4);
2244 do_postinc(dc
, memsize
);
2245 cris_alu_m_free_temps(t
);
2249 static unsigned int dec_adds_m(DisasContext
*dc
)
2252 int memsize
= memsize_z(dc
);
2254 LOG_DIS("adds.%c [$r%u%s, $r%u\n",
2255 memsize_char(memsize
),
2256 dc
->op1
, dc
->postinc
? "+]" : "]",
2259 cris_alu_m_alloc_temps(t
);
2261 insn_len
= dec_prep_alu_m(dc
, 1, memsize
, t
[0], t
[1]);
2262 cris_cc_mask(dc
, CC_MASK_NZVC
);
2263 cris_alu(dc
, CC_OP_ADD
, cpu_R
[dc
->op2
], cpu_R
[dc
->op2
], t
[1], 4);
2264 do_postinc(dc
, memsize
);
2265 cris_alu_m_free_temps(t
);
2269 static unsigned int dec_subu_m(DisasContext
*dc
)
2272 int memsize
= memsize_z(dc
);
2274 LOG_DIS("subu.%c [$r%u%s, $r%u\n",
2275 memsize_char(memsize
),
2276 dc
->op1
, dc
->postinc
? "+]" : "]",
2279 cris_alu_m_alloc_temps(t
);
2281 insn_len
= dec_prep_alu_m(dc
, 0, memsize
, t
[0], t
[1]);
2282 cris_cc_mask(dc
, CC_MASK_NZVC
);
2283 cris_alu(dc
, CC_OP_SUB
, cpu_R
[dc
->op2
], cpu_R
[dc
->op2
], t
[1], 4);
2284 do_postinc(dc
, memsize
);
2285 cris_alu_m_free_temps(t
);
2289 static unsigned int dec_subs_m(DisasContext
*dc
)
2292 int memsize
= memsize_z(dc
);
2294 LOG_DIS("subs.%c [$r%u%s, $r%u\n",
2295 memsize_char(memsize
),
2296 dc
->op1
, dc
->postinc
? "+]" : "]",
2299 cris_alu_m_alloc_temps(t
);
2301 insn_len
= dec_prep_alu_m(dc
, 1, memsize
, t
[0], t
[1]);
2302 cris_cc_mask(dc
, CC_MASK_NZVC
);
2303 cris_alu(dc
, CC_OP_SUB
, cpu_R
[dc
->op2
], cpu_R
[dc
->op2
], t
[1], 4);
2304 do_postinc(dc
, memsize
);
2305 cris_alu_m_free_temps(t
);
2309 static unsigned int dec_movu_m(DisasContext
*dc
)
2312 int memsize
= memsize_z(dc
);
2315 LOG_DIS("movu.%c [$r%u%s, $r%u\n",
2316 memsize_char(memsize
),
2317 dc
->op1
, dc
->postinc
? "+]" : "]",
2320 cris_alu_m_alloc_temps(t
);
2321 insn_len
= dec_prep_alu_m(dc
, 0, memsize
, t
[0], t
[1]);
2322 cris_cc_mask(dc
, CC_MASK_NZ
);
2323 cris_alu(dc
, CC_OP_MOVE
, cpu_R
[dc
->op2
], cpu_R
[dc
->op2
], t
[1], 4);
2324 do_postinc(dc
, memsize
);
2325 cris_alu_m_free_temps(t
);
2329 static unsigned int dec_cmpu_m(DisasContext
*dc
)
2332 int memsize
= memsize_z(dc
);
2334 LOG_DIS("cmpu.%c [$r%u%s, $r%u\n",
2335 memsize_char(memsize
),
2336 dc
->op1
, dc
->postinc
? "+]" : "]",
2339 cris_alu_m_alloc_temps(t
);
2340 insn_len
= dec_prep_alu_m(dc
, 0, memsize
, t
[0], t
[1]);
2341 cris_cc_mask(dc
, CC_MASK_NZVC
);
2342 cris_alu(dc
, CC_OP_CMP
, cpu_R
[dc
->op2
], cpu_R
[dc
->op2
], t
[1], 4);
2343 do_postinc(dc
, memsize
);
2344 cris_alu_m_free_temps(t
);
2348 static unsigned int dec_cmps_m(DisasContext
*dc
)
2351 int memsize
= memsize_z(dc
);
2353 LOG_DIS("cmps.%c [$r%u%s, $r%u\n",
2354 memsize_char(memsize
),
2355 dc
->op1
, dc
->postinc
? "+]" : "]",
2358 cris_alu_m_alloc_temps(t
);
2359 insn_len
= dec_prep_alu_m(dc
, 1, memsize
, t
[0], t
[1]);
2360 cris_cc_mask(dc
, CC_MASK_NZVC
);
2361 cris_alu(dc
, CC_OP_CMP
,
2362 cpu_R
[dc
->op2
], cpu_R
[dc
->op2
], t
[1],
2364 do_postinc(dc
, memsize
);
2365 cris_alu_m_free_temps(t
);
2369 static unsigned int dec_cmp_m(DisasContext
*dc
)
2372 int memsize
= memsize_zz(dc
);
2374 LOG_DIS("cmp.%c [$r%u%s, $r%u\n",
2375 memsize_char(memsize
),
2376 dc
->op1
, dc
->postinc
? "+]" : "]",
2379 cris_alu_m_alloc_temps(t
);
2380 insn_len
= dec_prep_alu_m(dc
, 0, memsize
, t
[0], t
[1]);
2381 cris_cc_mask(dc
, CC_MASK_NZVC
);
2382 cris_alu(dc
, CC_OP_CMP
,
2383 cpu_R
[dc
->op2
], cpu_R
[dc
->op2
], t
[1],
2385 do_postinc(dc
, memsize
);
2386 cris_alu_m_free_temps(t
);
2390 static unsigned int dec_test_m(DisasContext
*dc
)
2393 int memsize
= memsize_zz(dc
);
2395 LOG_DIS("test.%d [$r%u%s] op2=%x\n",
2396 memsize_char(memsize
),
2397 dc
->op1
, dc
->postinc
? "+]" : "]",
2400 cris_evaluate_flags(dc
);
2402 cris_alu_m_alloc_temps(t
);
2403 insn_len
= dec_prep_alu_m(dc
, 0, memsize
, t
[0], t
[1]);
2404 cris_cc_mask(dc
, CC_MASK_NZ
);
2405 tcg_gen_andi_tl(cpu_PR
[PR_CCS
], cpu_PR
[PR_CCS
], ~3);
2407 cris_alu(dc
, CC_OP_CMP
,
2408 cpu_R
[dc
->op2
], t
[1], tcg_const_tl(0), memsize_zz(dc
));
2409 do_postinc(dc
, memsize
);
2410 cris_alu_m_free_temps(t
);
2414 static unsigned int dec_and_m(DisasContext
*dc
)
2417 int memsize
= memsize_zz(dc
);
2419 LOG_DIS("and.%d [$r%u%s, $r%u\n",
2420 memsize_char(memsize
),
2421 dc
->op1
, dc
->postinc
? "+]" : "]",
2424 cris_alu_m_alloc_temps(t
);
2425 insn_len
= dec_prep_alu_m(dc
, 0, memsize
, t
[0], t
[1]);
2426 cris_cc_mask(dc
, CC_MASK_NZ
);
2427 cris_alu(dc
, CC_OP_AND
, cpu_R
[dc
->op2
], t
[0], t
[1], memsize_zz(dc
));
2428 do_postinc(dc
, memsize
);
2429 cris_alu_m_free_temps(t
);
2433 static unsigned int dec_add_m(DisasContext
*dc
)
2436 int memsize
= memsize_zz(dc
);
2438 LOG_DIS("add.%d [$r%u%s, $r%u\n",
2439 memsize_char(memsize
),
2440 dc
->op1
, dc
->postinc
? "+]" : "]",
2443 cris_alu_m_alloc_temps(t
);
2444 insn_len
= dec_prep_alu_m(dc
, 0, memsize
, t
[0], t
[1]);
2445 cris_cc_mask(dc
, CC_MASK_NZVC
);
2446 cris_alu(dc
, CC_OP_ADD
,
2447 cpu_R
[dc
->op2
], t
[0], t
[1], memsize_zz(dc
));
2448 do_postinc(dc
, memsize
);
2449 cris_alu_m_free_temps(t
);
2453 static unsigned int dec_addo_m(DisasContext
*dc
)
2456 int memsize
= memsize_zz(dc
);
2458 LOG_DIS("add.%d [$r%u%s, $r%u\n",
2459 memsize_char(memsize
),
2460 dc
->op1
, dc
->postinc
? "+]" : "]",
2463 cris_alu_m_alloc_temps(t
);
2464 insn_len
= dec_prep_alu_m(dc
, 1, memsize
, t
[0], t
[1]);
2465 cris_cc_mask(dc
, 0);
2466 cris_alu(dc
, CC_OP_ADD
, cpu_R
[R_ACR
], t
[0], t
[1], 4);
2467 do_postinc(dc
, memsize
);
2468 cris_alu_m_free_temps(t
);
2472 static unsigned int dec_bound_m(DisasContext
*dc
)
2475 int memsize
= memsize_zz(dc
);
2477 LOG_DIS("bound.%d [$r%u%s, $r%u\n",
2478 memsize_char(memsize
),
2479 dc
->op1
, dc
->postinc
? "+]" : "]",
2482 l
[0] = tcg_temp_local_new();
2483 l
[1] = tcg_temp_local_new();
2484 insn_len
= dec_prep_alu_m(dc
, 0, memsize
, l
[0], l
[1]);
2485 cris_cc_mask(dc
, CC_MASK_NZ
);
2486 cris_alu(dc
, CC_OP_BOUND
, cpu_R
[dc
->op2
], l
[0], l
[1], 4);
2487 do_postinc(dc
, memsize
);
2488 tcg_temp_free(l
[0]);
2489 tcg_temp_free(l
[1]);
2493 static unsigned int dec_addc_mr(DisasContext
*dc
)
2497 LOG_DIS("addc [$r%u%s, $r%u\n",
2498 dc
->op1
, dc
->postinc
? "+]" : "]",
2501 cris_evaluate_flags(dc
);
2503 /* Set for this insn. */
2504 dc
->flagx_known
= 1;
2505 dc
->flags_x
= X_FLAG
;
2507 cris_alu_m_alloc_temps(t
);
2508 insn_len
= dec_prep_alu_m(dc
, 0, 4, t
[0], t
[1]);
2509 cris_cc_mask(dc
, CC_MASK_NZVC
);
2510 cris_alu(dc
, CC_OP_ADDC
, cpu_R
[dc
->op2
], t
[0], t
[1], 4);
2512 cris_alu_m_free_temps(t
);
2516 static unsigned int dec_sub_m(DisasContext
*dc
)
2519 int memsize
= memsize_zz(dc
);
2521 LOG_DIS("sub.%c [$r%u%s, $r%u ir=%x zz=%x\n",
2522 memsize_char(memsize
),
2523 dc
->op1
, dc
->postinc
? "+]" : "]",
2524 dc
->op2
, dc
->ir
, dc
->zzsize
);
2526 cris_alu_m_alloc_temps(t
);
2527 insn_len
= dec_prep_alu_m(dc
, 0, memsize
, t
[0], t
[1]);
2528 cris_cc_mask(dc
, CC_MASK_NZVC
);
2529 cris_alu(dc
, CC_OP_SUB
, cpu_R
[dc
->op2
], t
[0], t
[1], memsize
);
2530 do_postinc(dc
, memsize
);
2531 cris_alu_m_free_temps(t
);
2535 static unsigned int dec_or_m(DisasContext
*dc
)
2538 int memsize
= memsize_zz(dc
);
2540 LOG_DIS("or.%d [$r%u%s, $r%u pc=%x\n",
2541 memsize_char(memsize
),
2542 dc
->op1
, dc
->postinc
? "+]" : "]",
2545 cris_alu_m_alloc_temps(t
);
2546 insn_len
= dec_prep_alu_m(dc
, 0, memsize
, t
[0], t
[1]);
2547 cris_cc_mask(dc
, CC_MASK_NZ
);
2548 cris_alu(dc
, CC_OP_OR
,
2549 cpu_R
[dc
->op2
], t
[0], t
[1], memsize_zz(dc
));
2550 do_postinc(dc
, memsize
);
2551 cris_alu_m_free_temps(t
);
2555 static unsigned int dec_move_mp(DisasContext
*dc
)
2558 int memsize
= memsize_zz(dc
);
2561 LOG_DIS("move.%c [$r%u%s, $p%u\n",
2562 memsize_char(memsize
),
2564 dc
->postinc
? "+]" : "]",
2567 cris_alu_m_alloc_temps(t
);
2568 insn_len
= dec_prep_alu_m(dc
, 0, memsize
, t
[0], t
[1]);
2569 cris_cc_mask(dc
, 0);
2570 if (dc
->op2
== PR_CCS
) {
2571 cris_evaluate_flags(dc
);
2572 if (dc
->tb_flags
& U_FLAG
) {
2573 /* User space is not allowed to touch all flags. */
2574 tcg_gen_andi_tl(t
[1], t
[1], 0x39f);
2575 tcg_gen_andi_tl(t
[0], cpu_PR
[PR_CCS
], ~0x39f);
2576 tcg_gen_or_tl(t
[1], t
[0], t
[1]);
2580 t_gen_mov_preg_TN(dc
, dc
->op2
, t
[1]);
2582 do_postinc(dc
, memsize
);
2583 cris_alu_m_free_temps(t
);
2587 static unsigned int dec_move_pm(DisasContext
*dc
)
2592 memsize
= preg_sizes
[dc
->op2
];
2594 LOG_DIS("move.%c $p%u, [$r%u%s\n",
2595 memsize_char(memsize
),
2596 dc
->op2
, dc
->op1
, dc
->postinc
? "+]" : "]");
2598 /* prepare store. Address in T0, value in T1. */
2599 if (dc
->op2
== PR_CCS
)
2600 cris_evaluate_flags(dc
);
2601 t0
= tcg_temp_new();
2602 t_gen_mov_TN_preg(t0
, dc
->op2
);
2603 cris_flush_cc_state(dc
);
2604 gen_store(dc
, cpu_R
[dc
->op1
], t0
, memsize
);
2607 cris_cc_mask(dc
, 0);
2609 tcg_gen_addi_tl(cpu_R
[dc
->op1
], cpu_R
[dc
->op1
], memsize
);
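/* movem transfers a contiguous range of general registers; the memory side
   is accessed as 64-bit quantities where possible and split back into
   pairs of 32-bit registers afterwards.  */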
2613 static unsigned int dec_movem_mr(DisasContext
*dc
)
2619 int nr
= dc
->op2
+ 1;
2621 LOG_DIS("movem [$r%u%s, $r%u\n", dc
->op1
,
2622 dc
->postinc
? "+]" : "]", dc
->op2
);
2624 addr
= tcg_temp_new();
2625 /* There are probably better ways of doing this. */
2626 cris_flush_cc_state(dc
);
2627 for (i
= 0; i
< (nr
>> 1); i
++) {
2628 tmp
[i
] = tcg_temp_new_i64();
2629 tcg_gen_addi_tl(addr
, cpu_R
[dc
->op1
], i
* 8);
2630 gen_load64(dc
, tmp
[i
], addr
);
2633 tmp32
= tcg_temp_new_i32();
2634 tcg_gen_addi_tl(addr
, cpu_R
[dc
->op1
], i
* 8);
2635 gen_load(dc
, tmp32
, addr
, 4, 0);
2638 tcg_temp_free(addr
);
2640 for (i
= 0; i
< (nr
>> 1); i
++) {
2641 tcg_gen_trunc_i64_i32(cpu_R
[i
* 2], tmp
[i
]);
2642 tcg_gen_shri_i64(tmp
[i
], tmp
[i
], 32);
2643 tcg_gen_trunc_i64_i32(cpu_R
[i
* 2 + 1], tmp
[i
]);
2644 tcg_temp_free_i64(tmp
[i
]);
2647 tcg_gen_mov_tl(cpu_R
[dc
->op2
], tmp32
);
2648 tcg_temp_free(tmp32
);
2651 /* writeback the updated pointer value. */
2653 tcg_gen_addi_tl(cpu_R
[dc
->op1
], cpu_R
[dc
->op1
], nr
* 4);
2655 /* gen_load might want to evaluate the previous insns flags. */
2656 cris_cc_mask(dc
, 0);
static unsigned int dec_movem_rm(DisasContext *dc)
{
    TCGv tmp;
    TCGv addr;
    int i;

    LOG_DIS("movem $r%u, [$r%u%s\n", dc->op2, dc->op1,
            dc->postinc ? "+]" : "]");

    cris_flush_cc_state(dc);

    tmp = tcg_temp_new();
    addr = tcg_temp_new();
    tcg_gen_movi_tl(tmp, 4);
    tcg_gen_mov_tl(addr, cpu_R[dc->op1]);
    for (i = 0; i <= dc->op2; i++) {
        /* Perform the store, then displace addr to the next word.  */
        gen_store(dc, addr, cpu_R[i], 4);
        tcg_gen_add_tl(addr, addr, tmp);
    }
    if (dc->postinc)
        tcg_gen_mov_tl(cpu_R[dc->op1], addr);
    cris_cc_mask(dc, 0);
    tcg_temp_free(tmp);
    tcg_temp_free(addr);
    return 2;
}
static unsigned int dec_move_rm(DisasContext *dc)
{
    int memsize = memsize_zz(dc);

    LOG_DIS("move.%d $r%u, [$r%u]\n",
            memsize, dc->op2, dc->op1);

    /* prepare store.  */
    cris_flush_cc_state(dc);
    gen_store(dc, cpu_R[dc->op1], cpu_R[dc->op2], memsize);

    if (dc->postinc)
        tcg_gen_addi_tl(cpu_R[dc->op1], cpu_R[dc->op1], memsize);
    cris_cc_mask(dc, 0);
    return 2;
}
static unsigned int dec_lapcq(DisasContext *dc)
{
    LOG_DIS("lapcq %x, $r%u\n",
            dc->pc + dc->op1 * 2, dc->op2);
    cris_cc_mask(dc, 0);
    tcg_gen_movi_tl(cpu_R[dc->op2], dc->pc + dc->op1 * 2);
    return 2;
}
static unsigned int dec_lapc_im(DisasContext *dc)
{
    unsigned int rd;
    int32_t imm;
    int32_t pc;

    rd = dc->op2;

    cris_cc_mask(dc, 0);
    imm = ldl_code(dc->pc + 2);
    LOG_DIS("lapc 0x%x, $r%u\n", imm + dc->pc, dc->op2);

    pc = dc->pc;
    pc += imm;

    t_gen_mov_reg_TN(rd, tcg_const_tl(pc));
    return 6;
}
/* Jump to special reg.  */
static unsigned int dec_jump_p(DisasContext *dc)
{
    LOG_DIS("jump $p%u\n", dc->op2);

    if (dc->op2 == PR_CCS)
        cris_evaluate_flags(dc);
    t_gen_mov_TN_preg(env_btarget, dc->op2);
    /* rete will often have the low bit set to indicate a delay slot.  */
    tcg_gen_andi_tl(env_btarget, env_btarget, ~1);
    cris_cc_mask(dc, 0);
    cris_prepare_jmp(dc, JMP_INDIRECT);
    return 2;
}
/* Jump and save.  */
static unsigned int dec_jas_r(DisasContext *dc)
{
    LOG_DIS("jas $r%u, $p%u\n", dc->op1, dc->op2);
    cris_cc_mask(dc, 0);
    /* Store the return address in Pd.  */
    tcg_gen_mov_tl(env_btarget, cpu_R[dc->op1]);
    t_gen_mov_preg_TN(dc, dc->op2, tcg_const_tl(dc->pc + 4));

    cris_prepare_jmp(dc, JMP_INDIRECT);
    return 2;
}
static unsigned int dec_jas_im(DisasContext *dc)
{
    uint32_t imm;

    imm = ldl_code(dc->pc + 2);

    LOG_DIS("jas 0x%x\n", imm);
    cris_cc_mask(dc, 0);
    /* Store the return address in Pd.  */
    t_gen_mov_preg_TN(dc, dc->op2, tcg_const_tl(dc->pc + 8));

    dc->jmp_pc = imm;
    cris_prepare_jmp(dc, JMP_DIRECT);
    return 6;
}
static unsigned int dec_jasc_im(DisasContext *dc)
{
    uint32_t imm;

    imm = ldl_code(dc->pc + 2);

    LOG_DIS("jasc 0x%x\n", imm);
    cris_cc_mask(dc, 0);
    /* Store the return address in Pd.  */
    t_gen_mov_preg_TN(dc, dc->op2, tcg_const_tl(dc->pc + 8 + 4));

    dc->jmp_pc = imm;
    cris_prepare_jmp(dc, JMP_DIRECT);
    return 6;
}
static unsigned int dec_jasc_r(DisasContext *dc)
{
    LOG_DIS("jasc_r $r%u, $p%u\n", dc->op1, dc->op2);
    cris_cc_mask(dc, 0);
    /* Store the return address in Pd.  */
    tcg_gen_mov_tl(env_btarget, cpu_R[dc->op1]);
    t_gen_mov_preg_TN(dc, dc->op2, tcg_const_tl(dc->pc + 4 + 4));
    cris_prepare_jmp(dc, JMP_INDIRECT);
    return 2;
}
static unsigned int dec_bcc_im(DisasContext *dc)
{
    int32_t offset;
    uint32_t cond = dc->op2;

    offset = ldsw_code(dc->pc + 2);

    LOG_DIS("b%s %d pc=%x dst=%x\n",
            cc_name(cond), offset,
            dc->pc, dc->pc + offset);

    cris_cc_mask(dc, 0);
    /* op2 holds the condition-code.  */
    cris_prepare_cc_branch(dc, offset, cond);
    return 4;
}
static unsigned int dec_bas_im(DisasContext *dc)
{
    int32_t simm;

    simm = ldl_code(dc->pc + 2);

    LOG_DIS("bas 0x%x, $p%u\n", dc->pc + simm, dc->op2);
    cris_cc_mask(dc, 0);
    /* Store the return address in Pd.  */
    t_gen_mov_preg_TN(dc, dc->op2, tcg_const_tl(dc->pc + 8));

    dc->jmp_pc = dc->pc + simm;
    cris_prepare_jmp(dc, JMP_DIRECT);
    return 6;
}
static unsigned int dec_basc_im(DisasContext *dc)
{
    int32_t simm;

    simm = ldl_code(dc->pc + 2);

    LOG_DIS("basc 0x%x, $p%u\n", dc->pc + simm, dc->op2);
    cris_cc_mask(dc, 0);
    /* Store the return address in Pd.  */
    t_gen_mov_preg_TN(dc, dc->op2, tcg_const_tl(dc->pc + 12));

    dc->jmp_pc = dc->pc + simm;
    cris_prepare_jmp(dc, JMP_DIRECT);
    return 6;
}
static unsigned int dec_rfe_etc(DisasContext *dc)
{
    cris_cc_mask(dc, 0);

    if (dc->op2 == 15) {
        t_gen_mov_env_TN(halted, tcg_const_tl(1));
        tcg_gen_movi_tl(env_pc, dc->pc + 2);
        t_gen_raise_exception(EXCP_HLT);
        return 2;
    }

    switch (dc->op2 & 7) {
    case 2:
        /* rfe.  */
        cris_evaluate_flags(dc);
        gen_helper_rfe();
        dc->is_jmp = DISAS_UPDATE;
        break;
    case 5:
        /* rfn.  */
        cris_evaluate_flags(dc);
        gen_helper_rfn();
        dc->is_jmp = DISAS_UPDATE;
        break;
    case 6:
        /* break.  */
        LOG_DIS("break %d\n", dc->op1);
        cris_evaluate_flags(dc);
        tcg_gen_movi_tl(env_pc, dc->pc + 2);

        /* Breaks start at 16 in the exception vector.  */
        t_gen_mov_env_TN(trap_vector,
                         tcg_const_tl(dc->op1 + 16));
        t_gen_raise_exception(EXCP_BREAK);
        dc->is_jmp = DISAS_UPDATE;
        break;
    default:
        printf("op2=%x\n", dc->op2);
        BUG();
        break;
    }
    return 2;
}
static unsigned int dec_ftag_fidx_d_m(DisasContext *dc)
{
    return 2;
}

static unsigned int dec_ftag_fidx_i_m(DisasContext *dc)
{
    return 2;
}

static unsigned int dec_null(DisasContext *dc)
{
    printf("unknown insn pc=%x opc=%x op1=%x op2=%x\n",
           dc->pc, dc->opcode, dc->op1, dc->op2);
    BUG();
    return 2;
}
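/*
 * Instruction dispatch table.  Each entry pairs a bits/mask pattern with the
 * decoder routine for that pattern; cris_decoder() scans the table and calls
 * the first entry whose mask/bits match the opcode field, so more specific
 * patterns must be listed before the generic ones.
 */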
static struct decoder_info {
    struct {
        uint32_t bits;
        uint32_t mask;
    };
    unsigned int (*dec)(DisasContext *dc);
} decinfo[] = {
    /* Order matters here.  */
    {DEC_MOVEQ, dec_moveq},
    {DEC_BTSTQ, dec_btstq},
    {DEC_CMPQ, dec_cmpq},
    {DEC_ADDOQ, dec_addoq},
    {DEC_ADDQ, dec_addq},
    {DEC_SUBQ, dec_subq},
    {DEC_ANDQ, dec_andq},

    {DEC_ASRQ, dec_asrq},
    {DEC_LSLQ, dec_lslq},
    {DEC_LSRQ, dec_lsrq},
    {DEC_BCCQ, dec_bccq},

    {DEC_BCC_IM, dec_bcc_im},
    {DEC_JAS_IM, dec_jas_im},
    {DEC_JAS_R, dec_jas_r},
    {DEC_JASC_IM, dec_jasc_im},
    {DEC_JASC_R, dec_jasc_r},
    {DEC_BAS_IM, dec_bas_im},
    {DEC_BASC_IM, dec_basc_im},
    {DEC_JUMP_P, dec_jump_p},
    {DEC_LAPC_IM, dec_lapc_im},
    {DEC_LAPCQ, dec_lapcq},

    {DEC_RFE_ETC, dec_rfe_etc},
    {DEC_ADDC_MR, dec_addc_mr},

    {DEC_MOVE_MP, dec_move_mp},
    {DEC_MOVE_PM, dec_move_pm},
    {DEC_MOVEM_MR, dec_movem_mr},
    {DEC_MOVEM_RM, dec_movem_rm},
    {DEC_MOVE_PR, dec_move_pr},
    {DEC_SCC_R, dec_scc_r},
    {DEC_SETF, dec_setclrf},
    {DEC_CLEARF, dec_setclrf},

    {DEC_MOVE_SR, dec_move_sr},
    {DEC_MOVE_RP, dec_move_rp},
    {DEC_SWAP_R, dec_swap_r},
    {DEC_ABS_R, dec_abs_r},
    {DEC_LZ_R, dec_lz_r},
    {DEC_MOVE_RS, dec_move_rs},
    {DEC_BTST_R, dec_btst_r},
    {DEC_ADDC_R, dec_addc_r},

    {DEC_DSTEP_R, dec_dstep_r},
    {DEC_XOR_R, dec_xor_r},
    {DEC_MCP_R, dec_mcp_r},
    {DEC_CMP_R, dec_cmp_r},

    {DEC_ADDI_R, dec_addi_r},
    {DEC_ADDI_ACR, dec_addi_acr},

    {DEC_ADD_R, dec_add_r},
    {DEC_SUB_R, dec_sub_r},

    {DEC_ADDU_R, dec_addu_r},
    {DEC_ADDS_R, dec_adds_r},
    {DEC_SUBU_R, dec_subu_r},
    {DEC_SUBS_R, dec_subs_r},
    {DEC_LSL_R, dec_lsl_r},

    {DEC_AND_R, dec_and_r},
    {DEC_OR_R, dec_or_r},
    {DEC_BOUND_R, dec_bound_r},
    {DEC_ASR_R, dec_asr_r},
    {DEC_LSR_R, dec_lsr_r},

    {DEC_MOVU_R, dec_movu_r},
    {DEC_MOVS_R, dec_movs_r},
    {DEC_NEG_R, dec_neg_r},
    {DEC_MOVE_R, dec_move_r},

    {DEC_FTAG_FIDX_I_M, dec_ftag_fidx_i_m},
    {DEC_FTAG_FIDX_D_M, dec_ftag_fidx_d_m},

    {DEC_MULS_R, dec_muls_r},
    {DEC_MULU_R, dec_mulu_r},

    {DEC_ADDU_M, dec_addu_m},
    {DEC_ADDS_M, dec_adds_m},
    {DEC_SUBU_M, dec_subu_m},
    {DEC_SUBS_M, dec_subs_m},

    {DEC_CMPU_M, dec_cmpu_m},
    {DEC_CMPS_M, dec_cmps_m},
    {DEC_MOVU_M, dec_movu_m},
    {DEC_MOVS_M, dec_movs_m},

    {DEC_CMP_M, dec_cmp_m},
    {DEC_ADDO_M, dec_addo_m},
    {DEC_BOUND_M, dec_bound_m},
    {DEC_ADD_M, dec_add_m},
    {DEC_SUB_M, dec_sub_m},
    {DEC_AND_M, dec_and_m},
    {DEC_OR_M, dec_or_m},
    {DEC_MOVE_RM, dec_move_rm},
    {DEC_TEST_M, dec_test_m},
    {DEC_MOVE_MR, dec_move_mr},
};
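/*
 * Field layout of the 16-bit instruction word, as extracted below:
 *   op2     = bits [15:12]     opcode = bits [11:4]     op1   = bits [3:0]
 *   postinc = bit  [10]        zzsize = bits [5:4]      zsize = bit  [4]
 * (the size and postinc fields deliberately overlap the opcode field).
 */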
static inline unsigned int cris_decoder(DisasContext *dc)
{
    unsigned int insn_len = 2;
    int i;

    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)))
        tcg_gen_debug_insn_start(dc->pc);

    /* Load a halfword onto the instruction register.  */
    dc->ir = lduw_code(dc->pc);

    /* Now decode it.  */
    dc->opcode  = EXTRACT_FIELD(dc->ir, 4, 11);
    dc->op1     = EXTRACT_FIELD(dc->ir, 0, 3);
    dc->op2     = EXTRACT_FIELD(dc->ir, 12, 15);
    dc->zsize   = EXTRACT_FIELD(dc->ir, 4, 4);
    dc->zzsize  = EXTRACT_FIELD(dc->ir, 4, 5);
    dc->postinc = EXTRACT_FIELD(dc->ir, 10, 10);

    /* Large switch for all insns.  */
    for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
        if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
            insn_len = decinfo[i].dec(dc);
            break;
        }
    }

#if !defined(CONFIG_USER_ONLY)
    /* Single-stepping ?  */
    if (dc->tb_flags & S_FLAG) {
        int l1;

        l1 = gen_new_label();
        tcg_gen_brcondi_tl(TCG_COND_NE, cpu_PR[PR_SPC], dc->pc, l1);
        /* We treat SPC as a break with an odd trap vector.  */
        cris_evaluate_flags(dc);
        t_gen_mov_env_TN(trap_vector, tcg_const_tl(3));
        tcg_gen_movi_tl(env_pc, dc->pc + insn_len);
        tcg_gen_movi_tl(cpu_PR[PR_SPC], dc->pc + insn_len);
        t_gen_raise_exception(EXCP_BREAK);
        gen_set_label(l1);
    }
#endif

    return insn_len;
}
static void check_breakpoint(CPUState *env, DisasContext *dc)
{
    CPUBreakpoint *bp;

    if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
        TAILQ_FOREACH(bp, &env->breakpoints, entry) {
            if (bp->pc == dc->pc) {
                cris_evaluate_flags(dc);
                tcg_gen_movi_tl(env_pc, dc->pc);
                t_gen_raise_exception(EXCP_DEBUG);
                dc->is_jmp = DISAS_UPDATE;
            }
        }
    }
}
/*
 * Delay slots on QEMU/CRIS.
 *
 * If an exception hits on a delay slot, the core will let ERP (the Exception
 * Return Pointer) point to the branch (the previous) insn and set the lsb to
 * give SW a hint that the exception actually hit on the delay slot.
 *
 * CRIS expects all PC addresses to be 16-bit aligned.  The lsb is ignored by
 * the core and any jmp to an odd address will mask off that lsb.  It is
 * simply there to let SW know there was an exception on a delay slot.
 *
 * When the software returns from an exception, the branch will re-execute.
 * On QEMU care needs to be taken when a branch+delayslot sequence is broken
 * and the branch and delayslot don't share pages.
 *
 * The TB containing the branch insn will set up env->btarget and evaluate
 * env->btaken.  When the translation loop exits we will note that the branch
 * sequence is broken and let env->dslot be the size of the branch insn (i.e.
 * how many bytes the branch insn occupies).
 *
 * The TB containing the delayslot will have the PC of its real insn (i.e. no
 * lsb set).  It will also expect env->dslot to be set up with the size of the
 * delay slot so that env->pc - env->dslot points to the branch insn.  This TB
 * will execute the dslot and take the branch, either to btarget or just one
 * insn ahead.
 *
 * When exceptions occur, we check for env->dslot in do_interrupt to detect
 * broken branch sequences and set up $erp accordingly (i.e. let it point to
 * the branch and set the lsb).  Then env->dslot gets cleared so that the
 * exception handler can enter.  When returning from exceptions (jump $erp)
 * the lsb gets masked off and we will re-execute the branch insn.
 */
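/*
 * Concretely (a sketch of the expected interaction, not code from this
 * file): when an exception is taken while env->dslot != 0, do_interrupt is
 * expected to do roughly
 *
 *     env->pregs[PR_ERP] = env->pc - env->dslot;  back up to the branch
 *     env->pregs[PR_ERP] |= 1;                    lsb = "hit in a dslot"
 *     env->dslot = 0;                             let the handler enter
 *
 * so that a later "jump $erp" masks off the lsb and re-executes the branch.
 */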
/* generate intermediate code for basic block 'tb'.  */
static void
gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb,
                               int search_pc)
{
    uint16_t *gen_opc_end;
    uint32_t pc_start;
    unsigned int insn_len;
    int j, lj;
    struct DisasContext ctx;
    struct DisasContext *dc = &ctx;
    uint32_t next_page_start;
    uint32_t npc;
    int num_insns;
    int max_insns;

    qemu_log_try_set_file(stderr);

    /* An odd PC indicates that the branch is re-executing due to an
     * exception in the delay slot, like in real HW.
     */
    pc_start = tb->pc & ~1;
    dc->env = env;
    dc->tb = tb;

    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;

    dc->is_jmp = DISAS_NEXT;
    dc->ppc = pc_start;
    dc->pc = pc_start;
    dc->singlestep_enabled = env->singlestep_enabled;
    dc->flags_uptodate = 1;
    dc->flagx_known = 1;
    dc->flags_x = tb->flags & X_FLAG;
    dc->cc_x_uptodate = 0;
    dc->cc_mask = 0;
    dc->update_cc = 0;

    cris_update_cc_op(dc, CC_OP_FLAGS, 4);
    dc->cc_size_uptodate = -1;

    /* Decode TB flags.  */
    dc->tb_flags = tb->flags & (S_FLAG | P_FLAG | U_FLAG | X_FLAG);
    dc->delayed_branch = !!(tb->flags & 7);
    if (dc->delayed_branch)
        dc->jmp = JMP_INDIRECT;
    else
        dc->jmp = JMP_NOJMP;

    dc->cpustate_changed = 0;

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log(
            "srch=%d pc=%x %x flg=%llx bt=%x ds=%u ccs=%x\n"
            "pid=%x usp=%x\n"
            "%x.%x.%x.%x\n"
            "%x.%x.%x.%x\n"
            "%x.%x.%x.%x\n"
            "%x.%x.%x.%x\n",
            search_pc, dc->pc, dc->ppc,
            (unsigned long long)tb->flags,
            env->btarget, (unsigned)tb->flags & 7,
            env->pregs[PR_CCS],
            env->pregs[PR_PID], env->pregs[PR_USP],
            env->regs[0], env->regs[1], env->regs[2], env->regs[3],
            env->regs[4], env->regs[5], env->regs[6], env->regs[7],
            env->regs[8], env->regs[9],
            env->regs[10], env->regs[11],
            env->regs[12], env->regs[13],
            env->regs[14], env->regs[15]);
        qemu_log("--------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
    }

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_icount_start();
    do {
        check_breakpoint(env, dc);

        if (search_pc) {
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            if (dc->delayed_branch == 1)
                gen_opc_pc[lj] = dc->ppc | 1;
            else
                gen_opc_pc[lj] = dc->pc;
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }

        /* Pretty disas.  */
        LOG_DIS("%8.8x:\t", dc->pc);

        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();
        dc->clear_x = 1;

        insn_len = cris_decoder(dc);
        dc->ppc = dc->pc;
        dc->pc += insn_len;
        if (dc->clear_x)
            cris_clear_x_flag(dc);

        num_insns++;
        /* Check for delayed branches here.  If we do it before actually
           generating any host code, the simulator will just loop doing
           nothing on this program location.  */
        if (dc->delayed_branch) {
            dc->delayed_branch--;
            if (dc->delayed_branch == 0) {
                if (tb->flags & 7)
                    t_gen_mov_env_TN(dslot, tcg_const_tl(0));
                if (dc->jmp == JMP_DIRECT) {
                    dc->is_jmp = DISAS_NEXT;
                } else {
                    t_gen_cc_jmp(env_btarget,
                                 tcg_const_tl(dc->pc));
                    dc->is_jmp = DISAS_JUMP;
                }
                break;
            }
        }

        /* If we are re-executing a branch due to exceptions on
           delay slots, don't break.  */
        if (!(tb->pc & 1) && env->singlestep_enabled)
            break;
    } while (!dc->is_jmp && !dc->cpustate_changed
             && gen_opc_ptr < gen_opc_end
             && (dc->pc < next_page_start)
             && num_insns < max_insns);

    npc = dc->pc;
    if (dc->jmp == JMP_DIRECT && !dc->delayed_branch)
        npc = dc->jmp_pc;

    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    /* Force an update if the per-tb cpu state has changed.  */
    if (dc->is_jmp == DISAS_NEXT
        && (dc->cpustate_changed || !dc->flagx_known
            || (dc->flags_x != (tb->flags & X_FLAG)))) {
        dc->is_jmp = DISAS_UPDATE;
        tcg_gen_movi_tl(env_pc, npc);
    }
    /* Broken branch+delayslot sequence.  */
    if (dc->delayed_branch == 1) {
        /* Set env->dslot to the size of the branch insn.  */
        t_gen_mov_env_TN(dslot, tcg_const_tl(dc->pc - dc->ppc));
        cris_store_direct_jmp(dc);
    }

    cris_evaluate_flags(dc);

    if (unlikely(env->singlestep_enabled)) {
        if (dc->is_jmp == DISAS_NEXT)
            tcg_gen_movi_tl(env_pc, npc);
        t_gen_raise_exception(EXCP_DEBUG);
    } else {
        switch (dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, npc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* indicate that the hash table must be used
               to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        }
    }
    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        log_target_disas(pc_start, dc->pc - pc_start, 0);
        qemu_log("\nisize=%d osize=%zd\n",
                 dc->pc - pc_start, gen_opc_ptr - gen_opc_buf);
    }
}
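/* Entry points.  The _pc variant runs the translator with search_pc set, so
   the loop above records gen_opc_pc/gen_opc_icount for each op (later used
   by gen_pc_load() to recover the guest PC) instead of sizing the TB for
   execution.  */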
void gen_intermediate_code (CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc (CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
void cpu_dump_state (CPUState *env, FILE *f,
                     int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                     int flags)
{
    int i;
    uint32_t srs;

    if (!env)
        return;

    cpu_fprintf(f, "PC=%x CCS=%x btaken=%d btarget=%x\n"
                "cc_op=%d cc_src=%d cc_dest=%d cc_result=%x cc_mask=%x\n",
                env->pc, env->pregs[PR_CCS], env->btaken, env->btarget,
                env->cc_op,
                env->cc_src, env->cc_dest, env->cc_result, env->cc_mask);

    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
        if ((i + 1) % 4 == 0)
            cpu_fprintf(f, "\n");
    }
    cpu_fprintf(f, "\nspecial regs:\n");
    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "p%2.2d=%8.8x ", i, env->pregs[i]);
        if ((i + 1) % 4 == 0)
            cpu_fprintf(f, "\n");
    }
    srs = env->pregs[PR_SRS];
    cpu_fprintf(f, "\nsupport function regs bank %x:\n", srs);
    if (srs < 256) {
        for (i = 0; i < 16; i++) {
            cpu_fprintf(f, "s%2.2d=%8.8x ",
                        i, env->sregs[srs][i]);
            if ((i + 1) % 4 == 0)
                cpu_fprintf(f, "\n");
        }
    }
    cpu_fprintf(f, "\n\n");
}
CPUCRISState *cpu_cris_init (const char *cpu_model)
{
    CPUCRISState *env;
    static int tcg_initialized = 0;
    int i;

    env = qemu_mallocz(sizeof(CPUCRISState));

    cpu_exec_init(env);
    cpu_reset(env);

    if (tcg_initialized)
        return env;

    tcg_initialized = 1;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    cc_x = tcg_global_mem_new(TCG_AREG0,
                              offsetof(CPUState, cc_x), "cc_x");
    cc_src = tcg_global_mem_new(TCG_AREG0,
                                offsetof(CPUState, cc_src), "cc_src");
    cc_dest = tcg_global_mem_new(TCG_AREG0,
                                 offsetof(CPUState, cc_dest),
                                 "cc_dest");
    cc_result = tcg_global_mem_new(TCG_AREG0,
                                   offsetof(CPUState, cc_result),
                                   "cc_result");
    cc_op = tcg_global_mem_new(TCG_AREG0,
                               offsetof(CPUState, cc_op), "cc_op");
    cc_size = tcg_global_mem_new(TCG_AREG0,
                                 offsetof(CPUState, cc_size),
                                 "cc_size");
    cc_mask = tcg_global_mem_new(TCG_AREG0,
                                 offsetof(CPUState, cc_mask),
                                 "cc_mask");

    env_pc = tcg_global_mem_new(TCG_AREG0,
                                offsetof(CPUState, pc),
                                "pc");
    env_btarget = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUState, btarget),
                                     "btarget");
    env_btaken = tcg_global_mem_new(TCG_AREG0,
                                    offsetof(CPUState, btaken),
                                    "btaken");
    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new(TCG_AREG0,
                                      offsetof(CPUState, regs[i]),
                                      regnames[i]);
    }
    for (i = 0; i < 16; i++) {
        cpu_PR[i] = tcg_global_mem_new(TCG_AREG0,
                                       offsetof(CPUState, pregs[i]),
                                       pregnames[i]);
    }

#define GEN_HELPER 2
#include "helper.h"

    return env;
}
void cpu_reset (CPUCRISState *env)
{
    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
        log_cpu_state(env, 0);
    }

    memset(env, 0, offsetof(CPUCRISState, breakpoints));
    tlb_flush(env, 1);

    env->pregs[PR_VR] = 32;
#if defined(CONFIG_USER_ONLY)
    /* start in user mode with interrupts enabled.  */
    env->pregs[PR_CCS] |= U_FLAG | I_FLAG;
#else
    env->pregs[PR_CCS] = 0;
#endif
}
void gen_pc_load(CPUState *env, struct TranslationBlock *tb,
                 unsigned long searched_pc, int pc_pos, void *puc)
{
    env->pc = gen_opc_pc[pc_pos];
}