1 /*
2 * CRIS emulation for qemu: main translation routines.
4 * Copyright (c) 2008 AXIS Communications AB
5 * Written by Edgar E. Iglesias.
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
22 * FIXME:
23 * The condition code translation is in need of attention.
26 #include <stdarg.h>
27 #include <stdlib.h>
28 #include <stdio.h>
29 #include <string.h>
30 #include <inttypes.h>
32 #include "cpu.h"
33 #include "exec-all.h"
34 #include "disas.h"
35 #include "tcg-op.h"
36 #include "helper.h"
37 #include "mmu.h"
38 #include "crisv32-decode.h"
39 #include "qemu-common.h"
41 #define GEN_HELPER 1
42 #include "helper.h"
44 #define DISAS_CRIS 0
45 #if DISAS_CRIS
46 # define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
47 #else
48 # define LOG_DIS(...) do { } while (0)
49 #endif
51 #define D(x)
52 #define BUG() (gen_BUG(dc, __FILE__, __LINE__))
53 #define BUG_ON(x) ({if (x) BUG();})
55 #define DISAS_SWI 5
57 /* Used by the decoder. */
58 #define EXTRACT_FIELD(src, start, end) \
59 (((src) >> start) & ((1 << (end - start + 1)) - 1))
61 #define CC_MASK_NZ 0xc
62 #define CC_MASK_NZV 0xe
63 #define CC_MASK_NZVC 0xf
64 #define CC_MASK_RNZV 0x10e
66 static TCGv_ptr cpu_env;
67 static TCGv cpu_R[16];
68 static TCGv cpu_PR[16];
69 static TCGv cc_x;
70 static TCGv cc_src;
71 static TCGv cc_dest;
72 static TCGv cc_result;
73 static TCGv cc_op;
74 static TCGv cc_size;
75 static TCGv cc_mask;
77 static TCGv env_btaken;
78 static TCGv env_btarget;
79 static TCGv env_pc;
81 #include "gen-icount.h"
83 /* This is the state at translation time. */
84 typedef struct DisasContext {
85 CPUState *env;
86 target_ulong pc, ppc;
88 /* Decoder. */
89 unsigned int (*decoder)(struct DisasContext *dc);
90 uint32_t ir;
91 uint32_t opcode;
92 unsigned int op1;
93 unsigned int op2;
94 unsigned int zsize, zzsize;
95 unsigned int mode;
96 unsigned int postinc;
98 unsigned int size;
99 unsigned int src;
100 unsigned int dst;
101 unsigned int cond;
103 int update_cc;
104 int cc_op;
105 int cc_size;
106 uint32_t cc_mask;
108 int cc_size_uptodate; /* -1 invalid or last written value. */
110 int cc_x_uptodate; /* 1 - ccs, 2 - known | X_FLAG. 0 not uptodate. */
111 int flags_uptodate; /* Whether or not $ccs is up to date. */
112 int flagx_known; /* Whether the X flag value (flags_x) is known at
113 translation time. */
114 int flags_x;
116 int clear_x; /* Clear x after this insn? */
117 int clear_prefix; /* Clear prefix after this insn? */
118 int clear_locked_irq; /* Clear the irq lockout. */
119 int cpustate_changed;
120 unsigned int tb_flags; /* tb dependent flags. */
121 int is_jmp;
123 #define JMP_NOJMP 0
124 #define JMP_DIRECT 1
125 #define JMP_DIRECT_CC 2
126 #define JMP_INDIRECT 3
127 int jmp; /* JMP_NOJMP, JMP_DIRECT, JMP_DIRECT_CC or JMP_INDIRECT. */
128 uint32_t jmp_pc;
130 int delayed_branch;
132 struct TranslationBlock *tb;
133 int singlestep_enabled;
134 } DisasContext;
136 static void gen_BUG(DisasContext *dc, const char *file, int line)
138 printf ("BUG: pc=%x %s %d\n", dc->pc, file, line);
139 qemu_log("BUG: pc=%x %s %d\n", dc->pc, file, line);
140 cpu_abort(dc->env, "%s:%d\n", file, line);
143 static const char *regnames[] =
145 "$r0", "$r1", "$r2", "$r3",
146 "$r4", "$r5", "$r6", "$r7",
147 "$r8", "$r9", "$r10", "$r11",
148 "$r12", "$r13", "$sp", "$acr",
150 static const char *pregnames[] =
152 "$bz", "$vr", "$pid", "$srs",
153 "$wz", "$exs", "$eda", "$mof",
154 "$dz", "$ebp", "$erp", "$srp",
155 "$nrp", "$ccs", "$usp", "$spc",
158 /* We need this table to handle preg-moves with implicit width. */
159 static int preg_sizes[] = {
160 1, /* bz. */
161 1, /* vr. */
162 4, /* pid. */
163 1, /* srs. */
164 2, /* wz. */
165 4, 4, 4,
166 4, 4, 4, 4,
167 4, 4, 4, 4,
170 #define t_gen_mov_TN_env(tn, member) \
171 _t_gen_mov_TN_env((tn), offsetof(CPUState, member))
172 #define t_gen_mov_env_TN(member, tn) \
173 _t_gen_mov_env_TN(offsetof(CPUState, member), (tn))
175 static inline void t_gen_mov_TN_reg(TCGv tn, int r)
177 if (r < 0 || r > 15)
178 fprintf(stderr, "wrong register read $r%d\n", r);
179 tcg_gen_mov_tl(tn, cpu_R[r]);
181 static inline void t_gen_mov_reg_TN(int r, TCGv tn)
183 if (r < 0 || r > 15)
184 fprintf(stderr, "wrong register write $r%d\n", r);
185 tcg_gen_mov_tl(cpu_R[r], tn);
188 static inline void _t_gen_mov_TN_env(TCGv tn, int offset)
190 if (offset > sizeof (CPUState))
191 fprintf(stderr, "wrong load from env from off=%d\n", offset);
192 tcg_gen_ld_tl(tn, cpu_env, offset);
194 static inline void _t_gen_mov_env_TN(int offset, TCGv tn)
196 if (offset > sizeof (CPUState))
197 fprintf(stderr, "wrong store to env at off=%d\n", offset);
198 tcg_gen_st_tl(tn, cpu_env, offset);
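/* Note on the constant pregs: in the helpers below, reads of $bz, $wz and
   $dz are synthesized as the constant 0 and $vr reads back as 32, while
   writes to the zero registers are silently dropped. */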
201 static inline void t_gen_mov_TN_preg(TCGv tn, int r)
203 if (r < 0 || r > 15)
204 fprintf(stderr, "wrong register read $p%d\n", r);
205 if (r == PR_BZ || r == PR_WZ || r == PR_DZ)
206 tcg_gen_mov_tl(tn, tcg_const_tl(0));
207 else if (r == PR_VR)
208 tcg_gen_mov_tl(tn, tcg_const_tl(32));
209 else
210 tcg_gen_mov_tl(tn, cpu_PR[r]);
212 static inline void t_gen_mov_preg_TN(DisasContext *dc, int r, TCGv tn)
214 if (r < 0 || r > 15)
215 fprintf(stderr, "wrong register write $p%d\n", r);
216 if (r == PR_BZ || r == PR_WZ || r == PR_DZ)
217 return;
218 else if (r == PR_SRS)
219 tcg_gen_andi_tl(cpu_PR[r], tn, 3);
220 else {
221 if (r == PR_PID)
222 gen_helper_tlb_flush_pid(tn);
223 if (dc->tb_flags & S_FLAG && r == PR_SPC)
224 gen_helper_spc_write(tn);
225 else if (r == PR_CCS)
226 dc->cpustate_changed = 1;
227 tcg_gen_mov_tl(cpu_PR[r], tn);
231 /* Sign extend at translation time. */
232 static int sign_extend(unsigned int val, unsigned int width)
234 int sval;
236 /* LSL. */
237 val <<= 31 - width;
238 sval = val;
239 /* ASR. */
240 sval >>= 31 - width;
241 return sval;
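/* For example (assuming 32-bit int), sign_extend(0x3f, 5) == -1 and
   sign_extend(0x1f, 5) == 31; bit number 'width' is treated as the sign
   bit of the field. */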
244 static int cris_fetch(DisasContext *dc, uint32_t addr,
245 unsigned int size, unsigned int sign)
247 int r;
249 switch (size) {
250 case 4:
252 r = ldl_code(addr);
253 break;
255 case 2:
257 if (sign) {
258 r = ldsw_code(addr);
259 } else {
260 r = lduw_code(addr);
262 break;
264 case 1:
266 if (sign) {
267 r = ldsb_code(addr);
268 } else {
269 r = ldub_code(addr);
271 break;
273 default:
274 cpu_abort(dc->env, "Invalid fetch size %d\n", size);
275 break;
277 return r;
280 static void cris_lock_irq(DisasContext *dc)
282 dc->clear_locked_irq = 0;
283 t_gen_mov_env_TN(locked_irq, tcg_const_tl(1));
286 static inline void t_gen_raise_exception(uint32_t index)
288 TCGv_i32 tmp = tcg_const_i32(index);
289 gen_helper_raise_exception(tmp);
290 tcg_temp_free_i32(tmp);
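/* Shift helpers. CRIS shift counts may be up to 63 (callers mask the
 * count with 63), so the plain TCG shift is fixed up branch-free for
 * counts above 31. Roughly, what the generated code computes is
 *
 *    mask = (31 - b) >> 31;               (arithmetic shift: all ones iff b > 31)
 *    lsl/lsr:  d = (a shift b) & ~mask;   (result forced to 0)
 *    asr:      d = (a >> b) | mask;       (result forced to all ones)
 *
 * This is only a sketch, assuming a 32-bit target_ulong. */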
293 static void t_gen_lsl(TCGv d, TCGv a, TCGv b)
295 TCGv t0, t_31;
297 t0 = tcg_temp_new();
298 t_31 = tcg_const_tl(31);
299 tcg_gen_shl_tl(d, a, b);
301 tcg_gen_sub_tl(t0, t_31, b);
302 tcg_gen_sar_tl(t0, t0, t_31);
303 tcg_gen_and_tl(t0, t0, d);
304 tcg_gen_xor_tl(d, d, t0);
305 tcg_temp_free(t0);
306 tcg_temp_free(t_31);
309 static void t_gen_lsr(TCGv d, TCGv a, TCGv b)
311 TCGv t0, t_31;
313 t0 = tcg_temp_new();
314 t_31 = tcg_temp_new();
315 tcg_gen_shr_tl(d, a, b);
317 tcg_gen_movi_tl(t_31, 31);
318 tcg_gen_sub_tl(t0, t_31, b);
319 tcg_gen_sar_tl(t0, t0, t_31);
320 tcg_gen_and_tl(t0, t0, d);
321 tcg_gen_xor_tl(d, d, t0);
322 tcg_temp_free(t0);
323 tcg_temp_free(t_31);
326 static void t_gen_asr(TCGv d, TCGv a, TCGv b)
328 TCGv t0, t_31;
330 t0 = tcg_temp_new();
331 t_31 = tcg_temp_new();
332 tcg_gen_sar_tl(d, a, b);
334 tcg_gen_movi_tl(t_31, 31);
335 tcg_gen_sub_tl(t0, t_31, b);
336 tcg_gen_sar_tl(t0, t0, t_31);
337 tcg_gen_or_tl(d, d, t0);
338 tcg_temp_free(t0);
339 tcg_temp_free(t_31);
342 /* 64-bit signed mul, lower result in d and upper in d2. */
343 static void t_gen_muls(TCGv d, TCGv d2, TCGv a, TCGv b)
345 TCGv_i64 t0, t1;
347 t0 = tcg_temp_new_i64();
348 t1 = tcg_temp_new_i64();
350 tcg_gen_ext_i32_i64(t0, a);
351 tcg_gen_ext_i32_i64(t1, b);
352 tcg_gen_mul_i64(t0, t0, t1);
354 tcg_gen_trunc_i64_i32(d, t0);
355 tcg_gen_shri_i64(t0, t0, 32);
356 tcg_gen_trunc_i64_i32(d2, t0);
358 tcg_temp_free_i64(t0);
359 tcg_temp_free_i64(t1);
362 /* 64-bit unsigned mul, lower result in d and upper in d2. */
363 static void t_gen_mulu(TCGv d, TCGv d2, TCGv a, TCGv b)
365 TCGv_i64 t0, t1;
367 t0 = tcg_temp_new_i64();
368 t1 = tcg_temp_new_i64();
370 tcg_gen_extu_i32_i64(t0, a);
371 tcg_gen_extu_i32_i64(t1, b);
372 tcg_gen_mul_i64(t0, t0, t1);
374 tcg_gen_trunc_i64_i32(d, t0);
375 tcg_gen_shri_i64(t0, t0, 32);
376 tcg_gen_trunc_i64_i32(d2, t0);
378 tcg_temp_free_i64(t0);
379 tcg_temp_free_i64(t1);
382 static void t_gen_cris_dstep(TCGv d, TCGv a, TCGv b)
384 int l1;
386 l1 = gen_new_label();
389 * d <<= 1
390 * if (d >= s)
391 * d -= s;
393 tcg_gen_shli_tl(d, a, 1);
394 tcg_gen_brcond_tl(TCG_COND_LTU, d, b, l1);
395 tcg_gen_sub_tl(d, d, b);
396 gen_set_label(l1);
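/* As the pseudo-code above suggests, this is effectively one iteration
   (one result bit) of a restoring division, with b as the divisor. */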
399 static void t_gen_cris_mstep(TCGv d, TCGv a, TCGv b, TCGv ccs)
401 TCGv t;
404 * d <<= 1
405 * if (n)
406 * d += s;
408 t = tcg_temp_new();
409 tcg_gen_shli_tl(d, a, 1);
410 tcg_gen_shli_tl(t, ccs, 31 - 3);
411 tcg_gen_sari_tl(t, t, 31);
412 tcg_gen_and_tl(t, t, b);
413 tcg_gen_add_tl(d, d, t);
414 tcg_temp_free(t);
417 /* Extended arithmetic on CRIS. */
418 static inline void t_gen_add_flag(TCGv d, int flag)
420 TCGv c;
422 c = tcg_temp_new();
423 t_gen_mov_TN_preg(c, PR_CCS);
424 /* Propagate carry into d. */
425 tcg_gen_andi_tl(c, c, 1 << flag);
426 if (flag)
427 tcg_gen_shri_tl(c, c, flag);
428 tcg_gen_add_tl(d, d, c);
429 tcg_temp_free(c);
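/* Carry propagation gated on the X flag. When the X flag state is known
 * at translation time we either add/subtract the C flag directly or emit
 * nothing at all; otherwise the generated code is branch-free, roughly:
 *
 *    d +/-= (ccs & C_FLAG) & ((ccs & X_FLAG) >> 4);
 *
 * (a sketch of what t_gen_addx_carry/t_gen_subx_carry emit; X is bit 4
 * of $ccs and C is bit 0). */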
432 static inline void t_gen_addx_carry(DisasContext *dc, TCGv d)
434 if (dc->flagx_known) {
435 if (dc->flags_x) {
436 TCGv c;
438 c = tcg_temp_new();
439 t_gen_mov_TN_preg(c, PR_CCS);
440 /* C flag is already at bit 0. */
441 tcg_gen_andi_tl(c, c, C_FLAG);
442 tcg_gen_add_tl(d, d, c);
443 tcg_temp_free(c);
445 } else {
446 TCGv x, c;
448 x = tcg_temp_new();
449 c = tcg_temp_new();
450 t_gen_mov_TN_preg(x, PR_CCS);
451 tcg_gen_mov_tl(c, x);
453 /* Propagate carry into d if X is set. Branch free. */
454 tcg_gen_andi_tl(c, c, C_FLAG);
455 tcg_gen_andi_tl(x, x, X_FLAG);
456 tcg_gen_shri_tl(x, x, 4);
458 tcg_gen_and_tl(x, x, c);
459 tcg_gen_add_tl(d, d, x);
460 tcg_temp_free(x);
461 tcg_temp_free(c);
465 static inline void t_gen_subx_carry(DisasContext *dc, TCGv d)
467 if (dc->flagx_known) {
468 if (dc->flags_x) {
469 TCGv c;
471 c = tcg_temp_new();
472 t_gen_mov_TN_preg(c, PR_CCS);
473 /* C flag is already at bit 0. */
474 tcg_gen_andi_tl(c, c, C_FLAG);
475 tcg_gen_sub_tl(d, d, c);
476 tcg_temp_free(c);
478 } else {
479 TCGv x, c;
481 x = tcg_temp_new();
482 c = tcg_temp_new();
483 t_gen_mov_TN_preg(x, PR_CCS);
484 tcg_gen_mov_tl(c, x);
486 /* Propagate carry into d if X is set. Branch free. */
487 tcg_gen_andi_tl(c, c, C_FLAG);
488 tcg_gen_andi_tl(x, x, X_FLAG);
489 tcg_gen_shri_tl(x, x, 4);
491 tcg_gen_and_tl(x, x, c);
492 tcg_gen_sub_tl(d, d, x);
493 tcg_temp_free(x);
494 tcg_temp_free(c);
498 /* Swap the two bytes within each half word of the s operand.
499 T0 = ((T0 << 8) & 0xff00ff00) | ((T0 >> 8) & 0x00ff00ff) */
500 static inline void t_gen_swapb(TCGv d, TCGv s)
502 TCGv t, org_s;
504 t = tcg_temp_new();
505 org_s = tcg_temp_new();
507 /* d and s may refer to the same object. */
508 tcg_gen_mov_tl(org_s, s);
509 tcg_gen_shli_tl(t, org_s, 8);
510 tcg_gen_andi_tl(d, t, 0xff00ff00);
511 tcg_gen_shri_tl(t, org_s, 8);
512 tcg_gen_andi_tl(t, t, 0x00ff00ff);
513 tcg_gen_or_tl(d, d, t);
514 tcg_temp_free(t);
515 tcg_temp_free(org_s);
518 /* Swap the halfwords of the s operand. */
519 static inline void t_gen_swapw(TCGv d, TCGv s)
521 TCGv t;
522 /* d and s may refer to the same object. */
523 t = tcg_temp_new();
524 tcg_gen_mov_tl(t, s);
525 tcg_gen_shli_tl(d, t, 16);
526 tcg_gen_shri_tl(t, t, 16);
527 tcg_gen_or_tl(d, d, t);
528 tcg_temp_free(t);
531 /* Reverse the bits within each byte.
532 T0 = (((T0 << 7) & 0x80808080) |
533 ((T0 << 5) & 0x40404040) |
534 ((T0 << 3) & 0x20202020) |
535 ((T0 << 1) & 0x10101010) |
536 ((T0 >> 1) & 0x08080808) |
537 ((T0 >> 3) & 0x04040404) |
538 ((T0 >> 5) & 0x02020202) |
539 ((T0 >> 7) & 0x01010101));
541 static inline void t_gen_swapr(TCGv d, TCGv s)
543 struct {
544 int shift; /* LSL when positive, LSR when negative. */
545 uint32_t mask;
546 } bitrev [] = {
547 {7, 0x80808080},
548 {5, 0x40404040},
549 {3, 0x20202020},
550 {1, 0x10101010},
551 {-1, 0x08080808},
552 {-3, 0x04040404},
553 {-5, 0x02020202},
554 {-7, 0x01010101}
556 int i;
557 TCGv t, org_s;
559 /* d and s may refer to the same object. */
560 t = tcg_temp_new();
561 org_s = tcg_temp_new();
562 tcg_gen_mov_tl(org_s, s);
564 tcg_gen_shli_tl(t, org_s, bitrev[0].shift);
565 tcg_gen_andi_tl(d, t, bitrev[0].mask);
566 for (i = 1; i < ARRAY_SIZE(bitrev); i++) {
567 if (bitrev[i].shift >= 0) {
568 tcg_gen_shli_tl(t, org_s, bitrev[i].shift);
569 } else {
570 tcg_gen_shri_tl(t, org_s, -bitrev[i].shift);
572 tcg_gen_andi_tl(t, t, bitrev[i].mask);
573 tcg_gen_or_tl(d, d, t);
575 tcg_temp_free(t);
576 tcg_temp_free(org_s);
579 static void t_gen_cc_jmp(TCGv pc_true, TCGv pc_false)
581 int l1;
583 l1 = gen_new_label();
585 /* Conditional jmp. */
586 tcg_gen_mov_tl(env_pc, pc_false);
587 tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, l1);
588 tcg_gen_mov_tl(env_pc, pc_true);
589 gen_set_label(l1);
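/* Only chain directly to the next TB when the destination lies on the
   same guest page as this TB; otherwise just set env_pc and return to
   the main loop so the target is looked up (and MMU-checked) again. */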
592 static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
594 TranslationBlock *tb;
595 tb = dc->tb;
596 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
597 tcg_gen_goto_tb(n);
598 tcg_gen_movi_tl(env_pc, dest);
599 tcg_gen_exit_tb((tcg_target_long)tb + n);
600 } else {
601 tcg_gen_movi_tl(env_pc, dest);
602 tcg_gen_exit_tb(0);
606 static inline void cris_clear_x_flag(DisasContext *dc)
608 if (dc->flagx_known && dc->flags_x)
609 dc->flags_uptodate = 0;
611 dc->flagx_known = 1;
612 dc->flags_x = 0;
615 static void cris_flush_cc_state(DisasContext *dc)
617 if (dc->cc_size_uptodate != dc->cc_size) {
618 tcg_gen_movi_tl(cc_size, dc->cc_size);
619 dc->cc_size_uptodate = dc->cc_size;
621 tcg_gen_movi_tl(cc_op, dc->cc_op);
622 tcg_gen_movi_tl(cc_mask, dc->cc_mask);
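/* Lazy condition code evaluation. Most insns only record the operation
   (cc_op/cc_size) and its operands (cc_src, cc_dest, cc_result); the
   actual $ccs bits are computed here on demand by helpers specialized
   on the recorded op and size. */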
625 static void cris_evaluate_flags(DisasContext *dc)
627 if (dc->flags_uptodate)
628 return;
630 cris_flush_cc_state(dc);
632 switch (dc->cc_op)
634 case CC_OP_MCP:
635 gen_helper_evaluate_flags_mcp(cpu_PR[PR_CCS],
636 cpu_PR[PR_CCS], cc_src,
637 cc_dest, cc_result);
638 break;
639 case CC_OP_MULS:
640 gen_helper_evaluate_flags_muls(cpu_PR[PR_CCS],
641 cpu_PR[PR_CCS], cc_result,
642 cpu_PR[PR_MOF]);
643 break;
644 case CC_OP_MULU:
645 gen_helper_evaluate_flags_mulu(cpu_PR[PR_CCS],
646 cpu_PR[PR_CCS], cc_result,
647 cpu_PR[PR_MOF]);
648 break;
649 case CC_OP_MOVE:
650 case CC_OP_AND:
651 case CC_OP_OR:
652 case CC_OP_XOR:
653 case CC_OP_ASR:
654 case CC_OP_LSR:
655 case CC_OP_LSL:
656 switch (dc->cc_size)
658 case 4:
659 gen_helper_evaluate_flags_move_4(cpu_PR[PR_CCS],
660 cpu_PR[PR_CCS], cc_result);
661 break;
662 case 2:
663 gen_helper_evaluate_flags_move_2(cpu_PR[PR_CCS],
664 cpu_PR[PR_CCS], cc_result);
665 break;
666 default:
667 gen_helper_evaluate_flags();
668 break;
670 break;
671 case CC_OP_FLAGS:
672 /* live. */
673 break;
674 case CC_OP_SUB:
675 case CC_OP_CMP:
676 if (dc->cc_size == 4)
677 gen_helper_evaluate_flags_sub_4(cpu_PR[PR_CCS],
678 cpu_PR[PR_CCS], cc_src, cc_dest, cc_result);
679 else
680 gen_helper_evaluate_flags();
682 break;
683 default:
684 switch (dc->cc_size)
686 case 4:
687 gen_helper_evaluate_flags_alu_4(cpu_PR[PR_CCS],
688 cpu_PR[PR_CCS], cc_src, cc_dest, cc_result);
689 break;
690 default:
691 gen_helper_evaluate_flags();
692 break;
694 break;
697 if (dc->flagx_known) {
698 if (dc->flags_x)
699 tcg_gen_ori_tl(cpu_PR[PR_CCS],
700 cpu_PR[PR_CCS], X_FLAG);
701 else if (dc->cc_op == CC_OP_FLAGS)
702 tcg_gen_andi_tl(cpu_PR[PR_CCS],
703 cpu_PR[PR_CCS], ~X_FLAG);
705 dc->flags_uptodate = 1;
708 static void cris_cc_mask(DisasContext *dc, unsigned int mask)
710 uint32_t ovl;
712 if (!mask) {
713 dc->update_cc = 0;
714 return;
717 /* Check if we need to evaluate the condition codes due to
718 CC overlaying. */
719 ovl = (dc->cc_mask ^ mask) & ~mask;
720 if (ovl) {
721 /* TODO: optimize this case. It triggers all the time. */
722 cris_evaluate_flags (dc);
724 dc->cc_mask = mask;
725 dc->update_cc = 1;
728 static void cris_update_cc_op(DisasContext *dc, int op, int size)
730 dc->cc_op = op;
731 dc->cc_size = size;
732 dc->flags_uptodate = 0;
735 static inline void cris_update_cc_x(DisasContext *dc)
737 /* Save the x flag state at the time of the cc snapshot. */
738 if (dc->flagx_known) {
739 if (dc->cc_x_uptodate == (2 | dc->flags_x))
740 return;
741 tcg_gen_movi_tl(cc_x, dc->flags_x);
742 dc->cc_x_uptodate = 2 | dc->flags_x;
744 else {
745 tcg_gen_andi_tl(cc_x, cpu_PR[PR_CCS], X_FLAG);
746 dc->cc_x_uptodate = 1;
750 /* Update cc prior to executing ALU op. Needs source operands untouched. */
751 static void cris_pre_alu_update_cc(DisasContext *dc, int op,
752 TCGv dst, TCGv src, int size)
754 if (dc->update_cc) {
755 cris_update_cc_op(dc, op, size);
756 tcg_gen_mov_tl(cc_src, src);
758 if (op != CC_OP_MOVE
759 && op != CC_OP_AND
760 && op != CC_OP_OR
761 && op != CC_OP_XOR
762 && op != CC_OP_ASR
763 && op != CC_OP_LSR
764 && op != CC_OP_LSL)
765 tcg_gen_mov_tl(cc_dest, dst);
767 cris_update_cc_x(dc);
771 /* Update cc after executing ALU op. Needs the result. */
772 static inline void cris_update_result(DisasContext *dc, TCGv res)
774 if (dc->update_cc)
775 tcg_gen_mov_tl(cc_result, res);
778 /* Emit the insns for an ALU operation. The write back stage is handled by the caller (cris_alu). */
779 static void cris_alu_op_exec(DisasContext *dc, int op,
780 TCGv dst, TCGv a, TCGv b, int size)
782 /* Emit the ALU insns. */
783 switch (op)
785 case CC_OP_ADD:
786 tcg_gen_add_tl(dst, a, b);
787 /* Extended arithmetic. */
788 t_gen_addx_carry(dc, dst);
789 break;
790 case CC_OP_ADDC:
791 tcg_gen_add_tl(dst, a, b);
792 t_gen_add_flag(dst, 0); /* C_FLAG. */
793 break;
794 case CC_OP_MCP:
795 tcg_gen_add_tl(dst, a, b);
796 t_gen_add_flag(dst, 8); /* R_FLAG. */
797 break;
798 case CC_OP_SUB:
799 tcg_gen_sub_tl(dst, a, b);
800 /* Extended arithmetic. */
801 t_gen_subx_carry(dc, dst);
802 break;
803 case CC_OP_MOVE:
804 tcg_gen_mov_tl(dst, b);
805 break;
806 case CC_OP_OR:
807 tcg_gen_or_tl(dst, a, b);
808 break;
809 case CC_OP_AND:
810 tcg_gen_and_tl(dst, a, b);
811 break;
812 case CC_OP_XOR:
813 tcg_gen_xor_tl(dst, a, b);
814 break;
815 case CC_OP_LSL:
816 t_gen_lsl(dst, a, b);
817 break;
818 case CC_OP_LSR:
819 t_gen_lsr(dst, a, b);
820 break;
821 case CC_OP_ASR:
822 t_gen_asr(dst, a, b);
823 break;
824 case CC_OP_NEG:
825 tcg_gen_neg_tl(dst, b);
826 /* Extended arithmetic. */
827 t_gen_subx_carry(dc, dst);
828 break;
829 case CC_OP_LZ:
830 gen_helper_lz(dst, b);
831 break;
832 case CC_OP_MULS:
833 t_gen_muls(dst, cpu_PR[PR_MOF], a, b);
834 break;
835 case CC_OP_MULU:
836 t_gen_mulu(dst, cpu_PR[PR_MOF], a, b);
837 break;
838 case CC_OP_DSTEP:
839 t_gen_cris_dstep(dst, a, b);
840 break;
841 case CC_OP_MSTEP:
842 t_gen_cris_mstep(dst, a, b, cpu_PR[PR_CCS]);
843 break;
844 case CC_OP_BOUND:
846 int l1;
847 l1 = gen_new_label();
848 tcg_gen_mov_tl(dst, a);
849 tcg_gen_brcond_tl(TCG_COND_LEU, a, b, l1);
850 tcg_gen_mov_tl(dst, b);
851 gen_set_label(l1);
853 break;
854 case CC_OP_CMP:
855 tcg_gen_sub_tl(dst, a, b);
856 /* Extended arithmetic. */
857 t_gen_subx_carry(dc, dst);
858 break;
859 default:
860 qemu_log("illegal ALU op.\n");
861 BUG();
862 break;
865 if (size == 1)
866 tcg_gen_andi_tl(dst, dst, 0xff);
867 else if (size == 2)
868 tcg_gen_andi_tl(dst, dst, 0xffff);
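/* cris_alu() wraps cris_alu_op_exec() and takes care of sub-word write
 * back: for byte and word sized operations only the low 8 or 16 bits of
 * the destination register are replaced, roughly
 *
 *    d = (d & ~0xffff) | (result & 0xffff);    (the .w case)
 */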
871 static void cris_alu(DisasContext *dc, int op,
872 TCGv d, TCGv op_a, TCGv op_b, int size)
874 TCGv tmp;
875 int writeback;
877 writeback = 1;
879 if (op == CC_OP_CMP) {
880 tmp = tcg_temp_new();
881 writeback = 0;
882 } else if (size == 4) {
883 tmp = d;
884 writeback = 0;
885 } else
886 tmp = tcg_temp_new();
889 cris_pre_alu_update_cc(dc, op, op_a, op_b, size);
890 cris_alu_op_exec(dc, op, tmp, op_a, op_b, size);
891 cris_update_result(dc, tmp);
893 /* Writeback. */
894 if (writeback) {
895 if (size == 1)
896 tcg_gen_andi_tl(d, d, ~0xff);
897 else
898 tcg_gen_andi_tl(d, d, ~0xffff);
899 tcg_gen_or_tl(d, d, tmp);
901 if (!TCGV_EQUAL(tmp, d))
902 tcg_temp_free(tmp);
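/* Typical decoder usage (see e.g. dec_add_r() below):
 *
 *    cris_alu(dc, CC_OP_ADD, cpu_R[dc->op2], t[0], t[1], size);
 *
 * For CC_OP_CMP the destination register is left untouched; only the
 * flags state is recorded. */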
905 static int arith_cc(DisasContext *dc)
907 if (dc->update_cc) {
908 switch (dc->cc_op) {
909 case CC_OP_ADDC: return 1;
910 case CC_OP_ADD: return 1;
911 case CC_OP_SUB: return 1;
912 case CC_OP_DSTEP: return 1;
913 case CC_OP_LSL: return 1;
914 case CC_OP_LSR: return 1;
915 case CC_OP_ASR: return 1;
916 case CC_OP_CMP: return 1;
917 case CC_OP_NEG: return 1;
918 case CC_OP_OR: return 1;
919 case CC_OP_AND: return 1;
920 case CC_OP_XOR: return 1;
921 case CC_OP_MULU: return 1;
922 case CC_OP_MULS: return 1;
923 default:
924 return 0;
927 return 0;
930 static void gen_tst_cc (DisasContext *dc, TCGv cc, int cond)
932 int arith_opt, move_opt;
934 /* TODO: optimize more condition codes. */
937 * If the flags are live, we have to look at the bits in CCS.
938 * Otherwise, if we just did an arithmetic operation, we try to
939 * evaluate the condition code faster.
941 * When this function is done, cc should be non-zero if the condition
942 * code is true.
944 arith_opt = arith_cc(dc) && !dc->flags_uptodate;
945 move_opt = (dc->cc_op == CC_OP_MOVE);
946 switch (cond) {
947 case CC_EQ:
948 if ((arith_opt || move_opt)
949 && dc->cc_x_uptodate != (2 | X_FLAG)) {
950 tcg_gen_setcond_tl(TCG_COND_EQ, cc,
951 cc_result, tcg_const_tl(0));
953 else {
954 cris_evaluate_flags(dc);
955 tcg_gen_andi_tl(cc,
956 cpu_PR[PR_CCS], Z_FLAG);
958 break;
959 case CC_NE:
960 if ((arith_opt || move_opt)
961 && dc->cc_x_uptodate != (2 | X_FLAG)) {
962 tcg_gen_mov_tl(cc, cc_result);
963 } else {
964 cris_evaluate_flags(dc);
965 tcg_gen_xori_tl(cc, cpu_PR[PR_CCS],
966 Z_FLAG);
967 tcg_gen_andi_tl(cc, cc, Z_FLAG);
969 break;
970 case CC_CS:
971 cris_evaluate_flags(dc);
972 tcg_gen_andi_tl(cc, cpu_PR[PR_CCS], C_FLAG);
973 break;
974 case CC_CC:
975 cris_evaluate_flags(dc);
976 tcg_gen_xori_tl(cc, cpu_PR[PR_CCS], C_FLAG);
977 tcg_gen_andi_tl(cc, cc, C_FLAG);
978 break;
979 case CC_VS:
980 cris_evaluate_flags(dc);
981 tcg_gen_andi_tl(cc, cpu_PR[PR_CCS], V_FLAG);
982 break;
983 case CC_VC:
984 cris_evaluate_flags(dc);
985 tcg_gen_xori_tl(cc, cpu_PR[PR_CCS],
986 V_FLAG);
987 tcg_gen_andi_tl(cc, cc, V_FLAG);
988 break;
989 case CC_PL:
990 if (arith_opt || move_opt) {
991 int bits = 31;
993 if (dc->cc_size == 1)
994 bits = 7;
995 else if (dc->cc_size == 2)
996 bits = 15;
998 tcg_gen_shri_tl(cc, cc_result, bits);
999 tcg_gen_xori_tl(cc, cc, 1);
1000 } else {
1001 cris_evaluate_flags(dc);
1002 tcg_gen_xori_tl(cc, cpu_PR[PR_CCS],
1003 N_FLAG);
1004 tcg_gen_andi_tl(cc, cc, N_FLAG);
1006 break;
1007 case CC_MI:
1008 if (arith_opt || move_opt) {
1009 int bits = 31;
1011 if (dc->cc_size == 1)
1012 bits = 7;
1013 else if (dc->cc_size == 2)
1014 bits = 15;
1016 tcg_gen_shri_tl(cc, cc_result, bits);
1017 tcg_gen_andi_tl(cc, cc, 1);
1019 else {
1020 cris_evaluate_flags(dc);
1021 tcg_gen_andi_tl(cc, cpu_PR[PR_CCS],
1022 N_FLAG);
1024 break;
1025 case CC_LS:
1026 cris_evaluate_flags(dc);
1027 tcg_gen_andi_tl(cc, cpu_PR[PR_CCS],
1028 C_FLAG | Z_FLAG);
1029 break;
1030 case CC_HI:
1031 cris_evaluate_flags(dc);
1033 TCGv tmp;
1035 tmp = tcg_temp_new();
1036 tcg_gen_xori_tl(tmp, cpu_PR[PR_CCS],
1037 C_FLAG | Z_FLAG);
1038 /* Overlay the C flag on top of the Z. */
1039 tcg_gen_shli_tl(cc, tmp, 2);
1040 tcg_gen_and_tl(cc, tmp, cc);
1041 tcg_gen_andi_tl(cc, cc, Z_FLAG);
1043 tcg_temp_free(tmp);
1045 break;
1046 case CC_GE:
1047 cris_evaluate_flags(dc);
1048 /* Overlay the V flag on top of the N. */
1049 tcg_gen_shli_tl(cc, cpu_PR[PR_CCS], 2);
1050 tcg_gen_xor_tl(cc,
1051 cpu_PR[PR_CCS], cc);
1052 tcg_gen_andi_tl(cc, cc, N_FLAG);
1053 tcg_gen_xori_tl(cc, cc, N_FLAG);
1054 break;
1055 case CC_LT:
1056 cris_evaluate_flags(dc);
1057 /* Overlay the V flag on top of the N. */
1058 tcg_gen_shli_tl(cc, cpu_PR[PR_CCS], 2);
1059 tcg_gen_xor_tl(cc,
1060 cpu_PR[PR_CCS], cc);
1061 tcg_gen_andi_tl(cc, cc, N_FLAG);
1062 break;
1063 case CC_GT:
1064 cris_evaluate_flags(dc);
1066 TCGv n, z;
1068 n = tcg_temp_new();
1069 z = tcg_temp_new();
1071 /* To avoid a shift we overlay everything on
1072 the V flag. */
1073 tcg_gen_shri_tl(n, cpu_PR[PR_CCS], 2);
1074 tcg_gen_shri_tl(z, cpu_PR[PR_CCS], 1);
1075 /* invert Z. */
1076 tcg_gen_xori_tl(z, z, 2);
1078 tcg_gen_xor_tl(n, n, cpu_PR[PR_CCS]);
1079 tcg_gen_xori_tl(n, n, 2);
1080 tcg_gen_and_tl(cc, z, n);
1081 tcg_gen_andi_tl(cc, cc, 2);
1083 tcg_temp_free(n);
1084 tcg_temp_free(z);
1086 break;
1087 case CC_LE:
1088 cris_evaluate_flags(dc);
1090 TCGv n, z;
1092 n = tcg_temp_new();
1093 z = tcg_temp_new();
1095 /* To avoid a shift we overlay everything on
1096 the V flag. */
1097 tcg_gen_shri_tl(n, cpu_PR[PR_CCS], 2);
1098 tcg_gen_shri_tl(z, cpu_PR[PR_CCS], 1);
1100 tcg_gen_xor_tl(n, n, cpu_PR[PR_CCS]);
1101 tcg_gen_or_tl(cc, z, n);
1102 tcg_gen_andi_tl(cc, cc, 2);
1104 tcg_temp_free(n);
1105 tcg_temp_free(z);
1107 break;
1108 case CC_P:
1109 cris_evaluate_flags(dc);
1110 tcg_gen_andi_tl(cc, cpu_PR[PR_CCS], P_FLAG);
1111 break;
1112 case CC_A:
1113 tcg_gen_movi_tl(cc, 1);
1114 break;
1115 default:
1116 BUG();
1117 break;
1121 static void cris_store_direct_jmp(DisasContext *dc)
1123 /* Store the direct jmp state into the cpu-state. */
1124 if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
1125 if (dc->jmp == JMP_DIRECT) {
1126 tcg_gen_movi_tl(env_btaken, 1);
1128 tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
1129 dc->jmp = JMP_INDIRECT;
1133 static void cris_prepare_cc_branch (DisasContext *dc,
1134 int offset, int cond)
1136 /* This helps us re-schedule the micro-code to insns in delay-slots
1137 before the actual jump. */
1138 dc->delayed_branch = 2;
1139 dc->jmp = JMP_DIRECT_CC;
1140 dc->jmp_pc = dc->pc + offset;
1142 gen_tst_cc (dc, env_btaken, cond);
1143 tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
1147 /* Jumps where the dest is in a live reg, for example. Direct should be set
1148 when the dest addr is constant, to allow tb chaining. */
1149 static inline void cris_prepare_jmp (DisasContext *dc, unsigned int type)
1151 /* This helps us re-schedule the micro-code to insns in delay-slots
1152 before the actual jump. */
1153 dc->delayed_branch = 2;
1154 dc->jmp = type;
1155 if (type == JMP_INDIRECT) {
1156 tcg_gen_movi_tl(env_btaken, 1);
1160 static void gen_load64(DisasContext *dc, TCGv_i64 dst, TCGv addr)
1162 int mem_index = cpu_mmu_index(dc->env);
1164 /* If we get a fault in a delay slot we must keep the jmp state in
1165 the cpu-state to be able to re-execute the jmp. */
1166 if (dc->delayed_branch == 1)
1167 cris_store_direct_jmp(dc);
1169 tcg_gen_qemu_ld64(dst, addr, mem_index);
1172 static void gen_load(DisasContext *dc, TCGv dst, TCGv addr,
1173 unsigned int size, int sign)
1175 int mem_index = cpu_mmu_index(dc->env);
1177 /* If we get a fault in a delay slot we must keep the jmp state in
1178 the cpu-state to be able to re-execute the jmp. */
1179 if (dc->delayed_branch == 1)
1180 cris_store_direct_jmp(dc);
1182 if (size == 1) {
1183 if (sign)
1184 tcg_gen_qemu_ld8s(dst, addr, mem_index);
1185 else
1186 tcg_gen_qemu_ld8u(dst, addr, mem_index);
1188 else if (size == 2) {
1189 if (sign)
1190 tcg_gen_qemu_ld16s(dst, addr, mem_index);
1191 else
1192 tcg_gen_qemu_ld16u(dst, addr, mem_index);
1194 else if (size == 4) {
1195 tcg_gen_qemu_ld32u(dst, addr, mem_index);
1197 else {
1198 abort();
1202 static void gen_store (DisasContext *dc, TCGv addr, TCGv val,
1203 unsigned int size)
1205 int mem_index = cpu_mmu_index(dc->env);
1207 /* If we get a fault in a delay slot we must keep the jmp state in
1208 the cpu-state to be able to re-execute the jmp. */
1209 if (dc->delayed_branch == 1)
1210 cris_store_direct_jmp(dc);
1213 /* Conditional writes. We only support the kind where X and P are known
1214 at translation time. */
1215 if (dc->flagx_known && dc->flags_x && (dc->tb_flags & P_FLAG)) {
1216 dc->postinc = 0;
1217 cris_evaluate_flags(dc);
1218 tcg_gen_ori_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], C_FLAG);
1219 return;
1222 if (size == 1)
1223 tcg_gen_qemu_st8(val, addr, mem_index);
1224 else if (size == 2)
1225 tcg_gen_qemu_st16(val, addr, mem_index);
1226 else
1227 tcg_gen_qemu_st32(val, addr, mem_index);
1229 if (dc->flagx_known && dc->flags_x) {
1230 cris_evaluate_flags(dc);
1231 tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~C_FLAG);
1235 static inline void t_gen_sext(TCGv d, TCGv s, int size)
1237 if (size == 1)
1238 tcg_gen_ext8s_i32(d, s);
1239 else if (size == 2)
1240 tcg_gen_ext16s_i32(d, s);
1241 else if(!TCGV_EQUAL(d, s))
1242 tcg_gen_mov_tl(d, s);
1245 static inline void t_gen_zext(TCGv d, TCGv s, int size)
1247 if (size == 1)
1248 tcg_gen_ext8u_i32(d, s);
1249 else if (size == 2)
1250 tcg_gen_ext16u_i32(d, s);
1251 else if (!TCGV_EQUAL(d, s))
1252 tcg_gen_mov_tl(d, s);
1255 #if DISAS_CRIS
1256 static char memsize_char(int size)
1258 switch (size)
1260 case 1: return 'b'; break;
1261 case 2: return 'w'; break;
1262 case 4: return 'd'; break;
1263 default:
1264 return 'x';
1265 break;
1268 #endif
1270 static inline unsigned int memsize_z(DisasContext *dc)
1272 return dc->zsize + 1;
1275 static inline unsigned int memsize_zz(DisasContext *dc)
1277 switch (dc->zzsize)
1279 case 0: return 1;
1280 case 1: return 2;
1281 default:
1282 return 4;
1286 static inline void do_postinc (DisasContext *dc, int size)
1288 if (dc->postinc)
1289 tcg_gen_addi_tl(cpu_R[dc->op1], cpu_R[dc->op1], size);
1292 static inline void dec_prep_move_r(DisasContext *dc, int rs, int rd,
1293 int size, int s_ext, TCGv dst)
1295 if (s_ext)
1296 t_gen_sext(dst, cpu_R[rs], size);
1297 else
1298 t_gen_zext(dst, cpu_R[rs], size);
1301 /* Prepare dst and src for a register ALU operation.
1302 s_ext decides whether operand1 should be sign-extended or zero-extended
1303 when needed. */
1304 static void dec_prep_alu_r(DisasContext *dc, int rs, int rd,
1305 int size, int s_ext, TCGv dst, TCGv src)
1307 dec_prep_move_r(dc, rs, rd, size, s_ext, src);
1309 if (s_ext)
1310 t_gen_sext(dst, cpu_R[rd], size);
1311 else
1312 t_gen_zext(dst, cpu_R[rd], size);
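/* Memory operand fetch. The encoding "register 15 with post-increment"
   is the immediate addressing mode, so in that case the operand is read
   directly from the insn stream at translation time (cris_fetch) instead
   of emitting a load. */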
1315 static int dec_prep_move_m(DisasContext *dc, int s_ext, int memsize,
1316 TCGv dst)
1318 unsigned int rs;
1319 uint32_t imm;
1320 int is_imm;
1321 int insn_len = 2;
1323 rs = dc->op1;
1324 is_imm = rs == 15 && dc->postinc;
1326 /* Load [$rs] into dst. */
1327 if (is_imm) {
1328 insn_len = 2 + memsize;
1329 if (memsize == 1)
1330 insn_len++;
1332 imm = cris_fetch(dc, dc->pc + 2, memsize, s_ext);
1333 tcg_gen_movi_tl(dst, imm);
1334 dc->postinc = 0;
1335 } else {
1336 cris_flush_cc_state(dc);
1337 gen_load(dc, dst, cpu_R[rs], memsize, 0);
1338 if (s_ext)
1339 t_gen_sext(dst, dst, memsize);
1340 else
1341 t_gen_zext(dst, dst, memsize);
1343 return insn_len;
1346 /* Prepare dst and src for a memory + ALU operation.
1347 s_ext decides whether operand1 should be sign-extended or zero-extended
1348 when needed. */
1349 static int dec_prep_alu_m(DisasContext *dc, int s_ext, int memsize,
1350 TCGv dst, TCGv src)
1352 int insn_len;
1354 insn_len = dec_prep_move_m(dc, s_ext, memsize, src);
1355 tcg_gen_mov_tl(dst, cpu_R[dc->op2]);
1356 return insn_len;
1359 #if DISAS_CRIS
1360 static const char *cc_name(int cc)
1362 static const char *cc_names[16] = {
1363 "cc", "cs", "ne", "eq", "vc", "vs", "pl", "mi",
1364 "ls", "hi", "ge", "lt", "gt", "le", "a", "p"
1366 assert(cc < 16);
1367 return cc_names[cc];
1369 #endif
1371 /* Start of insn decoders. */
1373 static int dec_bccq(DisasContext *dc)
1375 int32_t offset;
1376 int sign;
1377 uint32_t cond = dc->op2;
1379 offset = EXTRACT_FIELD (dc->ir, 1, 7);
1380 sign = EXTRACT_FIELD(dc->ir, 0, 0);
1382 offset *= 2;
1383 offset |= sign << 8;
1384 offset = sign_extend(offset, 8);
1386 LOG_DIS("b%s %x\n", cc_name(cond), dc->pc + offset);
1388 /* op2 holds the condition-code. */
1389 cris_cc_mask(dc, 0);
1390 cris_prepare_cc_branch (dc, offset, cond);
1391 return 2;
1393 static int dec_addoq(DisasContext *dc)
1395 int32_t imm;
1397 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 7);
1398 imm = sign_extend(dc->op1, 7);
1400 LOG_DIS("addoq %d, $r%u\n", imm, dc->op2);
1401 cris_cc_mask(dc, 0);
1402 /* Fetch register operand. */
1403 tcg_gen_addi_tl(cpu_R[R_ACR], cpu_R[dc->op2], imm);
1405 return 2;
1407 static int dec_addq(DisasContext *dc)
1409 LOG_DIS("addq %u, $r%u\n", dc->op1, dc->op2);
1411 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
1413 cris_cc_mask(dc, CC_MASK_NZVC);
1415 cris_alu(dc, CC_OP_ADD,
1416 cpu_R[dc->op2], cpu_R[dc->op2], tcg_const_tl(dc->op1), 4);
1417 return 2;
1419 static int dec_moveq(DisasContext *dc)
1421 uint32_t imm;
1423 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
1424 imm = sign_extend(dc->op1, 5);
1425 LOG_DIS("moveq %d, $r%u\n", imm, dc->op2);
1427 tcg_gen_movi_tl(cpu_R[dc->op2], imm);
1428 return 2;
1430 static int dec_subq(DisasContext *dc)
1432 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
1434 LOG_DIS("subq %u, $r%u\n", dc->op1, dc->op2);
1436 cris_cc_mask(dc, CC_MASK_NZVC);
1437 cris_alu(dc, CC_OP_SUB,
1438 cpu_R[dc->op2], cpu_R[dc->op2], tcg_const_tl(dc->op1), 4);
1439 return 2;
1441 static int dec_cmpq(DisasContext *dc)
1443 uint32_t imm;
1444 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
1445 imm = sign_extend(dc->op1, 5);
1447 LOG_DIS("cmpq %d, $r%d\n", imm, dc->op2);
1448 cris_cc_mask(dc, CC_MASK_NZVC);
1450 cris_alu(dc, CC_OP_CMP,
1451 cpu_R[dc->op2], cpu_R[dc->op2], tcg_const_tl(imm), 4);
1452 return 2;
1454 static int dec_andq(DisasContext *dc)
1456 uint32_t imm;
1457 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
1458 imm = sign_extend(dc->op1, 5);
1460 LOG_DIS("andq %d, $r%d\n", imm, dc->op2);
1461 cris_cc_mask(dc, CC_MASK_NZ);
1463 cris_alu(dc, CC_OP_AND,
1464 cpu_R[dc->op2], cpu_R[dc->op2], tcg_const_tl(imm), 4);
1465 return 2;
1467 static int dec_orq(DisasContext *dc)
1469 uint32_t imm;
1470 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
1471 imm = sign_extend(dc->op1, 5);
1472 LOG_DIS("orq %d, $r%d\n", imm, dc->op2);
1473 cris_cc_mask(dc, CC_MASK_NZ);
1475 cris_alu(dc, CC_OP_OR,
1476 cpu_R[dc->op2], cpu_R[dc->op2], tcg_const_tl(imm), 4);
1477 return 2;
1479 static int dec_btstq(DisasContext *dc)
1481 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 4);
1482 LOG_DIS("btstq %u, $r%d\n", dc->op1, dc->op2);
1484 cris_cc_mask(dc, CC_MASK_NZ);
1485 cris_evaluate_flags(dc);
1486 gen_helper_btst(cpu_PR[PR_CCS], cpu_R[dc->op2],
1487 tcg_const_tl(dc->op1), cpu_PR[PR_CCS]);
1488 cris_alu(dc, CC_OP_MOVE,
1489 cpu_R[dc->op2], cpu_R[dc->op2], cpu_R[dc->op2], 4);
1490 cris_update_cc_op(dc, CC_OP_FLAGS, 4);
1491 dc->flags_uptodate = 1;
1492 return 2;
1494 static int dec_asrq(DisasContext *dc)
1496 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 4);
1497 LOG_DIS("asrq %u, $r%d\n", dc->op1, dc->op2);
1498 cris_cc_mask(dc, CC_MASK_NZ);
1500 tcg_gen_sari_tl(cpu_R[dc->op2], cpu_R[dc->op2], dc->op1);
1501 cris_alu(dc, CC_OP_MOVE,
1502 cpu_R[dc->op2],
1503 cpu_R[dc->op2], cpu_R[dc->op2], 4);
1504 return 2;
1506 static int dec_lslq(DisasContext *dc)
1508 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 4);
1509 LOG_DIS("lslq %u, $r%d\n", dc->op1, dc->op2);
1511 cris_cc_mask(dc, CC_MASK_NZ);
1513 tcg_gen_shli_tl(cpu_R[dc->op2], cpu_R[dc->op2], dc->op1);
1515 cris_alu(dc, CC_OP_MOVE,
1516 cpu_R[dc->op2],
1517 cpu_R[dc->op2], cpu_R[dc->op2], 4);
1518 return 2;
1520 static int dec_lsrq(DisasContext *dc)
1522 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 4);
1523 LOG_DIS("lsrq %u, $r%d\n", dc->op1, dc->op2);
1525 cris_cc_mask(dc, CC_MASK_NZ);
1527 tcg_gen_shri_tl(cpu_R[dc->op2], cpu_R[dc->op2], dc->op1);
1528 cris_alu(dc, CC_OP_MOVE,
1529 cpu_R[dc->op2],
1530 cpu_R[dc->op2], cpu_R[dc->op2], 4);
1531 return 2;
1534 static int dec_move_r(DisasContext *dc)
1536 int size = memsize_zz(dc);
1538 LOG_DIS("move.%c $r%u, $r%u\n",
1539 memsize_char(size), dc->op1, dc->op2);
1541 cris_cc_mask(dc, CC_MASK_NZ);
1542 if (size == 4) {
1543 dec_prep_move_r(dc, dc->op1, dc->op2, size, 0, cpu_R[dc->op2]);
1544 cris_cc_mask(dc, CC_MASK_NZ);
1545 cris_update_cc_op(dc, CC_OP_MOVE, 4);
1546 cris_update_cc_x(dc);
1547 cris_update_result(dc, cpu_R[dc->op2]);
1549 else {
1550 TCGv t0;
1552 t0 = tcg_temp_new();
1553 dec_prep_move_r(dc, dc->op1, dc->op2, size, 0, t0);
1554 cris_alu(dc, CC_OP_MOVE,
1555 cpu_R[dc->op2],
1556 cpu_R[dc->op2], t0, size);
1557 tcg_temp_free(t0);
1559 return 2;
1562 static int dec_scc_r(DisasContext *dc)
1564 int cond = dc->op2;
1566 LOG_DIS("s%s $r%u\n",
1567 cc_name(cond), dc->op1);
1569 if (cond != CC_A)
1571 int l1;
1573 gen_tst_cc (dc, cpu_R[dc->op1], cond);
1574 l1 = gen_new_label();
1575 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_R[dc->op1], 0, l1);
1576 tcg_gen_movi_tl(cpu_R[dc->op1], 1);
1577 gen_set_label(l1);
1579 else
1580 tcg_gen_movi_tl(cpu_R[dc->op1], 1);
1582 cris_cc_mask(dc, 0);
1583 return 2;
1586 static inline void cris_alu_alloc_temps(DisasContext *dc, int size, TCGv *t)
1588 if (size == 4) {
1589 t[0] = cpu_R[dc->op2];
1590 t[1] = cpu_R[dc->op1];
1591 } else {
1592 t[0] = tcg_temp_new();
1593 t[1] = tcg_temp_new();
1597 static inline void cris_alu_free_temps(DisasContext *dc, int size, TCGv *t)
1599 if (size != 4) {
1600 tcg_temp_free(t[0]);
1601 tcg_temp_free(t[1]);
1605 static int dec_and_r(DisasContext *dc)
1607 TCGv t[2];
1608 int size = memsize_zz(dc);
1610 LOG_DIS("and.%c $r%u, $r%u\n",
1611 memsize_char(size), dc->op1, dc->op2);
1613 cris_cc_mask(dc, CC_MASK_NZ);
1615 cris_alu_alloc_temps(dc, size, t);
1616 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1617 cris_alu(dc, CC_OP_AND, cpu_R[dc->op2], t[0], t[1], size);
1618 cris_alu_free_temps(dc, size, t);
1619 return 2;
1622 static int dec_lz_r(DisasContext *dc)
1624 TCGv t0;
1625 LOG_DIS("lz $r%u, $r%u\n",
1626 dc->op1, dc->op2);
1627 cris_cc_mask(dc, CC_MASK_NZ);
1628 t0 = tcg_temp_new();
1629 dec_prep_alu_r(dc, dc->op1, dc->op2, 4, 0, cpu_R[dc->op2], t0);
1630 cris_alu(dc, CC_OP_LZ, cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
1631 tcg_temp_free(t0);
1632 return 2;
1635 static int dec_lsl_r(DisasContext *dc)
1637 TCGv t[2];
1638 int size = memsize_zz(dc);
1640 LOG_DIS("lsl.%c $r%u, $r%u\n",
1641 memsize_char(size), dc->op1, dc->op2);
1643 cris_cc_mask(dc, CC_MASK_NZ);
1644 cris_alu_alloc_temps(dc, size, t);
1645 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1646 tcg_gen_andi_tl(t[1], t[1], 63);
1647 cris_alu(dc, CC_OP_LSL, cpu_R[dc->op2], t[0], t[1], size);
1648 cris_alu_free_temps(dc, size, t);
1649 return 2;
1652 static int dec_lsr_r(DisasContext *dc)
1654 TCGv t[2];
1655 int size = memsize_zz(dc);
1657 LOG_DIS("lsr.%c $r%u, $r%u\n",
1658 memsize_char(size), dc->op1, dc->op2);
1660 cris_cc_mask(dc, CC_MASK_NZ);
1661 cris_alu_alloc_temps(dc, size, t);
1662 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1663 tcg_gen_andi_tl(t[1], t[1], 63);
1664 cris_alu(dc, CC_OP_LSR, cpu_R[dc->op2], t[0], t[1], size);
1665 cris_alu_free_temps(dc, size, t);
1666 return 2;
1669 static int dec_asr_r(DisasContext *dc)
1671 TCGv t[2];
1672 int size = memsize_zz(dc);
1674 LOG_DIS("asr.%c $r%u, $r%u\n",
1675 memsize_char(size), dc->op1, dc->op2);
1677 cris_cc_mask(dc, CC_MASK_NZ);
1678 cris_alu_alloc_temps(dc, size, t);
1679 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 1, t[0], t[1]);
1680 tcg_gen_andi_tl(t[1], t[1], 63);
1681 cris_alu(dc, CC_OP_ASR, cpu_R[dc->op2], t[0], t[1], size);
1682 cris_alu_free_temps(dc, size, t);
1683 return 2;
1686 static int dec_muls_r(DisasContext *dc)
1688 TCGv t[2];
1689 int size = memsize_zz(dc);
1691 LOG_DIS("muls.%c $r%u, $r%u\n",
1692 memsize_char(size), dc->op1, dc->op2);
1693 cris_cc_mask(dc, CC_MASK_NZV);
1694 cris_alu_alloc_temps(dc, size, t);
1695 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 1, t[0], t[1]);
1697 cris_alu(dc, CC_OP_MULS, cpu_R[dc->op2], t[0], t[1], 4);
1698 cris_alu_free_temps(dc, size, t);
1699 return 2;
1702 static int dec_mulu_r(DisasContext *dc)
1704 TCGv t[2];
1705 int size = memsize_zz(dc);
1707 LOG_DIS("mulu.%c $r%u, $r%u\n",
1708 memsize_char(size), dc->op1, dc->op2);
1709 cris_cc_mask(dc, CC_MASK_NZV);
1710 cris_alu_alloc_temps(dc, size, t);
1711 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1713 cris_alu(dc, CC_OP_MULU, cpu_R[dc->op2], t[0], t[1], 4);
1714 cris_alu_free_temps(dc, size, t);
1715 return 2;
1719 static int dec_dstep_r(DisasContext *dc)
1721 LOG_DIS("dstep $r%u, $r%u\n", dc->op1, dc->op2);
1722 cris_cc_mask(dc, CC_MASK_NZ);
1723 cris_alu(dc, CC_OP_DSTEP,
1724 cpu_R[dc->op2], cpu_R[dc->op2], cpu_R[dc->op1], 4);
1725 return 2;
1728 static int dec_xor_r(DisasContext *dc)
1730 TCGv t[2];
1731 int size = memsize_zz(dc);
1732 LOG_DIS("xor.%c $r%u, $r%u\n",
1733 memsize_char(size), dc->op1, dc->op2);
1734 BUG_ON(size != 4); /* xor is dword. */
1735 cris_cc_mask(dc, CC_MASK_NZ);
1736 cris_alu_alloc_temps(dc, size, t);
1737 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1739 cris_alu(dc, CC_OP_XOR, cpu_R[dc->op2], t[0], t[1], 4);
1740 cris_alu_free_temps(dc, size, t);
1741 return 2;
1744 static int dec_bound_r(DisasContext *dc)
1746 TCGv l0;
1747 int size = memsize_zz(dc);
1748 LOG_DIS("bound.%c $r%u, $r%u\n",
1749 memsize_char(size), dc->op1, dc->op2);
1750 cris_cc_mask(dc, CC_MASK_NZ);
1751 l0 = tcg_temp_local_new();
1752 dec_prep_move_r(dc, dc->op1, dc->op2, size, 0, l0);
1753 cris_alu(dc, CC_OP_BOUND, cpu_R[dc->op2], cpu_R[dc->op2], l0, 4);
1754 tcg_temp_free(l0);
1755 return 2;
1758 static int dec_cmp_r(DisasContext *dc)
1760 TCGv t[2];
1761 int size = memsize_zz(dc);
1762 LOG_DIS("cmp.%c $r%u, $r%u\n",
1763 memsize_char(size), dc->op1, dc->op2);
1764 cris_cc_mask(dc, CC_MASK_NZVC);
1765 cris_alu_alloc_temps(dc, size, t);
1766 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1768 cris_alu(dc, CC_OP_CMP, cpu_R[dc->op2], t[0], t[1], size);
1769 cris_alu_free_temps(dc, size, t);
1770 return 2;
1773 static int dec_abs_r(DisasContext *dc)
1775 TCGv t0;
1777 LOG_DIS("abs $r%u, $r%u\n",
1778 dc->op1, dc->op2);
1779 cris_cc_mask(dc, CC_MASK_NZ);
1781 t0 = tcg_temp_new();
1782 tcg_gen_sari_tl(t0, cpu_R[dc->op1], 31);
1783 tcg_gen_xor_tl(cpu_R[dc->op2], cpu_R[dc->op1], t0);
1784 tcg_gen_sub_tl(cpu_R[dc->op2], cpu_R[dc->op2], t0);
1785 tcg_temp_free(t0);
1787 cris_alu(dc, CC_OP_MOVE,
1788 cpu_R[dc->op2], cpu_R[dc->op2], cpu_R[dc->op2], 4);
1789 return 2;
1792 static int dec_add_r(DisasContext *dc)
1794 TCGv t[2];
1795 int size = memsize_zz(dc);
1796 LOG_DIS("add.%c $r%u, $r%u\n",
1797 memsize_char(size), dc->op1, dc->op2);
1798 cris_cc_mask(dc, CC_MASK_NZVC);
1799 cris_alu_alloc_temps(dc, size, t);
1800 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1802 cris_alu(dc, CC_OP_ADD, cpu_R[dc->op2], t[0], t[1], size);
1803 cris_alu_free_temps(dc, size, t);
1804 return 2;
1807 static int dec_addc_r(DisasContext *dc)
1809 LOG_DIS("addc $r%u, $r%u\n",
1810 dc->op1, dc->op2);
1811 cris_evaluate_flags(dc);
1812 /* Set for this insn. */
1813 dc->flagx_known = 1;
1814 dc->flags_x = X_FLAG;
1816 cris_cc_mask(dc, CC_MASK_NZVC);
1817 cris_alu(dc, CC_OP_ADDC,
1818 cpu_R[dc->op2], cpu_R[dc->op2], cpu_R[dc->op1], 4);
1819 return 2;
1822 static int dec_mcp_r(DisasContext *dc)
1824 LOG_DIS("mcp $p%u, $r%u\n",
1825 dc->op2, dc->op1);
1826 cris_evaluate_flags(dc);
1827 cris_cc_mask(dc, CC_MASK_RNZV);
1828 cris_alu(dc, CC_OP_MCP,
1829 cpu_R[dc->op1], cpu_R[dc->op1], cpu_PR[dc->op2], 4);
1830 return 2;
1833 #if DISAS_CRIS
1834 static char * swapmode_name(int mode, char *modename) {
1835 int i = 0;
1836 if (mode & 8)
1837 modename[i++] = 'n';
1838 if (mode & 4)
1839 modename[i++] = 'w';
1840 if (mode & 2)
1841 modename[i++] = 'b';
1842 if (mode & 1)
1843 modename[i++] = 'r';
1844 modename[i++] = 0;
1845 return modename;
1847 #endif
1849 static int dec_swap_r(DisasContext *dc)
1851 TCGv t0;
1852 #if DISAS_CRIS
1853 char modename[5]; /* Up to four mode characters plus terminating NUL. */
1854 #endif
1855 LOG_DIS("swap%s $r%u\n",
1856 swapmode_name(dc->op2, modename), dc->op1);
1858 cris_cc_mask(dc, CC_MASK_NZ);
1859 t0 = tcg_temp_new();
1860 t_gen_mov_TN_reg(t0, dc->op1);
1861 if (dc->op2 & 8)
1862 tcg_gen_not_tl(t0, t0);
1863 if (dc->op2 & 4)
1864 t_gen_swapw(t0, t0);
1865 if (dc->op2 & 2)
1866 t_gen_swapb(t0, t0);
1867 if (dc->op2 & 1)
1868 t_gen_swapr(t0, t0);
1869 cris_alu(dc, CC_OP_MOVE,
1870 cpu_R[dc->op1], cpu_R[dc->op1], t0, 4);
1871 tcg_temp_free(t0);
1872 return 2;
1875 static int dec_or_r(DisasContext *dc)
1877 TCGv t[2];
1878 int size = memsize_zz(dc);
1879 LOG_DIS("or.%c $r%u, $r%u\n",
1880 memsize_char(size), dc->op1, dc->op2);
1881 cris_cc_mask(dc, CC_MASK_NZ);
1882 cris_alu_alloc_temps(dc, size, t);
1883 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1884 cris_alu(dc, CC_OP_OR, cpu_R[dc->op2], t[0], t[1], size);
1885 cris_alu_free_temps(dc, size, t);
1886 return 2;
1889 static int dec_addi_r(DisasContext *dc)
1891 TCGv t0;
1892 LOG_DIS("addi.%c $r%u, $r%u\n",
1893 memsize_char(memsize_zz(dc)), dc->op2, dc->op1);
1894 cris_cc_mask(dc, 0);
1895 t0 = tcg_temp_new();
1896 tcg_gen_shl_tl(t0, cpu_R[dc->op2], tcg_const_tl(dc->zzsize));
1897 tcg_gen_add_tl(cpu_R[dc->op1], cpu_R[dc->op1], t0);
1898 tcg_temp_free(t0);
1899 return 2;
1902 static int dec_addi_acr(DisasContext *dc)
1904 TCGv t0;
1905 LOG_DIS("addi.%c $r%u, $r%u, $acr\n",
1906 memsize_char(memsize_zz(dc)), dc->op2, dc->op1);
1907 cris_cc_mask(dc, 0);
1908 t0 = tcg_temp_new();
1909 tcg_gen_shl_tl(t0, cpu_R[dc->op2], tcg_const_tl(dc->zzsize));
1910 tcg_gen_add_tl(cpu_R[R_ACR], cpu_R[dc->op1], t0);
1911 tcg_temp_free(t0);
1912 return 2;
1915 static int dec_neg_r(DisasContext *dc)
1917 TCGv t[2];
1918 int size = memsize_zz(dc);
1919 LOG_DIS("neg.%c $r%u, $r%u\n",
1920 memsize_char(size), dc->op1, dc->op2);
1921 cris_cc_mask(dc, CC_MASK_NZVC);
1922 cris_alu_alloc_temps(dc, size, t);
1923 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1925 cris_alu(dc, CC_OP_NEG, cpu_R[dc->op2], t[0], t[1], size);
1926 cris_alu_free_temps(dc, size, t);
1927 return 2;
1930 static int dec_btst_r(DisasContext *dc)
1932 LOG_DIS("btst $r%u, $r%u\n",
1933 dc->op1, dc->op2);
1934 cris_cc_mask(dc, CC_MASK_NZ);
1935 cris_evaluate_flags(dc);
1936 gen_helper_btst(cpu_PR[PR_CCS], cpu_R[dc->op2],
1937 cpu_R[dc->op1], cpu_PR[PR_CCS]);
1938 cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op2],
1939 cpu_R[dc->op2], cpu_R[dc->op2], 4);
1940 cris_update_cc_op(dc, CC_OP_FLAGS, 4);
1941 dc->flags_uptodate = 1;
1942 return 2;
1945 static int dec_sub_r(DisasContext *dc)
1947 TCGv t[2];
1948 int size = memsize_zz(dc);
1949 LOG_DIS("sub.%c $r%u, $r%u\n",
1950 memsize_char(size), dc->op1, dc->op2);
1951 cris_cc_mask(dc, CC_MASK_NZVC);
1952 cris_alu_alloc_temps(dc, size, t);
1953 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1954 cris_alu(dc, CC_OP_SUB, cpu_R[dc->op2], t[0], t[1], size);
1955 cris_alu_free_temps(dc, size, t);
1956 return 2;
1959 /* Zero extension. From size to dword. */
1960 static int dec_movu_r(DisasContext *dc)
1962 TCGv t0;
1963 int size = memsize_z(dc);
1964 LOG_DIS("movu.%c $r%u, $r%u\n",
1965 memsize_char(size),
1966 dc->op1, dc->op2);
1968 cris_cc_mask(dc, CC_MASK_NZ);
1969 t0 = tcg_temp_new();
1970 dec_prep_move_r(dc, dc->op1, dc->op2, size, 0, t0);
1971 cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
1972 tcg_temp_free(t0);
1973 return 2;
1976 /* Sign extension. From size to dword. */
1977 static int dec_movs_r(DisasContext *dc)
1979 TCGv t0;
1980 int size = memsize_z(dc);
1981 LOG_DIS("movs.%c $r%u, $r%u\n",
1982 memsize_char(size),
1983 dc->op1, dc->op2);
1985 cris_cc_mask(dc, CC_MASK_NZ);
1986 t0 = tcg_temp_new();
1987 /* Size can only be qi or hi. */
1988 t_gen_sext(t0, cpu_R[dc->op1], size);
1989 cris_alu(dc, CC_OP_MOVE,
1990 cpu_R[dc->op2], cpu_R[dc->op1], t0, 4);
1991 tcg_temp_free(t0);
1992 return 2;
1995 /* zero extension. From size to dword. */
1996 static int dec_addu_r(DisasContext *dc)
1998 TCGv t0;
1999 int size = memsize_z(dc);
2000 LOG_DIS("addu.%c $r%u, $r%u\n",
2001 memsize_char(size),
2002 dc->op1, dc->op2);
2004 cris_cc_mask(dc, CC_MASK_NZVC);
2005 t0 = tcg_temp_new();
2006 /* Size can only be qi or hi. */
2007 t_gen_zext(t0, cpu_R[dc->op1], size);
2008 cris_alu(dc, CC_OP_ADD,
2009 cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
2010 tcg_temp_free(t0);
2011 return 2;
2014 /* Sign extension. From size to dword. */
2015 static int dec_adds_r(DisasContext *dc)
2017 TCGv t0;
2018 int size = memsize_z(dc);
2019 LOG_DIS("adds.%c $r%u, $r%u\n",
2020 memsize_char(size),
2021 dc->op1, dc->op2);
2023 cris_cc_mask(dc, CC_MASK_NZVC);
2024 t0 = tcg_temp_new();
2025 /* Size can only be qi or hi. */
2026 t_gen_sext(t0, cpu_R[dc->op1], size);
2027 cris_alu(dc, CC_OP_ADD,
2028 cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
2029 tcg_temp_free(t0);
2030 return 2;
2033 /* Zero extension. From size to dword. */
2034 static int dec_subu_r(DisasContext *dc)
2036 TCGv t0;
2037 int size = memsize_z(dc);
2038 LOG_DIS("subu.%c $r%u, $r%u\n",
2039 memsize_char(size),
2040 dc->op1, dc->op2);
2042 cris_cc_mask(dc, CC_MASK_NZVC);
2043 t0 = tcg_temp_new();
2044 /* Size can only be qi or hi. */
2045 t_gen_zext(t0, cpu_R[dc->op1], size);
2046 cris_alu(dc, CC_OP_SUB,
2047 cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
2048 tcg_temp_free(t0);
2049 return 2;
2052 /* Sign extension. From size to dword. */
2053 static int dec_subs_r(DisasContext *dc)
2055 TCGv t0;
2056 int size = memsize_z(dc);
2057 LOG_DIS("subs.%c $r%u, $r%u\n",
2058 memsize_char(size),
2059 dc->op1, dc->op2);
2061 cris_cc_mask(dc, CC_MASK_NZVC);
2062 t0 = tcg_temp_new();
2063 /* Size can only be qi or hi. */
2064 t_gen_sext(t0, cpu_R[dc->op1], size);
2065 cris_alu(dc, CC_OP_SUB,
2066 cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
2067 tcg_temp_free(t0);
2068 return 2;
2071 static int dec_setclrf(DisasContext *dc)
2073 uint32_t flags;
2074 int set = (~dc->opcode >> 2) & 1;
2077 flags = (EXTRACT_FIELD(dc->ir, 12, 15) << 4)
2078 | EXTRACT_FIELD(dc->ir, 0, 3);
2079 if (set && flags == 0) {
2080 LOG_DIS("nop\n");
2081 return 2;
2082 } else if (!set && (flags & 0x20)) {
2083 LOG_DIS("di\n");
2085 else {
2086 LOG_DIS("%sf %x\n",
2087 set ? "set" : "clr",
2088 flags);
2091 /* User space is not allowed to touch these. Silently ignore. */
2092 if (dc->tb_flags & U_FLAG) {
2093 flags &= ~(S_FLAG | I_FLAG | U_FLAG);
2096 if (flags & X_FLAG) {
2097 dc->flagx_known = 1;
2098 if (set)
2099 dc->flags_x = X_FLAG;
2100 else
2101 dc->flags_x = 0;
2104 /* Break the TB if any of the SPI flags change. */
2105 if (flags & (P_FLAG | S_FLAG)) {
2106 tcg_gen_movi_tl(env_pc, dc->pc + 2);
2107 dc->is_jmp = DISAS_UPDATE;
2108 dc->cpustate_changed = 1;
2111 /* For the I flag, only act on posedge. */
2112 if ((flags & I_FLAG)) {
2113 tcg_gen_movi_tl(env_pc, dc->pc + 2);
2114 dc->is_jmp = DISAS_UPDATE;
2115 dc->cpustate_changed = 1;
2119 /* Simply decode the flags. */
2120 cris_evaluate_flags (dc);
2121 cris_update_cc_op(dc, CC_OP_FLAGS, 4);
2122 cris_update_cc_x(dc);
2123 tcg_gen_movi_tl(cc_op, dc->cc_op);
2125 if (set) {
2126 if (!(dc->tb_flags & U_FLAG) && (flags & U_FLAG)) {
2127 /* Enter user mode. */
2128 t_gen_mov_env_TN(ksp, cpu_R[R_SP]);
2129 tcg_gen_mov_tl(cpu_R[R_SP], cpu_PR[PR_USP]);
2130 dc->cpustate_changed = 1;
2132 tcg_gen_ori_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], flags);
2134 else
2135 tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~flags);
2137 dc->flags_uptodate = 1;
2138 dc->clear_x = 0;
2139 return 2;
2142 static int dec_move_rs(DisasContext *dc)
2144 LOG_DIS("move $r%u, $s%u\n", dc->op1, dc->op2);
2145 cris_cc_mask(dc, 0);
2146 gen_helper_movl_sreg_reg(tcg_const_tl(dc->op2), tcg_const_tl(dc->op1));
2147 return 2;
2149 static int dec_move_sr(DisasContext *dc)
2151 LOG_DIS("move $s%u, $r%u\n", dc->op2, dc->op1);
2152 cris_cc_mask(dc, 0);
2153 gen_helper_movl_reg_sreg(tcg_const_tl(dc->op1), tcg_const_tl(dc->op2));
2154 return 2;
2157 static int dec_move_rp(DisasContext *dc)
2159 TCGv t[2];
2160 LOG_DIS("move $r%u, $p%u\n", dc->op1, dc->op2);
2161 cris_cc_mask(dc, 0);
2163 t[0] = tcg_temp_new();
2164 if (dc->op2 == PR_CCS) {
2165 cris_evaluate_flags(dc);
2166 t_gen_mov_TN_reg(t[0], dc->op1);
2167 if (dc->tb_flags & U_FLAG) {
2168 t[1] = tcg_temp_new();
2169 /* User space is not allowed to touch all flags. */
2170 tcg_gen_andi_tl(t[0], t[0], 0x39f);
2171 tcg_gen_andi_tl(t[1], cpu_PR[PR_CCS], ~0x39f);
2172 tcg_gen_or_tl(t[0], t[1], t[0]);
2173 tcg_temp_free(t[1]);
2176 else
2177 t_gen_mov_TN_reg(t[0], dc->op1);
2179 t_gen_mov_preg_TN(dc, dc->op2, t[0]);
2180 if (dc->op2 == PR_CCS) {
2181 cris_update_cc_op(dc, CC_OP_FLAGS, 4);
2182 dc->flags_uptodate = 1;
2184 tcg_temp_free(t[0]);
2185 return 2;
2187 static int dec_move_pr(DisasContext *dc)
2189 TCGv t0;
2190 LOG_DIS("move $p%u, $r%u\n", dc->op2, dc->op1);
2191 cris_cc_mask(dc, 0);
2193 if (dc->op2 == PR_CCS)
2194 cris_evaluate_flags(dc);
2196 if (dc->op2 == PR_DZ) {
2197 tcg_gen_movi_tl(cpu_R[dc->op1], 0);
2198 } else {
2199 t0 = tcg_temp_new();
2200 t_gen_mov_TN_preg(t0, dc->op2);
2201 cris_alu(dc, CC_OP_MOVE,
2202 cpu_R[dc->op1], cpu_R[dc->op1], t0,
2203 preg_sizes[dc->op2]);
2204 tcg_temp_free(t0);
2206 return 2;
2209 static int dec_move_mr(DisasContext *dc)
2211 int memsize = memsize_zz(dc);
2212 int insn_len;
2213 LOG_DIS("move.%c [$r%u%s, $r%u\n",
2214 memsize_char(memsize),
2215 dc->op1, dc->postinc ? "+]" : "]",
2216 dc->op2);
2218 if (memsize == 4) {
2219 insn_len = dec_prep_move_m(dc, 0, 4, cpu_R[dc->op2]);
2220 cris_cc_mask(dc, CC_MASK_NZ);
2221 cris_update_cc_op(dc, CC_OP_MOVE, 4);
2222 cris_update_cc_x(dc);
2223 cris_update_result(dc, cpu_R[dc->op2]);
2225 else {
2226 TCGv t0;
2228 t0 = tcg_temp_new();
2229 insn_len = dec_prep_move_m(dc, 0, memsize, t0);
2230 cris_cc_mask(dc, CC_MASK_NZ);
2231 cris_alu(dc, CC_OP_MOVE,
2232 cpu_R[dc->op2], cpu_R[dc->op2], t0, memsize);
2233 tcg_temp_free(t0);
2235 do_postinc(dc, memsize);
2236 return insn_len;
2239 static inline void cris_alu_m_alloc_temps(TCGv *t)
2241 t[0] = tcg_temp_new();
2242 t[1] = tcg_temp_new();
2245 static inline void cris_alu_m_free_temps(TCGv *t)
2247 tcg_temp_free(t[0]);
2248 tcg_temp_free(t[1]);
2251 static int dec_movs_m(DisasContext *dc)
2253 TCGv t[2];
2254 int memsize = memsize_z(dc);
2255 int insn_len;
2256 LOG_DIS("movs.%c [$r%u%s, $r%u\n",
2257 memsize_char(memsize),
2258 dc->op1, dc->postinc ? "+]" : "]",
2259 dc->op2);
2261 cris_alu_m_alloc_temps(t);
2262 /* sign extend. */
2263 insn_len = dec_prep_alu_m(dc, 1, memsize, t[0], t[1]);
2264 cris_cc_mask(dc, CC_MASK_NZ);
2265 cris_alu(dc, CC_OP_MOVE,
2266 cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
2267 do_postinc(dc, memsize);
2268 cris_alu_m_free_temps(t);
2269 return insn_len;
2272 static int dec_addu_m(DisasContext *dc)
2274 TCGv t[2];
2275 int memsize = memsize_z(dc);
2276 int insn_len;
2277 LOG_DIS("addu.%c [$r%u%s, $r%u\n",
2278 memsize_char(memsize),
2279 dc->op1, dc->postinc ? "+]" : "]",
2280 dc->op2);
2282 cris_alu_m_alloc_temps(t);
2283 /* zero extend. */
2284 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2285 cris_cc_mask(dc, CC_MASK_NZVC);
2286 cris_alu(dc, CC_OP_ADD,
2287 cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
2288 do_postinc(dc, memsize);
2289 cris_alu_m_free_temps(t);
2290 return insn_len;
2293 static int dec_adds_m(DisasContext *dc)
2295 TCGv t[2];
2296 int memsize = memsize_z(dc);
2297 int insn_len;
2298 LOG_DIS("adds.%c [$r%u%s, $r%u\n",
2299 memsize_char(memsize),
2300 dc->op1, dc->postinc ? "+]" : "]",
2301 dc->op2);
2303 cris_alu_m_alloc_temps(t);
2304 /* sign extend. */
2305 insn_len = dec_prep_alu_m(dc, 1, memsize, t[0], t[1]);
2306 cris_cc_mask(dc, CC_MASK_NZVC);
2307 cris_alu(dc, CC_OP_ADD, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
2308 do_postinc(dc, memsize);
2309 cris_alu_m_free_temps(t);
2310 return insn_len;
2313 static int dec_subu_m(DisasContext *dc)
2315 TCGv t[2];
2316 int memsize = memsize_z(dc);
2317 int insn_len;
2318 LOG_DIS("subu.%c [$r%u%s, $r%u\n",
2319 memsize_char(memsize),
2320 dc->op1, dc->postinc ? "+]" : "]",
2321 dc->op2);
2323 cris_alu_m_alloc_temps(t);
2324 /* zero extend. */
2325 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2326 cris_cc_mask(dc, CC_MASK_NZVC);
2327 cris_alu(dc, CC_OP_SUB, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
2328 do_postinc(dc, memsize);
2329 cris_alu_m_free_temps(t);
2330 return insn_len;
2333 static int dec_subs_m(DisasContext *dc)
2335 TCGv t[2];
2336 int memsize = memsize_z(dc);
2337 int insn_len;
2338 LOG_DIS("subs.%c [$r%u%s, $r%u\n",
2339 memsize_char(memsize),
2340 dc->op1, dc->postinc ? "+]" : "]",
2341 dc->op2);
2343 cris_alu_m_alloc_temps(t);
2344 /* sign extend. */
2345 insn_len = dec_prep_alu_m(dc, 1, memsize, t[0], t[1]);
2346 cris_cc_mask(dc, CC_MASK_NZVC);
2347 cris_alu(dc, CC_OP_SUB, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
2348 do_postinc(dc, memsize);
2349 cris_alu_m_free_temps(t);
2350 return insn_len;
2353 static int dec_movu_m(DisasContext *dc)
2355 TCGv t[2];
2356 int memsize = memsize_z(dc);
2357 int insn_len;
2359 LOG_DIS("movu.%c [$r%u%s, $r%u\n",
2360 memsize_char(memsize),
2361 dc->op1, dc->postinc ? "+]" : "]",
2362 dc->op2);
2364 cris_alu_m_alloc_temps(t);
2365 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2366 cris_cc_mask(dc, CC_MASK_NZ);
2367 cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
2368 do_postinc(dc, memsize);
2369 cris_alu_m_free_temps(t);
2370 return insn_len;
2373 static int dec_cmpu_m(DisasContext *dc)
2375 TCGv t[2];
2376 int memsize = memsize_z(dc);
2377 int insn_len;
2378 LOG_DIS("cmpu.%c [$r%u%s, $r%u\n",
2379 memsize_char(memsize),
2380 dc->op1, dc->postinc ? "+]" : "]",
2381 dc->op2);
2383 cris_alu_m_alloc_temps(t);
2384 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2385 cris_cc_mask(dc, CC_MASK_NZVC);
2386 cris_alu(dc, CC_OP_CMP, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
2387 do_postinc(dc, memsize);
2388 cris_alu_m_free_temps(t);
2389 return insn_len;
2392 static int dec_cmps_m(DisasContext *dc)
2394 TCGv t[2];
2395 int memsize = memsize_z(dc);
2396 int insn_len;
2397 LOG_DIS("cmps.%c [$r%u%s, $r%u\n",
2398 memsize_char(memsize),
2399 dc->op1, dc->postinc ? "+]" : "]",
2400 dc->op2);
2402 cris_alu_m_alloc_temps(t);
2403 insn_len = dec_prep_alu_m(dc, 1, memsize, t[0], t[1]);
2404 cris_cc_mask(dc, CC_MASK_NZVC);
2405 cris_alu(dc, CC_OP_CMP,
2406 cpu_R[dc->op2], cpu_R[dc->op2], t[1],
2407 memsize_zz(dc));
2408 do_postinc(dc, memsize);
2409 cris_alu_m_free_temps(t);
2410 return insn_len;
2413 static int dec_cmp_m(DisasContext *dc)
2415 TCGv t[2];
2416 int memsize = memsize_zz(dc);
2417 int insn_len;
2418 LOG_DIS("cmp.%c [$r%u%s, $r%u\n",
2419 memsize_char(memsize),
2420 dc->op1, dc->postinc ? "+]" : "]",
2421 dc->op2);
2423 cris_alu_m_alloc_temps(t);
2424 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2425 cris_cc_mask(dc, CC_MASK_NZVC);
2426 cris_alu(dc, CC_OP_CMP,
2427 cpu_R[dc->op2], cpu_R[dc->op2], t[1],
2428 memsize_zz(dc));
2429 do_postinc(dc, memsize);
2430 cris_alu_m_free_temps(t);
2431 return insn_len;
2434 static int dec_test_m(DisasContext *dc)
2436 TCGv t[2];
2437 int memsize = memsize_zz(dc);
2438 int insn_len;
2439 LOG_DIS("test.%c [$r%u%s] op2=%x\n",
2440 memsize_char(memsize),
2441 dc->op1, dc->postinc ? "+]" : "]",
2442 dc->op2);
2444 cris_evaluate_flags(dc);
2446 cris_alu_m_alloc_temps(t);
2447 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2448 cris_cc_mask(dc, CC_MASK_NZ);
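/* Clear the C and V flags (bits 0 and 1 of $ccs) before the compare against zero. */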
2449 tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~3);
2451 cris_alu(dc, CC_OP_CMP,
2452 cpu_R[dc->op2], t[1], tcg_const_tl(0), memsize_zz(dc));
2453 do_postinc(dc, memsize);
2454 cris_alu_m_free_temps(t);
2455 return insn_len;
2458 static int dec_and_m(DisasContext *dc)
2460 TCGv t[2];
2461 int memsize = memsize_zz(dc);
2462 int insn_len;
2463 LOG_DIS("and.%c [$r%u%s, $r%u\n",
2464 memsize_char(memsize),
2465 dc->op1, dc->postinc ? "+]" : "]",
2466 dc->op2);
2468 cris_alu_m_alloc_temps(t);
2469 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2470 cris_cc_mask(dc, CC_MASK_NZ);
2471 cris_alu(dc, CC_OP_AND, cpu_R[dc->op2], t[0], t[1], memsize_zz(dc));
2472 do_postinc(dc, memsize);
2473 cris_alu_m_free_temps(t);
2474 return insn_len;
2477 static int dec_add_m(DisasContext *dc)
2479 TCGv t[2];
2480 int memsize = memsize_zz(dc);
2481 int insn_len;
2482 LOG_DIS("add.%c [$r%u%s, $r%u\n",
2483 memsize_char(memsize),
2484 dc->op1, dc->postinc ? "+]" : "]",
2485 dc->op2);
2487 cris_alu_m_alloc_temps(t);
2488 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2489 cris_cc_mask(dc, CC_MASK_NZVC);
2490 cris_alu(dc, CC_OP_ADD,
2491 cpu_R[dc->op2], t[0], t[1], memsize_zz(dc));
2492 do_postinc(dc, memsize);
2493 cris_alu_m_free_temps(t);
2494 return insn_len;
2497 static int dec_addo_m(DisasContext *dc)
2499 TCGv t[2];
2500 int memsize = memsize_zz(dc);
2501 int insn_len;
2502 LOG_DIS("add.%c [$r%u%s, $r%u\n",
2503 memsize_char(memsize),
2504 dc->op1, dc->postinc ? "+]" : "]",
2505 dc->op2);
2507 cris_alu_m_alloc_temps(t);
2508 insn_len = dec_prep_alu_m(dc, 1, memsize, t[0], t[1]);
2509 cris_cc_mask(dc, 0);
2510 cris_alu(dc, CC_OP_ADD, cpu_R[R_ACR], t[0], t[1], 4);
2511 do_postinc(dc, memsize);
2512 cris_alu_m_free_temps(t);
2513 return insn_len;
2516 static int dec_bound_m(DisasContext *dc)
2518 TCGv l[2];
2519 int memsize = memsize_zz(dc);
2520 int insn_len;
2521 LOG_DIS("bound.%c [$r%u%s, $r%u\n",
2522 memsize_char(memsize),
2523 dc->op1, dc->postinc ? "+]" : "]",
2524 dc->op2);
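/* Local temps are used here since plain TCG temps do not survive branches;
   the bound computation presumably emits one internally. */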
2526 l[0] = tcg_temp_local_new();
2527 l[1] = tcg_temp_local_new();
2528 insn_len = dec_prep_alu_m(dc, 0, memsize, l[0], l[1]);
2529 cris_cc_mask(dc, CC_MASK_NZ);
2530 cris_alu(dc, CC_OP_BOUND, cpu_R[dc->op2], l[0], l[1], 4);
2531 do_postinc(dc, memsize);
2532 tcg_temp_free(l[0]);
2533 tcg_temp_free(l[1]);
2534 return insn_len;
2537 static int dec_addc_mr(DisasContext *dc)
2539 TCGv t[2];
2540 int insn_len = 2;
2541 LOG_DIS("addc [$r%u%s, $r%u\n",
2542 dc->op1, dc->postinc ? "+]" : "]",
2543 dc->op2);
2545 cris_evaluate_flags(dc);
2547 /* Set for this insn. */
2548 dc->flagx_known = 1;
2549 dc->flags_x = X_FLAG;
2551 cris_alu_m_alloc_temps(t);
2552 insn_len = dec_prep_alu_m(dc, 0, 4, t[0], t[1]);
2553 cris_cc_mask(dc, CC_MASK_NZVC);
2554 cris_alu(dc, CC_OP_ADDC, cpu_R[dc->op2], t[0], t[1], 4);
2555 do_postinc(dc, 4);
2556 cris_alu_m_free_temps(t);
2557 return insn_len;
2560 static int dec_sub_m(DisasContext *dc)
2562 TCGv t[2];
2563 int memsize = memsize_zz(dc);
2564 int insn_len;
2565 LOG_DIS("sub.%c [$r%u%s, $r%u ir=%x zz=%x\n",
2566 memsize_char(memsize),
2567 dc->op1, dc->postinc ? "+]" : "]",
2568 dc->op2, dc->ir, dc->zzsize);
2570 cris_alu_m_alloc_temps(t);
2571 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2572 cris_cc_mask(dc, CC_MASK_NZVC);
2573 cris_alu(dc, CC_OP_SUB, cpu_R[dc->op2], t[0], t[1], memsize);
2574 do_postinc(dc, memsize);
2575 cris_alu_m_free_temps(t);
2576 return insn_len;
2579 static int dec_or_m(DisasContext *dc)
2581 TCGv t[2];
2582 int memsize = memsize_zz(dc);
2583 int insn_len;
2584 LOG_DIS("or.%c [$r%u%s, $r%u pc=%x\n",
2585 memsize_char(memsize),
2586 dc->op1, dc->postinc ? "+]" : "]",
2587 dc->op2, dc->pc);
2589 cris_alu_m_alloc_temps(t);
2590 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2591 cris_cc_mask(dc, CC_MASK_NZ);
2592 cris_alu(dc, CC_OP_OR,
2593 cpu_R[dc->op2], t[0], t[1], memsize_zz(dc));
2594 do_postinc(dc, memsize);
2595 cris_alu_m_free_temps(t);
2596 return insn_len;
2599 static int dec_move_mp(DisasContext *dc)
2601 TCGv t[2];
2602 int memsize = memsize_zz(dc);
2603 int insn_len = 2;
2605 LOG_DIS("move.%c [$r%u%s, $p%u\n",
2606 memsize_char(memsize),
2607 dc->op1,
2608 dc->postinc ? "+]" : "]",
2609 dc->op2);
2611 cris_alu_m_alloc_temps(t);
2612 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2613 cris_cc_mask(dc, 0);
2614 if (dc->op2 == PR_CCS) {
2615 cris_evaluate_flags(dc);
2616 if (dc->tb_flags & U_FLAG) {
2617 /* User space is not allowed to touch all flags. */
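/* Keep only the user-writable bits from the loaded value and preserve
   the remaining bits from the current $ccs. */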
2618 tcg_gen_andi_tl(t[1], t[1], 0x39f);
2619 tcg_gen_andi_tl(t[0], cpu_PR[PR_CCS], ~0x39f);
2620 tcg_gen_or_tl(t[1], t[0], t[1]);
2624 t_gen_mov_preg_TN(dc, dc->op2, t[1]);
2626 do_postinc(dc, memsize);
2627 cris_alu_m_free_temps(t);
2628 return insn_len;
2631 static int dec_move_pm(DisasContext *dc)
2633 TCGv t0;
2634 int memsize;
2636 memsize = preg_sizes[dc->op2];
2638 LOG_DIS("move.%c $p%u, [$r%u%s\n",
2639 memsize_char(memsize),
2640 dc->op2, dc->op1, dc->postinc ? "+]" : "]");
2642 /* Prepare the store: address in cpu_R[op1], value in t0. */
2643 if (dc->op2 == PR_CCS)
2644 cris_evaluate_flags(dc);
2645 t0 = tcg_temp_new();
2646 t_gen_mov_TN_preg(t0, dc->op2);
2647 cris_flush_cc_state(dc);
2648 gen_store(dc, cpu_R[dc->op1], t0, memsize);
2649 tcg_temp_free(t0);
2651 cris_cc_mask(dc, 0);
2652 if (dc->postinc)
2653 tcg_gen_addi_tl(cpu_R[dc->op1], cpu_R[dc->op1], memsize);
2654 return 2;
2657 static int dec_movem_mr(DisasContext *dc)
2659 TCGv_i64 tmp[16];
2660 TCGv tmp32;
2661 TCGv addr;
2662 int i;
2663 int nr = dc->op2 + 1;
2665 LOG_DIS("movem [$r%u%s, $r%u\n", dc->op1,
2666 dc->postinc ? "+]" : "]", dc->op2);
2668 addr = tcg_temp_new();
2669 /* There are probably better ways of doing this. */
2670 cris_flush_cc_state(dc);
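/* Load the registers as 64-bit pairs and split them into the 32-bit
   destination registers below; an odd trailing register is fetched with
   a single 32-bit load. */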
2671 for (i = 0; i < (nr >> 1); i++) {
2672 tmp[i] = tcg_temp_new_i64();
2673 tcg_gen_addi_tl(addr, cpu_R[dc->op1], i * 8);
2674 gen_load64(dc, tmp[i], addr);
2676 if (nr & 1) {
2677 tmp32 = tcg_temp_new_i32();
2678 tcg_gen_addi_tl(addr, cpu_R[dc->op1], i * 8);
2679 gen_load(dc, tmp32, addr, 4, 0);
2680 } else
2681 TCGV_UNUSED(tmp32);
2682 tcg_temp_free(addr);
2684 for (i = 0; i < (nr >> 1); i++) {
2685 tcg_gen_trunc_i64_i32(cpu_R[i * 2], tmp[i]);
2686 tcg_gen_shri_i64(tmp[i], tmp[i], 32);
2687 tcg_gen_trunc_i64_i32(cpu_R[i * 2 + 1], tmp[i]);
2688 tcg_temp_free_i64(tmp[i]);
2690 if (nr & 1) {
2691 tcg_gen_mov_tl(cpu_R[dc->op2], tmp32);
2692 tcg_temp_free(tmp32);
2695 /* writeback the updated pointer value. */
2696 if (dc->postinc)
2697 tcg_gen_addi_tl(cpu_R[dc->op1], cpu_R[dc->op1], nr * 4);
2699 /* gen_load might want to evaluate the previous insn's flags. */
2700 cris_cc_mask(dc, 0);
2701 return 2;
2704 static int dec_movem_rm(DisasContext *dc)
2706 TCGv tmp;
2707 TCGv addr;
2708 int i;
2710 LOG_DIS("movem $r%u, [$r%u%s\n", dc->op2, dc->op1,
2711 dc->postinc ? "+]" : "]");
2713 cris_flush_cc_state(dc);
2715 tmp = tcg_temp_new();
2716 addr = tcg_temp_new();
2717 tcg_gen_movi_tl(tmp, 4);
2718 tcg_gen_mov_tl(addr, cpu_R[dc->op1]);
2719 for (i = 0; i <= dc->op2; i++) {
2720 /* Perform the store, then advance addr to the next word. */
2722 gen_store(dc, addr, cpu_R[i], 4);
2723 tcg_gen_add_tl(addr, addr, tmp);
2725 if (dc->postinc)
2726 tcg_gen_mov_tl(cpu_R[dc->op1], addr);
2727 cris_cc_mask(dc, 0);
2728 tcg_temp_free(tmp);
2729 tcg_temp_free(addr);
2730 return 2;
2733 static int dec_move_rm(DisasContext *dc)
2735 int memsize;
2737 memsize = memsize_zz(dc);
2739 LOG_DIS("move.%c $r%u, [$r%u]\n",
2740 memsize_char(memsize), dc->op2, dc->op1);
2742 /* prepare store. */
2743 cris_flush_cc_state(dc);
2744 gen_store(dc, cpu_R[dc->op1], cpu_R[dc->op2], memsize);
2746 if (dc->postinc)
2747 tcg_gen_addi_tl(cpu_R[dc->op1], cpu_R[dc->op1], memsize);
2748 cris_cc_mask(dc, 0);
2749 return 2;
2752 static int dec_lapcq(DisasContext *dc)
2754 LOG_DIS("lapcq %x, $r%u\n",
2755 dc->pc + dc->op1*2, dc->op2);
2756 cris_cc_mask(dc, 0);
2757 tcg_gen_movi_tl(cpu_R[dc->op2], dc->pc + dc->op1 * 2);
2758 return 2;
2761 static int dec_lapc_im(DisasContext *dc)
2763 unsigned int rd;
2764 int32_t imm;
2765 int32_t pc;
2767 rd = dc->op2;
2769 cris_cc_mask(dc, 0);
2770 imm = cris_fetch(dc, dc->pc + 2, 4, 0);
2771 LOG_DIS("lapc 0x%x, $r%u\n", imm + dc->pc, dc->op2);
2773 pc = dc->pc;
2774 pc += imm;
2775 tcg_gen_movi_tl(cpu_R[rd], pc);
2776 return 6;
2779 /* Jump to special reg. */
2780 static int dec_jump_p(DisasContext *dc)
2782 LOG_DIS("jump $p%u\n", dc->op2);
2784 if (dc->op2 == PR_CCS)
2785 cris_evaluate_flags(dc);
2786 t_gen_mov_TN_preg(env_btarget, dc->op2);
2787 /* rete will often have the low bit set to indicate a delay slot. */
2788 tcg_gen_andi_tl(env_btarget, env_btarget, ~1);
2789 cris_cc_mask(dc, 0);
2790 cris_prepare_jmp(dc, JMP_INDIRECT);
2791 return 2;
2794 /* Jump and save. */
2795 static int dec_jas_r(DisasContext *dc)
2797 LOG_DIS("jas $r%u, $p%u\n", dc->op1, dc->op2);
2798 cris_cc_mask(dc, 0);
2799 /* Store the return address in Pd. */
2800 tcg_gen_mov_tl(env_btarget, cpu_R[dc->op1]);
2801 if (dc->op2 > 15)
2802 abort();
2803 t_gen_mov_preg_TN(dc, dc->op2, tcg_const_tl(dc->pc + 4));
2805 cris_prepare_jmp(dc, JMP_INDIRECT);
2806 return 2;
2809 static int dec_jas_im(DisasContext *dc)
2811 uint32_t imm;
2813 imm = cris_fetch(dc, dc->pc + 2, 4, 0);
2815 LOG_DIS("jas 0x%x\n", imm);
2816 cris_cc_mask(dc, 0);
2817 /* Store the return address in Pd. */
2818 t_gen_mov_preg_TN(dc, dc->op2, tcg_const_tl(dc->pc + 8));
2820 dc->jmp_pc = imm;
2821 cris_prepare_jmp(dc, JMP_DIRECT);
2822 return 6;
2825 static int dec_jasc_im(DisasContext *dc)
2827 uint32_t imm;
2829 imm = cris_fetch(dc, dc->pc + 2, 4, 0);
2831 LOG_DIS("jasc 0x%x\n", imm);
2832 cris_cc_mask(dc, 0);
2833 /* Store the return address in Pd. */
2834 t_gen_mov_preg_TN(dc, dc->op2, tcg_const_tl(dc->pc + 8 + 4));
2836 dc->jmp_pc = imm;
2837 cris_prepare_jmp(dc, JMP_DIRECT);
2838 return 6;
2841 static int dec_jasc_r(DisasContext *dc)
2843 LOG_DIS("jasc_r $r%u, $p%u\n", dc->op1, dc->op2);
2844 cris_cc_mask(dc, 0);
2845 /* Store the return address in Pd. */
2846 tcg_gen_mov_tl(env_btarget, cpu_R[dc->op1]);
2847 t_gen_mov_preg_TN(dc, dc->op2, tcg_const_tl(dc->pc + 4 + 4));
2848 cris_prepare_jmp(dc, JMP_INDIRECT);
2849 return 2;
2852 static int dec_bcc_im(DisasContext *dc)
2854 int32_t offset;
2855 uint32_t cond = dc->op2;
2857 offset = cris_fetch(dc, dc->pc + 2, 2, 1);
2859 LOG_DIS("b%s %d pc=%x dst=%x\n",
2860 cc_name(cond), offset,
2861 dc->pc, dc->pc + offset);
2863 cris_cc_mask(dc, 0);
2864 /* op2 holds the condition-code. */
2865 cris_prepare_cc_branch (dc, offset, cond);
2866 return 4;
2869 static int dec_bas_im(DisasContext *dc)
2871 int32_t simm;
2874 simm = cris_fetch(dc, dc->pc + 2, 4, 0);
2876 LOG_DIS("bas 0x%x, $p%u\n", dc->pc + simm, dc->op2);
2877 cris_cc_mask(dc, 0);
2878 /* Store the return address in Pd. */
2879 t_gen_mov_preg_TN(dc, dc->op2, tcg_const_tl(dc->pc + 8));
2881 dc->jmp_pc = dc->pc + simm;
2882 cris_prepare_jmp(dc, JMP_DIRECT);
2883 return 6;
2886 static int dec_basc_im(DisasContext *dc)
2888 int32_t simm;
2889 simm = cris_fetch(dc, dc->pc + 2, 4, 0);
2891 LOG_DIS("basc 0x%x, $p%u\n", dc->pc + simm, dc->op2);
2892 cris_cc_mask(dc, 0);
2893 /* Store the return address in Pd. */
2894 t_gen_mov_preg_TN(dc, dc->op2, tcg_const_tl(dc->pc + 12));
2896 dc->jmp_pc = dc->pc + simm;
2897 cris_prepare_jmp(dc, JMP_DIRECT);
2898 return 6;
2901 static int dec_rfe_etc(DisasContext *dc)
2903 cris_cc_mask(dc, 0);
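/* op2 == 15 is treated as halt: mark the CPU halted and raise EXCP_HLT
   so the vCPU stops running until it is woken up again. */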
2905 if (dc->op2 == 15) {
2906 t_gen_mov_env_TN(halted, tcg_const_tl(1));
2907 tcg_gen_movi_tl(env_pc, dc->pc + 2);
2908 t_gen_raise_exception(EXCP_HLT);
2909 return 2;
2912 switch (dc->op2 & 7) {
2913 case 2:
2914 /* rfe. */
2915 LOG_DIS("rfe\n");
2916 cris_evaluate_flags(dc);
2917 gen_helper_rfe();
2918 dc->is_jmp = DISAS_UPDATE;
2919 break;
2920 case 5:
2921 /* rfn. */
2922 LOG_DIS("rfn\n");
2923 cris_evaluate_flags(dc);
2924 gen_helper_rfn();
2925 dc->is_jmp = DISAS_UPDATE;
2926 break;
2927 case 6:
2928 LOG_DIS("break %d\n", dc->op1);
2929 cris_evaluate_flags (dc);
2930 /* break. */
2931 tcg_gen_movi_tl(env_pc, dc->pc + 2);
2933 /* Breaks start at 16 in the exception vector. */
2934 t_gen_mov_env_TN(trap_vector,
2935 tcg_const_tl(dc->op1 + 16));
2936 t_gen_raise_exception(EXCP_BREAK);
2937 dc->is_jmp = DISAS_UPDATE;
2938 break;
2939 default:
2940 printf ("op2=%x\n", dc->op2);
2941 BUG();
2942 break;
2945 return 2;
2948 static int dec_ftag_fidx_d_m(DisasContext *dc)
2950 return 2;
2953 static int dec_ftag_fidx_i_m(DisasContext *dc)
2955 return 2;
2958 static int dec_null(DisasContext *dc)
2960 printf ("unknown insn pc=%x opc=%x op1=%x op2=%x\n",
2961 dc->pc, dc->opcode, dc->op1, dc->op2);
2962 fflush(NULL);
2963 BUG();
2964 return 2;
2967 static struct decoder_info {
2968 struct {
2969 uint32_t bits;
2970 uint32_t mask;
2972 int (*dec)(DisasContext *dc);
2973 } decinfo[] = {
2974 /* Order matters here. */
2975 {DEC_MOVEQ, dec_moveq},
2976 {DEC_BTSTQ, dec_btstq},
2977 {DEC_CMPQ, dec_cmpq},
2978 {DEC_ADDOQ, dec_addoq},
2979 {DEC_ADDQ, dec_addq},
2980 {DEC_SUBQ, dec_subq},
2981 {DEC_ANDQ, dec_andq},
2982 {DEC_ORQ, dec_orq},
2983 {DEC_ASRQ, dec_asrq},
2984 {DEC_LSLQ, dec_lslq},
2985 {DEC_LSRQ, dec_lsrq},
2986 {DEC_BCCQ, dec_bccq},
2988 {DEC_BCC_IM, dec_bcc_im},
2989 {DEC_JAS_IM, dec_jas_im},
2990 {DEC_JAS_R, dec_jas_r},
2991 {DEC_JASC_IM, dec_jasc_im},
2992 {DEC_JASC_R, dec_jasc_r},
2993 {DEC_BAS_IM, dec_bas_im},
2994 {DEC_BASC_IM, dec_basc_im},
2995 {DEC_JUMP_P, dec_jump_p},
2996 {DEC_LAPC_IM, dec_lapc_im},
2997 {DEC_LAPCQ, dec_lapcq},
2999 {DEC_RFE_ETC, dec_rfe_etc},
3000 {DEC_ADDC_MR, dec_addc_mr},
3002 {DEC_MOVE_MP, dec_move_mp},
3003 {DEC_MOVE_PM, dec_move_pm},
3004 {DEC_MOVEM_MR, dec_movem_mr},
3005 {DEC_MOVEM_RM, dec_movem_rm},
3006 {DEC_MOVE_PR, dec_move_pr},
3007 {DEC_SCC_R, dec_scc_r},
3008 {DEC_SETF, dec_setclrf},
3009 {DEC_CLEARF, dec_setclrf},
3011 {DEC_MOVE_SR, dec_move_sr},
3012 {DEC_MOVE_RP, dec_move_rp},
3013 {DEC_SWAP_R, dec_swap_r},
3014 {DEC_ABS_R, dec_abs_r},
3015 {DEC_LZ_R, dec_lz_r},
3016 {DEC_MOVE_RS, dec_move_rs},
3017 {DEC_BTST_R, dec_btst_r},
3018 {DEC_ADDC_R, dec_addc_r},
3020 {DEC_DSTEP_R, dec_dstep_r},
3021 {DEC_XOR_R, dec_xor_r},
3022 {DEC_MCP_R, dec_mcp_r},
3023 {DEC_CMP_R, dec_cmp_r},
3025 {DEC_ADDI_R, dec_addi_r},
3026 {DEC_ADDI_ACR, dec_addi_acr},
3028 {DEC_ADD_R, dec_add_r},
3029 {DEC_SUB_R, dec_sub_r},
3031 {DEC_ADDU_R, dec_addu_r},
3032 {DEC_ADDS_R, dec_adds_r},
3033 {DEC_SUBU_R, dec_subu_r},
3034 {DEC_SUBS_R, dec_subs_r},
3035 {DEC_LSL_R, dec_lsl_r},
3037 {DEC_AND_R, dec_and_r},
3038 {DEC_OR_R, dec_or_r},
3039 {DEC_BOUND_R, dec_bound_r},
3040 {DEC_ASR_R, dec_asr_r},
3041 {DEC_LSR_R, dec_lsr_r},
3043 {DEC_MOVU_R, dec_movu_r},
3044 {DEC_MOVS_R, dec_movs_r},
3045 {DEC_NEG_R, dec_neg_r},
3046 {DEC_MOVE_R, dec_move_r},
3048 {DEC_FTAG_FIDX_I_M, dec_ftag_fidx_i_m},
3049 {DEC_FTAG_FIDX_D_M, dec_ftag_fidx_d_m},
3051 {DEC_MULS_R, dec_muls_r},
3052 {DEC_MULU_R, dec_mulu_r},
3054 {DEC_ADDU_M, dec_addu_m},
3055 {DEC_ADDS_M, dec_adds_m},
3056 {DEC_SUBU_M, dec_subu_m},
3057 {DEC_SUBS_M, dec_subs_m},
3059 {DEC_CMPU_M, dec_cmpu_m},
3060 {DEC_CMPS_M, dec_cmps_m},
3061 {DEC_MOVU_M, dec_movu_m},
3062 {DEC_MOVS_M, dec_movs_m},
3064 {DEC_CMP_M, dec_cmp_m},
3065 {DEC_ADDO_M, dec_addo_m},
3066 {DEC_BOUND_M, dec_bound_m},
3067 {DEC_ADD_M, dec_add_m},
3068 {DEC_SUB_M, dec_sub_m},
3069 {DEC_AND_M, dec_and_m},
3070 {DEC_OR_M, dec_or_m},
3071 {DEC_MOVE_RM, dec_move_rm},
3072 {DEC_TEST_M, dec_test_m},
3073 {DEC_MOVE_MR, dec_move_mr},
3075 {{0, 0}, dec_null}
3078 static unsigned int crisv32_decoder(DisasContext *dc)
3080 int insn_len = 2;
3081 int i;
3083 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)))
3084 tcg_gen_debug_insn_start(dc->pc);
3086 /* Load a halfword into the instruction register. */
3087 dc->ir = cris_fetch(dc, dc->pc, 2, 0);
3089 /* Now decode it. */
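/* op1 lives in bits [3:0], the opcode in bits [11:4] and op2 in bits [15:12];
   the size and post-increment fields below overlap the opcode field. */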
3090 dc->opcode = EXTRACT_FIELD(dc->ir, 4, 11);
3091 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 3);
3092 dc->op2 = EXTRACT_FIELD(dc->ir, 12, 15);
3093 dc->zsize = EXTRACT_FIELD(dc->ir, 4, 4);
3094 dc->zzsize = EXTRACT_FIELD(dc->ir, 4, 5);
3095 dc->postinc = EXTRACT_FIELD(dc->ir, 10, 10);
3097 /* Look the insn up in the decoder table. */
3098 for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
3099 if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits)
3101 insn_len = decinfo[i].dec(dc);
3102 break;
3106 #if !defined(CONFIG_USER_ONLY)
3107 /* Single-stepping ? */
3108 if (dc->tb_flags & S_FLAG) {
3109 int l1;
3111 l1 = gen_new_label();
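/* Only trap when $spc matches the pc of the insn just translated. */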
3112 tcg_gen_brcondi_tl(TCG_COND_NE, cpu_PR[PR_SPC], dc->pc, l1);
3113 /* We treat SPC as a break with an odd trap vector. */
3114 cris_evaluate_flags (dc);
3115 t_gen_mov_env_TN(trap_vector, tcg_const_tl(3));
3116 tcg_gen_movi_tl(env_pc, dc->pc + insn_len);
3117 tcg_gen_movi_tl(cpu_PR[PR_SPC], dc->pc + insn_len);
3118 t_gen_raise_exception(EXCP_BREAK);
3119 gen_set_label(l1);
3121 #endif
3122 return insn_len;
3125 static void check_breakpoint(CPUState *env, DisasContext *dc)
3127 CPUBreakpoint *bp;
3129 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
3130 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
3131 if (bp->pc == dc->pc) {
3132 cris_evaluate_flags (dc);
3133 tcg_gen_movi_tl(env_pc, dc->pc);
3134 t_gen_raise_exception(EXCP_DEBUG);
3135 dc->is_jmp = DISAS_UPDATE;
3141 #include "translate_v10.c"
3144 /* Delay slots on QEMU/CRIS.
3146 * If an exception hits on a delayslot, the core will let ERP (the Exception
3147 * Return Pointer) point to the branch (the previous) insn and set the lsb
3148 * to give SW a hint that the exception actually hit on the dslot.
3150 * CRIS expects all PC addresses to be 16-bit aligned. The lsb is ignored by
3151 * the core and any jmp to an odd address will mask off that lsb. It is
3152 * simply there to let sw know there was an exception on a dslot.
3154 * When the software returns from an exception, the branch will re-execute.
3155 * On QEMU care needs to be taken when a branch+delayslot sequence is broken
3156 * and the branch and delayslot don't share pages.
3158 * The TB containing the branch insn will set up env->btarget and evaluate
3159 * env->btaken. When the translation loop exits we will note that the branch
3160 * sequence is broken and let env->dslot be the size of the branch insn (those
3161 * vary in length).
3163 * The TB containing the delayslot will have the PC of its real insn (i.e. no lsb
3164 * set). It will also expect to have env->dslot set up with the size of the
3165 * delay slot so that env->pc - env->dslot points to the branch insn. This TB
3166 * will execute the dslot and take the branch, either to btarget or just one
3167 * insn ahead.
3169 * When exceptions occur, we check for env->dslot in do_interrupt to detect
3170 * broken branch sequences and set up $erp accordingly (i.e. let it point to the
3171 * branch and set lsb). Then env->dslot gets cleared so that the exception
3172 * handler can enter. When returning from exceptions (jump $erp) the lsb gets
3173 * masked off and we will re-execute the branch insn. */
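/*
 * Illustrative sketch (the addresses and insns below are hypothetical, not
 * taken from real guest code): a branch ends one page and its delay slot
 * starts the next, so the two land in different TBs.
 *
 *   0x1ffc:  ba  target   ; branch TB: sets env->btarget/env->btaken and,
 *                         ; since the sequence is broken, env->dslot gets
 *                         ; the branch insn's size on exit
 *   0x2000:  nop          ; delay-slot TB: executes the dslot, then jumps
 *                         ; to btarget or falls through
 *
 * If an exception is taken at 0x2000, do_interrupt sees env->dslot != 0,
 * sets $erp = 0x1ffc | 1 and clears env->dslot before entering the handler.
 */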
3177 /* generate intermediate code for basic block 'tb'. */
3178 static void
3179 gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb,
3180 int search_pc)
3182 uint16_t *gen_opc_end;
3183 uint32_t pc_start;
3184 unsigned int insn_len;
3185 int j, lj;
3186 struct DisasContext ctx;
3187 struct DisasContext *dc = &ctx;
3188 uint32_t next_page_start;
3189 target_ulong npc;
3190 int num_insns;
3191 int max_insns;
3193 qemu_log_try_set_file(stderr);
3195 if (env->pregs[PR_VR] == 32) {
3196 dc->decoder = crisv32_decoder;
3197 dc->clear_locked_irq = 0;
3198 } else {
3199 dc->decoder = crisv10_decoder;
3200 dc->clear_locked_irq = 1;
3203 /* Odd PC indicates that the branch is re-executing due to an exception in the
3204 * delay slot, like in real hw. */
3206 pc_start = tb->pc & ~1;
3207 dc->env = env;
3208 dc->tb = tb;
3210 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
3212 dc->is_jmp = DISAS_NEXT;
3213 dc->ppc = pc_start;
3214 dc->pc = pc_start;
3215 dc->singlestep_enabled = env->singlestep_enabled;
3216 dc->flags_uptodate = 1;
3217 dc->flagx_known = 1;
3218 dc->flags_x = tb->flags & X_FLAG;
3219 dc->cc_x_uptodate = 0;
3220 dc->cc_mask = 0;
3221 dc->update_cc = 0;
3222 dc->clear_prefix = 0;
3224 cris_update_cc_op(dc, CC_OP_FLAGS, 4);
3225 dc->cc_size_uptodate = -1;
3227 /* Decode TB flags. */
3228 dc->tb_flags = tb->flags & (S_FLAG | P_FLAG | U_FLAG \
3229 | X_FLAG | PFIX_FLAG);
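/* The low bits of tb->flags mirror env->dslot: non-zero means this TB
   starts inside a pending delay slot. */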
3230 dc->delayed_branch = !!(tb->flags & 7);
3231 if (dc->delayed_branch)
3232 dc->jmp = JMP_INDIRECT;
3233 else
3234 dc->jmp = JMP_NOJMP;
3236 dc->cpustate_changed = 0;
3238 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
3239 qemu_log(
3240 "srch=%d pc=%x %x flg=%" PRIx64 " bt=%x ds=%u ccs=%x\n"
3241 "pid=%x usp=%x\n"
3242 "%x.%x.%x.%x\n"
3243 "%x.%x.%x.%x\n"
3244 "%x.%x.%x.%x\n"
3245 "%x.%x.%x.%x\n",
3246 search_pc, dc->pc, dc->ppc,
3247 (uint64_t)tb->flags,
3248 env->btarget, (unsigned)tb->flags & 7,
3249 env->pregs[PR_CCS],
3250 env->pregs[PR_PID], env->pregs[PR_USP],
3251 env->regs[0], env->regs[1], env->regs[2], env->regs[3],
3252 env->regs[4], env->regs[5], env->regs[6], env->regs[7],
3253 env->regs[8], env->regs[9],
3254 env->regs[10], env->regs[11],
3255 env->regs[12], env->regs[13],
3256 env->regs[14], env->regs[15]);
3257 qemu_log("--------------\n");
3258 qemu_log("IN: %s\n", lookup_symbol(pc_start));
3261 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
3262 lj = -1;
3263 num_insns = 0;
3264 max_insns = tb->cflags & CF_COUNT_MASK;
3265 if (max_insns == 0)
3266 max_insns = CF_COUNT_MASK;
3268 gen_icount_start();
3271 check_breakpoint(env, dc);
3273 if (search_pc) {
3274 j = gen_opc_ptr - gen_opc_buf;
3275 if (lj < j) {
3276 lj++;
3277 while (lj < j)
3278 gen_opc_instr_start[lj++] = 0;
3280 if (dc->delayed_branch == 1)
3281 gen_opc_pc[lj] = dc->ppc | 1;
3282 else
3283 gen_opc_pc[lj] = dc->pc;
3284 gen_opc_instr_start[lj] = 1;
3285 gen_opc_icount[lj] = num_insns;
3288 /* Pretty disas. */
3289 LOG_DIS("%8.8x:\t", dc->pc);
3291 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
3292 gen_io_start();
3293 dc->clear_x = 1;
3295 insn_len = dc->decoder(dc);
3296 dc->ppc = dc->pc;
3297 dc->pc += insn_len;
3298 if (dc->clear_x)
3299 cris_clear_x_flag(dc);
3301 num_insns++;
3302 /* Check for delayed branches here. If we do it before
3303 actually generating any host code, the simulator will just
3304 loop doing nothing on this program location. */
3305 if (dc->delayed_branch) {
3306 dc->delayed_branch--;
3307 if (dc->delayed_branch == 0)
3309 if (tb->flags & 7)
3310 t_gen_mov_env_TN(dslot,
3311 tcg_const_tl(0));
3312 if (dc->cpustate_changed || !dc->flagx_known
3313 || (dc->flags_x != (tb->flags & X_FLAG))) {
3314 cris_store_direct_jmp(dc);
3317 if (dc->clear_locked_irq) {
3318 dc->clear_locked_irq = 0;
3319 t_gen_mov_env_TN(locked_irq,
3320 tcg_const_tl(0));
3323 if (dc->jmp == JMP_DIRECT_CC) {
3324 int l1;
3326 l1 = gen_new_label();
3327 cris_evaluate_flags(dc);
3329 /* Conditional jmp. */
3330 tcg_gen_brcondi_tl(TCG_COND_EQ,
3331 env_btaken, 0, l1);
3332 gen_goto_tb(dc, 1, dc->jmp_pc);
3333 gen_set_label(l1);
3334 gen_goto_tb(dc, 0, dc->pc);
3335 dc->is_jmp = DISAS_TB_JUMP;
3336 dc->jmp = JMP_NOJMP;
3337 } else if (dc->jmp == JMP_DIRECT) {
3338 cris_evaluate_flags(dc);
3339 gen_goto_tb(dc, 0, dc->jmp_pc);
3340 dc->is_jmp = DISAS_TB_JUMP;
3341 dc->jmp = JMP_NOJMP;
3342 } else {
3343 t_gen_cc_jmp(env_btarget,
3344 tcg_const_tl(dc->pc));
3345 dc->is_jmp = DISAS_JUMP;
3347 break;
3351 /* If we are re-executing a branch due to exceptions on
3352 delay slots, don't break. */
3353 if (!(tb->pc & 1) && env->singlestep_enabled)
3354 break;
3355 } while (!dc->is_jmp && !dc->cpustate_changed
3356 && gen_opc_ptr < gen_opc_end
3357 && !singlestep
3358 && (dc->pc < next_page_start)
3359 && num_insns < max_insns);
3361 if (dc->clear_locked_irq)
3362 t_gen_mov_env_TN(locked_irq, tcg_const_tl(0));
3364 npc = dc->pc;
3366 if (tb->cflags & CF_LAST_IO)
3367 gen_io_end();
3368 /* Force an update if the per-tb cpu state has changed. */
3369 if (dc->is_jmp == DISAS_NEXT
3370 && (dc->cpustate_changed || !dc->flagx_known
3371 || (dc->flags_x != (tb->flags & X_FLAG)))) {
3372 dc->is_jmp = DISAS_UPDATE;
3373 tcg_gen_movi_tl(env_pc, npc);
3375 /* Broken branch+delayslot sequence. */
3376 if (dc->delayed_branch == 1) {
3377 /* Set env->dslot to the size of the branch insn. */
3378 t_gen_mov_env_TN(dslot, tcg_const_tl(dc->pc - dc->ppc));
3379 cris_store_direct_jmp(dc);
3382 cris_evaluate_flags (dc);
3384 if (unlikely(env->singlestep_enabled)) {
3385 if (dc->is_jmp == DISAS_NEXT)
3386 tcg_gen_movi_tl(env_pc, npc);
3387 t_gen_raise_exception(EXCP_DEBUG);
3388 } else {
3389 switch(dc->is_jmp) {
3390 case DISAS_NEXT:
3391 gen_goto_tb(dc, 1, npc);
3392 break;
3393 default:
3394 case DISAS_JUMP:
3395 case DISAS_UPDATE:
3396 /* indicate that the hash table must be used
3397 to find the next TB */
3398 tcg_gen_exit_tb(0);
3399 break;
3400 case DISAS_SWI:
3401 case DISAS_TB_JUMP:
3402 /* nothing more to generate */
3403 break;
3406 gen_icount_end(tb, num_insns);
3407 *gen_opc_ptr = INDEX_op_end;
3408 if (search_pc) {
3409 j = gen_opc_ptr - gen_opc_buf;
3410 lj++;
3411 while (lj <= j)
3412 gen_opc_instr_start[lj++] = 0;
3413 } else {
3414 tb->size = dc->pc - pc_start;
3415 tb->icount = num_insns;
3418 #ifdef DEBUG_DISAS
3419 #if !DISAS_CRIS
3420 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
3421 log_target_disas(pc_start, dc->pc - pc_start,
3422 dc->env->pregs[PR_VR]);
3423 qemu_log("\nisize=%d osize=%td\n",
3424 dc->pc - pc_start, gen_opc_ptr - gen_opc_buf);
3426 #endif
3427 #endif
3430 void gen_intermediate_code (CPUState *env, struct TranslationBlock *tb)
3432 gen_intermediate_code_internal(env, tb, 0);
3435 void gen_intermediate_code_pc (CPUState *env, struct TranslationBlock *tb)
3437 gen_intermediate_code_internal(env, tb, 1);
3440 void cpu_dump_state (CPUState *env, FILE *f, fprintf_function cpu_fprintf,
3441 int flags)
3443 int i;
3444 uint32_t srs;
3446 if (!env || !f)
3447 return;
3449 cpu_fprintf(f, "PC=%x CCS=%x btaken=%d btarget=%x\n"
3450 "cc_op=%d cc_src=%d cc_dest=%d cc_result=%x cc_mask=%x\n",
3451 env->pc, env->pregs[PR_CCS], env->btaken, env->btarget,
3452 env->cc_op,
3453 env->cc_src, env->cc_dest, env->cc_result, env->cc_mask);
3456 for (i = 0; i < 16; i++) {
3457 cpu_fprintf(f, "%s=%8.8x ",regnames[i], env->regs[i]);
3458 if ((i + 1) % 4 == 0)
3459 cpu_fprintf(f, "\n");
3461 cpu_fprintf(f, "\nspecial regs:\n");
3462 for (i = 0; i < 16; i++) {
3463 cpu_fprintf(f, "%s=%8.8x ", pregnames[i], env->pregs[i]);
3464 if ((i + 1) % 4 == 0)
3465 cpu_fprintf(f, "\n");
3467 srs = env->pregs[PR_SRS];
3468 cpu_fprintf(f, "\nsupport function regs bank %x:\n", srs);
3469 if (srs < 256) {
3470 for (i = 0; i < 16; i++) {
3471 cpu_fprintf(f, "s%2.2d=%8.8x ",
3472 i, env->sregs[srs][i]);
3473 if ((i + 1) % 4 == 0)
3474 cpu_fprintf(f, "\n");
3477 cpu_fprintf(f, "\n\n");
3481 struct
3483 uint32_t vr;
3484 const char *name;
3485 } cris_cores[] = {
3486 {8, "crisv8"},
3487 {9, "crisv9"},
3488 {10, "crisv10"},
3489 {11, "crisv11"},
3490 {32, "crisv32"},
3493 void cris_cpu_list(FILE *f, fprintf_function cpu_fprintf)
3495 unsigned int i;
3497 (*cpu_fprintf)(f, "Available CPUs:\n");
3498 for (i = 0; i < ARRAY_SIZE(cris_cores); i++) {
3499 (*cpu_fprintf)(f, " %s\n", cris_cores[i].name);
3503 static uint32_t vr_by_name(const char *name)
3505 unsigned int i;
3506 for (i = 0; i < ARRAY_SIZE(cris_cores); i++) {
3507 if (strcmp(name, cris_cores[i].name) == 0) {
3508 return cris_cores[i].vr;
3511 return 32;
3514 CPUCRISState *cpu_cris_init (const char *cpu_model)
3516 CPUCRISState *env;
3517 static int tcg_initialized = 0;
3518 int i;
3520 env = qemu_mallocz(sizeof(CPUCRISState));
3522 env->pregs[PR_VR] = vr_by_name(cpu_model);
3523 cpu_exec_init(env);
3524 cpu_reset(env);
3525 qemu_init_vcpu(env);
3527 if (tcg_initialized)
3528 return env;
3530 tcg_initialized = 1;
3532 #define GEN_HELPER 2
3533 #include "helper.h"
3535 if (env->pregs[PR_VR] < 32) {
3536 cpu_crisv10_init(env);
3537 return env;
3541 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
3542 cc_x = tcg_global_mem_new(TCG_AREG0,
3543 offsetof(CPUState, cc_x), "cc_x");
3544 cc_src = tcg_global_mem_new(TCG_AREG0,
3545 offsetof(CPUState, cc_src), "cc_src");
3546 cc_dest = tcg_global_mem_new(TCG_AREG0,
3547 offsetof(CPUState, cc_dest),
3548 "cc_dest");
3549 cc_result = tcg_global_mem_new(TCG_AREG0,
3550 offsetof(CPUState, cc_result),
3551 "cc_result");
3552 cc_op = tcg_global_mem_new(TCG_AREG0,
3553 offsetof(CPUState, cc_op), "cc_op");
3554 cc_size = tcg_global_mem_new(TCG_AREG0,
3555 offsetof(CPUState, cc_size),
3556 "cc_size");
3557 cc_mask = tcg_global_mem_new(TCG_AREG0,
3558 offsetof(CPUState, cc_mask),
3559 "cc_mask");
3561 env_pc = tcg_global_mem_new(TCG_AREG0,
3562 offsetof(CPUState, pc),
3563 "pc");
3564 env_btarget = tcg_global_mem_new(TCG_AREG0,
3565 offsetof(CPUState, btarget),
3566 "btarget");
3567 env_btaken = tcg_global_mem_new(TCG_AREG0,
3568 offsetof(CPUState, btaken),
3569 "btaken");
3570 for (i = 0; i < 16; i++) {
3571 cpu_R[i] = tcg_global_mem_new(TCG_AREG0,
3572 offsetof(CPUState, regs[i]),
3573 regnames[i]);
3575 for (i = 0; i < 16; i++) {
3576 cpu_PR[i] = tcg_global_mem_new(TCG_AREG0,
3577 offsetof(CPUState, pregs[i]),
3578 pregnames[i]);
3581 return env;
3584 void cpu_reset (CPUCRISState *env)
3586 uint32_t vr;
3588 if (qemu_loglevel_mask(CPU_LOG_RESET)) {
3589 qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
3590 log_cpu_state(env, 0);
3593 vr = env->pregs[PR_VR];
3594 memset(env, 0, offsetof(CPUCRISState, breakpoints));
3595 env->pregs[PR_VR] = vr;
3596 tlb_flush(env, 1);
3598 #if defined(CONFIG_USER_ONLY)
3599 /* start in user mode with interrupts enabled. */
3600 env->pregs[PR_CCS] |= U_FLAG | I_FLAG | P_FLAG;
3601 #else
3602 cris_mmu_init(env);
3603 env->pregs[PR_CCS] = 0;
3604 #endif
3607 void restore_state_to_opc(CPUState *env, TranslationBlock *tb, int pc_pos)
3609 env->pc = gen_opc_pc[pc_pos];