[qemu/agraf.git] / target-cris / translate.c
1 /*
2 * CRIS emulation for qemu: main translation routines.
4 * Copyright (c) 2008 AXIS Communications AB
5 * Written by Edgar E. Iglesias.
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
22 * FIXME:
23 * The condition code translation is in need of attention.
26 #include <stdarg.h>
27 #include <stdlib.h>
28 #include <stdio.h>
29 #include <string.h>
30 #include <inttypes.h>
32 #include "cpu.h"
33 #include "exec-all.h"
34 #include "disas.h"
35 #include "tcg-op.h"
36 #include "helper.h"
37 #include "mmu.h"
38 #include "crisv32-decode.h"
39 #include "qemu-common.h"
41 #define GEN_HELPER 1
42 #include "helper.h"
44 #define DISAS_CRIS 0
45 #if DISAS_CRIS
46 # define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
47 #else
48 # define LOG_DIS(...) do { } while (0)
49 #endif
51 #define D(x)
52 #define BUG() (gen_BUG(dc, __FILE__, __LINE__))
53 #define BUG_ON(x) ({if (x) BUG();})
55 #define DISAS_SWI 5
57 /* Used by the decoder. */
58 #define EXTRACT_FIELD(src, start, end) \
59 (((src) >> start) & ((1 << (end - start + 1)) - 1))
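/* E.g. EXTRACT_FIELD(0xABCD, 4, 7) extracts bits 7..4 and yields 0xC. */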
61 #define CC_MASK_NZ 0xc
62 #define CC_MASK_NZV 0xe
63 #define CC_MASK_NZVC 0xf
64 #define CC_MASK_RNZV 0x10e
66 static TCGv_ptr cpu_env;
67 static TCGv cpu_R[16];
68 static TCGv cpu_PR[16];
69 static TCGv cc_x;
70 static TCGv cc_src;
71 static TCGv cc_dest;
72 static TCGv cc_result;
73 static TCGv cc_op;
74 static TCGv cc_size;
75 static TCGv cc_mask;
77 static TCGv env_btaken;
78 static TCGv env_btarget;
79 static TCGv env_pc;
81 #include "gen-icount.h"
83 /* This is the state at translation time. */
84 typedef struct DisasContext {
85 CPUState *env;
86 target_ulong pc, ppc;
88 /* Decoder. */
89 unsigned int (*decoder)(struct DisasContext *dc);
90 uint32_t ir;
91 uint32_t opcode;
92 unsigned int op1;
93 unsigned int op2;
94 unsigned int zsize, zzsize;
95 unsigned int mode;
96 unsigned int postinc;
98 unsigned int size;
99 unsigned int src;
100 unsigned int dst;
101 unsigned int cond;
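/* Translation-time view of the condition codes.  Flags are evaluated
   lazily and only folded into $ccs when a consumer needs them, see
   cris_evaluate_flags(). */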
103 int update_cc;
104 int cc_op;
105 int cc_size;
106 uint32_t cc_mask;
108 int cc_size_uptodate; /* -1 invalid or last written value. */
110 int cc_x_uptodate; /* 0 - not up to date; 1 - from $ccs; 2 | flags_x - known at translation time. */
111 int flags_uptodate; /* Whether or not $ccs is up to date. */
112 int flagx_known; /* Whether or not flags_x has the x flag known at
113 translation time. */
114 int flags_x;
116 int clear_x; /* Clear x after this insn? */
117 int clear_prefix; /* Clear prefix after this insn? */
118 int clear_locked_irq; /* Clear the irq lockout. */
119 int cpustate_changed;
120 unsigned int tb_flags; /* tb dependent flags. */
121 int is_jmp;
123 #define JMP_NOJMP 0
124 #define JMP_DIRECT 1
125 #define JMP_INDIRECT 2
126 int jmp; /* 0=nojmp, 1=direct, 2=indirect. */
127 uint32_t jmp_pc;
129 int delayed_branch;
131 struct TranslationBlock *tb;
132 int singlestep_enabled;
133 } DisasContext;
135 static void gen_BUG(DisasContext *dc, const char *file, int line)
137 printf ("BUG: pc=%x %s %d\n", dc->pc, file, line);
138 qemu_log("BUG: pc=%x %s %d\n", dc->pc, file, line);
139 cpu_abort(dc->env, "%s:%d\n", file, line);
142 static const char *regnames[] =
144 "$r0", "$r1", "$r2", "$r3",
145 "$r4", "$r5", "$r6", "$r7",
146 "$r8", "$r9", "$r10", "$r11",
147 "$r12", "$r13", "$sp", "$acr",
149 static const char *pregnames[] =
151 "$bz", "$vr", "$pid", "$srs",
152 "$wz", "$exs", "$eda", "$mof",
153 "$dz", "$ebp", "$erp", "$srp",
154 "$nrp", "$ccs", "$usp", "$spc",
157 /* We need this table to handle preg-moves with implicit width. */
158 static int preg_sizes[] = {
159 1, /* bz. */
160 1, /* vr. */
161 4, /* pid. */
162 1, /* srs. */
163 2, /* wz. */
164 4, 4, 4,
165 4, 4, 4, 4,
166 4, 4, 4, 4,
169 #define t_gen_mov_TN_env(tn, member) \
170 _t_gen_mov_TN_env((tn), offsetof(CPUState, member))
171 #define t_gen_mov_env_TN(member, tn) \
172 _t_gen_mov_env_TN(offsetof(CPUState, member), (tn))
174 static inline void t_gen_mov_TN_reg(TCGv tn, int r)
176 if (r < 0 || r > 15)
177 fprintf(stderr, "wrong register read $r%d\n", r);
178 tcg_gen_mov_tl(tn, cpu_R[r]);
180 static inline void t_gen_mov_reg_TN(int r, TCGv tn)
182 if (r < 0 || r > 15)
183 fprintf(stderr, "wrong register write $r%d\n", r);
184 tcg_gen_mov_tl(cpu_R[r], tn);
187 static inline void _t_gen_mov_TN_env(TCGv tn, int offset)
189 if (offset > sizeof (CPUState))
190 fprintf(stderr, "wrong load from env from off=%d\n", offset);
191 tcg_gen_ld_tl(tn, cpu_env, offset);
193 static inline void _t_gen_mov_env_TN(int offset, TCGv tn)
195 if (offset > sizeof (CPUState))
196 fprintf(stderr, "wrong store to env at off=%d\n", offset);
197 tcg_gen_st_tl(tn, cpu_env, offset);
200 static inline void t_gen_mov_TN_preg(TCGv tn, int r)
202 if (r < 0 || r > 15)
203 fprintf(stderr, "wrong register read $p%d\n", r);
204 if (r == PR_BZ || r == PR_WZ || r == PR_DZ)
205 tcg_gen_mov_tl(tn, tcg_const_tl(0));
206 else if (r == PR_VR)
207 tcg_gen_mov_tl(tn, tcg_const_tl(32));
208 else
209 tcg_gen_mov_tl(tn, cpu_PR[r]);
211 static inline void t_gen_mov_preg_TN(DisasContext *dc, int r, TCGv tn)
213 if (r < 0 || r > 15)
214 fprintf(stderr, "wrong register write $p%d\n", r);
215 if (r == PR_BZ || r == PR_WZ || r == PR_DZ)
216 return;
217 else if (r == PR_SRS)
218 tcg_gen_andi_tl(cpu_PR[r], tn, 3);
219 else {
220 if (r == PR_PID)
221 gen_helper_tlb_flush_pid(tn);
222 if (dc->tb_flags & S_FLAG && r == PR_SPC)
223 gen_helper_spc_write(tn);
224 else if (r == PR_CCS)
225 dc->cpustate_changed = 1;
226 tcg_gen_mov_tl(cpu_PR[r], tn);
230 /* Sign extend at translation time. */
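/* width is the bit index of the sign bit, e.g. sign_extend(0x80, 7) == 0xffffff80. */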
231 static int sign_extend(unsigned int val, unsigned int width)
233 int sval;
235 /* LSL. */
236 val <<= 31 - width;
237 sval = val;
238 /* ASR. */
239 sval >>= 31 - width;
240 return sval;
243 static int cris_fetch(DisasContext *dc, uint32_t addr,
244 unsigned int size, unsigned int sign)
246 int r;
248 switch (size) {
249 case 4:
251 r = ldl_code(addr);
252 break;
254 case 2:
256 if (sign) {
257 r = ldsw_code(addr);
258 } else {
259 r = lduw_code(addr);
261 break;
263 case 1:
265 if (sign) {
266 r = ldsb_code(addr);
267 } else {
268 r = ldub_code(addr);
270 break;
272 default:
273 cpu_abort(dc->env, "Invalid fetch size %d\n", size);
274 break;
276 return r;
279 static void cris_lock_irq(DisasContext *dc)
281 dc->clear_locked_irq = 0;
282 t_gen_mov_env_TN(locked_irq, tcg_const_tl(1));
285 static inline void t_gen_raise_exception(uint32_t index)
287 TCGv_i32 tmp = tcg_const_i32(index);
288 gen_helper_raise_exception(tmp);
289 tcg_temp_free_i32(tmp);
292 static void t_gen_lsl(TCGv d, TCGv a, TCGv b)
294 TCGv t0, t_31;
296 t0 = tcg_temp_new();
297 t_31 = tcg_const_tl(31);
298 tcg_gen_shl_tl(d, a, b);
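/* For shift counts above 31 the result is forced to zero without a
   branch: (31 - b) >> 31 (arithmetic) is all ones exactly when b > 31;
   AND that with the shifted value and XOR it back out of d. */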
300 tcg_gen_sub_tl(t0, t_31, b);
301 tcg_gen_sar_tl(t0, t0, t_31);
302 tcg_gen_and_tl(t0, t0, d);
303 tcg_gen_xor_tl(d, d, t0);
304 tcg_temp_free(t0);
305 tcg_temp_free(t_31);
308 static void t_gen_lsr(TCGv d, TCGv a, TCGv b)
310 TCGv t0, t_31;
312 t0 = tcg_temp_new();
313 t_31 = tcg_temp_new();
314 tcg_gen_shr_tl(d, a, b);
316 tcg_gen_movi_tl(t_31, 31);
317 tcg_gen_sub_tl(t0, t_31, b);
318 tcg_gen_sar_tl(t0, t0, t_31);
319 tcg_gen_and_tl(t0, t0, d);
320 tcg_gen_xor_tl(d, d, t0);
321 tcg_temp_free(t0);
322 tcg_temp_free(t_31);
325 static void t_gen_asr(TCGv d, TCGv a, TCGv b)
327 TCGv t0, t_31;
329 t0 = tcg_temp_new();
330 t_31 = tcg_temp_new();
331 tcg_gen_sar_tl(d, a, b);
333 tcg_gen_movi_tl(t_31, 31);
334 tcg_gen_sub_tl(t0, t_31, b);
335 tcg_gen_sar_tl(t0, t0, t_31);
336 tcg_gen_or_tl(d, d, t0);
337 tcg_temp_free(t0);
338 tcg_temp_free(t_31);
341 /* 64-bit signed mul, lower result in d and upper in d2. */
342 static void t_gen_muls(TCGv d, TCGv d2, TCGv a, TCGv b)
344 TCGv_i64 t0, t1;
346 t0 = tcg_temp_new_i64();
347 t1 = tcg_temp_new_i64();
349 tcg_gen_ext_i32_i64(t0, a);
350 tcg_gen_ext_i32_i64(t1, b);
351 tcg_gen_mul_i64(t0, t0, t1);
353 tcg_gen_trunc_i64_i32(d, t0);
354 tcg_gen_shri_i64(t0, t0, 32);
355 tcg_gen_trunc_i64_i32(d2, t0);
357 tcg_temp_free_i64(t0);
358 tcg_temp_free_i64(t1);
361 /* 64-bit unsigned mul, lower result in d and upper in d2. */
362 static void t_gen_mulu(TCGv d, TCGv d2, TCGv a, TCGv b)
364 TCGv_i64 t0, t1;
366 t0 = tcg_temp_new_i64();
367 t1 = tcg_temp_new_i64();
369 tcg_gen_extu_i32_i64(t0, a);
370 tcg_gen_extu_i32_i64(t1, b);
371 tcg_gen_mul_i64(t0, t0, t1);
373 tcg_gen_trunc_i64_i32(d, t0);
374 tcg_gen_shri_i64(t0, t0, 32);
375 tcg_gen_trunc_i64_i32(d2, t0);
377 tcg_temp_free_i64(t0);
378 tcg_temp_free_i64(t1);
381 static void t_gen_cris_dstep(TCGv d, TCGv a, TCGv b)
383 int l1;
385 l1 = gen_new_label();
388 * d <<= 1
389 * if (d >= s)
390 * d -= s;
392 tcg_gen_shli_tl(d, a, 1);
393 tcg_gen_brcond_tl(TCG_COND_LTU, d, b, l1);
394 tcg_gen_sub_tl(d, d, b);
395 gen_set_label(l1);
398 static void t_gen_cris_mstep(TCGv d, TCGv a, TCGv b, TCGv ccs)
400 TCGv t;
403 * d <<= 1
404 * if (n)
405 * d += s;
407 t = tcg_temp_new();
408 tcg_gen_shli_tl(d, a, 1);
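/* Bit 3 of $ccs is the N flag; move it up to the sign bit and smear it
   so that t becomes an all-ones mask when N is set, zero otherwise. */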
409 tcg_gen_shli_tl(t, ccs, 31 - 3);
410 tcg_gen_sari_tl(t, t, 31);
411 tcg_gen_and_tl(t, t, b);
412 tcg_gen_add_tl(d, d, t);
413 tcg_temp_free(t);
416 /* Extended arithmetic on CRIS. */
417 static inline void t_gen_add_flag(TCGv d, int flag)
419 TCGv c;
421 c = tcg_temp_new();
422 t_gen_mov_TN_preg(c, PR_CCS);
423 /* Propagate the selected flag from $ccs into d. */
424 tcg_gen_andi_tl(c, c, 1 << flag);
425 if (flag)
426 tcg_gen_shri_tl(c, c, flag);
427 tcg_gen_add_tl(d, d, c);
428 tcg_temp_free(c);
431 static inline void t_gen_addx_carry(DisasContext *dc, TCGv d)
433 if (dc->flagx_known) {
434 if (dc->flags_x) {
435 TCGv c;
437 c = tcg_temp_new();
438 t_gen_mov_TN_preg(c, PR_CCS);
439 /* C flag is already at bit 0. */
440 tcg_gen_andi_tl(c, c, C_FLAG);
441 tcg_gen_add_tl(d, d, c);
442 tcg_temp_free(c);
444 } else {
445 TCGv x, c;
447 x = tcg_temp_new();
448 c = tcg_temp_new();
449 t_gen_mov_TN_preg(x, PR_CCS);
450 tcg_gen_mov_tl(c, x);
452 /* Propagate carry into d if X is set. Branch free. */
453 tcg_gen_andi_tl(c, c, C_FLAG);
454 tcg_gen_andi_tl(x, x, X_FLAG);
455 tcg_gen_shri_tl(x, x, 4);
457 tcg_gen_and_tl(x, x, c);
458 tcg_gen_add_tl(d, d, x);
459 tcg_temp_free(x);
460 tcg_temp_free(c);
464 static inline void t_gen_subx_carry(DisasContext *dc, TCGv d)
466 if (dc->flagx_known) {
467 if (dc->flags_x) {
468 TCGv c;
470 c = tcg_temp_new();
471 t_gen_mov_TN_preg(c, PR_CCS);
472 /* C flag is already at bit 0. */
473 tcg_gen_andi_tl(c, c, C_FLAG);
474 tcg_gen_sub_tl(d, d, c);
475 tcg_temp_free(c);
477 } else {
478 TCGv x, c;
480 x = tcg_temp_new();
481 c = tcg_temp_new();
482 t_gen_mov_TN_preg(x, PR_CCS);
483 tcg_gen_mov_tl(c, x);
485 /* Propagate carry into d if X is set. Branch free. */
486 tcg_gen_andi_tl(c, c, C_FLAG);
487 tcg_gen_andi_tl(x, x, X_FLAG);
488 tcg_gen_shri_tl(x, x, 4);
490 tcg_gen_and_tl(x, x, c);
491 tcg_gen_sub_tl(d, d, x);
492 tcg_temp_free(x);
493 tcg_temp_free(c);
497 /* Swap the two bytes within each half word of the s operand.
498 T0 = ((T0 << 8) & 0xff00ff00) | ((T0 >> 8) & 0x00ff00ff) */
499 static inline void t_gen_swapb(TCGv d, TCGv s)
501 TCGv t, org_s;
503 t = tcg_temp_new();
504 org_s = tcg_temp_new();
506 /* d and s may refer to the same object. */
507 tcg_gen_mov_tl(org_s, s);
508 tcg_gen_shli_tl(t, org_s, 8);
509 tcg_gen_andi_tl(d, t, 0xff00ff00);
510 tcg_gen_shri_tl(t, org_s, 8);
511 tcg_gen_andi_tl(t, t, 0x00ff00ff);
512 tcg_gen_or_tl(d, d, t);
513 tcg_temp_free(t);
514 tcg_temp_free(org_s);
517 /* Swap the halfwords of the s operand. */
518 static inline void t_gen_swapw(TCGv d, TCGv s)
520 TCGv t;
521 /* d and s may refer to the same object. */
522 t = tcg_temp_new();
523 tcg_gen_mov_tl(t, s);
524 tcg_gen_shli_tl(d, t, 16);
525 tcg_gen_shri_tl(t, t, 16);
526 tcg_gen_or_tl(d, d, t);
527 tcg_temp_free(t);
530 /* Reverse the bits within each byte.
531 T0 = (((T0 << 7) & 0x80808080) |
532 ((T0 << 5) & 0x40404040) |
533 ((T0 << 3) & 0x20202020) |
534 ((T0 << 1) & 0x10101010) |
535 ((T0 >> 1) & 0x08080808) |
536 ((T0 >> 3) & 0x04040404) |
537 ((T0 >> 5) & 0x02020202) |
538 ((T0 >> 7) & 0x01010101));
540 static inline void t_gen_swapr(TCGv d, TCGv s)
542 struct {
543 int shift; /* LSL when positive, LSR when negative. */
544 uint32_t mask;
545 } bitrev [] = {
546 {7, 0x80808080},
547 {5, 0x40404040},
548 {3, 0x20202020},
549 {1, 0x10101010},
550 {-1, 0x08080808},
551 {-3, 0x04040404},
552 {-5, 0x02020202},
553 {-7, 0x01010101}
555 int i;
556 TCGv t, org_s;
558 /* d and s may refer to the same object. */
559 t = tcg_temp_new();
560 org_s = tcg_temp_new();
561 tcg_gen_mov_tl(org_s, s);
563 tcg_gen_shli_tl(t, org_s, bitrev[0].shift);
564 tcg_gen_andi_tl(d, t, bitrev[0].mask);
565 for (i = 1; i < ARRAY_SIZE(bitrev); i++) {
566 if (bitrev[i].shift >= 0) {
567 tcg_gen_shli_tl(t, org_s, bitrev[i].shift);
568 } else {
569 tcg_gen_shri_tl(t, org_s, -bitrev[i].shift);
571 tcg_gen_andi_tl(t, t, bitrev[i].mask);
572 tcg_gen_or_tl(d, d, t);
574 tcg_temp_free(t);
575 tcg_temp_free(org_s);
578 static void t_gen_cc_jmp(TCGv pc_true, TCGv pc_false)
580 TCGv btaken;
581 int l1;
583 l1 = gen_new_label();
584 btaken = tcg_temp_new();
586 /* Conditional jmp. */
587 tcg_gen_mov_tl(btaken, env_btaken);
588 tcg_gen_mov_tl(env_pc, pc_false);
589 tcg_gen_brcondi_tl(TCG_COND_EQ, btaken, 0, l1);
590 tcg_gen_mov_tl(env_pc, pc_true);
591 gen_set_label(l1);
593 tcg_temp_free(btaken);
596 static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
598 TranslationBlock *tb;
599 tb = dc->tb;
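/* Direct TB chaining is only used when the destination lies on the same
   guest page as this TB; otherwise set the PC and exit for a full lookup. */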
600 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
601 tcg_gen_goto_tb(n);
602 tcg_gen_movi_tl(env_pc, dest);
603 tcg_gen_exit_tb((long)tb + n);
604 } else {
605 tcg_gen_movi_tl(env_pc, dest);
606 tcg_gen_exit_tb(0);
610 static inline void cris_clear_x_flag(DisasContext *dc)
612 if (dc->flagx_known && dc->flags_x)
613 dc->flags_uptodate = 0;
615 dc->flagx_known = 1;
616 dc->flags_x = 0;
619 static void cris_flush_cc_state(DisasContext *dc)
621 if (dc->cc_size_uptodate != dc->cc_size) {
622 tcg_gen_movi_tl(cc_size, dc->cc_size);
623 dc->cc_size_uptodate = dc->cc_size;
625 tcg_gen_movi_tl(cc_op, dc->cc_op);
626 tcg_gen_movi_tl(cc_mask, dc->cc_mask);
629 static void cris_evaluate_flags(DisasContext *dc)
631 if (dc->flags_uptodate)
632 return;
634 cris_flush_cc_state(dc);
636 switch (dc->cc_op)
638 case CC_OP_MCP:
639 gen_helper_evaluate_flags_mcp(cpu_PR[PR_CCS],
640 cpu_PR[PR_CCS], cc_src,
641 cc_dest, cc_result);
642 break;
643 case CC_OP_MULS:
644 gen_helper_evaluate_flags_muls(cpu_PR[PR_CCS],
645 cpu_PR[PR_CCS], cc_result,
646 cpu_PR[PR_MOF]);
647 break;
648 case CC_OP_MULU:
649 gen_helper_evaluate_flags_mulu(cpu_PR[PR_CCS],
650 cpu_PR[PR_CCS], cc_result,
651 cpu_PR[PR_MOF]);
652 break;
653 case CC_OP_MOVE:
654 case CC_OP_AND:
655 case CC_OP_OR:
656 case CC_OP_XOR:
657 case CC_OP_ASR:
658 case CC_OP_LSR:
659 case CC_OP_LSL:
660 switch (dc->cc_size)
662 case 4:
663 gen_helper_evaluate_flags_move_4(cpu_PR[PR_CCS],
664 cpu_PR[PR_CCS], cc_result);
665 break;
666 case 2:
667 gen_helper_evaluate_flags_move_2(cpu_PR[PR_CCS],
668 cpu_PR[PR_CCS], cc_result);
669 break;
670 default:
671 gen_helper_evaluate_flags();
672 break;
674 break;
675 case CC_OP_FLAGS:
676 /* live. */
677 break;
678 case CC_OP_SUB:
679 case CC_OP_CMP:
680 if (dc->cc_size == 4)
681 gen_helper_evaluate_flags_sub_4(cpu_PR[PR_CCS],
682 cpu_PR[PR_CCS], cc_src, cc_dest, cc_result);
683 else
684 gen_helper_evaluate_flags();
686 break;
687 default:
688 switch (dc->cc_size)
690 case 4:
691 gen_helper_evaluate_flags_alu_4(cpu_PR[PR_CCS],
692 cpu_PR[PR_CCS], cc_src, cc_dest, cc_result);
693 break;
694 default:
695 gen_helper_evaluate_flags();
696 break;
698 break;
701 if (dc->flagx_known) {
702 if (dc->flags_x)
703 tcg_gen_ori_tl(cpu_PR[PR_CCS],
704 cpu_PR[PR_CCS], X_FLAG);
705 else if (dc->cc_op == CC_OP_FLAGS)
706 tcg_gen_andi_tl(cpu_PR[PR_CCS],
707 cpu_PR[PR_CCS], ~X_FLAG);
709 dc->flags_uptodate = 1;
712 static void cris_cc_mask(DisasContext *dc, unsigned int mask)
714 uint32_t ovl;
716 if (!mask) {
717 dc->update_cc = 0;
718 return;
721 /* Check if we need to evaluate the condition codes due to
722 CC overlaying. */
723 ovl = (dc->cc_mask ^ mask) & ~mask;
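/* ovl holds the flags the previous insn was asked to update but that the
   new mask no longer covers; they must be materialized into $ccs now. */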
724 if (ovl) {
725 /* TODO: optimize this case. It triggers all the time. */
726 cris_evaluate_flags (dc);
728 dc->cc_mask = mask;
729 dc->update_cc = 1;
732 static void cris_update_cc_op(DisasContext *dc, int op, int size)
734 dc->cc_op = op;
735 dc->cc_size = size;
736 dc->flags_uptodate = 0;
739 static inline void cris_update_cc_x(DisasContext *dc)
741 /* Save the x flag state at the time of the cc snapshot. */
742 if (dc->flagx_known) {
743 if (dc->cc_x_uptodate == (2 | dc->flags_x))
744 return;
745 tcg_gen_movi_tl(cc_x, dc->flags_x);
746 dc->cc_x_uptodate = 2 | dc->flags_x;
748 else {
749 tcg_gen_andi_tl(cc_x, cpu_PR[PR_CCS], X_FLAG);
750 dc->cc_x_uptodate = 1;
754 /* Update cc prior to executing ALU op. Needs source operands untouched. */
755 static void cris_pre_alu_update_cc(DisasContext *dc, int op,
756 TCGv dst, TCGv src, int size)
758 if (dc->update_cc) {
759 cris_update_cc_op(dc, op, size);
760 tcg_gen_mov_tl(cc_src, src);
762 if (op != CC_OP_MOVE
763 && op != CC_OP_AND
764 && op != CC_OP_OR
765 && op != CC_OP_XOR
766 && op != CC_OP_ASR
767 && op != CC_OP_LSR
768 && op != CC_OP_LSL)
769 tcg_gen_mov_tl(cc_dest, dst);
771 cris_update_cc_x(dc);
775 /* Update cc after executing ALU op. Needs the result. */
776 static inline void cris_update_result(DisasContext *dc, TCGv res)
778 if (dc->update_cc)
779 tcg_gen_mov_tl(cc_result, res);
782 /* Emit the insns for the given ALU op; sub-word results are masked to size. */
783 static void cris_alu_op_exec(DisasContext *dc, int op,
784 TCGv dst, TCGv a, TCGv b, int size)
786 /* Emit the ALU insns. */
787 switch (op)
789 case CC_OP_ADD:
790 tcg_gen_add_tl(dst, a, b);
791 /* Extended arithmetic. */
792 t_gen_addx_carry(dc, dst);
793 break;
794 case CC_OP_ADDC:
795 tcg_gen_add_tl(dst, a, b);
796 t_gen_add_flag(dst, 0); /* C_FLAG. */
797 break;
798 case CC_OP_MCP:
799 tcg_gen_add_tl(dst, a, b);
800 t_gen_add_flag(dst, 8); /* R_FLAG. */
801 break;
802 case CC_OP_SUB:
803 tcg_gen_sub_tl(dst, a, b);
804 /* Extended arithmetic. */
805 t_gen_subx_carry(dc, dst);
806 break;
807 case CC_OP_MOVE:
808 tcg_gen_mov_tl(dst, b);
809 break;
810 case CC_OP_OR:
811 tcg_gen_or_tl(dst, a, b);
812 break;
813 case CC_OP_AND:
814 tcg_gen_and_tl(dst, a, b);
815 break;
816 case CC_OP_XOR:
817 tcg_gen_xor_tl(dst, a, b);
818 break;
819 case CC_OP_LSL:
820 t_gen_lsl(dst, a, b);
821 break;
822 case CC_OP_LSR:
823 t_gen_lsr(dst, a, b);
824 break;
825 case CC_OP_ASR:
826 t_gen_asr(dst, a, b);
827 break;
828 case CC_OP_NEG:
829 tcg_gen_neg_tl(dst, b);
830 /* Extended arithmetic. */
831 t_gen_subx_carry(dc, dst);
832 break;
833 case CC_OP_LZ:
834 gen_helper_lz(dst, b);
835 break;
836 case CC_OP_MULS:
837 t_gen_muls(dst, cpu_PR[PR_MOF], a, b);
838 break;
839 case CC_OP_MULU:
840 t_gen_mulu(dst, cpu_PR[PR_MOF], a, b);
841 break;
842 case CC_OP_DSTEP:
843 t_gen_cris_dstep(dst, a, b);
844 break;
845 case CC_OP_MSTEP:
846 t_gen_cris_mstep(dst, a, b, cpu_PR[PR_CCS]);
847 break;
848 case CC_OP_BOUND:
850 int l1;
851 l1 = gen_new_label();
852 tcg_gen_mov_tl(dst, a);
853 tcg_gen_brcond_tl(TCG_COND_LEU, a, b, l1);
854 tcg_gen_mov_tl(dst, b);
855 gen_set_label(l1);
857 break;
858 case CC_OP_CMP:
859 tcg_gen_sub_tl(dst, a, b);
860 /* Extended arithmetic. */
861 t_gen_subx_carry(dc, dst);
862 break;
863 default:
864 qemu_log("illegal ALU op.\n");
865 BUG();
866 break;
869 if (size == 1)
870 tcg_gen_andi_tl(dst, dst, 0xff);
871 else if (size == 2)
872 tcg_gen_andi_tl(dst, dst, 0xffff);
875 static void cris_alu(DisasContext *dc, int op,
876 TCGv d, TCGv op_a, TCGv op_b, int size)
878 TCGv tmp;
879 int writeback;
881 writeback = 1;
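/* CMP never writes back; dword-sized ops write straight into d, while
   smaller sizes go through a temp so the high bits of d are preserved. */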
883 if (op == CC_OP_CMP) {
884 tmp = tcg_temp_new();
885 writeback = 0;
886 } else if (size == 4) {
887 tmp = d;
888 writeback = 0;
889 } else
890 tmp = tcg_temp_new();
893 cris_pre_alu_update_cc(dc, op, op_a, op_b, size);
894 cris_alu_op_exec(dc, op, tmp, op_a, op_b, size);
895 cris_update_result(dc, tmp);
897 /* Writeback. */
898 if (writeback) {
899 if (size == 1)
900 tcg_gen_andi_tl(d, d, ~0xff);
901 else
902 tcg_gen_andi_tl(d, d, ~0xffff);
903 tcg_gen_or_tl(d, d, tmp);
905 if (!TCGV_EQUAL(tmp, d))
906 tcg_temp_free(tmp);
909 static int arith_cc(DisasContext *dc)
911 if (dc->update_cc) {
912 switch (dc->cc_op) {
913 case CC_OP_ADDC: return 1;
914 case CC_OP_ADD: return 1;
915 case CC_OP_SUB: return 1;
916 case CC_OP_DSTEP: return 1;
917 case CC_OP_LSL: return 1;
918 case CC_OP_LSR: return 1;
919 case CC_OP_ASR: return 1;
920 case CC_OP_CMP: return 1;
921 case CC_OP_NEG: return 1;
922 case CC_OP_OR: return 1;
923 case CC_OP_AND: return 1;
924 case CC_OP_XOR: return 1;
925 case CC_OP_MULU: return 1;
926 case CC_OP_MULS: return 1;
927 default:
928 return 0;
931 return 0;
934 static void gen_tst_cc (DisasContext *dc, TCGv cc, int cond)
936 int arith_opt, move_opt;
938 /* TODO: optimize more condition codes. */
941 * If the flags are live, we've gotta look into the bits of CCS.
942 * Otherwise, if we just did an arithmetic operation we try to
943 * evaluate the condition code faster.
945 * When this function is done, T0 should be non-zero if the condition
946 * code is true.
948 arith_opt = arith_cc(dc) && !dc->flags_uptodate;
949 move_opt = (dc->cc_op == CC_OP_MOVE);
950 switch (cond) {
951 case CC_EQ:
952 if ((arith_opt || move_opt)
953 && dc->cc_x_uptodate != (2 | X_FLAG)) {
954 /* If cc_result is zero, T0 should be
955 non-zero otherwise T0 should be zero. */
956 int l1;
957 l1 = gen_new_label();
958 tcg_gen_movi_tl(cc, 0);
959 tcg_gen_brcondi_tl(TCG_COND_NE, cc_result,
960 0, l1);
961 tcg_gen_movi_tl(cc, 1);
962 gen_set_label(l1);
964 else {
965 cris_evaluate_flags(dc);
966 tcg_gen_andi_tl(cc,
967 cpu_PR[PR_CCS], Z_FLAG);
969 break;
970 case CC_NE:
971 if ((arith_opt || move_opt)
972 && dc->cc_x_uptodate != (2 | X_FLAG)) {
973 tcg_gen_mov_tl(cc, cc_result);
974 } else {
975 cris_evaluate_flags(dc);
976 tcg_gen_xori_tl(cc, cpu_PR[PR_CCS],
977 Z_FLAG);
978 tcg_gen_andi_tl(cc, cc, Z_FLAG);
980 break;
981 case CC_CS:
982 cris_evaluate_flags(dc);
983 tcg_gen_andi_tl(cc, cpu_PR[PR_CCS], C_FLAG);
984 break;
985 case CC_CC:
986 cris_evaluate_flags(dc);
987 tcg_gen_xori_tl(cc, cpu_PR[PR_CCS], C_FLAG);
988 tcg_gen_andi_tl(cc, cc, C_FLAG);
989 break;
990 case CC_VS:
991 cris_evaluate_flags(dc);
992 tcg_gen_andi_tl(cc, cpu_PR[PR_CCS], V_FLAG);
993 break;
994 case CC_VC:
995 cris_evaluate_flags(dc);
996 tcg_gen_xori_tl(cc, cpu_PR[PR_CCS],
997 V_FLAG);
998 tcg_gen_andi_tl(cc, cc, V_FLAG);
999 break;
1000 case CC_PL:
1001 if (arith_opt || move_opt) {
1002 int bits = 31;
1004 if (dc->cc_size == 1)
1005 bits = 7;
1006 else if (dc->cc_size == 2)
1007 bits = 15;
1009 tcg_gen_shri_tl(cc, cc_result, bits);
1010 tcg_gen_xori_tl(cc, cc, 1);
1011 } else {
1012 cris_evaluate_flags(dc);
1013 tcg_gen_xori_tl(cc, cpu_PR[PR_CCS],
1014 N_FLAG);
1015 tcg_gen_andi_tl(cc, cc, N_FLAG);
1017 break;
1018 case CC_MI:
1019 if (arith_opt || move_opt) {
1020 int bits = 31;
1022 if (dc->cc_size == 1)
1023 bits = 7;
1024 else if (dc->cc_size == 2)
1025 bits = 15;
1027 tcg_gen_shri_tl(cc, cc_result, bits);
1028 tcg_gen_andi_tl(cc, cc, 1);
1030 else {
1031 cris_evaluate_flags(dc);
1032 tcg_gen_andi_tl(cc, cpu_PR[PR_CCS],
1033 N_FLAG);
1035 break;
1036 case CC_LS:
1037 cris_evaluate_flags(dc);
1038 tcg_gen_andi_tl(cc, cpu_PR[PR_CCS],
1039 C_FLAG | Z_FLAG);
1040 break;
1041 case CC_HI:
1042 cris_evaluate_flags(dc);
1044 TCGv tmp;
1046 tmp = tcg_temp_new();
1047 tcg_gen_xori_tl(tmp, cpu_PR[PR_CCS],
1048 C_FLAG | Z_FLAG);
1049 /* Overlay the C flag on top of the Z. */
1050 tcg_gen_shli_tl(cc, tmp, 2);
1051 tcg_gen_and_tl(cc, tmp, cc);
1052 tcg_gen_andi_tl(cc, cc, Z_FLAG);
1054 tcg_temp_free(tmp);
1056 break;
1057 case CC_GE:
1058 cris_evaluate_flags(dc);
1059 /* Overlay the V flag on top of the N. */
1060 tcg_gen_shli_tl(cc, cpu_PR[PR_CCS], 2);
1061 tcg_gen_xor_tl(cc,
1062 cpu_PR[PR_CCS], cc);
1063 tcg_gen_andi_tl(cc, cc, N_FLAG);
1064 tcg_gen_xori_tl(cc, cc, N_FLAG);
1065 break;
1066 case CC_LT:
1067 cris_evaluate_flags(dc);
1068 /* Overlay the V flag on top of the N. */
1069 tcg_gen_shli_tl(cc, cpu_PR[PR_CCS], 2);
1070 tcg_gen_xor_tl(cc,
1071 cpu_PR[PR_CCS], cc);
1072 tcg_gen_andi_tl(cc, cc, N_FLAG);
1073 break;
1074 case CC_GT:
1075 cris_evaluate_flags(dc);
1077 TCGv n, z;
1079 n = tcg_temp_new();
1080 z = tcg_temp_new();
1082 /* To avoid a shift we overlay everything on
1083 the V flag. */
1084 tcg_gen_shri_tl(n, cpu_PR[PR_CCS], 2);
1085 tcg_gen_shri_tl(z, cpu_PR[PR_CCS], 1);
1086 /* invert Z. */
1087 tcg_gen_xori_tl(z, z, 2);
1089 tcg_gen_xor_tl(n, n, cpu_PR[PR_CCS]);
1090 tcg_gen_xori_tl(n, n, 2);
1091 tcg_gen_and_tl(cc, z, n);
1092 tcg_gen_andi_tl(cc, cc, 2);
1094 tcg_temp_free(n);
1095 tcg_temp_free(z);
1097 break;
1098 case CC_LE:
1099 cris_evaluate_flags(dc);
1101 TCGv n, z;
1103 n = tcg_temp_new();
1104 z = tcg_temp_new();
1106 /* To avoid a shift we overlay everything on
1107 the V flag. */
1108 tcg_gen_shri_tl(n, cpu_PR[PR_CCS], 2);
1109 tcg_gen_shri_tl(z, cpu_PR[PR_CCS], 1);
1111 tcg_gen_xor_tl(n, n, cpu_PR[PR_CCS]);
1112 tcg_gen_or_tl(cc, z, n);
1113 tcg_gen_andi_tl(cc, cc, 2);
1115 tcg_temp_free(n);
1116 tcg_temp_free(z);
1118 break;
1119 case CC_P:
1120 cris_evaluate_flags(dc);
1121 tcg_gen_andi_tl(cc, cpu_PR[PR_CCS], P_FLAG);
1122 break;
1123 case CC_A:
1124 tcg_gen_movi_tl(cc, 1);
1125 break;
1126 default:
1127 BUG();
1128 break;
1132 static void cris_store_direct_jmp(DisasContext *dc)
1134 /* Store the direct jmp state into the cpu-state. */
1135 if (dc->jmp == JMP_DIRECT) {
1136 tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
1137 tcg_gen_movi_tl(env_btaken, 1);
1141 static void cris_prepare_cc_branch (DisasContext *dc,
1142 int offset, int cond)
1144 /* This helps us re-schedule the micro-code to insns in delay-slots
1145 before the actual jump. */
1146 dc->delayed_branch = 2;
1147 dc->jmp_pc = dc->pc + offset;
1149 if (cond != CC_A)
1151 dc->jmp = JMP_INDIRECT;
1152 gen_tst_cc (dc, env_btaken, cond);
1153 tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
1154 } else {
1155 /* Allow chaining. */
1156 dc->jmp = JMP_DIRECT;
1161 /* Indirect jumps, when the dest is in a live reg for example. Direct should be set
1162 when the dest addr is constant to allow tb chaining. */
1163 static inline void cris_prepare_jmp (DisasContext *dc, unsigned int type)
1165 /* This helps us re-schedule the micro-code to insns in delay-slots
1166 before the actual jump. */
1167 dc->delayed_branch = 2;
1168 dc->jmp = type;
1169 if (type == JMP_INDIRECT)
1170 tcg_gen_movi_tl(env_btaken, 1);
1173 static void gen_load64(DisasContext *dc, TCGv_i64 dst, TCGv addr)
1175 int mem_index = cpu_mmu_index(dc->env);
1177 /* If we get a fault on a delayslot we must keep the jmp state in
1178 the cpu-state to be able to re-execute the jmp. */
1179 if (dc->delayed_branch == 1)
1180 cris_store_direct_jmp(dc);
1182 tcg_gen_qemu_ld64(dst, addr, mem_index);
1185 static void gen_load(DisasContext *dc, TCGv dst, TCGv addr,
1186 unsigned int size, int sign)
1188 int mem_index = cpu_mmu_index(dc->env);
1190 /* If we get a fault on a delayslot we must keep the jmp state in
1191 the cpu-state to be able to re-execute the jmp. */
1192 if (dc->delayed_branch == 1)
1193 cris_store_direct_jmp(dc);
1195 if (size == 1) {
1196 if (sign)
1197 tcg_gen_qemu_ld8s(dst, addr, mem_index);
1198 else
1199 tcg_gen_qemu_ld8u(dst, addr, mem_index);
1201 else if (size == 2) {
1202 if (sign)
1203 tcg_gen_qemu_ld16s(dst, addr, mem_index);
1204 else
1205 tcg_gen_qemu_ld16u(dst, addr, mem_index);
1207 else if (size == 4) {
1208 tcg_gen_qemu_ld32u(dst, addr, mem_index);
1210 else {
1211 abort();
1215 static void gen_store (DisasContext *dc, TCGv addr, TCGv val,
1216 unsigned int size)
1218 int mem_index = cpu_mmu_index(dc->env);
1220 /* If we get a fault on a delayslot we must keep the jmp state in
1221 the cpu-state to be able to re-execute the jmp. */
1222 if (dc->delayed_branch == 1)
1223 cris_store_direct_jmp(dc);
1226 /* Conditional writes. We only support the kind where X and P are known
1227 at translation time. */
1228 if (dc->flagx_known && dc->flags_x && (dc->tb_flags & P_FLAG)) {
1229 dc->postinc = 0;
1230 cris_evaluate_flags(dc);
1231 tcg_gen_ori_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], C_FLAG);
1232 return;
1235 if (size == 1)
1236 tcg_gen_qemu_st8(val, addr, mem_index);
1237 else if (size == 2)
1238 tcg_gen_qemu_st16(val, addr, mem_index);
1239 else
1240 tcg_gen_qemu_st32(val, addr, mem_index);
1242 if (dc->flagx_known && dc->flags_x) {
1243 cris_evaluate_flags(dc);
1244 tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~C_FLAG);
1248 static inline void t_gen_sext(TCGv d, TCGv s, int size)
1250 if (size == 1)
1251 tcg_gen_ext8s_i32(d, s);
1252 else if (size == 2)
1253 tcg_gen_ext16s_i32(d, s);
1254 else if (!TCGV_EQUAL(d, s))
1255 tcg_gen_mov_tl(d, s);
1258 static inline void t_gen_zext(TCGv d, TCGv s, int size)
1260 if (size == 1)
1261 tcg_gen_ext8u_i32(d, s);
1262 else if (size == 2)
1263 tcg_gen_ext16u_i32(d, s);
1264 else if (!TCGV_EQUAL(d, s))
1265 tcg_gen_mov_tl(d, s);
1268 #if DISAS_CRIS
1269 static char memsize_char(int size)
1271 switch (size)
1273 case 1: return 'b'; break;
1274 case 2: return 'w'; break;
1275 case 4: return 'd'; break;
1276 default:
1277 return 'x';
1278 break;
1281 #endif
1283 static inline unsigned int memsize_z(DisasContext *dc)
1285 return dc->zsize + 1;
1288 static inline unsigned int memsize_zz(DisasContext *dc)
1290 switch (dc->zzsize)
1292 case 0: return 1;
1293 case 1: return 2;
1294 default:
1295 return 4;
1299 static inline void do_postinc (DisasContext *dc, int size)
1301 if (dc->postinc)
1302 tcg_gen_addi_tl(cpu_R[dc->op1], cpu_R[dc->op1], size);
1305 static inline void dec_prep_move_r(DisasContext *dc, int rs, int rd,
1306 int size, int s_ext, TCGv dst)
1308 if (s_ext)
1309 t_gen_sext(dst, cpu_R[rs], size);
1310 else
1311 t_gen_zext(dst, cpu_R[rs], size);
1314 /* Prepare T0 and T1 for a register alu operation.
1315 s_ext decides if the operand1 should be sign-extended or zero-extended when
1316 needed. */
1317 static void dec_prep_alu_r(DisasContext *dc, int rs, int rd,
1318 int size, int s_ext, TCGv dst, TCGv src)
1320 dec_prep_move_r(dc, rs, rd, size, s_ext, src);
1322 if (s_ext)
1323 t_gen_sext(dst, cpu_R[rd], size);
1324 else
1325 t_gen_zext(dst, cpu_R[rd], size);
1328 static int dec_prep_move_m(DisasContext *dc, int s_ext, int memsize,
1329 TCGv dst)
1331 unsigned int rs;
1332 uint32_t imm;
1333 int is_imm;
1334 int insn_len = 2;
1336 rs = dc->op1;
1337 is_imm = rs == 15 && dc->postinc;
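/* Operand register 15 with post-increment encodes the immediate
   addressing mode; the constant follows the insn in the code stream. */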
1339 /* Load [$rs] onto T1. */
1340 if (is_imm) {
1341 insn_len = 2 + memsize;
1342 if (memsize == 1)
1343 insn_len++;
1345 imm = cris_fetch(dc, dc->pc + 2, memsize, s_ext);
1346 tcg_gen_movi_tl(dst, imm);
1347 dc->postinc = 0;
1348 } else {
1349 cris_flush_cc_state(dc);
1350 gen_load(dc, dst, cpu_R[rs], memsize, 0);
1351 if (s_ext)
1352 t_gen_sext(dst, dst, memsize);
1353 else
1354 t_gen_zext(dst, dst, memsize);
1356 return insn_len;
1359 /* Prepare T0 and T1 for a memory + alu operation.
1360 s_ext decides if the operand1 should be sign-extended or zero-extended when
1361 needed. */
1362 static int dec_prep_alu_m(DisasContext *dc, int s_ext, int memsize,
1363 TCGv dst, TCGv src)
1365 int insn_len;
1367 insn_len = dec_prep_move_m(dc, s_ext, memsize, src);
1368 tcg_gen_mov_tl(dst, cpu_R[dc->op2]);
1369 return insn_len;
1372 #if DISAS_CRIS
1373 static const char *cc_name(int cc)
1375 static const char *cc_names[16] = {
1376 "cc", "cs", "ne", "eq", "vc", "vs", "pl", "mi",
1377 "ls", "hi", "ge", "lt", "gt", "le", "a", "p"
1379 assert(cc < 16);
1380 return cc_names[cc];
1382 #endif
1384 /* Start of insn decoders. */
1386 static int dec_bccq(DisasContext *dc)
1388 int32_t offset;
1389 int sign;
1390 uint32_t cond = dc->op2;
1392 offset = EXTRACT_FIELD (dc->ir, 1, 7);
1393 sign = EXTRACT_FIELD(dc->ir, 0, 0);
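/* The quick branch offset is stored in units of two bytes with the sign
   in bit 0; rebuild a byte offset and sign extend it. */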
1395 offset *= 2;
1396 offset |= sign << 8;
1397 offset = sign_extend(offset, 8);
1399 LOG_DIS("b%s %x\n", cc_name(cond), dc->pc + offset);
1401 /* op2 holds the condition-code. */
1402 cris_cc_mask(dc, 0);
1403 cris_prepare_cc_branch (dc, offset, cond);
1404 return 2;
1406 static int dec_addoq(DisasContext *dc)
1408 int32_t imm;
1410 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 7);
1411 imm = sign_extend(dc->op1, 7);
1413 LOG_DIS("addoq %d, $r%u\n", imm, dc->op2);
1414 cris_cc_mask(dc, 0);
1415 /* Fetch register operand. */
1416 tcg_gen_addi_tl(cpu_R[R_ACR], cpu_R[dc->op2], imm);
1418 return 2;
1420 static int dec_addq(DisasContext *dc)
1422 LOG_DIS("addq %u, $r%u\n", dc->op1, dc->op2);
1424 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
1426 cris_cc_mask(dc, CC_MASK_NZVC);
1428 cris_alu(dc, CC_OP_ADD,
1429 cpu_R[dc->op2], cpu_R[dc->op2], tcg_const_tl(dc->op1), 4);
1430 return 2;
1432 static int dec_moveq(DisasContext *dc)
1434 uint32_t imm;
1436 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
1437 imm = sign_extend(dc->op1, 5);
1438 LOG_DIS("moveq %d, $r%u\n", imm, dc->op2);
1440 tcg_gen_movi_tl(cpu_R[dc->op2], imm);
1441 return 2;
1443 static int dec_subq(DisasContext *dc)
1445 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
1447 LOG_DIS("subq %u, $r%u\n", dc->op1, dc->op2);
1449 cris_cc_mask(dc, CC_MASK_NZVC);
1450 cris_alu(dc, CC_OP_SUB,
1451 cpu_R[dc->op2], cpu_R[dc->op2], tcg_const_tl(dc->op1), 4);
1452 return 2;
1454 static int dec_cmpq(DisasContext *dc)
1456 uint32_t imm;
1457 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
1458 imm = sign_extend(dc->op1, 5);
1460 LOG_DIS("cmpq %d, $r%d\n", imm, dc->op2);
1461 cris_cc_mask(dc, CC_MASK_NZVC);
1463 cris_alu(dc, CC_OP_CMP,
1464 cpu_R[dc->op2], cpu_R[dc->op2], tcg_const_tl(imm), 4);
1465 return 2;
1467 static int dec_andq(DisasContext *dc)
1469 uint32_t imm;
1470 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
1471 imm = sign_extend(dc->op1, 5);
1473 LOG_DIS("andq %d, $r%d\n", imm, dc->op2);
1474 cris_cc_mask(dc, CC_MASK_NZ);
1476 cris_alu(dc, CC_OP_AND,
1477 cpu_R[dc->op2], cpu_R[dc->op2], tcg_const_tl(imm), 4);
1478 return 2;
1480 static int dec_orq(DisasContext *dc)
1482 uint32_t imm;
1483 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
1484 imm = sign_extend(dc->op1, 5);
1485 LOG_DIS("orq %d, $r%d\n", imm, dc->op2);
1486 cris_cc_mask(dc, CC_MASK_NZ);
1488 cris_alu(dc, CC_OP_OR,
1489 cpu_R[dc->op2], cpu_R[dc->op2], tcg_const_tl(imm), 4);
1490 return 2;
1492 static int dec_btstq(DisasContext *dc)
1494 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 4);
1495 LOG_DIS("btstq %u, $r%d\n", dc->op1, dc->op2);
1497 cris_cc_mask(dc, CC_MASK_NZ);
1498 cris_evaluate_flags(dc);
1499 gen_helper_btst(cpu_PR[PR_CCS], cpu_R[dc->op2],
1500 tcg_const_tl(dc->op1), cpu_PR[PR_CCS]);
1501 cris_alu(dc, CC_OP_MOVE,
1502 cpu_R[dc->op2], cpu_R[dc->op2], cpu_R[dc->op2], 4);
1503 cris_update_cc_op(dc, CC_OP_FLAGS, 4);
1504 dc->flags_uptodate = 1;
1505 return 2;
1507 static int dec_asrq(DisasContext *dc)
1509 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 4);
1510 LOG_DIS("asrq %u, $r%d\n", dc->op1, dc->op2);
1511 cris_cc_mask(dc, CC_MASK_NZ);
1513 tcg_gen_sari_tl(cpu_R[dc->op2], cpu_R[dc->op2], dc->op1);
1514 cris_alu(dc, CC_OP_MOVE,
1515 cpu_R[dc->op2],
1516 cpu_R[dc->op2], cpu_R[dc->op2], 4);
1517 return 2;
1519 static int dec_lslq(DisasContext *dc)
1521 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 4);
1522 LOG_DIS("lslq %u, $r%d\n", dc->op1, dc->op2);
1524 cris_cc_mask(dc, CC_MASK_NZ);
1526 tcg_gen_shli_tl(cpu_R[dc->op2], cpu_R[dc->op2], dc->op1);
1528 cris_alu(dc, CC_OP_MOVE,
1529 cpu_R[dc->op2],
1530 cpu_R[dc->op2], cpu_R[dc->op2], 4);
1531 return 2;
1533 static int dec_lsrq(DisasContext *dc)
1535 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 4);
1536 LOG_DIS("lsrq %u, $r%d\n", dc->op1, dc->op2);
1538 cris_cc_mask(dc, CC_MASK_NZ);
1540 tcg_gen_shri_tl(cpu_R[dc->op2], cpu_R[dc->op2], dc->op1);
1541 cris_alu(dc, CC_OP_MOVE,
1542 cpu_R[dc->op2],
1543 cpu_R[dc->op2], cpu_R[dc->op2], 4);
1544 return 2;
1547 static int dec_move_r(DisasContext *dc)
1549 int size = memsize_zz(dc);
1551 LOG_DIS("move.%c $r%u, $r%u\n",
1552 memsize_char(size), dc->op1, dc->op2);
1554 cris_cc_mask(dc, CC_MASK_NZ);
1555 if (size == 4) {
1556 dec_prep_move_r(dc, dc->op1, dc->op2, size, 0, cpu_R[dc->op2]);
1557 cris_cc_mask(dc, CC_MASK_NZ);
1558 cris_update_cc_op(dc, CC_OP_MOVE, 4);
1559 cris_update_cc_x(dc);
1560 cris_update_result(dc, cpu_R[dc->op2]);
1562 else {
1563 TCGv t0;
1565 t0 = tcg_temp_new();
1566 dec_prep_move_r(dc, dc->op1, dc->op2, size, 0, t0);
1567 cris_alu(dc, CC_OP_MOVE,
1568 cpu_R[dc->op2],
1569 cpu_R[dc->op2], t0, size);
1570 tcg_temp_free(t0);
1572 return 2;
1575 static int dec_scc_r(DisasContext *dc)
1577 int cond = dc->op2;
1579 LOG_DIS("s%s $r%u\n",
1580 cc_name(cond), dc->op1);
1582 if (cond != CC_A)
1584 int l1;
1586 gen_tst_cc (dc, cpu_R[dc->op1], cond);
1587 l1 = gen_new_label();
1588 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_R[dc->op1], 0, l1);
1589 tcg_gen_movi_tl(cpu_R[dc->op1], 1);
1590 gen_set_label(l1);
1592 else
1593 tcg_gen_movi_tl(cpu_R[dc->op1], 1);
1595 cris_cc_mask(dc, 0);
1596 return 2;
1599 static inline void cris_alu_alloc_temps(DisasContext *dc, int size, TCGv *t)
1601 if (size == 4) {
1602 t[0] = cpu_R[dc->op2];
1603 t[1] = cpu_R[dc->op1];
1604 } else {
1605 t[0] = tcg_temp_new();
1606 t[1] = tcg_temp_new();
1610 static inline void cris_alu_free_temps(DisasContext *dc, int size, TCGv *t)
1612 if (size != 4) {
1613 tcg_temp_free(t[0]);
1614 tcg_temp_free(t[1]);
1618 static int dec_and_r(DisasContext *dc)
1620 TCGv t[2];
1621 int size = memsize_zz(dc);
1623 LOG_DIS("and.%c $r%u, $r%u\n",
1624 memsize_char(size), dc->op1, dc->op2);
1626 cris_cc_mask(dc, CC_MASK_NZ);
1628 cris_alu_alloc_temps(dc, size, t);
1629 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1630 cris_alu(dc, CC_OP_AND, cpu_R[dc->op2], t[0], t[1], size);
1631 cris_alu_free_temps(dc, size, t);
1632 return 2;
1635 static int dec_lz_r(DisasContext *dc)
1637 TCGv t0;
1638 LOG_DIS("lz $r%u, $r%u\n",
1639 dc->op1, dc->op2);
1640 cris_cc_mask(dc, CC_MASK_NZ);
1641 t0 = tcg_temp_new();
1642 dec_prep_alu_r(dc, dc->op1, dc->op2, 4, 0, cpu_R[dc->op2], t0);
1643 cris_alu(dc, CC_OP_LZ, cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
1644 tcg_temp_free(t0);
1645 return 2;
1648 static int dec_lsl_r(DisasContext *dc)
1650 TCGv t[2];
1651 int size = memsize_zz(dc);
1653 LOG_DIS("lsl.%c $r%u, $r%u\n",
1654 memsize_char(size), dc->op1, dc->op2);
1656 cris_cc_mask(dc, CC_MASK_NZ);
1657 cris_alu_alloc_temps(dc, size, t);
1658 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1659 tcg_gen_andi_tl(t[1], t[1], 63);
1660 cris_alu(dc, CC_OP_LSL, cpu_R[dc->op2], t[0], t[1], size);
1661 cris_alu_free_temps(dc, size, t);
1662 return 2;
1665 static int dec_lsr_r(DisasContext *dc)
1667 TCGv t[2];
1668 int size = memsize_zz(dc);
1670 LOG_DIS("lsr.%c $r%u, $r%u\n",
1671 memsize_char(size), dc->op1, dc->op2);
1673 cris_cc_mask(dc, CC_MASK_NZ);
1674 cris_alu_alloc_temps(dc, size, t);
1675 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1676 tcg_gen_andi_tl(t[1], t[1], 63);
1677 cris_alu(dc, CC_OP_LSR, cpu_R[dc->op2], t[0], t[1], size);
1678 cris_alu_free_temps(dc, size, t);
1679 return 2;
1682 static int dec_asr_r(DisasContext *dc)
1684 TCGv t[2];
1685 int size = memsize_zz(dc);
1687 LOG_DIS("asr.%c $r%u, $r%u\n",
1688 memsize_char(size), dc->op1, dc->op2);
1690 cris_cc_mask(dc, CC_MASK_NZ);
1691 cris_alu_alloc_temps(dc, size, t);
1692 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 1, t[0], t[1]);
1693 tcg_gen_andi_tl(t[1], t[1], 63);
1694 cris_alu(dc, CC_OP_ASR, cpu_R[dc->op2], t[0], t[1], size);
1695 cris_alu_free_temps(dc, size, t);
1696 return 2;
1699 static int dec_muls_r(DisasContext *dc)
1701 TCGv t[2];
1702 int size = memsize_zz(dc);
1704 LOG_DIS("muls.%c $r%u, $r%u\n",
1705 memsize_char(size), dc->op1, dc->op2);
1706 cris_cc_mask(dc, CC_MASK_NZV);
1707 cris_alu_alloc_temps(dc, size, t);
1708 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 1, t[0], t[1]);
1710 cris_alu(dc, CC_OP_MULS, cpu_R[dc->op2], t[0], t[1], 4);
1711 cris_alu_free_temps(dc, size, t);
1712 return 2;
1715 static int dec_mulu_r(DisasContext *dc)
1717 TCGv t[2];
1718 int size = memsize_zz(dc);
1720 LOG_DIS("mulu.%c $r%u, $r%u\n",
1721 memsize_char(size), dc->op1, dc->op2);
1722 cris_cc_mask(dc, CC_MASK_NZV);
1723 cris_alu_alloc_temps(dc, size, t);
1724 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1726 cris_alu(dc, CC_OP_MULU, cpu_R[dc->op2], t[0], t[1], 4);
1727 cris_alu_free_temps(dc, size, t);
1728 return 2;
1732 static int dec_dstep_r(DisasContext *dc)
1734 LOG_DIS("dstep $r%u, $r%u\n", dc->op1, dc->op2);
1735 cris_cc_mask(dc, CC_MASK_NZ);
1736 cris_alu(dc, CC_OP_DSTEP,
1737 cpu_R[dc->op2], cpu_R[dc->op2], cpu_R[dc->op1], 4);
1738 return 2;
1741 static int dec_xor_r(DisasContext *dc)
1743 TCGv t[2];
1744 int size = memsize_zz(dc);
1745 LOG_DIS("xor.%c $r%u, $r%u\n",
1746 memsize_char(size), dc->op1, dc->op2);
1747 BUG_ON(size != 4); /* xor is dword. */
1748 cris_cc_mask(dc, CC_MASK_NZ);
1749 cris_alu_alloc_temps(dc, size, t);
1750 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1752 cris_alu(dc, CC_OP_XOR, cpu_R[dc->op2], t[0], t[1], 4);
1753 cris_alu_free_temps(dc, size, t);
1754 return 2;
1757 static int dec_bound_r(DisasContext *dc)
1759 TCGv l0;
1760 int size = memsize_zz(dc);
1761 LOG_DIS("bound.%c $r%u, $r%u\n",
1762 memsize_char(size), dc->op1, dc->op2);
1763 cris_cc_mask(dc, CC_MASK_NZ);
1764 l0 = tcg_temp_local_new();
1765 dec_prep_move_r(dc, dc->op1, dc->op2, size, 0, l0);
1766 cris_alu(dc, CC_OP_BOUND, cpu_R[dc->op2], cpu_R[dc->op2], l0, 4);
1767 tcg_temp_free(l0);
1768 return 2;
1771 static int dec_cmp_r(DisasContext *dc)
1773 TCGv t[2];
1774 int size = memsize_zz(dc);
1775 LOG_DIS("cmp.%c $r%u, $r%u\n",
1776 memsize_char(size), dc->op1, dc->op2);
1777 cris_cc_mask(dc, CC_MASK_NZVC);
1778 cris_alu_alloc_temps(dc, size, t);
1779 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1781 cris_alu(dc, CC_OP_CMP, cpu_R[dc->op2], t[0], t[1], size);
1782 cris_alu_free_temps(dc, size, t);
1783 return 2;
1786 static int dec_abs_r(DisasContext *dc)
1788 TCGv t0;
1790 LOG_DIS("abs $r%u, $r%u\n",
1791 dc->op1, dc->op2);
1792 cris_cc_mask(dc, CC_MASK_NZ);
1794 t0 = tcg_temp_new();
1795 tcg_gen_sari_tl(t0, cpu_R[dc->op1], 31);
1796 tcg_gen_xor_tl(cpu_R[dc->op2], cpu_R[dc->op1], t0);
1797 tcg_gen_sub_tl(cpu_R[dc->op2], cpu_R[dc->op2], t0);
1798 tcg_temp_free(t0);
1800 cris_alu(dc, CC_OP_MOVE,
1801 cpu_R[dc->op2], cpu_R[dc->op2], cpu_R[dc->op2], 4);
1802 return 2;
1805 static int dec_add_r(DisasContext *dc)
1807 TCGv t[2];
1808 int size = memsize_zz(dc);
1809 LOG_DIS("add.%c $r%u, $r%u\n",
1810 memsize_char(size), dc->op1, dc->op2);
1811 cris_cc_mask(dc, CC_MASK_NZVC);
1812 cris_alu_alloc_temps(dc, size, t);
1813 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1815 cris_alu(dc, CC_OP_ADD, cpu_R[dc->op2], t[0], t[1], size);
1816 cris_alu_free_temps(dc, size, t);
1817 return 2;
1820 static int dec_addc_r(DisasContext *dc)
1822 LOG_DIS("addc $r%u, $r%u\n",
1823 dc->op1, dc->op2);
1824 cris_evaluate_flags(dc);
1825 /* Set for this insn. */
1826 dc->flagx_known = 1;
1827 dc->flags_x = X_FLAG;
1829 cris_cc_mask(dc, CC_MASK_NZVC);
1830 cris_alu(dc, CC_OP_ADDC,
1831 cpu_R[dc->op2], cpu_R[dc->op2], cpu_R[dc->op1], 4);
1832 return 2;
1835 static int dec_mcp_r(DisasContext *dc)
1837 LOG_DIS("mcp $p%u, $r%u\n",
1838 dc->op2, dc->op1);
1839 cris_evaluate_flags(dc);
1840 cris_cc_mask(dc, CC_MASK_RNZV);
1841 cris_alu(dc, CC_OP_MCP,
1842 cpu_R[dc->op1], cpu_R[dc->op1], cpu_PR[dc->op2], 4);
1843 return 2;
1846 #if DISAS_CRIS
1847 static char * swapmode_name(int mode, char *modename) {
1848 int i = 0;
1849 if (mode & 8)
1850 modename[i++] = 'n';
1851 if (mode & 4)
1852 modename[i++] = 'w';
1853 if (mode & 2)
1854 modename[i++] = 'b';
1855 if (mode & 1)
1856 modename[i++] = 'r';
1857 modename[i++] = 0;
1858 return modename;
1860 #endif
1862 static int dec_swap_r(DisasContext *dc)
1864 TCGv t0;
1865 #if DISAS_CRIS
1866 char modename[4];
1867 #endif
1868 LOG_DIS("swap%s $r%u\n",
1869 swapmode_name(dc->op2, modename), dc->op1);
1871 cris_cc_mask(dc, CC_MASK_NZ);
1872 t0 = tcg_temp_new();
1873 t_gen_mov_TN_reg(t0, dc->op1);
1874 if (dc->op2 & 8)
1875 tcg_gen_not_tl(t0, t0);
1876 if (dc->op2 & 4)
1877 t_gen_swapw(t0, t0);
1878 if (dc->op2 & 2)
1879 t_gen_swapb(t0, t0);
1880 if (dc->op2 & 1)
1881 t_gen_swapr(t0, t0);
1882 cris_alu(dc, CC_OP_MOVE,
1883 cpu_R[dc->op1], cpu_R[dc->op1], t0, 4);
1884 tcg_temp_free(t0);
1885 return 2;
1888 static int dec_or_r(DisasContext *dc)
1890 TCGv t[2];
1891 int size = memsize_zz(dc);
1892 LOG_DIS("or.%c $r%u, $r%u\n",
1893 memsize_char(size), dc->op1, dc->op2);
1894 cris_cc_mask(dc, CC_MASK_NZ);
1895 cris_alu_alloc_temps(dc, size, t);
1896 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1897 cris_alu(dc, CC_OP_OR, cpu_R[dc->op2], t[0], t[1], size);
1898 cris_alu_free_temps(dc, size, t);
1899 return 2;
1902 static int dec_addi_r(DisasContext *dc)
1904 TCGv t0;
1905 LOG_DIS("addi.%c $r%u, $r%u\n",
1906 memsize_char(memsize_zz(dc)), dc->op2, dc->op1);
1907 cris_cc_mask(dc, 0);
1908 t0 = tcg_temp_new();
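/* Scale the index register by the operand size; zzsize is log2 of the
   access size. */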
1909 tcg_gen_shl_tl(t0, cpu_R[dc->op2], tcg_const_tl(dc->zzsize));
1910 tcg_gen_add_tl(cpu_R[dc->op1], cpu_R[dc->op1], t0);
1911 tcg_temp_free(t0);
1912 return 2;
1915 static int dec_addi_acr(DisasContext *dc)
1917 TCGv t0;
1918 LOG_DIS("addi.%c $r%u, $r%u, $acr\n",
1919 memsize_char(memsize_zz(dc)), dc->op2, dc->op1);
1920 cris_cc_mask(dc, 0);
1921 t0 = tcg_temp_new();
1922 tcg_gen_shl_tl(t0, cpu_R[dc->op2], tcg_const_tl(dc->zzsize));
1923 tcg_gen_add_tl(cpu_R[R_ACR], cpu_R[dc->op1], t0);
1924 tcg_temp_free(t0);
1925 return 2;
1928 static int dec_neg_r(DisasContext *dc)
1930 TCGv t[2];
1931 int size = memsize_zz(dc);
1932 LOG_DIS("neg.%c $r%u, $r%u\n",
1933 memsize_char(size), dc->op1, dc->op2);
1934 cris_cc_mask(dc, CC_MASK_NZVC);
1935 cris_alu_alloc_temps(dc, size, t);
1936 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1938 cris_alu(dc, CC_OP_NEG, cpu_R[dc->op2], t[0], t[1], size);
1939 cris_alu_free_temps(dc, size, t);
1940 return 2;
1943 static int dec_btst_r(DisasContext *dc)
1945 LOG_DIS("btst $r%u, $r%u\n",
1946 dc->op1, dc->op2);
1947 cris_cc_mask(dc, CC_MASK_NZ);
1948 cris_evaluate_flags(dc);
1949 gen_helper_btst(cpu_PR[PR_CCS], cpu_R[dc->op2],
1950 cpu_R[dc->op1], cpu_PR[PR_CCS]);
1951 cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op2],
1952 cpu_R[dc->op2], cpu_R[dc->op2], 4);
1953 cris_update_cc_op(dc, CC_OP_FLAGS, 4);
1954 dc->flags_uptodate = 1;
1955 return 2;
1958 static int dec_sub_r(DisasContext *dc)
1960 TCGv t[2];
1961 int size = memsize_zz(dc);
1962 LOG_DIS("sub.%c $r%u, $r%u\n",
1963 memsize_char(size), dc->op1, dc->op2);
1964 cris_cc_mask(dc, CC_MASK_NZVC);
1965 cris_alu_alloc_temps(dc, size, t);
1966 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1967 cris_alu(dc, CC_OP_SUB, cpu_R[dc->op2], t[0], t[1], size);
1968 cris_alu_free_temps(dc, size, t);
1969 return 2;
1972 /* Zero extension. From size to dword. */
1973 static int dec_movu_r(DisasContext *dc)
1975 TCGv t0;
1976 int size = memsize_z(dc);
1977 LOG_DIS("movu.%c $r%u, $r%u\n",
1978 memsize_char(size),
1979 dc->op1, dc->op2);
1981 cris_cc_mask(dc, CC_MASK_NZ);
1982 t0 = tcg_temp_new();
1983 dec_prep_move_r(dc, dc->op1, dc->op2, size, 0, t0);
1984 cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
1985 tcg_temp_free(t0);
1986 return 2;
1989 /* Sign extension. From size to dword. */
1990 static int dec_movs_r(DisasContext *dc)
1992 TCGv t0;
1993 int size = memsize_z(dc);
1994 LOG_DIS("movs.%c $r%u, $r%u\n",
1995 memsize_char(size),
1996 dc->op1, dc->op2);
1998 cris_cc_mask(dc, CC_MASK_NZ);
1999 t0 = tcg_temp_new();
2000 /* Size can only be qi or hi. */
2001 t_gen_sext(t0, cpu_R[dc->op1], size);
2002 cris_alu(dc, CC_OP_MOVE,
2003 cpu_R[dc->op2], cpu_R[dc->op1], t0, 4);
2004 tcg_temp_free(t0);
2005 return 2;
2008 /* zero extension. From size to dword. */
2009 static int dec_addu_r(DisasContext *dc)
2011 TCGv t0;
2012 int size = memsize_z(dc);
2013 LOG_DIS("addu.%c $r%u, $r%u\n",
2014 memsize_char(size),
2015 dc->op1, dc->op2);
2017 cris_cc_mask(dc, CC_MASK_NZVC);
2018 t0 = tcg_temp_new();
2019 /* Size can only be qi or hi. */
2020 t_gen_zext(t0, cpu_R[dc->op1], size);
2021 cris_alu(dc, CC_OP_ADD,
2022 cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
2023 tcg_temp_free(t0);
2024 return 2;
2027 /* Sign extension. From size to dword. */
2028 static int dec_adds_r(DisasContext *dc)
2030 TCGv t0;
2031 int size = memsize_z(dc);
2032 LOG_DIS("adds.%c $r%u, $r%u\n",
2033 memsize_char(size),
2034 dc->op1, dc->op2);
2036 cris_cc_mask(dc, CC_MASK_NZVC);
2037 t0 = tcg_temp_new();
2038 /* Size can only be qi or hi. */
2039 t_gen_sext(t0, cpu_R[dc->op1], size);
2040 cris_alu(dc, CC_OP_ADD,
2041 cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
2042 tcg_temp_free(t0);
2043 return 2;
2046 /* Zero extension. From size to dword. */
2047 static int dec_subu_r(DisasContext *dc)
2049 TCGv t0;
2050 int size = memsize_z(dc);
2051 LOG_DIS("subu.%c $r%u, $r%u\n",
2052 memsize_char(size),
2053 dc->op1, dc->op2);
2055 cris_cc_mask(dc, CC_MASK_NZVC);
2056 t0 = tcg_temp_new();
2057 /* Size can only be qi or hi. */
2058 t_gen_zext(t0, cpu_R[dc->op1], size);
2059 cris_alu(dc, CC_OP_SUB,
2060 cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
2061 tcg_temp_free(t0);
2062 return 2;
2065 /* Sign extension. From size to dword. */
2066 static int dec_subs_r(DisasContext *dc)
2068 TCGv t0;
2069 int size = memsize_z(dc);
2070 LOG_DIS("subs.%c $r%u, $r%u\n",
2071 memsize_char(size),
2072 dc->op1, dc->op2);
2074 cris_cc_mask(dc, CC_MASK_NZVC);
2075 t0 = tcg_temp_new();
2076 /* Size can only be qi or hi. */
2077 t_gen_sext(t0, cpu_R[dc->op1], size);
2078 cris_alu(dc, CC_OP_SUB,
2079 cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
2080 tcg_temp_free(t0);
2081 return 2;
2084 static int dec_setclrf(DisasContext *dc)
2086 uint32_t flags;
2087 int set = (~dc->opcode >> 2) & 1;
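/* Bit 2 of the opcode is clear for setf and set for clearf. */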
2090 flags = (EXTRACT_FIELD(dc->ir, 12, 15) << 4)
2091 | EXTRACT_FIELD(dc->ir, 0, 3);
2092 if (set && flags == 0) {
2093 LOG_DIS("nop\n");
2094 return 2;
2095 } else if (!set && (flags & 0x20)) {
2096 LOG_DIS("di\n");
2098 else {
2099 LOG_DIS("%sf %x\n",
2100 set ? "set" : "clr",
2101 flags);
2104 /* User space is not allowed to touch these. Silently ignore. */
2105 if (dc->tb_flags & U_FLAG) {
2106 flags &= ~(S_FLAG | I_FLAG | U_FLAG);
2109 if (flags & X_FLAG) {
2110 dc->flagx_known = 1;
2111 if (set)
2112 dc->flags_x = X_FLAG;
2113 else
2114 dc->flags_x = 0;
2117 /* Break the TB if any of the S, P or I flags change. */
2118 if (flags & (P_FLAG | S_FLAG)) {
2119 tcg_gen_movi_tl(env_pc, dc->pc + 2);
2120 dc->is_jmp = DISAS_UPDATE;
2121 dc->cpustate_changed = 1;
2124 /* For the I flag, only act on posedge. */
2125 if ((flags & I_FLAG)) {
2126 tcg_gen_movi_tl(env_pc, dc->pc + 2);
2127 dc->is_jmp = DISAS_UPDATE;
2128 dc->cpustate_changed = 1;
2132 /* Simply decode the flags. */
2133 cris_evaluate_flags (dc);
2134 cris_update_cc_op(dc, CC_OP_FLAGS, 4);
2135 cris_update_cc_x(dc);
2136 tcg_gen_movi_tl(cc_op, dc->cc_op);
2138 if (set) {
2139 if (!(dc->tb_flags & U_FLAG) && (flags & U_FLAG)) {
2140 /* Enter user mode. */
2141 t_gen_mov_env_TN(ksp, cpu_R[R_SP]);
2142 tcg_gen_mov_tl(cpu_R[R_SP], cpu_PR[PR_USP]);
2143 dc->cpustate_changed = 1;
2145 tcg_gen_ori_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], flags);
2147 else
2148 tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~flags);
2150 dc->flags_uptodate = 1;
2151 dc->clear_x = 0;
2152 return 2;
2155 static int dec_move_rs(DisasContext *dc)
2157 LOG_DIS("move $r%u, $s%u\n", dc->op1, dc->op2);
2158 cris_cc_mask(dc, 0);
2159 gen_helper_movl_sreg_reg(tcg_const_tl(dc->op2), tcg_const_tl(dc->op1));
2160 return 2;
2162 static int dec_move_sr(DisasContext *dc)
2164 LOG_DIS("move $s%u, $r%u\n", dc->op2, dc->op1);
2165 cris_cc_mask(dc, 0);
2166 gen_helper_movl_reg_sreg(tcg_const_tl(dc->op1), tcg_const_tl(dc->op2));
2167 return 2;
2170 static int dec_move_rp(DisasContext *dc)
2172 TCGv t[2];
2173 LOG_DIS("move $r%u, $p%u\n", dc->op1, dc->op2);
2174 cris_cc_mask(dc, 0);
2176 t[0] = tcg_temp_new();
2177 if (dc->op2 == PR_CCS) {
2178 cris_evaluate_flags(dc);
2179 t_gen_mov_TN_reg(t[0], dc->op1);
2180 if (dc->tb_flags & U_FLAG) {
2181 t[1] = tcg_temp_new();
2182 /* User space is not allowed to touch all flags. */
2183 tcg_gen_andi_tl(t[0], t[0], 0x39f);
2184 tcg_gen_andi_tl(t[1], cpu_PR[PR_CCS], ~0x39f);
2185 tcg_gen_or_tl(t[0], t[1], t[0]);
2186 tcg_temp_free(t[1]);
2189 else
2190 t_gen_mov_TN_reg(t[0], dc->op1);
2192 t_gen_mov_preg_TN(dc, dc->op2, t[0]);
2193 if (dc->op2 == PR_CCS) {
2194 cris_update_cc_op(dc, CC_OP_FLAGS, 4);
2195 dc->flags_uptodate = 1;
2197 tcg_temp_free(t[0]);
2198 return 2;
2200 static int dec_move_pr(DisasContext *dc)
2202 TCGv t0;
2203 LOG_DIS("move $p%u, $r%u\n", dc->op2, dc->op1);
2204 cris_cc_mask(dc, 0);
2206 if (dc->op2 == PR_CCS)
2207 cris_evaluate_flags(dc);
2209 if (dc->op2 == PR_DZ) {
2210 tcg_gen_movi_tl(cpu_R[dc->op1], 0);
2211 } else {
2212 t0 = tcg_temp_new();
2213 t_gen_mov_TN_preg(t0, dc->op2);
2214 cris_alu(dc, CC_OP_MOVE,
2215 cpu_R[dc->op1], cpu_R[dc->op1], t0,
2216 preg_sizes[dc->op2]);
2217 tcg_temp_free(t0);
2219 return 2;
2222 static int dec_move_mr(DisasContext *dc)
2224 int memsize = memsize_zz(dc);
2225 int insn_len;
2226 LOG_DIS("move.%c [$r%u%s, $r%u\n",
2227 memsize_char(memsize),
2228 dc->op1, dc->postinc ? "+]" : "]",
2229 dc->op2);
2231 if (memsize == 4) {
2232 insn_len = dec_prep_move_m(dc, 0, 4, cpu_R[dc->op2]);
2233 cris_cc_mask(dc, CC_MASK_NZ);
2234 cris_update_cc_op(dc, CC_OP_MOVE, 4);
2235 cris_update_cc_x(dc);
2236 cris_update_result(dc, cpu_R[dc->op2]);
2238 else {
2239 TCGv t0;
2241 t0 = tcg_temp_new();
2242 insn_len = dec_prep_move_m(dc, 0, memsize, t0);
2243 cris_cc_mask(dc, CC_MASK_NZ);
2244 cris_alu(dc, CC_OP_MOVE,
2245 cpu_R[dc->op2], cpu_R[dc->op2], t0, memsize);
2246 tcg_temp_free(t0);
2248 do_postinc(dc, memsize);
2249 return insn_len;
2252 static inline void cris_alu_m_alloc_temps(TCGv *t)
2254 t[0] = tcg_temp_new();
2255 t[1] = tcg_temp_new();
2258 static inline void cris_alu_m_free_temps(TCGv *t)
2260 tcg_temp_free(t[0]);
2261 tcg_temp_free(t[1]);
2264 static int dec_movs_m(DisasContext *dc)
2266 TCGv t[2];
2267 int memsize = memsize_z(dc);
2268 int insn_len;
2269 LOG_DIS("movs.%c [$r%u%s, $r%u\n",
2270 memsize_char(memsize),
2271 dc->op1, dc->postinc ? "+]" : "]",
2272 dc->op2);
2274 cris_alu_m_alloc_temps(t);
2275 /* sign extend. */
2276 insn_len = dec_prep_alu_m(dc, 1, memsize, t[0], t[1]);
2277 cris_cc_mask(dc, CC_MASK_NZ);
2278 cris_alu(dc, CC_OP_MOVE,
2279 cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
2280 do_postinc(dc, memsize);
2281 cris_alu_m_free_temps(t);
2282 return insn_len;
2285 static int dec_addu_m(DisasContext *dc)
2287 TCGv t[2];
2288 int memsize = memsize_z(dc);
2289 int insn_len;
2290 LOG_DIS("addu.%c [$r%u%s, $r%u\n",
2291 memsize_char(memsize),
2292 dc->op1, dc->postinc ? "+]" : "]",
2293 dc->op2);
2295 cris_alu_m_alloc_temps(t);
2296 /* zero extend. */
2297 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2298 cris_cc_mask(dc, CC_MASK_NZVC);
2299 cris_alu(dc, CC_OP_ADD,
2300 cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
2301 do_postinc(dc, memsize);
2302 cris_alu_m_free_temps(t);
2303 return insn_len;
2306 static int dec_adds_m(DisasContext *dc)
2308 TCGv t[2];
2309 int memsize = memsize_z(dc);
2310 int insn_len;
2311 LOG_DIS("adds.%c [$r%u%s, $r%u\n",
2312 memsize_char(memsize),
2313 dc->op1, dc->postinc ? "+]" : "]",
2314 dc->op2);
2316 cris_alu_m_alloc_temps(t);
2317 /* sign extend. */
2318 insn_len = dec_prep_alu_m(dc, 1, memsize, t[0], t[1]);
2319 cris_cc_mask(dc, CC_MASK_NZVC);
2320 cris_alu(dc, CC_OP_ADD, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
2321 do_postinc(dc, memsize);
2322 cris_alu_m_free_temps(t);
2323 return insn_len;
2326 static int dec_subu_m(DisasContext *dc)
2328 TCGv t[2];
2329 int memsize = memsize_z(dc);
2330 int insn_len;
2331 LOG_DIS("subu.%c [$r%u%s, $r%u\n",
2332 memsize_char(memsize),
2333 dc->op1, dc->postinc ? "+]" : "]",
2334 dc->op2);
2336 cris_alu_m_alloc_temps(t);
2337 /* zero extend. */
2338 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2339 cris_cc_mask(dc, CC_MASK_NZVC);
2340 cris_alu(dc, CC_OP_SUB, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
2341 do_postinc(dc, memsize);
2342 cris_alu_m_free_temps(t);
2343 return insn_len;
2346 static int dec_subs_m(DisasContext *dc)
2348 TCGv t[2];
2349 int memsize = memsize_z(dc);
2350 int insn_len;
2351 LOG_DIS("subs.%c [$r%u%s, $r%u\n",
2352 memsize_char(memsize),
2353 dc->op1, dc->postinc ? "+]" : "]",
2354 dc->op2);
2356 cris_alu_m_alloc_temps(t);
2357 /* sign extend. */
2358 insn_len = dec_prep_alu_m(dc, 1, memsize, t[0], t[1]);
2359 cris_cc_mask(dc, CC_MASK_NZVC);
2360 cris_alu(dc, CC_OP_SUB, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
2361 do_postinc(dc, memsize);
2362 cris_alu_m_free_temps(t);
2363 return insn_len;
2366 static int dec_movu_m(DisasContext *dc)
2368 TCGv t[2];
2369 int memsize = memsize_z(dc);
2370 int insn_len;
2372 LOG_DIS("movu.%c [$r%u%s, $r%u\n",
2373 memsize_char(memsize),
2374 dc->op1, dc->postinc ? "+]" : "]",
2375 dc->op2);
2377 cris_alu_m_alloc_temps(t);
2378 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2379 cris_cc_mask(dc, CC_MASK_NZ);
2380 cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
2381 do_postinc(dc, memsize);
2382 cris_alu_m_free_temps(t);
2383 return insn_len;
2386 static int dec_cmpu_m(DisasContext *dc)
2388 TCGv t[2];
2389 int memsize = memsize_z(dc);
2390 int insn_len;
2391 LOG_DIS("cmpu.%c [$r%u%s, $r%u\n",
2392 memsize_char(memsize),
2393 dc->op1, dc->postinc ? "+]" : "]",
2394 dc->op2);
2396 cris_alu_m_alloc_temps(t);
2397 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2398 cris_cc_mask(dc, CC_MASK_NZVC);
2399 cris_alu(dc, CC_OP_CMP, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
2400 do_postinc(dc, memsize);
2401 cris_alu_m_free_temps(t);
2402 return insn_len;
2405 static int dec_cmps_m(DisasContext *dc)
2407 TCGv t[2];
2408 int memsize = memsize_z(dc);
2409 int insn_len;
2410 LOG_DIS("cmps.%c [$r%u%s, $r%u\n",
2411 memsize_char(memsize),
2412 dc->op1, dc->postinc ? "+]" : "]",
2413 dc->op2);
2415 cris_alu_m_alloc_temps(t);
2416 insn_len = dec_prep_alu_m(dc, 1, memsize, t[0], t[1]);
2417 cris_cc_mask(dc, CC_MASK_NZVC);
2418 cris_alu(dc, CC_OP_CMP,
2419 cpu_R[dc->op2], cpu_R[dc->op2], t[1],
2420 memsize_zz(dc));
2421 do_postinc(dc, memsize);
2422 cris_alu_m_free_temps(t);
2423 return insn_len;
2426 static int dec_cmp_m(DisasContext *dc)
2428 TCGv t[2];
2429 int memsize = memsize_zz(dc);
2430 int insn_len;
2431 LOG_DIS("cmp.%c [$r%u%s, $r%u\n",
2432 memsize_char(memsize),
2433 dc->op1, dc->postinc ? "+]" : "]",
2434 dc->op2);
2436 cris_alu_m_alloc_temps(t);
2437 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2438 cris_cc_mask(dc, CC_MASK_NZVC);
2439 cris_alu(dc, CC_OP_CMP,
2440 cpu_R[dc->op2], cpu_R[dc->op2], t[1],
2441 memsize_zz(dc));
2442 do_postinc(dc, memsize);
2443 cris_alu_m_free_temps(t);
2444 return insn_len;
2447 static int dec_test_m(DisasContext *dc)
2449 TCGv t[2];
2450 int memsize = memsize_zz(dc);
2451 int insn_len;
2452 LOG_DIS("test.%c [$r%u%s op2=%x\n",
2453 memsize_char(memsize),
2454 dc->op1, dc->postinc ? "+]" : "]",
2455 dc->op2);
2457 cris_evaluate_flags(dc);
2459 cris_alu_m_alloc_temps(t);
2460 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2461 cris_cc_mask(dc, CC_MASK_NZ);
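/* The test insn leaves V and C cleared; mask off the low two CCS bits
   before the compare against zero below. */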
2462 tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~3);
2464 cris_alu(dc, CC_OP_CMP,
2465 cpu_R[dc->op2], t[1], tcg_const_tl(0), memsize_zz(dc));
2466 do_postinc(dc, memsize);
2467 cris_alu_m_free_temps(t);
2468 return insn_len;
2471 static int dec_and_m(DisasContext *dc)
2473 TCGv t[2];
2474 int memsize = memsize_zz(dc);
2475 int insn_len;
2476 LOG_DIS("and.%c [$r%u%s, $r%u\n",
2477 memsize_char(memsize),
2478 dc->op1, dc->postinc ? "+]" : "]",
2479 dc->op2);
2481 cris_alu_m_alloc_temps(t);
2482 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2483 cris_cc_mask(dc, CC_MASK_NZ);
2484 cris_alu(dc, CC_OP_AND, cpu_R[dc->op2], t[0], t[1], memsize_zz(dc));
2485 do_postinc(dc, memsize);
2486 cris_alu_m_free_temps(t);
2487 return insn_len;
2490 static int dec_add_m(DisasContext *dc)
2492 TCGv t[2];
2493 int memsize = memsize_zz(dc);
2494 int insn_len;
2495 LOG_DIS("add.%c [$r%u%s, $r%u\n",
2496 memsize_char(memsize),
2497 dc->op1, dc->postinc ? "+]" : "]",
2498 dc->op2);
2500 cris_alu_m_alloc_temps(t);
2501 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2502 cris_cc_mask(dc, CC_MASK_NZVC);
2503 cris_alu(dc, CC_OP_ADD,
2504 cpu_R[dc->op2], t[0], t[1], memsize_zz(dc));
2505 do_postinc(dc, memsize);
2506 cris_alu_m_free_temps(t);
2507 return insn_len;
2510 static int dec_addo_m(DisasContext *dc)
2512 TCGv t[2];
2513 int memsize = memsize_zz(dc);
2514 int insn_len;
2515 LOG_DIS("addo.%c [$r%u%s, $r%u\n",
2516 memsize_char(memsize),
2517 dc->op1, dc->postinc ? "+]" : "]",
2518 dc->op2);
2520 cris_alu_m_alloc_temps(t);
2521 insn_len = dec_prep_alu_m(dc, 1, memsize, t[0], t[1]);
2522 cris_cc_mask(dc, 0);
2523 cris_alu(dc, CC_OP_ADD, cpu_R[R_ACR], t[0], t[1], 4);
2524 do_postinc(dc, memsize);
2525 cris_alu_m_free_temps(t);
2526 return insn_len;
2529 static int dec_bound_m(DisasContext *dc)
2531 TCGv l[2];
2532 int memsize = memsize_zz(dc);
2533 int insn_len;
2534 LOG_DIS("bound.%c [$r%u%s, $r%u\n",
2535 memsize_char(memsize),
2536 dc->op1, dc->postinc ? "+]" : "]",
2537 dc->op2);
2539 l[0] = tcg_temp_local_new();
2540 l[1] = tcg_temp_local_new();
2541 insn_len = dec_prep_alu_m(dc, 0, memsize, l[0], l[1]);
2542 cris_cc_mask(dc, CC_MASK_NZ);
2543 cris_alu(dc, CC_OP_BOUND, cpu_R[dc->op2], l[0], l[1], 4);
2544 do_postinc(dc, memsize);
2545 tcg_temp_free(l[0]);
2546 tcg_temp_free(l[1]);
2547 return insn_len;
2550 static int dec_addc_mr(DisasContext *dc)
2552 TCGv t[2];
2553 int insn_len = 2;
2554 LOG_DIS("addc [$r%u%s, $r%u\n",
2555 dc->op1, dc->postinc ? "+]" : "]",
2556 dc->op2);
2558 cris_evaluate_flags(dc);
2560 /* The X flag is known to be set for this insn. */
2561 dc->flagx_known = 1;
2562 dc->flags_x = X_FLAG;
2564 cris_alu_m_alloc_temps(t);
2565 insn_len = dec_prep_alu_m(dc, 0, 4, t[0], t[1]);
2566 cris_cc_mask(dc, CC_MASK_NZVC);
2567 cris_alu(dc, CC_OP_ADDC, cpu_R[dc->op2], t[0], t[1], 4);
2568 do_postinc(dc, 4);
2569 cris_alu_m_free_temps(t);
2570 return insn_len;
2573 static int dec_sub_m(DisasContext *dc)
2575 TCGv t[2];
2576 int memsize = memsize_zz(dc);
2577 int insn_len;
2578 LOG_DIS("sub.%c [$r%u%s, $r%u ir=%x zz=%x\n",
2579 memsize_char(memsize),
2580 dc->op1, dc->postinc ? "+]" : "]",
2581 dc->op2, dc->ir, dc->zzsize);
2583 cris_alu_m_alloc_temps(t);
2584 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2585 cris_cc_mask(dc, CC_MASK_NZVC);
2586 cris_alu(dc, CC_OP_SUB, cpu_R[dc->op2], t[0], t[1], memsize);
2587 do_postinc(dc, memsize);
2588 cris_alu_m_free_temps(t);
2589 return insn_len;
2592 static int dec_or_m(DisasContext *dc)
2594 TCGv t[2];
2595 int memsize = memsize_zz(dc);
2596 int insn_len;
2597 LOG_DIS("or.%c [$r%u%s, $r%u pc=%x\n",
2598 memsize_char(memsize),
2599 dc->op1, dc->postinc ? "+]" : "]",
2600 dc->op2, dc->pc);
2602 cris_alu_m_alloc_temps(t);
2603 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2604 cris_cc_mask(dc, CC_MASK_NZ);
2605 cris_alu(dc, CC_OP_OR,
2606 cpu_R[dc->op2], t[0], t[1], memsize_zz(dc));
2607 do_postinc(dc, memsize);
2608 cris_alu_m_free_temps(t);
2609 return insn_len;
2612 static int dec_move_mp(DisasContext *dc)
2614 TCGv t[2];
2615 int memsize = memsize_zz(dc);
2616 int insn_len = 2;
2618 LOG_DIS("move.%c [$r%u%s, $p%u\n",
2619 memsize_char(memsize),
2620 dc->op1,
2621 dc->postinc ? "+]" : "]",
2622 dc->op2);
2624 cris_alu_m_alloc_temps(t);
2625 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2626 cris_cc_mask(dc, 0);
2627 if (dc->op2 == PR_CCS) {
2628 cris_evaluate_flags(dc);
2629 if (dc->tb_flags & U_FLAG) {
2630 /* User space is not allowed to touch all flags. */
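/* Only the bits in 0x39f may be written from user space; the remaining
   (privileged) CCS bits are kept from the current value. */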
2631 tcg_gen_andi_tl(t[1], t[1], 0x39f);
2632 tcg_gen_andi_tl(t[0], cpu_PR[PR_CCS], ~0x39f);
2633 tcg_gen_or_tl(t[1], t[0], t[1]);
2637 t_gen_mov_preg_TN(dc, dc->op2, t[1]);
2639 do_postinc(dc, memsize);
2640 cris_alu_m_free_temps(t);
2641 return insn_len;
2644 static int dec_move_pm(DisasContext *dc)
2646 TCGv t0;
2647 int memsize;
2649 memsize = preg_sizes[dc->op2];
2651 LOG_DIS("move.%c $p%u, [$r%u%s\n",
2652 memsize_char(memsize),
2653 dc->op2, dc->op1, dc->postinc ? "+]" : "]");
2655 /* Prepare the store: address in $r[op1], special-register value in t0. */
2656 if (dc->op2 == PR_CCS)
2657 cris_evaluate_flags(dc);
2658 t0 = tcg_temp_new();
2659 t_gen_mov_TN_preg(t0, dc->op2);
2660 cris_flush_cc_state(dc);
2661 gen_store(dc, cpu_R[dc->op1], t0, memsize);
2662 tcg_temp_free(t0);
2664 cris_cc_mask(dc, 0);
2665 if (dc->postinc)
2666 tcg_gen_addi_tl(cpu_R[dc->op1], cpu_R[dc->op1], memsize);
2667 return 2;
2670 static int dec_movem_mr(DisasContext *dc)
2672 TCGv_i64 tmp[16];
2673 TCGv tmp32;
2674 TCGv addr;
2675 int i;
2676 int nr = dc->op2 + 1;
2678 LOG_DIS("movem [$r%u%s, $r%u\n", dc->op1,
2679 dc->postinc ? "+]" : "]", dc->op2);
2681 addr = tcg_temp_new();
2682 /* There are probably better ways of doing this. */
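/*
 * Registers $r0..$r[op2] are read back from memory: full pairs are
 * fetched with single 64-bit loads and later split into the even (low
 * word) and odd (high word) register, with one trailing 32-bit load if
 * the register count is odd.
 */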
2683 cris_flush_cc_state(dc);
2684 for (i = 0; i < (nr >> 1); i++) {
2685 tmp[i] = tcg_temp_new_i64();
2686 tcg_gen_addi_tl(addr, cpu_R[dc->op1], i * 8);
2687 gen_load64(dc, tmp[i], addr);
2689 if (nr & 1) {
2690 tmp32 = tcg_temp_new_i32();
2691 tcg_gen_addi_tl(addr, cpu_R[dc->op1], i * 8);
2692 gen_load(dc, tmp32, addr, 4, 0);
2693 } else
2694 TCGV_UNUSED(tmp32);
2695 tcg_temp_free(addr);
2697 for (i = 0; i < (nr >> 1); i++) {
2698 tcg_gen_trunc_i64_i32(cpu_R[i * 2], tmp[i]);
2699 tcg_gen_shri_i64(tmp[i], tmp[i], 32);
2700 tcg_gen_trunc_i64_i32(cpu_R[i * 2 + 1], tmp[i]);
2701 tcg_temp_free_i64(tmp[i]);
2703 if (nr & 1) {
2704 tcg_gen_mov_tl(cpu_R[dc->op2], tmp32);
2705 tcg_temp_free(tmp32);
2708 /* writeback the updated pointer value. */
2709 if (dc->postinc)
2710 tcg_gen_addi_tl(cpu_R[dc->op1], cpu_R[dc->op1], nr * 4);
2712 /* gen_load might want to evaluate the previous insn's flags. */
2713 cris_cc_mask(dc, 0);
2714 return 2;
2717 static int dec_movem_rm(DisasContext *dc)
2719 TCGv tmp;
2720 TCGv addr;
2721 int i;
2723 LOG_DIS("movem $r%u, [$r%u%s\n", dc->op2, dc->op1,
2724 dc->postinc ? "+]" : "]");
2726 cris_flush_cc_state(dc);
2728 tmp = tcg_temp_new();
2729 addr = tcg_temp_new();
2730 tcg_gen_movi_tl(tmp, 4);
2731 tcg_gen_mov_tl(addr, cpu_R[dc->op1]);
2732 for (i = 0; i <= dc->op2; i++) {
2733 /* Perform the store. */
2734 gen_store(dc, addr, cpu_R[i], 4);
2735 /* Displace addr. */
2736 tcg_gen_add_tl(addr, addr, tmp);
2738 if (dc->postinc)
2739 tcg_gen_mov_tl(cpu_R[dc->op1], addr);
2740 cris_cc_mask(dc, 0);
2741 tcg_temp_free(tmp);
2742 tcg_temp_free(addr);
2743 return 2;
2746 static int dec_move_rm(DisasContext *dc)
2748 int memsize;
2750 memsize = memsize_zz(dc);
2752 LOG_DIS("move.%c $r%u, [$r%u]\n",
2753 memsize_char(memsize), dc->op2, dc->op1);
2755 /* prepare store. */
2756 cris_flush_cc_state(dc);
2757 gen_store(dc, cpu_R[dc->op1], cpu_R[dc->op2], memsize);
2759 if (dc->postinc)
2760 tcg_gen_addi_tl(cpu_R[dc->op1], cpu_R[dc->op1], memsize);
2761 cris_cc_mask(dc, 0);
2762 return 2;
2765 static int dec_lapcq(DisasContext *dc)
2767 LOG_DIS("lapcq %x, $r%u\n",
2768 dc->pc + dc->op1*2, dc->op2);
2769 cris_cc_mask(dc, 0);
2770 tcg_gen_movi_tl(cpu_R[dc->op2], dc->pc + dc->op1 * 2);
2771 return 2;
2774 static int dec_lapc_im(DisasContext *dc)
2776 unsigned int rd;
2777 int32_t imm;
2778 int32_t pc;
2780 rd = dc->op2;
2782 cris_cc_mask(dc, 0);
2783 imm = cris_fetch(dc, dc->pc + 2, 4, 0);
2784 LOG_DIS("lapc 0x%x, $r%u\n", imm + dc->pc, dc->op2);
2786 pc = dc->pc;
2787 pc += imm;
2788 tcg_gen_movi_tl(cpu_R[rd], pc);
2789 return 6;
2792 /* Jump to special reg. */
2793 static int dec_jump_p(DisasContext *dc)
2795 LOG_DIS("jump $p%u\n", dc->op2);
2797 if (dc->op2 == PR_CCS)
2798 cris_evaluate_flags(dc);
2799 t_gen_mov_TN_preg(env_btarget, dc->op2);
2800 /* rete will often have the low bit set to indicate a delay slot. */
2801 tcg_gen_andi_tl(env_btarget, env_btarget, ~1);
2802 cris_cc_mask(dc, 0);
2803 cris_prepare_jmp(dc, JMP_INDIRECT);
2804 return 2;
2807 /* Jump and save. */
2808 static int dec_jas_r(DisasContext *dc)
2810 LOG_DIS("jas $r%u, $p%u\n", dc->op1, dc->op2);
2811 cris_cc_mask(dc, 0);
2812 /* Store the return address in Pd. */
2813 tcg_gen_mov_tl(env_btarget, cpu_R[dc->op1]);
2814 if (dc->op2 > 15)
2815 abort();
2816 t_gen_mov_preg_TN(dc, dc->op2, tcg_const_tl(dc->pc + 4));
2818 cris_prepare_jmp(dc, JMP_INDIRECT);
2819 return 2;
2822 static int dec_jas_im(DisasContext *dc)
2824 uint32_t imm;
2826 imm = cris_fetch(dc, dc->pc + 2, 4, 0);
2828 LOG_DIS("jas 0x%x\n", imm);
2829 cris_cc_mask(dc, 0);
2830 /* Store the return address in Pd. */
2831 t_gen_mov_preg_TN(dc, dc->op2, tcg_const_tl(dc->pc + 8));
2833 dc->jmp_pc = imm;
2834 cris_prepare_jmp(dc, JMP_DIRECT);
2835 return 6;
2838 static int dec_jasc_im(DisasContext *dc)
2840 uint32_t imm;
2842 imm = cris_fetch(dc, dc->pc + 2, 4, 0);
2844 LOG_DIS("jasc 0x%x\n", imm);
2845 cris_cc_mask(dc, 0);
2846 /* Store the return address in Pd. */
2847 t_gen_mov_preg_TN(dc, dc->op2, tcg_const_tl(dc->pc + 8 + 4));
2849 dc->jmp_pc = imm;
2850 cris_prepare_jmp(dc, JMP_DIRECT);
2851 return 6;
2854 static int dec_jasc_r(DisasContext *dc)
2856 LOG_DIS("jasc_r $r%u, $p%u\n", dc->op1, dc->op2);
2857 cris_cc_mask(dc, 0);
2858 /* Store the return address in Pd. */
2859 tcg_gen_mov_tl(env_btarget, cpu_R[dc->op1]);
2860 t_gen_mov_preg_TN(dc, dc->op2, tcg_const_tl(dc->pc + 4 + 4));
2861 cris_prepare_jmp(dc, JMP_INDIRECT);
2862 return 2;
2865 static int dec_bcc_im(DisasContext *dc)
2867 int32_t offset;
2868 uint32_t cond = dc->op2;
2870 offset = cris_fetch(dc, dc->pc + 2, 2, 1);
2872 LOG_DIS("b%s %d pc=%x dst=%x\n",
2873 cc_name(cond), offset,
2874 dc->pc, dc->pc + offset);
2876 cris_cc_mask(dc, 0);
2877 /* op2 holds the condition-code. */
2878 cris_prepare_cc_branch (dc, offset, cond);
2879 return 4;
2882 static int dec_bas_im(DisasContext *dc)
2884 int32_t simm;
2887 simm = cris_fetch(dc, dc->pc + 2, 4, 0);
2889 LOG_DIS("bas 0x%x, $p%u\n", dc->pc + simm, dc->op2);
2890 cris_cc_mask(dc, 0);
2891 /* Store the return address in Pd. */
2892 t_gen_mov_preg_TN(dc, dc->op2, tcg_const_tl(dc->pc + 8));
2894 dc->jmp_pc = dc->pc + simm;
2895 cris_prepare_jmp(dc, JMP_DIRECT);
2896 return 6;
2899 static int dec_basc_im(DisasContext *dc)
2901 int32_t simm;
2902 simm = cris_fetch(dc, dc->pc + 2, 4, 0);
2904 LOG_DIS("basc 0x%x, $p%u\n", dc->pc + simm, dc->op2);
2905 cris_cc_mask(dc, 0);
2906 /* Store the return address in Pd. */
2907 t_gen_mov_preg_TN(dc, dc->op2, tcg_const_tl(dc->pc + 12));
2909 dc->jmp_pc = dc->pc + simm;
2910 cris_prepare_jmp(dc, JMP_DIRECT);
2911 return 6;
2914 static int dec_rfe_etc(DisasContext *dc)
2916 cris_cc_mask(dc, 0);
2918 if (dc->op2 == 15) {
2919 t_gen_mov_env_TN(halted, tcg_const_tl(1));
2920 tcg_gen_movi_tl(env_pc, dc->pc + 2);
2921 t_gen_raise_exception(EXCP_HLT);
2922 return 2;
2925 switch (dc->op2 & 7) {
2926 case 2:
2927 /* rfe. */
2928 LOG_DIS("rfe\n");
2929 cris_evaluate_flags(dc);
2930 gen_helper_rfe();
2931 dc->is_jmp = DISAS_UPDATE;
2932 break;
2933 case 5:
2934 /* rfn. */
2935 LOG_DIS("rfn\n");
2936 cris_evaluate_flags(dc);
2937 gen_helper_rfn();
2938 dc->is_jmp = DISAS_UPDATE;
2939 break;
2940 case 6:
2941 LOG_DIS("break %d\n", dc->op1);
2942 cris_evaluate_flags (dc);
2943 /* break. */
2944 tcg_gen_movi_tl(env_pc, dc->pc + 2);
2946 /* Breaks start at 16 in the exception vector. */
2947 t_gen_mov_env_TN(trap_vector,
2948 tcg_const_tl(dc->op1 + 16));
2949 t_gen_raise_exception(EXCP_BREAK);
2950 dc->is_jmp = DISAS_UPDATE;
2951 break;
2952 default:
2953 printf ("op2=%x\n", dc->op2);
2954 BUG();
2955 break;
2958 return 2;
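/* The ftag/fidx (cache maintenance) insns are decoded as two-byte no-ops
   in this translator. */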
2961 static int dec_ftag_fidx_d_m(DisasContext *dc)
2963 return 2;
2966 static int dec_ftag_fidx_i_m(DisasContext *dc)
2968 return 2;
2971 static int dec_null(DisasContext *dc)
2973 printf ("unknown insn pc=%x opc=%x op1=%x op2=%x\n",
2974 dc->pc, dc->opcode, dc->op1, dc->op2);
2975 fflush(NULL);
2976 BUG();
2977 return 2;
2980 static struct decoder_info {
2981 struct {
2982 uint32_t bits;
2983 uint32_t mask;
2985 int (*dec)(DisasContext *dc);
2986 } decinfo[] = {
2987 /* Order matters here. */
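/* crisv32_decoder scans this table linearly and uses the first entry
   whose mask/bits match, so more specific encodings must precede the
   generic ones. */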
2988 {DEC_MOVEQ, dec_moveq},
2989 {DEC_BTSTQ, dec_btstq},
2990 {DEC_CMPQ, dec_cmpq},
2991 {DEC_ADDOQ, dec_addoq},
2992 {DEC_ADDQ, dec_addq},
2993 {DEC_SUBQ, dec_subq},
2994 {DEC_ANDQ, dec_andq},
2995 {DEC_ORQ, dec_orq},
2996 {DEC_ASRQ, dec_asrq},
2997 {DEC_LSLQ, dec_lslq},
2998 {DEC_LSRQ, dec_lsrq},
2999 {DEC_BCCQ, dec_bccq},
3001 {DEC_BCC_IM, dec_bcc_im},
3002 {DEC_JAS_IM, dec_jas_im},
3003 {DEC_JAS_R, dec_jas_r},
3004 {DEC_JASC_IM, dec_jasc_im},
3005 {DEC_JASC_R, dec_jasc_r},
3006 {DEC_BAS_IM, dec_bas_im},
3007 {DEC_BASC_IM, dec_basc_im},
3008 {DEC_JUMP_P, dec_jump_p},
3009 {DEC_LAPC_IM, dec_lapc_im},
3010 {DEC_LAPCQ, dec_lapcq},
3012 {DEC_RFE_ETC, dec_rfe_etc},
3013 {DEC_ADDC_MR, dec_addc_mr},
3015 {DEC_MOVE_MP, dec_move_mp},
3016 {DEC_MOVE_PM, dec_move_pm},
3017 {DEC_MOVEM_MR, dec_movem_mr},
3018 {DEC_MOVEM_RM, dec_movem_rm},
3019 {DEC_MOVE_PR, dec_move_pr},
3020 {DEC_SCC_R, dec_scc_r},
3021 {DEC_SETF, dec_setclrf},
3022 {DEC_CLEARF, dec_setclrf},
3024 {DEC_MOVE_SR, dec_move_sr},
3025 {DEC_MOVE_RP, dec_move_rp},
3026 {DEC_SWAP_R, dec_swap_r},
3027 {DEC_ABS_R, dec_abs_r},
3028 {DEC_LZ_R, dec_lz_r},
3029 {DEC_MOVE_RS, dec_move_rs},
3030 {DEC_BTST_R, dec_btst_r},
3031 {DEC_ADDC_R, dec_addc_r},
3033 {DEC_DSTEP_R, dec_dstep_r},
3034 {DEC_XOR_R, dec_xor_r},
3035 {DEC_MCP_R, dec_mcp_r},
3036 {DEC_CMP_R, dec_cmp_r},
3038 {DEC_ADDI_R, dec_addi_r},
3039 {DEC_ADDI_ACR, dec_addi_acr},
3041 {DEC_ADD_R, dec_add_r},
3042 {DEC_SUB_R, dec_sub_r},
3044 {DEC_ADDU_R, dec_addu_r},
3045 {DEC_ADDS_R, dec_adds_r},
3046 {DEC_SUBU_R, dec_subu_r},
3047 {DEC_SUBS_R, dec_subs_r},
3048 {DEC_LSL_R, dec_lsl_r},
3050 {DEC_AND_R, dec_and_r},
3051 {DEC_OR_R, dec_or_r},
3052 {DEC_BOUND_R, dec_bound_r},
3053 {DEC_ASR_R, dec_asr_r},
3054 {DEC_LSR_R, dec_lsr_r},
3056 {DEC_MOVU_R, dec_movu_r},
3057 {DEC_MOVS_R, dec_movs_r},
3058 {DEC_NEG_R, dec_neg_r},
3059 {DEC_MOVE_R, dec_move_r},
3061 {DEC_FTAG_FIDX_I_M, dec_ftag_fidx_i_m},
3062 {DEC_FTAG_FIDX_D_M, dec_ftag_fidx_d_m},
3064 {DEC_MULS_R, dec_muls_r},
3065 {DEC_MULU_R, dec_mulu_r},
3067 {DEC_ADDU_M, dec_addu_m},
3068 {DEC_ADDS_M, dec_adds_m},
3069 {DEC_SUBU_M, dec_subu_m},
3070 {DEC_SUBS_M, dec_subs_m},
3072 {DEC_CMPU_M, dec_cmpu_m},
3073 {DEC_CMPS_M, dec_cmps_m},
3074 {DEC_MOVU_M, dec_movu_m},
3075 {DEC_MOVS_M, dec_movs_m},
3077 {DEC_CMP_M, dec_cmp_m},
3078 {DEC_ADDO_M, dec_addo_m},
3079 {DEC_BOUND_M, dec_bound_m},
3080 {DEC_ADD_M, dec_add_m},
3081 {DEC_SUB_M, dec_sub_m},
3082 {DEC_AND_M, dec_and_m},
3083 {DEC_OR_M, dec_or_m},
3084 {DEC_MOVE_RM, dec_move_rm},
3085 {DEC_TEST_M, dec_test_m},
3086 {DEC_MOVE_MR, dec_move_mr},
3088 {{0, 0}, dec_null}
3091 static unsigned int crisv32_decoder(DisasContext *dc)
3093 int insn_len = 2;
3094 int i;
3096 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)))
3097 tcg_gen_debug_insn_start(dc->pc);
3099 /* Load a halfword into the instruction register. */
3100 dc->ir = cris_fetch(dc, dc->pc, 2, 0);
3102 /* Now decode it. */
3103 dc->opcode = EXTRACT_FIELD(dc->ir, 4, 11);
3104 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 3);
3105 dc->op2 = EXTRACT_FIELD(dc->ir, 12, 15);
3106 dc->zsize = EXTRACT_FIELD(dc->ir, 4, 4);
3107 dc->zzsize = EXTRACT_FIELD(dc->ir, 4, 5);
3108 dc->postinc = EXTRACT_FIELD(dc->ir, 10, 10);
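/* I.e. for the 16-bit insn word: op2 = ir[15:12], opcode = ir[11:4],
   op1 = ir[3:0]; the size (zz) and post-increment fields overlap the
   opcode bits. */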
3110 /* Large switch for all insns. */
3111 for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
3112 if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits)
3114 insn_len = decinfo[i].dec(dc);
3115 break;
3119 #if !defined(CONFIG_USER_ONLY)
3120 /* Single-stepping ? */
3121 if (dc->tb_flags & S_FLAG) {
3122 int l1;
3124 l1 = gen_new_label();
3125 tcg_gen_brcondi_tl(TCG_COND_NE, cpu_PR[PR_SPC], dc->pc, l1);
3126 /* We treat SPC as a break with an odd trap vector. */
3127 cris_evaluate_flags (dc);
3128 t_gen_mov_env_TN(trap_vector, tcg_const_tl(3));
3129 tcg_gen_movi_tl(env_pc, dc->pc + insn_len);
3130 tcg_gen_movi_tl(cpu_PR[PR_SPC], dc->pc + insn_len);
3131 t_gen_raise_exception(EXCP_BREAK);
3132 gen_set_label(l1);
3134 #endif
3135 return insn_len;
3138 static void check_breakpoint(CPUState *env, DisasContext *dc)
3140 CPUBreakpoint *bp;
3142 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
3143 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
3144 if (bp->pc == dc->pc) {
3145 cris_evaluate_flags (dc);
3146 tcg_gen_movi_tl(env_pc, dc->pc);
3147 t_gen_raise_exception(EXCP_DEBUG);
3148 dc->is_jmp = DISAS_UPDATE;
3154 #include "translate_v10.c"
3157 * Delay slots on QEMU/CRIS.
3159 * If an exception hits on a delayslot, the core will let ERP (the Exception
3160 * Return Pointer) point to the branch (the previous) insn and set the lsb
3161 * to give SW a hint that the exception actually hit on the dslot.
3163 * CRIS expects all PC addresses to be 16-bit aligned. The lsb is ignored by
3164 * the core and any jmp to an odd address will mask off that lsb. It is
3165 * simply there to let sw know there was an exception on a dslot.
3167 * When the software returns from an exception, the branch will re-execute.
3168 * On QEMU care needs to be taken when a branch+delayslot sequence is broken
3169 * and the branch and delayslot don't share pages.
3171 * The TB containing the branch insn will set up env->btarget and evaluate
3172 * env->btaken. When the translation loop exits we will note that the branch
3173 * sequence is broken and let env->dslot be the size of the branch insn (those
3174 * vary in length).
3176 * The TB containing the delay slot will have the PC of its real insn (i.e. no lsb
3177 * set). It will also expect to have env->dslot set up with the size of the
3178 * delay slot so that env->pc - env->dslot points to the branch insn. This TB
3179 * will execute the dslot and take the branch, either to btarget or just one
3180 * insn ahead.
3182 * When exceptions occur, we check for env->dslot in do_interrupt to detect
3183 * broken branch sequences and set up $erp accordingly (i.e. let it point to the
3184 * branch and set lsb). Then env->dslot gets cleared so that the exception
3185 * handler can enter. When returning from exceptions (jump $erp) the lsb gets
3186 * masked off and we will re-execute the branch insn.
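*
* A concrete (illustrative) example: a 4-byte bcc at 0xffc whose delay slot
* insn sits at 0x1000 on the next page. The TB ending with the branch sets
* env->btarget/env->btaken and env->dslot = 4. The next TB starts at
* pc = 0x1000, so env->pc - env->dslot = 0xffc, the branch. If an exception
* hits the delay slot, do_interrupt sets $erp = 0xffc | 1 and clears
* env->dslot; the later "jump $erp" re-executes the branch.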
3190 /* generate intermediate code for basic block 'tb'. */
3191 static void
3192 gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb,
3193 int search_pc)
3195 uint16_t *gen_opc_end;
3196 uint32_t pc_start;
3197 unsigned int insn_len, orig_flags;
3198 int j, lj;
3199 struct DisasContext ctx;
3200 struct DisasContext *dc = &ctx;
3201 uint32_t next_page_start;
3202 target_ulong npc;
3203 int num_insns;
3204 int max_insns;
3206 qemu_log_try_set_file(stderr);
3208 if (env->pregs[PR_VR] == 32)
3209 dc->decoder = crisv32_decoder;
3210 else
3211 dc->decoder = crisv10_decoder;
3213 /* An odd PC indicates that the branch is re-executing due to an exception in the
3214 * delay slot, like in real hw.
3216 pc_start = tb->pc & ~1;
3217 dc->env = env;
3218 dc->tb = tb;
3220 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
3222 dc->is_jmp = DISAS_NEXT;
3223 dc->ppc = pc_start;
3224 dc->pc = pc_start;
3225 dc->singlestep_enabled = env->singlestep_enabled;
3226 dc->flags_uptodate = 1;
3227 dc->flagx_known = 1;
3228 dc->flags_x = tb->flags & X_FLAG;
3229 dc->cc_x_uptodate = 0;
3230 dc->cc_mask = 0;
3231 dc->update_cc = 0;
3232 dc->clear_prefix = 0;
3233 dc->clear_locked_irq = 1;
3235 cris_update_cc_op(dc, CC_OP_FLAGS, 4);
3236 dc->cc_size_uptodate = -1;
3238 /* Decode TB flags. */
3239 orig_flags = dc->tb_flags = tb->flags & (S_FLAG | P_FLAG | U_FLAG \
3240 | X_FLAG | PFIX_FLAG);
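/* The low bits of tb->flags carry env->dslot (the size of a pending
   branch insn); non-zero means this TB starts inside a delay slot and
   the branch must be resolved right after the first insn. */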
3241 dc->delayed_branch = !!(tb->flags & 7);
3242 if (dc->delayed_branch)
3243 dc->jmp = JMP_INDIRECT;
3244 else
3245 dc->jmp = JMP_NOJMP;
3247 dc->cpustate_changed = 0;
3249 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
3250 qemu_log(
3251 "srch=%d pc=%x %x flg=%" PRIx64 " bt=%x ds=%u ccs=%x\n"
3252 "pid=%x usp=%x\n"
3253 "%x.%x.%x.%x\n"
3254 "%x.%x.%x.%x\n"
3255 "%x.%x.%x.%x\n"
3256 "%x.%x.%x.%x\n",
3257 search_pc, dc->pc, dc->ppc,
3258 (uint64_t)tb->flags,
3259 env->btarget, (unsigned)tb->flags & 7,
3260 env->pregs[PR_CCS],
3261 env->pregs[PR_PID], env->pregs[PR_USP],
3262 env->regs[0], env->regs[1], env->regs[2], env->regs[3],
3263 env->regs[4], env->regs[5], env->regs[6], env->regs[7],
3264 env->regs[8], env->regs[9],
3265 env->regs[10], env->regs[11],
3266 env->regs[12], env->regs[13],
3267 env->regs[14], env->regs[15]);
3268 qemu_log("--------------\n");
3269 qemu_log("IN: %s\n", lookup_symbol(pc_start));
3272 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
3273 lj = -1;
3274 num_insns = 0;
3275 max_insns = tb->cflags & CF_COUNT_MASK;
3276 if (max_insns == 0)
3277 max_insns = CF_COUNT_MASK;
3279 gen_icount_start();
3282 check_breakpoint(env, dc);
3284 if (search_pc) {
3285 j = gen_opc_ptr - gen_opc_buf;
3286 if (lj < j) {
3287 lj++;
3288 while (lj < j)
3289 gen_opc_instr_start[lj++] = 0;
3291 if (dc->delayed_branch == 1)
3292 gen_opc_pc[lj] = dc->ppc | 1;
3293 else
3294 gen_opc_pc[lj] = dc->pc;
3295 gen_opc_instr_start[lj] = 1;
3296 gen_opc_icount[lj] = num_insns;
3299 /* Pretty disas. */
3300 LOG_DIS("%8.8x:\t", dc->pc);
3302 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
3303 gen_io_start();
3304 dc->clear_x = 1;
3306 insn_len = dc->decoder(dc);
3307 dc->ppc = dc->pc;
3308 dc->pc += insn_len;
3309 if (dc->clear_x)
3310 cris_clear_x_flag(dc);
3312 num_insns++;
3313 /* Check for delayed branches here. If we do it before
3314 actually generating any host code, the simulator will just
3315 loop doing nothing on this program location. */
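/* dc->delayed_branch counts down once per translated insn; when it
   reaches zero the pending env->dslot is cleared and the TB either falls
   through to a direct goto_tb (JMP_DIRECT) or emits a jump via
   env->btarget. */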
3316 if (dc->delayed_branch) {
3317 dc->delayed_branch--;
3318 if (dc->delayed_branch == 0)
3320 if (tb->flags & 7)
3321 t_gen_mov_env_TN(dslot,
3322 tcg_const_tl(0));
3323 if (dc->jmp == JMP_DIRECT) {
3324 dc->is_jmp = DISAS_NEXT;
3325 } else {
3326 t_gen_cc_jmp(env_btarget,
3327 tcg_const_tl(dc->pc));
3328 dc->is_jmp = DISAS_JUMP;
3330 break;
3334 /* If we are re-executing a branch due to exceptions on
3335 delay slots, don't break. */
3336 if (!(tb->pc & 1) && env->singlestep_enabled)
3337 break;
3338 } while (!dc->is_jmp && !dc->cpustate_changed
3339 && gen_opc_ptr < gen_opc_end
3340 && !singlestep
3341 && (dc->pc < next_page_start)
3342 && num_insns < max_insns);
3344 if (dc->tb_flags != orig_flags) {
3345 dc->cpustate_changed = 1;
3348 if (dc->clear_locked_irq)
3349 t_gen_mov_env_TN(locked_irq, tcg_const_tl(0));
3351 npc = dc->pc;
3352 if (dc->jmp == JMP_DIRECT && !dc->delayed_branch)
3353 npc = dc->jmp_pc;
3355 if (tb->cflags & CF_LAST_IO)
3356 gen_io_end();
3357 /* Force an update if the per-tb cpu state has changed. */
3358 if (dc->is_jmp == DISAS_NEXT
3359 && (dc->cpustate_changed || !dc->flagx_known
3360 || (dc->flags_x != (tb->flags & X_FLAG)))) {
3361 dc->is_jmp = DISAS_UPDATE;
3362 tcg_gen_movi_tl(env_pc, npc);
3364 /* Broken branch+delayslot sequence. */
3365 if (dc->delayed_branch == 1) {
3366 /* Set env->dslot to the size of the branch insn. */
3367 t_gen_mov_env_TN(dslot, tcg_const_tl(dc->pc - dc->ppc));
3368 cris_store_direct_jmp(dc);
3371 cris_evaluate_flags (dc);
3373 if (unlikely(env->singlestep_enabled)) {
3374 if (dc->is_jmp == DISAS_NEXT)
3375 tcg_gen_movi_tl(env_pc, npc);
3376 t_gen_raise_exception(EXCP_DEBUG);
3377 } else {
3378 switch(dc->is_jmp) {
3379 case DISAS_NEXT:
3380 gen_goto_tb(dc, 1, npc);
3381 break;
3382 default:
3383 case DISAS_JUMP:
3384 case DISAS_UPDATE:
3385 /* indicate that the hash table must be used
3386 to find the next TB */
3387 tcg_gen_exit_tb(0);
3388 break;
3389 case DISAS_SWI:
3390 case DISAS_TB_JUMP:
3391 /* nothing more to generate */
3392 break;
3395 gen_icount_end(tb, num_insns);
3396 *gen_opc_ptr = INDEX_op_end;
3397 if (search_pc) {
3398 j = gen_opc_ptr - gen_opc_buf;
3399 lj++;
3400 while (lj <= j)
3401 gen_opc_instr_start[lj++] = 0;
3402 } else {
3403 tb->size = dc->pc - pc_start;
3404 tb->icount = num_insns;
3407 #ifdef DEBUG_DISAS
3408 #if !DISAS_CRIS
3409 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
3410 log_target_disas(pc_start, dc->pc - pc_start,
3411 dc->env->pregs[PR_VR]);
3412 qemu_log("\nisize=%d osize=%td\n",
3413 dc->pc - pc_start, gen_opc_ptr - gen_opc_buf);
3415 #endif
3416 #endif
3419 void gen_intermediate_code (CPUState *env, struct TranslationBlock *tb)
3421 gen_intermediate_code_internal(env, tb, 0);
3424 void gen_intermediate_code_pc (CPUState *env, struct TranslationBlock *tb)
3426 gen_intermediate_code_internal(env, tb, 1);
3429 void cpu_dump_state (CPUState *env, FILE *f,
3430 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
3431 int flags)
3433 int i;
3434 uint32_t srs;
3436 if (!env || !f)
3437 return;
3439 cpu_fprintf(f, "PC=%x CCS=%x btaken=%d btarget=%x\n"
3440 "cc_op=%d cc_src=%d cc_dest=%d cc_result=%x cc_mask=%x\n",
3441 env->pc, env->pregs[PR_CCS], env->btaken, env->btarget,
3442 env->cc_op,
3443 env->cc_src, env->cc_dest, env->cc_result, env->cc_mask);
3446 for (i = 0; i < 16; i++) {
3447 cpu_fprintf(f, "%s=%8.8x ",regnames[i], env->regs[i]);
3448 if ((i + 1) % 4 == 0)
3449 cpu_fprintf(f, "\n");
3451 cpu_fprintf(f, "\nspecial regs:\n");
3452 for (i = 0; i < 16; i++) {
3453 cpu_fprintf(f, "%s=%8.8x ", pregnames[i], env->pregs[i]);
3454 if ((i + 1) % 4 == 0)
3455 cpu_fprintf(f, "\n");
3457 srs = env->pregs[PR_SRS];
3458 cpu_fprintf(f, "\nsupport function regs bank %x:\n", srs);
3459 if (srs < 256) {
3460 for (i = 0; i < 16; i++) {
3461 cpu_fprintf(f, "s%2.2d=%8.8x ",
3462 i, env->sregs[srs][i]);
3463 if ((i + 1) % 4 == 0)
3464 cpu_fprintf(f, "\n");
3467 cpu_fprintf(f, "\n\n");
3471 struct
3473 uint32_t vr;
3474 const char *name;
3475 } cris_cores[] = {
3476 {8, "crisv8"},
3477 {9, "crisv9"},
3478 {10, "crisv10"},
3479 {11, "crisv11"},
3480 {32, "crisv32"},
3483 void cris_cpu_list(FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3485 unsigned int i;
3487 (*cpu_fprintf)(f, "Available CPUs:\n");
3488 for (i = 0; i < ARRAY_SIZE(cris_cores); i++) {
3489 (*cpu_fprintf)(f, " %s\n", cris_cores[i].name);
3493 static uint32_t vr_by_name(const char *name)
3495 unsigned int i;
3496 for (i = 0; i < ARRAY_SIZE(cris_cores); i++) {
3497 if (strcmp(name, cris_cores[i].name) == 0) {
3498 return cris_cores[i].vr;
3501 return 32;
3504 CPUCRISState *cpu_cris_init (const char *cpu_model)
3506 CPUCRISState *env;
3507 static int tcg_initialized = 0;
3508 int i;
3510 env = qemu_mallocz(sizeof(CPUCRISState));
3512 env->pregs[PR_VR] = vr_by_name(cpu_model);
3513 cpu_exec_init(env);
3514 cpu_reset(env);
3515 qemu_init_vcpu(env);
3517 if (tcg_initialized)
3518 return env;
3520 tcg_initialized = 1;
3522 #define GEN_HELPER 2
3523 #include "helper.h"
3525 if (env->pregs[PR_VR] < 32) {
3526 cpu_crisv10_init(env);
3527 return env;
3531 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
3532 cc_x = tcg_global_mem_new(TCG_AREG0,
3533 offsetof(CPUState, cc_x), "cc_x");
3534 cc_src = tcg_global_mem_new(TCG_AREG0,
3535 offsetof(CPUState, cc_src), "cc_src");
3536 cc_dest = tcg_global_mem_new(TCG_AREG0,
3537 offsetof(CPUState, cc_dest),
3538 "cc_dest");
3539 cc_result = tcg_global_mem_new(TCG_AREG0,
3540 offsetof(CPUState, cc_result),
3541 "cc_result");
3542 cc_op = tcg_global_mem_new(TCG_AREG0,
3543 offsetof(CPUState, cc_op), "cc_op");
3544 cc_size = tcg_global_mem_new(TCG_AREG0,
3545 offsetof(CPUState, cc_size),
3546 "cc_size");
3547 cc_mask = tcg_global_mem_new(TCG_AREG0,
3548 offsetof(CPUState, cc_mask),
3549 "cc_mask");
3551 env_pc = tcg_global_mem_new(TCG_AREG0,
3552 offsetof(CPUState, pc),
3553 "pc");
3554 env_btarget = tcg_global_mem_new(TCG_AREG0,
3555 offsetof(CPUState, btarget),
3556 "btarget");
3557 env_btaken = tcg_global_mem_new(TCG_AREG0,
3558 offsetof(CPUState, btaken),
3559 "btaken");
3560 for (i = 0; i < 16; i++) {
3561 cpu_R[i] = tcg_global_mem_new(TCG_AREG0,
3562 offsetof(CPUState, regs[i]),
3563 regnames[i]);
3565 for (i = 0; i < 16; i++) {
3566 cpu_PR[i] = tcg_global_mem_new(TCG_AREG0,
3567 offsetof(CPUState, pregs[i]),
3568 pregnames[i]);
3571 return env;
3574 void cpu_reset (CPUCRISState *env)
3576 uint32_t vr;
3578 if (qemu_loglevel_mask(CPU_LOG_RESET)) {
3579 qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
3580 log_cpu_state(env, 0);
3583 vr = env->pregs[PR_VR];
3584 memset(env, 0, offsetof(CPUCRISState, breakpoints));
3585 env->pregs[PR_VR] = vr;
3586 tlb_flush(env, 1);
3588 #if defined(CONFIG_USER_ONLY)
3589 /* start in user mode with interrupts enabled. */
3590 env->pregs[PR_CCS] |= U_FLAG | I_FLAG | P_FLAG;
3591 #else
3592 cris_mmu_init(env);
3593 env->pregs[PR_CCS] = 0;
3594 #endif
3597 void gen_pc_load(CPUState *env, struct TranslationBlock *tb,
3598 unsigned long searched_pc, int pc_pos, void *puc)
3600 env->pc = gen_opc_pc[pc_pos];