[qemu/aliguori.git] / target-cris / translate.c
1 /*
2 * CRIS emulation for qemu: main translation routines.
4 * Copyright (c) 2008 AXIS Communications AB
5 * Written by Edgar E. Iglesias.
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
22 * FIXME:
23 * The condition code translation is in need of attention.
26 #include <stdarg.h>
27 #include <stdlib.h>
28 #include <stdio.h>
29 #include <string.h>
30 #include <inttypes.h>
32 #include "cpu.h"
33 #include "disas.h"
34 #include "tcg-op.h"
35 #include "helper.h"
36 #include "mmu.h"
37 #include "crisv32-decode.h"
38 #include "qemu-common.h"
40 #define GEN_HELPER 1
41 #include "helper.h"
43 #define DISAS_CRIS 0
44 #if DISAS_CRIS
45 # define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
46 #else
47 # define LOG_DIS(...) do { } while (0)
48 #endif
50 #define D(x)
51 #define BUG() (gen_BUG(dc, __FILE__, __LINE__))
52 #define BUG_ON(x) ({if (x) BUG();})
54 #define DISAS_SWI 5
56 /* Used by the decoder. */
57 #define EXTRACT_FIELD(src, start, end) \
58 (((src) >> start) & ((1 << (end - start + 1)) - 1))
60 #define CC_MASK_NZ 0xc
61 #define CC_MASK_NZV 0xe
62 #define CC_MASK_NZVC 0xf
63 #define CC_MASK_RNZV 0x10e
65 static TCGv_ptr cpu_env;
66 static TCGv cpu_R[16];
67 static TCGv cpu_PR[16];
68 static TCGv cc_x;
69 static TCGv cc_src;
70 static TCGv cc_dest;
71 static TCGv cc_result;
72 static TCGv cc_op;
73 static TCGv cc_size;
74 static TCGv cc_mask;
76 static TCGv env_btaken;
77 static TCGv env_btarget;
78 static TCGv env_pc;
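/*
 * The cc_* globals implement lazy flag evaluation: instead of computing
 * $ccs after every instruction, the translator records the operation
 * (cc_op), its size and operands (cc_size, cc_src, cc_dest) and the
 * result (cc_result), and cris_evaluate_flags() materializes the flags
 * only when they are actually needed.
 */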
80 #include "gen-icount.h"
82 /* This is the state at translation time. */
83 typedef struct DisasContext {
84 CPUState *env;
85 target_ulong pc, ppc;
87 /* Decoder. */
88 unsigned int (*decoder)(struct DisasContext *dc);
89 uint32_t ir;
90 uint32_t opcode;
91 unsigned int op1;
92 unsigned int op2;
93 unsigned int zsize, zzsize;
94 unsigned int mode;
95 unsigned int postinc;
97 unsigned int size;
98 unsigned int src;
99 unsigned int dst;
100 unsigned int cond;
102 int update_cc;
103 int cc_op;
104 int cc_size;
105 uint32_t cc_mask;
 107     int cc_size_uptodate; /* -1 if invalid, otherwise the last written value. */
 109     int cc_x_uptodate; /* 1 - from $ccs, 2 | flags_x - known value. 0 - not up to date. */
 110     int flags_uptodate; /* Whether or not $ccs is up to date. */
 111     int flagx_known; /* Whether or not the X flag is known at
 112                          translation time. */
113 int flags_x;
115 int clear_x; /* Clear x after this insn? */
116 int clear_prefix; /* Clear prefix after this insn? */
117 int clear_locked_irq; /* Clear the irq lockout. */
118 int cpustate_changed;
119 unsigned int tb_flags; /* tb dependent flags. */
120 int is_jmp;
122 #define JMP_NOJMP 0
123 #define JMP_DIRECT 1
124 #define JMP_DIRECT_CC 2
125 #define JMP_INDIRECT 3
 126     int jmp; /* JMP_NOJMP, JMP_DIRECT, JMP_DIRECT_CC or JMP_INDIRECT. */
127 uint32_t jmp_pc;
129 int delayed_branch;
131 struct TranslationBlock *tb;
132 int singlestep_enabled;
133 } DisasContext;
135 static void gen_BUG(DisasContext *dc, const char *file, int line)
137 printf ("BUG: pc=%x %s %d\n", dc->pc, file, line);
138 qemu_log("BUG: pc=%x %s %d\n", dc->pc, file, line);
139 cpu_abort(dc->env, "%s:%d\n", file, line);
142 static const char *regnames[] =
144 "$r0", "$r1", "$r2", "$r3",
145 "$r4", "$r5", "$r6", "$r7",
146 "$r8", "$r9", "$r10", "$r11",
147 "$r12", "$r13", "$sp", "$acr",
149 static const char *pregnames[] =
151 "$bz", "$vr", "$pid", "$srs",
152 "$wz", "$exs", "$eda", "$mof",
153 "$dz", "$ebp", "$erp", "$srp",
154 "$nrp", "$ccs", "$usp", "$spc",
157 /* We need this table to handle preg-moves with implicit width. */
158 static int preg_sizes[] = {
159 1, /* bz. */
160 1, /* vr. */
161 4, /* pid. */
162 1, /* srs. */
163 2, /* wz. */
164 4, 4, 4,
165 4, 4, 4, 4,
166 4, 4, 4, 4,
169 #define t_gen_mov_TN_env(tn, member) \
170 _t_gen_mov_TN_env((tn), offsetof(CPUState, member))
171 #define t_gen_mov_env_TN(member, tn) \
172 _t_gen_mov_env_TN(offsetof(CPUState, member), (tn))
174 static inline void t_gen_mov_TN_reg(TCGv tn, int r)
176 if (r < 0 || r > 15)
177 fprintf(stderr, "wrong register read $r%d\n", r);
178 tcg_gen_mov_tl(tn, cpu_R[r]);
180 static inline void t_gen_mov_reg_TN(int r, TCGv tn)
182 if (r < 0 || r > 15)
183 fprintf(stderr, "wrong register write $r%d\n", r);
184 tcg_gen_mov_tl(cpu_R[r], tn);
187 static inline void _t_gen_mov_TN_env(TCGv tn, int offset)
189 if (offset > sizeof (CPUState))
190 fprintf(stderr, "wrong load from env from off=%d\n", offset);
191 tcg_gen_ld_tl(tn, cpu_env, offset);
193 static inline void _t_gen_mov_env_TN(int offset, TCGv tn)
195 if (offset > sizeof (CPUState))
196 fprintf(stderr, "wrong store to env at off=%d\n", offset);
197 tcg_gen_st_tl(tn, cpu_env, offset);
200 static inline void t_gen_mov_TN_preg(TCGv tn, int r)
202 if (r < 0 || r > 15)
203 fprintf(stderr, "wrong register read $p%d\n", r);
204 if (r == PR_BZ || r == PR_WZ || r == PR_DZ)
205 tcg_gen_mov_tl(tn, tcg_const_tl(0));
206 else if (r == PR_VR)
207 tcg_gen_mov_tl(tn, tcg_const_tl(32));
208 else
209 tcg_gen_mov_tl(tn, cpu_PR[r]);
211 static inline void t_gen_mov_preg_TN(DisasContext *dc, int r, TCGv tn)
213 if (r < 0 || r > 15)
214 fprintf(stderr, "wrong register write $p%d\n", r);
215 if (r == PR_BZ || r == PR_WZ || r == PR_DZ)
216 return;
217 else if (r == PR_SRS)
218 tcg_gen_andi_tl(cpu_PR[r], tn, 3);
219 else {
220 if (r == PR_PID)
221 gen_helper_tlb_flush_pid(tn);
222 if (dc->tb_flags & S_FLAG && r == PR_SPC)
223 gen_helper_spc_write(tn);
224 else if (r == PR_CCS)
225 dc->cpustate_changed = 1;
226 tcg_gen_mov_tl(cpu_PR[r], tn);
230 /* Sign extend at translation time. */
231 static int sign_extend(unsigned int val, unsigned int width)
233 int sval;
235 /* LSL. */
236 val <<= 31 - width;
237 sval = val;
238 /* ASR. */
239 sval >>= 31 - width;
240 return sval;
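/*
 * Note that "width" is the bit position of the sign bit, not a bit
 * count.  For example the 6-bit moveq immediate is extended with
 * sign_extend(imm, 5): sign_extend(0x3f, 5) == -1 while
 * sign_extend(0x1f, 5) == 31.
 */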
243 static int cris_fetch(DisasContext *dc, uint32_t addr,
244 unsigned int size, unsigned int sign)
246 int r;
248 switch (size) {
249 case 4:
251 r = ldl_code(addr);
252 break;
254 case 2:
256 if (sign) {
257 r = ldsw_code(addr);
258 } else {
259 r = lduw_code(addr);
261 break;
263 case 1:
265 if (sign) {
266 r = ldsb_code(addr);
267 } else {
268 r = ldub_code(addr);
270 break;
272 default:
273 cpu_abort(dc->env, "Invalid fetch size %d\n", size);
274 break;
276 return r;
279 static void cris_lock_irq(DisasContext *dc)
281 dc->clear_locked_irq = 0;
282 t_gen_mov_env_TN(locked_irq, tcg_const_tl(1));
285 static inline void t_gen_raise_exception(uint32_t index)
287 TCGv_i32 tmp = tcg_const_i32(index);
288 gen_helper_raise_exception(tmp);
289 tcg_temp_free_i32(tmp);
292 static void t_gen_lsl(TCGv d, TCGv a, TCGv b)
294 TCGv t0, t_31;
296 t0 = tcg_temp_new();
297 t_31 = tcg_const_tl(31);
298 tcg_gen_shl_tl(d, a, b);
300 tcg_gen_sub_tl(t0, t_31, b);
301 tcg_gen_sar_tl(t0, t0, t_31);
302 tcg_gen_and_tl(t0, t0, d);
303 tcg_gen_xor_tl(d, d, t0);
304 tcg_temp_free(t0);
305 tcg_temp_free(t_31);
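/*
 * The sequence above clears the result without a branch when the shift
 * count exceeds 31: (31 - b) arithmetically shifted right by 31 is all
 * ones exactly when b > 31, and the and/xor with that mask zeroes d.
 * t_gen_lsr below uses the same trick for right shifts.
 */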
308 static void t_gen_lsr(TCGv d, TCGv a, TCGv b)
310 TCGv t0, t_31;
312 t0 = tcg_temp_new();
313 t_31 = tcg_temp_new();
314 tcg_gen_shr_tl(d, a, b);
316 tcg_gen_movi_tl(t_31, 31);
317 tcg_gen_sub_tl(t0, t_31, b);
318 tcg_gen_sar_tl(t0, t0, t_31);
319 tcg_gen_and_tl(t0, t0, d);
320 tcg_gen_xor_tl(d, d, t0);
321 tcg_temp_free(t0);
322 tcg_temp_free(t_31);
325 static void t_gen_asr(TCGv d, TCGv a, TCGv b)
327 TCGv t0, t_31;
329 t0 = tcg_temp_new();
330 t_31 = tcg_temp_new();
331 tcg_gen_sar_tl(d, a, b);
333 tcg_gen_movi_tl(t_31, 31);
334 tcg_gen_sub_tl(t0, t_31, b);
335 tcg_gen_sar_tl(t0, t0, t_31);
336 tcg_gen_or_tl(d, d, t0);
337 tcg_temp_free(t0);
338 tcg_temp_free(t_31);
341 /* 64-bit signed mul, lower result in d and upper in d2. */
342 static void t_gen_muls(TCGv d, TCGv d2, TCGv a, TCGv b)
344 TCGv_i64 t0, t1;
346 t0 = tcg_temp_new_i64();
347 t1 = tcg_temp_new_i64();
349 tcg_gen_ext_i32_i64(t0, a);
350 tcg_gen_ext_i32_i64(t1, b);
351 tcg_gen_mul_i64(t0, t0, t1);
353 tcg_gen_trunc_i64_i32(d, t0);
354 tcg_gen_shri_i64(t0, t0, 32);
355 tcg_gen_trunc_i64_i32(d2, t0);
357 tcg_temp_free_i64(t0);
358 tcg_temp_free_i64(t1);
 361 /* 64-bit unsigned mul, lower result in d and upper in d2. */
362 static void t_gen_mulu(TCGv d, TCGv d2, TCGv a, TCGv b)
364 TCGv_i64 t0, t1;
366 t0 = tcg_temp_new_i64();
367 t1 = tcg_temp_new_i64();
369 tcg_gen_extu_i32_i64(t0, a);
370 tcg_gen_extu_i32_i64(t1, b);
371 tcg_gen_mul_i64(t0, t0, t1);
373 tcg_gen_trunc_i64_i32(d, t0);
374 tcg_gen_shri_i64(t0, t0, 32);
375 tcg_gen_trunc_i64_i32(d2, t0);
377 tcg_temp_free_i64(t0);
378 tcg_temp_free_i64(t1);
381 static void t_gen_cris_dstep(TCGv d, TCGv a, TCGv b)
383 int l1;
385 l1 = gen_new_label();
388 * d <<= 1
389 * if (d >= s)
390 * d -= s;
392 tcg_gen_shli_tl(d, a, 1);
393 tcg_gen_brcond_tl(TCG_COND_LTU, d, b, l1);
394 tcg_gen_sub_tl(d, d, b);
395 gen_set_label(l1);
398 static void t_gen_cris_mstep(TCGv d, TCGv a, TCGv b, TCGv ccs)
400 TCGv t;
403 * d <<= 1
404 * if (n)
405 * d += s;
407 t = tcg_temp_new();
408 tcg_gen_shli_tl(d, a, 1);
409 tcg_gen_shli_tl(t, ccs, 31 - 3);
410 tcg_gen_sari_tl(t, t, 31);
411 tcg_gen_and_tl(t, t, b);
412 tcg_gen_add_tl(d, d, t);
413 tcg_temp_free(t);
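/*
 * The N flag lives in bit 3 of $ccs; shifting it up to bit 31 and then
 * arithmetically back down turns it into an all-ones/all-zeroes mask,
 * so the addend b is only folded in when N was set.
 */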
 416 /* Extended arithmetic on CRIS. */
417 static inline void t_gen_add_flag(TCGv d, int flag)
419 TCGv c;
421 c = tcg_temp_new();
422 t_gen_mov_TN_preg(c, PR_CCS);
423 /* Propagate carry into d. */
424 tcg_gen_andi_tl(c, c, 1 << flag);
425 if (flag)
426 tcg_gen_shri_tl(c, c, flag);
427 tcg_gen_add_tl(d, d, c);
428 tcg_temp_free(c);
431 static inline void t_gen_addx_carry(DisasContext *dc, TCGv d)
433 if (dc->flagx_known) {
434 if (dc->flags_x) {
435 TCGv c;
437 c = tcg_temp_new();
438 t_gen_mov_TN_preg(c, PR_CCS);
439 /* C flag is already at bit 0. */
440 tcg_gen_andi_tl(c, c, C_FLAG);
441 tcg_gen_add_tl(d, d, c);
442 tcg_temp_free(c);
444 } else {
445 TCGv x, c;
447 x = tcg_temp_new();
448 c = tcg_temp_new();
449 t_gen_mov_TN_preg(x, PR_CCS);
450 tcg_gen_mov_tl(c, x);
452 /* Propagate carry into d if X is set. Branch free. */
453 tcg_gen_andi_tl(c, c, C_FLAG);
454 tcg_gen_andi_tl(x, x, X_FLAG);
455 tcg_gen_shri_tl(x, x, 4);
457 tcg_gen_and_tl(x, x, c);
458 tcg_gen_add_tl(d, d, x);
459 tcg_temp_free(x);
460 tcg_temp_free(c);
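/*
 * When the X flag is not known at translation time, the carry must be
 * folded in only if both X (bit 4 of $ccs) and C (bit 0) are set: X is
 * shifted down on top of C, the two are ANDed, and the result (0 or 1)
 * is added to d without any branches.
 */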
464 static inline void t_gen_subx_carry(DisasContext *dc, TCGv d)
466 if (dc->flagx_known) {
467 if (dc->flags_x) {
468 TCGv c;
470 c = tcg_temp_new();
471 t_gen_mov_TN_preg(c, PR_CCS);
472 /* C flag is already at bit 0. */
473 tcg_gen_andi_tl(c, c, C_FLAG);
474 tcg_gen_sub_tl(d, d, c);
475 tcg_temp_free(c);
477 } else {
478 TCGv x, c;
480 x = tcg_temp_new();
481 c = tcg_temp_new();
482 t_gen_mov_TN_preg(x, PR_CCS);
483 tcg_gen_mov_tl(c, x);
485 /* Propagate carry into d if X is set. Branch free. */
486 tcg_gen_andi_tl(c, c, C_FLAG);
487 tcg_gen_andi_tl(x, x, X_FLAG);
488 tcg_gen_shri_tl(x, x, 4);
490 tcg_gen_and_tl(x, x, c);
491 tcg_gen_sub_tl(d, d, x);
492 tcg_temp_free(x);
493 tcg_temp_free(c);
497 /* Swap the two bytes within each half word of the s operand.
498 T0 = ((T0 << 8) & 0xff00ff00) | ((T0 >> 8) & 0x00ff00ff) */
499 static inline void t_gen_swapb(TCGv d, TCGv s)
501 TCGv t, org_s;
503 t = tcg_temp_new();
504 org_s = tcg_temp_new();
506 /* d and s may refer to the same object. */
507 tcg_gen_mov_tl(org_s, s);
508 tcg_gen_shli_tl(t, org_s, 8);
509 tcg_gen_andi_tl(d, t, 0xff00ff00);
510 tcg_gen_shri_tl(t, org_s, 8);
511 tcg_gen_andi_tl(t, t, 0x00ff00ff);
512 tcg_gen_or_tl(d, d, t);
513 tcg_temp_free(t);
514 tcg_temp_free(org_s);
517 /* Swap the halfwords of the s operand. */
518 static inline void t_gen_swapw(TCGv d, TCGv s)
520 TCGv t;
 521     /* d and s may refer to the same object. */
522 t = tcg_temp_new();
523 tcg_gen_mov_tl(t, s);
524 tcg_gen_shli_tl(d, t, 16);
525 tcg_gen_shri_tl(t, t, 16);
526 tcg_gen_or_tl(d, d, t);
527 tcg_temp_free(t);
 530 /* Reverse the bits within each byte.
531 T0 = (((T0 << 7) & 0x80808080) |
532 ((T0 << 5) & 0x40404040) |
533 ((T0 << 3) & 0x20202020) |
534 ((T0 << 1) & 0x10101010) |
535 ((T0 >> 1) & 0x08080808) |
536 ((T0 >> 3) & 0x04040404) |
537 ((T0 >> 5) & 0x02020202) |
538 ((T0 >> 7) & 0x01010101));
540 static inline void t_gen_swapr(TCGv d, TCGv s)
542 struct {
543 int shift; /* LSL when positive, LSR when negative. */
544 uint32_t mask;
545 } bitrev [] = {
546 {7, 0x80808080},
547 {5, 0x40404040},
548 {3, 0x20202020},
549 {1, 0x10101010},
550 {-1, 0x08080808},
551 {-3, 0x04040404},
552 {-5, 0x02020202},
553 {-7, 0x01010101}
555 int i;
556 TCGv t, org_s;
 558     /* d and s may refer to the same object. */
559 t = tcg_temp_new();
560 org_s = tcg_temp_new();
561 tcg_gen_mov_tl(org_s, s);
563 tcg_gen_shli_tl(t, org_s, bitrev[0].shift);
564 tcg_gen_andi_tl(d, t, bitrev[0].mask);
565 for (i = 1; i < ARRAY_SIZE(bitrev); i++) {
566 if (bitrev[i].shift >= 0) {
567 tcg_gen_shli_tl(t, org_s, bitrev[i].shift);
568 } else {
569 tcg_gen_shri_tl(t, org_s, -bitrev[i].shift);
571 tcg_gen_andi_tl(t, t, bitrev[i].mask);
572 tcg_gen_or_tl(d, d, t);
574 tcg_temp_free(t);
575 tcg_temp_free(org_s);
578 static void t_gen_cc_jmp(TCGv pc_true, TCGv pc_false)
580 int l1;
582 l1 = gen_new_label();
584 /* Conditional jmp. */
585 tcg_gen_mov_tl(env_pc, pc_false);
586 tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, l1);
587 tcg_gen_mov_tl(env_pc, pc_true);
588 gen_set_label(l1);
591 static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
593 TranslationBlock *tb;
594 tb = dc->tb;
595 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
596 tcg_gen_goto_tb(n);
597 tcg_gen_movi_tl(env_pc, dest);
598 tcg_gen_exit_tb((tcg_target_long)tb + n);
599 } else {
600 tcg_gen_movi_tl(env_pc, dest);
601 tcg_gen_exit_tb(0);
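/*
 * Direct TB chaining (goto_tb followed by exit_tb with the tb pointer)
 * is only used when the destination lies on the same guest page as the
 * start of the current TB; cross page branches just set env_pc and
 * exit with 0 so the target is looked up again.
 */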
605 static inline void cris_clear_x_flag(DisasContext *dc)
607 if (dc->flagx_known && dc->flags_x)
608 dc->flags_uptodate = 0;
610 dc->flagx_known = 1;
611 dc->flags_x = 0;
614 static void cris_flush_cc_state(DisasContext *dc)
616 if (dc->cc_size_uptodate != dc->cc_size) {
617 tcg_gen_movi_tl(cc_size, dc->cc_size);
618 dc->cc_size_uptodate = dc->cc_size;
620 tcg_gen_movi_tl(cc_op, dc->cc_op);
621 tcg_gen_movi_tl(cc_mask, dc->cc_mask);
624 static void cris_evaluate_flags(DisasContext *dc)
626 if (dc->flags_uptodate)
627 return;
629 cris_flush_cc_state(dc);
631 switch (dc->cc_op)
633 case CC_OP_MCP:
634 gen_helper_evaluate_flags_mcp(cpu_PR[PR_CCS],
635 cpu_PR[PR_CCS], cc_src,
636 cc_dest, cc_result);
637 break;
638 case CC_OP_MULS:
639 gen_helper_evaluate_flags_muls(cpu_PR[PR_CCS],
640 cpu_PR[PR_CCS], cc_result,
641 cpu_PR[PR_MOF]);
642 break;
643 case CC_OP_MULU:
644 gen_helper_evaluate_flags_mulu(cpu_PR[PR_CCS],
645 cpu_PR[PR_CCS], cc_result,
646 cpu_PR[PR_MOF]);
647 break;
648 case CC_OP_MOVE:
649 case CC_OP_AND:
650 case CC_OP_OR:
651 case CC_OP_XOR:
652 case CC_OP_ASR:
653 case CC_OP_LSR:
654 case CC_OP_LSL:
655 switch (dc->cc_size)
657 case 4:
658 gen_helper_evaluate_flags_move_4(cpu_PR[PR_CCS],
659 cpu_PR[PR_CCS], cc_result);
660 break;
661 case 2:
662 gen_helper_evaluate_flags_move_2(cpu_PR[PR_CCS],
663 cpu_PR[PR_CCS], cc_result);
664 break;
665 default:
666 gen_helper_evaluate_flags();
667 break;
669 break;
670 case CC_OP_FLAGS:
671 /* live. */
672 break;
673 case CC_OP_SUB:
674 case CC_OP_CMP:
675 if (dc->cc_size == 4)
676 gen_helper_evaluate_flags_sub_4(cpu_PR[PR_CCS],
677 cpu_PR[PR_CCS], cc_src, cc_dest, cc_result);
678 else
679 gen_helper_evaluate_flags();
681 break;
682 default:
683 switch (dc->cc_size)
685 case 4:
686 gen_helper_evaluate_flags_alu_4(cpu_PR[PR_CCS],
687 cpu_PR[PR_CCS], cc_src, cc_dest, cc_result);
688 break;
689 default:
690 gen_helper_evaluate_flags();
691 break;
693 break;
696 if (dc->flagx_known) {
697 if (dc->flags_x)
698 tcg_gen_ori_tl(cpu_PR[PR_CCS],
699 cpu_PR[PR_CCS], X_FLAG);
700 else if (dc->cc_op == CC_OP_FLAGS)
701 tcg_gen_andi_tl(cpu_PR[PR_CCS],
702 cpu_PR[PR_CCS], ~X_FLAG);
704 dc->flags_uptodate = 1;
707 static void cris_cc_mask(DisasContext *dc, unsigned int mask)
709 uint32_t ovl;
711 if (!mask) {
712 dc->update_cc = 0;
713 return;
716 /* Check if we need to evaluate the condition codes due to
717 CC overlaying. */
718 ovl = (dc->cc_mask ^ mask) & ~mask;
719 if (ovl) {
 720         /* TODO: optimize this case. It triggers all the time. */
721 cris_evaluate_flags (dc);
723 dc->cc_mask = mask;
724 dc->update_cc = 1;
727 static void cris_update_cc_op(DisasContext *dc, int op, int size)
729 dc->cc_op = op;
730 dc->cc_size = size;
731 dc->flags_uptodate = 0;
734 static inline void cris_update_cc_x(DisasContext *dc)
736 /* Save the x flag state at the time of the cc snapshot. */
737 if (dc->flagx_known) {
738 if (dc->cc_x_uptodate == (2 | dc->flags_x))
739 return;
740 tcg_gen_movi_tl(cc_x, dc->flags_x);
741 dc->cc_x_uptodate = 2 | dc->flags_x;
743 else {
744 tcg_gen_andi_tl(cc_x, cpu_PR[PR_CCS], X_FLAG);
745 dc->cc_x_uptodate = 1;
749 /* Update cc prior to executing ALU op. Needs source operands untouched. */
750 static void cris_pre_alu_update_cc(DisasContext *dc, int op,
751 TCGv dst, TCGv src, int size)
753 if (dc->update_cc) {
754 cris_update_cc_op(dc, op, size);
755 tcg_gen_mov_tl(cc_src, src);
757 if (op != CC_OP_MOVE
758 && op != CC_OP_AND
759 && op != CC_OP_OR
760 && op != CC_OP_XOR
761 && op != CC_OP_ASR
762 && op != CC_OP_LSR
763 && op != CC_OP_LSL)
764 tcg_gen_mov_tl(cc_dest, dst);
766 cris_update_cc_x(dc);
 770 /* Update cc after executing ALU op. Needs the result. */
771 static inline void cris_update_result(DisasContext *dc, TCGv res)
773 if (dc->update_cc)
774 tcg_gen_mov_tl(cc_result, res);
 777 /* Emit the insns for the ALU op itself; any write back is handled by the caller. */
778 static void cris_alu_op_exec(DisasContext *dc, int op,
779 TCGv dst, TCGv a, TCGv b, int size)
781 /* Emit the ALU insns. */
782 switch (op)
784 case CC_OP_ADD:
785 tcg_gen_add_tl(dst, a, b);
 786         /* Extended arithmetic. */
787 t_gen_addx_carry(dc, dst);
788 break;
789 case CC_OP_ADDC:
790 tcg_gen_add_tl(dst, a, b);
791 t_gen_add_flag(dst, 0); /* C_FLAG. */
792 break;
793 case CC_OP_MCP:
794 tcg_gen_add_tl(dst, a, b);
795 t_gen_add_flag(dst, 8); /* R_FLAG. */
796 break;
797 case CC_OP_SUB:
798 tcg_gen_sub_tl(dst, a, b);
 799         /* Extended arithmetic. */
800 t_gen_subx_carry(dc, dst);
801 break;
802 case CC_OP_MOVE:
803 tcg_gen_mov_tl(dst, b);
804 break;
805 case CC_OP_OR:
806 tcg_gen_or_tl(dst, a, b);
807 break;
808 case CC_OP_AND:
809 tcg_gen_and_tl(dst, a, b);
810 break;
811 case CC_OP_XOR:
812 tcg_gen_xor_tl(dst, a, b);
813 break;
814 case CC_OP_LSL:
815 t_gen_lsl(dst, a, b);
816 break;
817 case CC_OP_LSR:
818 t_gen_lsr(dst, a, b);
819 break;
820 case CC_OP_ASR:
821 t_gen_asr(dst, a, b);
822 break;
823 case CC_OP_NEG:
824 tcg_gen_neg_tl(dst, b);
 825         /* Extended arithmetic. */
826 t_gen_subx_carry(dc, dst);
827 break;
828 case CC_OP_LZ:
829 gen_helper_lz(dst, b);
830 break;
831 case CC_OP_MULS:
832 t_gen_muls(dst, cpu_PR[PR_MOF], a, b);
833 break;
834 case CC_OP_MULU:
835 t_gen_mulu(dst, cpu_PR[PR_MOF], a, b);
836 break;
837 case CC_OP_DSTEP:
838 t_gen_cris_dstep(dst, a, b);
839 break;
840 case CC_OP_MSTEP:
841 t_gen_cris_mstep(dst, a, b, cpu_PR[PR_CCS]);
842 break;
843 case CC_OP_BOUND:
845 int l1;
846 l1 = gen_new_label();
847 tcg_gen_mov_tl(dst, a);
848 tcg_gen_brcond_tl(TCG_COND_LEU, a, b, l1);
849 tcg_gen_mov_tl(dst, b);
850 gen_set_label(l1);
852 break;
853 case CC_OP_CMP:
854 tcg_gen_sub_tl(dst, a, b);
 855         /* Extended arithmetic. */
856 t_gen_subx_carry(dc, dst);
857 break;
858 default:
859 qemu_log("illegal ALU op.\n");
860 BUG();
861 break;
864 if (size == 1)
865 tcg_gen_andi_tl(dst, dst, 0xff);
866 else if (size == 2)
867 tcg_gen_andi_tl(dst, dst, 0xffff);
870 static void cris_alu(DisasContext *dc, int op,
871 TCGv d, TCGv op_a, TCGv op_b, int size)
873 TCGv tmp;
874 int writeback;
876 writeback = 1;
878 if (op == CC_OP_CMP) {
879 tmp = tcg_temp_new();
880 writeback = 0;
881 } else if (size == 4) {
882 tmp = d;
883 writeback = 0;
884 } else
885 tmp = tcg_temp_new();
888 cris_pre_alu_update_cc(dc, op, op_a, op_b, size);
889 cris_alu_op_exec(dc, op, tmp, op_a, op_b, size);
890 cris_update_result(dc, tmp);
892 /* Writeback. */
893 if (writeback) {
894 if (size == 1)
895 tcg_gen_andi_tl(d, d, ~0xff);
896 else
897 tcg_gen_andi_tl(d, d, ~0xffff);
898 tcg_gen_or_tl(d, d, tmp);
900 if (!TCGV_EQUAL(tmp, d))
901 tcg_temp_free(tmp);
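/*
 * For byte and word sized operations the ALU result is computed in a
 * temporary and then merged into the low bits of the destination
 * register, so the upper bits of the register are preserved.  Compares
 * never write back, and dword results are computed directly into the
 * destination.
 */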
904 static int arith_cc(DisasContext *dc)
906 if (dc->update_cc) {
907 switch (dc->cc_op) {
908 case CC_OP_ADDC: return 1;
909 case CC_OP_ADD: return 1;
910 case CC_OP_SUB: return 1;
911 case CC_OP_DSTEP: return 1;
912 case CC_OP_LSL: return 1;
913 case CC_OP_LSR: return 1;
914 case CC_OP_ASR: return 1;
915 case CC_OP_CMP: return 1;
916 case CC_OP_NEG: return 1;
917 case CC_OP_OR: return 1;
918 case CC_OP_AND: return 1;
919 case CC_OP_XOR: return 1;
920 case CC_OP_MULU: return 1;
921 case CC_OP_MULS: return 1;
922 default:
923 return 0;
926 return 0;
929 static void gen_tst_cc (DisasContext *dc, TCGv cc, int cond)
931 int arith_opt, move_opt;
933 /* TODO: optimize more condition codes. */
936 * If the flags are live, we've gotta look into the bits of CCS.
937 * Otherwise, if we just did an arithmetic operation we try to
938 * evaluate the condition code faster.
940 * When this function is done, T0 should be non-zero if the condition
941 * code is true.
943 arith_opt = arith_cc(dc) && !dc->flags_uptodate;
944 move_opt = (dc->cc_op == CC_OP_MOVE);
945 switch (cond) {
946 case CC_EQ:
947 if ((arith_opt || move_opt)
948 && dc->cc_x_uptodate != (2 | X_FLAG)) {
949 tcg_gen_setcond_tl(TCG_COND_EQ, cc,
950 cc_result, tcg_const_tl(0));
952 else {
953 cris_evaluate_flags(dc);
954 tcg_gen_andi_tl(cc,
955 cpu_PR[PR_CCS], Z_FLAG);
957 break;
958 case CC_NE:
959 if ((arith_opt || move_opt)
960 && dc->cc_x_uptodate != (2 | X_FLAG)) {
961 tcg_gen_mov_tl(cc, cc_result);
962 } else {
963 cris_evaluate_flags(dc);
964 tcg_gen_xori_tl(cc, cpu_PR[PR_CCS],
965 Z_FLAG);
966 tcg_gen_andi_tl(cc, cc, Z_FLAG);
968 break;
969 case CC_CS:
970 cris_evaluate_flags(dc);
971 tcg_gen_andi_tl(cc, cpu_PR[PR_CCS], C_FLAG);
972 break;
973 case CC_CC:
974 cris_evaluate_flags(dc);
975 tcg_gen_xori_tl(cc, cpu_PR[PR_CCS], C_FLAG);
976 tcg_gen_andi_tl(cc, cc, C_FLAG);
977 break;
978 case CC_VS:
979 cris_evaluate_flags(dc);
980 tcg_gen_andi_tl(cc, cpu_PR[PR_CCS], V_FLAG);
981 break;
982 case CC_VC:
983 cris_evaluate_flags(dc);
984 tcg_gen_xori_tl(cc, cpu_PR[PR_CCS],
985 V_FLAG);
986 tcg_gen_andi_tl(cc, cc, V_FLAG);
987 break;
988 case CC_PL:
989 if (arith_opt || move_opt) {
990 int bits = 31;
992 if (dc->cc_size == 1)
993 bits = 7;
994 else if (dc->cc_size == 2)
995 bits = 15;
997 tcg_gen_shri_tl(cc, cc_result, bits);
998 tcg_gen_xori_tl(cc, cc, 1);
999 } else {
1000 cris_evaluate_flags(dc);
1001 tcg_gen_xori_tl(cc, cpu_PR[PR_CCS],
1002 N_FLAG);
1003 tcg_gen_andi_tl(cc, cc, N_FLAG);
1005 break;
1006 case CC_MI:
1007 if (arith_opt || move_opt) {
1008 int bits = 31;
1010 if (dc->cc_size == 1)
1011 bits = 7;
1012 else if (dc->cc_size == 2)
1013 bits = 15;
1015 tcg_gen_shri_tl(cc, cc_result, bits);
1016 tcg_gen_andi_tl(cc, cc, 1);
1018 else {
1019 cris_evaluate_flags(dc);
1020 tcg_gen_andi_tl(cc, cpu_PR[PR_CCS],
1021 N_FLAG);
1023 break;
1024 case CC_LS:
1025 cris_evaluate_flags(dc);
1026 tcg_gen_andi_tl(cc, cpu_PR[PR_CCS],
1027 C_FLAG | Z_FLAG);
1028 break;
1029 case CC_HI:
1030 cris_evaluate_flags(dc);
1032 TCGv tmp;
1034 tmp = tcg_temp_new();
1035 tcg_gen_xori_tl(tmp, cpu_PR[PR_CCS],
1036 C_FLAG | Z_FLAG);
1037 /* Overlay the C flag on top of the Z. */
1038 tcg_gen_shli_tl(cc, tmp, 2);
1039 tcg_gen_and_tl(cc, tmp, cc);
1040 tcg_gen_andi_tl(cc, cc, Z_FLAG);
1042 tcg_temp_free(tmp);
1044 break;
1045 case CC_GE:
1046 cris_evaluate_flags(dc);
1047 /* Overlay the V flag on top of the N. */
1048 tcg_gen_shli_tl(cc, cpu_PR[PR_CCS], 2);
1049 tcg_gen_xor_tl(cc,
1050 cpu_PR[PR_CCS], cc);
1051 tcg_gen_andi_tl(cc, cc, N_FLAG);
1052 tcg_gen_xori_tl(cc, cc, N_FLAG);
1053 break;
1054 case CC_LT:
1055 cris_evaluate_flags(dc);
1056 /* Overlay the V flag on top of the N. */
1057 tcg_gen_shli_tl(cc, cpu_PR[PR_CCS], 2);
1058 tcg_gen_xor_tl(cc,
1059 cpu_PR[PR_CCS], cc);
1060 tcg_gen_andi_tl(cc, cc, N_FLAG);
1061 break;
1062 case CC_GT:
1063 cris_evaluate_flags(dc);
1065 TCGv n, z;
1067 n = tcg_temp_new();
1068 z = tcg_temp_new();
1070 /* To avoid a shift we overlay everything on
1071 the V flag. */
1072 tcg_gen_shri_tl(n, cpu_PR[PR_CCS], 2);
1073 tcg_gen_shri_tl(z, cpu_PR[PR_CCS], 1);
1074 /* invert Z. */
1075 tcg_gen_xori_tl(z, z, 2);
1077 tcg_gen_xor_tl(n, n, cpu_PR[PR_CCS]);
1078 tcg_gen_xori_tl(n, n, 2);
1079 tcg_gen_and_tl(cc, z, n);
1080 tcg_gen_andi_tl(cc, cc, 2);
1082 tcg_temp_free(n);
1083 tcg_temp_free(z);
1085 break;
1086 case CC_LE:
1087 cris_evaluate_flags(dc);
1089 TCGv n, z;
1091 n = tcg_temp_new();
1092 z = tcg_temp_new();
1094 /* To avoid a shift we overlay everything on
1095 the V flag. */
1096 tcg_gen_shri_tl(n, cpu_PR[PR_CCS], 2);
1097 tcg_gen_shri_tl(z, cpu_PR[PR_CCS], 1);
1099 tcg_gen_xor_tl(n, n, cpu_PR[PR_CCS]);
1100 tcg_gen_or_tl(cc, z, n);
1101 tcg_gen_andi_tl(cc, cc, 2);
1103 tcg_temp_free(n);
1104 tcg_temp_free(z);
1106 break;
1107 case CC_P:
1108 cris_evaluate_flags(dc);
1109 tcg_gen_andi_tl(cc, cpu_PR[PR_CCS], P_FLAG);
1110 break;
1111 case CC_A:
1112 tcg_gen_movi_tl(cc, 1);
1113 break;
1114 default:
1115 BUG();
1116 break;
1120 static void cris_store_direct_jmp(DisasContext *dc)
1122 /* Store the direct jmp state into the cpu-state. */
1123 if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
1124 if (dc->jmp == JMP_DIRECT) {
1125 tcg_gen_movi_tl(env_btaken, 1);
1127 tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
1128 dc->jmp = JMP_INDIRECT;
1132 static void cris_prepare_cc_branch (DisasContext *dc,
1133 int offset, int cond)
1135 /* This helps us re-schedule the micro-code to insns in delay-slots
1136 before the actual jump. */
1137 dc->delayed_branch = 2;
1138 dc->jmp = JMP_DIRECT_CC;
1139 dc->jmp_pc = dc->pc + offset;
1141 gen_tst_cc (dc, env_btaken, cond);
1142 tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
 1146 /* Prepare a jump. Use JMP_INDIRECT when the dest is in a live reg, for example.
 1147    Direct should be set when the dest addr is constant to allow tb chaining. */
1148 static inline void cris_prepare_jmp (DisasContext *dc, unsigned int type)
1150 /* This helps us re-schedule the micro-code to insns in delay-slots
1151 before the actual jump. */
1152 dc->delayed_branch = 2;
1153 dc->jmp = type;
1154 if (type == JMP_INDIRECT) {
1155 tcg_gen_movi_tl(env_btaken, 1);
1159 static void gen_load64(DisasContext *dc, TCGv_i64 dst, TCGv addr)
1161 int mem_index = cpu_mmu_index(dc->env);
1163 /* If we get a fault on a delayslot we must keep the jmp state in
1164 the cpu-state to be able to re-execute the jmp. */
1165 if (dc->delayed_branch == 1)
1166 cris_store_direct_jmp(dc);
1168 tcg_gen_qemu_ld64(dst, addr, mem_index);
1171 static void gen_load(DisasContext *dc, TCGv dst, TCGv addr,
1172 unsigned int size, int sign)
1174 int mem_index = cpu_mmu_index(dc->env);
1176 /* If we get a fault on a delayslot we must keep the jmp state in
1177 the cpu-state to be able to re-execute the jmp. */
1178 if (dc->delayed_branch == 1)
1179 cris_store_direct_jmp(dc);
1181 if (size == 1) {
1182 if (sign)
1183 tcg_gen_qemu_ld8s(dst, addr, mem_index);
1184 else
1185 tcg_gen_qemu_ld8u(dst, addr, mem_index);
1187 else if (size == 2) {
1188 if (sign)
1189 tcg_gen_qemu_ld16s(dst, addr, mem_index);
1190 else
1191 tcg_gen_qemu_ld16u(dst, addr, mem_index);
1193 else if (size == 4) {
1194 tcg_gen_qemu_ld32u(dst, addr, mem_index);
1196 else {
1197 abort();
1201 static void gen_store (DisasContext *dc, TCGv addr, TCGv val,
1202 unsigned int size)
1204 int mem_index = cpu_mmu_index(dc->env);
1206 /* If we get a fault on a delayslot we must keep the jmp state in
1207 the cpu-state to be able to re-execute the jmp. */
1208 if (dc->delayed_branch == 1)
1209 cris_store_direct_jmp(dc);
 1212     /* Conditional writes. We only support the kind where X and P are known
1213 at translation time. */
1214 if (dc->flagx_known && dc->flags_x && (dc->tb_flags & P_FLAG)) {
1215 dc->postinc = 0;
1216 cris_evaluate_flags(dc);
1217 tcg_gen_ori_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], C_FLAG);
1218 return;
1221 if (size == 1)
1222 tcg_gen_qemu_st8(val, addr, mem_index);
1223 else if (size == 2)
1224 tcg_gen_qemu_st16(val, addr, mem_index);
1225 else
1226 tcg_gen_qemu_st32(val, addr, mem_index);
1228 if (dc->flagx_known && dc->flags_x) {
1229 cris_evaluate_flags(dc);
1230 tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~C_FLAG);
1234 static inline void t_gen_sext(TCGv d, TCGv s, int size)
1236 if (size == 1)
1237 tcg_gen_ext8s_i32(d, s);
1238 else if (size == 2)
1239 tcg_gen_ext16s_i32(d, s);
1240 else if(!TCGV_EQUAL(d, s))
1241 tcg_gen_mov_tl(d, s);
1244 static inline void t_gen_zext(TCGv d, TCGv s, int size)
1246 if (size == 1)
1247 tcg_gen_ext8u_i32(d, s);
1248 else if (size == 2)
1249 tcg_gen_ext16u_i32(d, s);
1250 else if (!TCGV_EQUAL(d, s))
1251 tcg_gen_mov_tl(d, s);
1254 #if DISAS_CRIS
1255 static char memsize_char(int size)
1257 switch (size)
1259 case 1: return 'b'; break;
1260 case 2: return 'w'; break;
1261 case 4: return 'd'; break;
1262 default:
1263 return 'x';
1264 break;
1267 #endif
1269 static inline unsigned int memsize_z(DisasContext *dc)
1271 return dc->zsize + 1;
1274 static inline unsigned int memsize_zz(DisasContext *dc)
1276 switch (dc->zzsize)
1278 case 0: return 1;
1279 case 1: return 2;
1280 default:
1281 return 4;
1285 static inline void do_postinc (DisasContext *dc, int size)
1287 if (dc->postinc)
1288 tcg_gen_addi_tl(cpu_R[dc->op1], cpu_R[dc->op1], size);
1291 static inline void dec_prep_move_r(DisasContext *dc, int rs, int rd,
1292 int size, int s_ext, TCGv dst)
1294 if (s_ext)
1295 t_gen_sext(dst, cpu_R[rs], size);
1296 else
1297 t_gen_zext(dst, cpu_R[rs], size);
1300 /* Prepare T0 and T1 for a register alu operation.
1301 s_ext decides if the operand1 should be sign-extended or zero-extended when
1302 needed. */
1303 static void dec_prep_alu_r(DisasContext *dc, int rs, int rd,
1304 int size, int s_ext, TCGv dst, TCGv src)
1306 dec_prep_move_r(dc, rs, rd, size, s_ext, src);
1308 if (s_ext)
1309 t_gen_sext(dst, cpu_R[rd], size);
1310 else
1311 t_gen_zext(dst, cpu_R[rd], size);
1314 static int dec_prep_move_m(DisasContext *dc, int s_ext, int memsize,
1315 TCGv dst)
1317 unsigned int rs;
1318 uint32_t imm;
1319 int is_imm;
1320 int insn_len = 2;
1322 rs = dc->op1;
1323 is_imm = rs == 15 && dc->postinc;
1325 /* Load [$rs] onto T1. */
1326 if (is_imm) {
1327 insn_len = 2 + memsize;
1328 if (memsize == 1)
1329 insn_len++;
1331 imm = cris_fetch(dc, dc->pc + 2, memsize, s_ext);
1332 tcg_gen_movi_tl(dst, imm);
1333 dc->postinc = 0;
1334 } else {
1335 cris_flush_cc_state(dc);
1336 gen_load(dc, dst, cpu_R[rs], memsize, 0);
1337 if (s_ext)
1338 t_gen_sext(dst, dst, memsize);
1339 else
1340 t_gen_zext(dst, dst, memsize);
1342 return insn_len;
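/*
 * A source register field of 15 with post increment denotes an
 * immediate operand: the value is fetched from the instruction stream
 * at translation time and the post increment is suppressed.  Immediates
 * are halfword aligned, so a byte immediate still occupies two bytes of
 * the instruction.
 */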
1345 /* Prepare T0 and T1 for a memory + alu operation.
1346 s_ext decides if the operand1 should be sign-extended or zero-extended when
1347 needed. */
1348 static int dec_prep_alu_m(DisasContext *dc, int s_ext, int memsize,
1349 TCGv dst, TCGv src)
1351 int insn_len;
1353 insn_len = dec_prep_move_m(dc, s_ext, memsize, src);
1354 tcg_gen_mov_tl(dst, cpu_R[dc->op2]);
1355 return insn_len;
1358 #if DISAS_CRIS
1359 static const char *cc_name(int cc)
1361 static const char *cc_names[16] = {
1362 "cc", "cs", "ne", "eq", "vc", "vs", "pl", "mi",
1363 "ls", "hi", "ge", "lt", "gt", "le", "a", "p"
1365 assert(cc < 16);
1366 return cc_names[cc];
1368 #endif
1370 /* Start of insn decoders. */
1372 static int dec_bccq(DisasContext *dc)
1374 int32_t offset;
1375 int sign;
1376 uint32_t cond = dc->op2;
1378 offset = EXTRACT_FIELD (dc->ir, 1, 7);
1379 sign = EXTRACT_FIELD(dc->ir, 0, 0);
1381 offset *= 2;
1382 offset |= sign << 8;
1383 offset = sign_extend(offset, 8);
1385 LOG_DIS("b%s %x\n", cc_name(cond), dc->pc + offset);
1387 /* op2 holds the condition-code. */
1388 cris_cc_mask(dc, 0);
1389 cris_prepare_cc_branch (dc, offset, cond);
1390 return 2;
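/*
 * Quick branches encode a signed, halfword aligned displacement: bits
 * 1..7 of the insn give the magnitude, bit 0 supplies the sign as bit 8
 * of the offset, giving a reach of roughly -256..+254 bytes from the
 * insn address.
 */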
1392 static int dec_addoq(DisasContext *dc)
1394 int32_t imm;
1396 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 7);
1397 imm = sign_extend(dc->op1, 7);
1399 LOG_DIS("addoq %d, $r%u\n", imm, dc->op2);
1400 cris_cc_mask(dc, 0);
 1401     /* Fetch register operand. */
1402 tcg_gen_addi_tl(cpu_R[R_ACR], cpu_R[dc->op2], imm);
1404 return 2;
1406 static int dec_addq(DisasContext *dc)
1408 LOG_DIS("addq %u, $r%u\n", dc->op1, dc->op2);
1410 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
1412 cris_cc_mask(dc, CC_MASK_NZVC);
1414 cris_alu(dc, CC_OP_ADD,
1415 cpu_R[dc->op2], cpu_R[dc->op2], tcg_const_tl(dc->op1), 4);
1416 return 2;
1418 static int dec_moveq(DisasContext *dc)
1420 uint32_t imm;
1422 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
1423 imm = sign_extend(dc->op1, 5);
1424 LOG_DIS("moveq %d, $r%u\n", imm, dc->op2);
1426 tcg_gen_movi_tl(cpu_R[dc->op2], imm);
1427 return 2;
1429 static int dec_subq(DisasContext *dc)
1431 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
1433 LOG_DIS("subq %u, $r%u\n", dc->op1, dc->op2);
1435 cris_cc_mask(dc, CC_MASK_NZVC);
1436 cris_alu(dc, CC_OP_SUB,
1437 cpu_R[dc->op2], cpu_R[dc->op2], tcg_const_tl(dc->op1), 4);
1438 return 2;
1440 static int dec_cmpq(DisasContext *dc)
1442 uint32_t imm;
1443 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
1444 imm = sign_extend(dc->op1, 5);
1446 LOG_DIS("cmpq %d, $r%d\n", imm, dc->op2);
1447 cris_cc_mask(dc, CC_MASK_NZVC);
1449 cris_alu(dc, CC_OP_CMP,
1450 cpu_R[dc->op2], cpu_R[dc->op2], tcg_const_tl(imm), 4);
1451 return 2;
1453 static int dec_andq(DisasContext *dc)
1455 uint32_t imm;
1456 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
1457 imm = sign_extend(dc->op1, 5);
1459 LOG_DIS("andq %d, $r%d\n", imm, dc->op2);
1460 cris_cc_mask(dc, CC_MASK_NZ);
1462 cris_alu(dc, CC_OP_AND,
1463 cpu_R[dc->op2], cpu_R[dc->op2], tcg_const_tl(imm), 4);
1464 return 2;
1466 static int dec_orq(DisasContext *dc)
1468 uint32_t imm;
1469 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
1470 imm = sign_extend(dc->op1, 5);
1471 LOG_DIS("orq %d, $r%d\n", imm, dc->op2);
1472 cris_cc_mask(dc, CC_MASK_NZ);
1474 cris_alu(dc, CC_OP_OR,
1475 cpu_R[dc->op2], cpu_R[dc->op2], tcg_const_tl(imm), 4);
1476 return 2;
1478 static int dec_btstq(DisasContext *dc)
1480 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 4);
1481 LOG_DIS("btstq %u, $r%d\n", dc->op1, dc->op2);
1483 cris_cc_mask(dc, CC_MASK_NZ);
1484 cris_evaluate_flags(dc);
1485 gen_helper_btst(cpu_PR[PR_CCS], cpu_R[dc->op2],
1486 tcg_const_tl(dc->op1), cpu_PR[PR_CCS]);
1487 cris_alu(dc, CC_OP_MOVE,
1488 cpu_R[dc->op2], cpu_R[dc->op2], cpu_R[dc->op2], 4);
1489 cris_update_cc_op(dc, CC_OP_FLAGS, 4);
1490 dc->flags_uptodate = 1;
1491 return 2;
1493 static int dec_asrq(DisasContext *dc)
1495 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 4);
1496 LOG_DIS("asrq %u, $r%d\n", dc->op1, dc->op2);
1497 cris_cc_mask(dc, CC_MASK_NZ);
1499 tcg_gen_sari_tl(cpu_R[dc->op2], cpu_R[dc->op2], dc->op1);
1500 cris_alu(dc, CC_OP_MOVE,
1501 cpu_R[dc->op2],
1502 cpu_R[dc->op2], cpu_R[dc->op2], 4);
1503 return 2;
1505 static int dec_lslq(DisasContext *dc)
1507 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 4);
1508 LOG_DIS("lslq %u, $r%d\n", dc->op1, dc->op2);
1510 cris_cc_mask(dc, CC_MASK_NZ);
1512 tcg_gen_shli_tl(cpu_R[dc->op2], cpu_R[dc->op2], dc->op1);
1514 cris_alu(dc, CC_OP_MOVE,
1515 cpu_R[dc->op2],
1516 cpu_R[dc->op2], cpu_R[dc->op2], 4);
1517 return 2;
1519 static int dec_lsrq(DisasContext *dc)
1521 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 4);
1522 LOG_DIS("lsrq %u, $r%d\n", dc->op1, dc->op2);
1524 cris_cc_mask(dc, CC_MASK_NZ);
1526 tcg_gen_shri_tl(cpu_R[dc->op2], cpu_R[dc->op2], dc->op1);
1527 cris_alu(dc, CC_OP_MOVE,
1528 cpu_R[dc->op2],
1529 cpu_R[dc->op2], cpu_R[dc->op2], 4);
1530 return 2;
1533 static int dec_move_r(DisasContext *dc)
1535 int size = memsize_zz(dc);
1537 LOG_DIS("move.%c $r%u, $r%u\n",
1538 memsize_char(size), dc->op1, dc->op2);
1540 cris_cc_mask(dc, CC_MASK_NZ);
1541 if (size == 4) {
1542 dec_prep_move_r(dc, dc->op1, dc->op2, size, 0, cpu_R[dc->op2]);
1543 cris_cc_mask(dc, CC_MASK_NZ);
1544 cris_update_cc_op(dc, CC_OP_MOVE, 4);
1545 cris_update_cc_x(dc);
1546 cris_update_result(dc, cpu_R[dc->op2]);
1548 else {
1549 TCGv t0;
1551 t0 = tcg_temp_new();
1552 dec_prep_move_r(dc, dc->op1, dc->op2, size, 0, t0);
1553 cris_alu(dc, CC_OP_MOVE,
1554 cpu_R[dc->op2],
1555 cpu_R[dc->op2], t0, size);
1556 tcg_temp_free(t0);
1558 return 2;
1561 static int dec_scc_r(DisasContext *dc)
1563 int cond = dc->op2;
1565 LOG_DIS("s%s $r%u\n",
1566 cc_name(cond), dc->op1);
1568 if (cond != CC_A)
1570 int l1;
1572 gen_tst_cc (dc, cpu_R[dc->op1], cond);
1573 l1 = gen_new_label();
1574 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_R[dc->op1], 0, l1);
1575 tcg_gen_movi_tl(cpu_R[dc->op1], 1);
1576 gen_set_label(l1);
1578 else
1579 tcg_gen_movi_tl(cpu_R[dc->op1], 1);
1581 cris_cc_mask(dc, 0);
1582 return 2;
1585 static inline void cris_alu_alloc_temps(DisasContext *dc, int size, TCGv *t)
1587 if (size == 4) {
1588 t[0] = cpu_R[dc->op2];
1589 t[1] = cpu_R[dc->op1];
1590 } else {
1591 t[0] = tcg_temp_new();
1592 t[1] = tcg_temp_new();
1596 static inline void cris_alu_free_temps(DisasContext *dc, int size, TCGv *t)
1598 if (size != 4) {
1599 tcg_temp_free(t[0]);
1600 tcg_temp_free(t[1]);
1604 static int dec_and_r(DisasContext *dc)
1606 TCGv t[2];
1607 int size = memsize_zz(dc);
1609 LOG_DIS("and.%c $r%u, $r%u\n",
1610 memsize_char(size), dc->op1, dc->op2);
1612 cris_cc_mask(dc, CC_MASK_NZ);
1614 cris_alu_alloc_temps(dc, size, t);
1615 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1616 cris_alu(dc, CC_OP_AND, cpu_R[dc->op2], t[0], t[1], size);
1617 cris_alu_free_temps(dc, size, t);
1618 return 2;
1621 static int dec_lz_r(DisasContext *dc)
1623 TCGv t0;
1624 LOG_DIS("lz $r%u, $r%u\n",
1625 dc->op1, dc->op2);
1626 cris_cc_mask(dc, CC_MASK_NZ);
1627 t0 = tcg_temp_new();
1628 dec_prep_alu_r(dc, dc->op1, dc->op2, 4, 0, cpu_R[dc->op2], t0);
1629 cris_alu(dc, CC_OP_LZ, cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
1630 tcg_temp_free(t0);
1631 return 2;
1634 static int dec_lsl_r(DisasContext *dc)
1636 TCGv t[2];
1637 int size = memsize_zz(dc);
1639 LOG_DIS("lsl.%c $r%u, $r%u\n",
1640 memsize_char(size), dc->op1, dc->op2);
1642 cris_cc_mask(dc, CC_MASK_NZ);
1643 cris_alu_alloc_temps(dc, size, t);
1644 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1645 tcg_gen_andi_tl(t[1], t[1], 63);
1646 cris_alu(dc, CC_OP_LSL, cpu_R[dc->op2], t[0], t[1], size);
 1647     cris_alu_free_temps(dc, size, t);
1648 return 2;
1651 static int dec_lsr_r(DisasContext *dc)
1653 TCGv t[2];
1654 int size = memsize_zz(dc);
1656 LOG_DIS("lsr.%c $r%u, $r%u\n",
1657 memsize_char(size), dc->op1, dc->op2);
1659 cris_cc_mask(dc, CC_MASK_NZ);
1660 cris_alu_alloc_temps(dc, size, t);
1661 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1662 tcg_gen_andi_tl(t[1], t[1], 63);
1663 cris_alu(dc, CC_OP_LSR, cpu_R[dc->op2], t[0], t[1], size);
1664 cris_alu_free_temps(dc, size, t);
1665 return 2;
1668 static int dec_asr_r(DisasContext *dc)
1670 TCGv t[2];
1671 int size = memsize_zz(dc);
1673 LOG_DIS("asr.%c $r%u, $r%u\n",
1674 memsize_char(size), dc->op1, dc->op2);
1676 cris_cc_mask(dc, CC_MASK_NZ);
1677 cris_alu_alloc_temps(dc, size, t);
1678 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 1, t[0], t[1]);
1679 tcg_gen_andi_tl(t[1], t[1], 63);
1680 cris_alu(dc, CC_OP_ASR, cpu_R[dc->op2], t[0], t[1], size);
1681 cris_alu_free_temps(dc, size, t);
1682 return 2;
1685 static int dec_muls_r(DisasContext *dc)
1687 TCGv t[2];
1688 int size = memsize_zz(dc);
1690 LOG_DIS("muls.%c $r%u, $r%u\n",
1691 memsize_char(size), dc->op1, dc->op2);
1692 cris_cc_mask(dc, CC_MASK_NZV);
1693 cris_alu_alloc_temps(dc, size, t);
1694 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 1, t[0], t[1]);
1696 cris_alu(dc, CC_OP_MULS, cpu_R[dc->op2], t[0], t[1], 4);
1697 cris_alu_free_temps(dc, size, t);
1698 return 2;
1701 static int dec_mulu_r(DisasContext *dc)
1703 TCGv t[2];
1704 int size = memsize_zz(dc);
1706 LOG_DIS("mulu.%c $r%u, $r%u\n",
1707 memsize_char(size), dc->op1, dc->op2);
1708 cris_cc_mask(dc, CC_MASK_NZV);
1709 cris_alu_alloc_temps(dc, size, t);
1710 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1712 cris_alu(dc, CC_OP_MULU, cpu_R[dc->op2], t[0], t[1], 4);
 1713     cris_alu_free_temps(dc, size, t);
1714 return 2;
1718 static int dec_dstep_r(DisasContext *dc)
1720 LOG_DIS("dstep $r%u, $r%u\n", dc->op1, dc->op2);
1721 cris_cc_mask(dc, CC_MASK_NZ);
1722 cris_alu(dc, CC_OP_DSTEP,
1723 cpu_R[dc->op2], cpu_R[dc->op2], cpu_R[dc->op1], 4);
1724 return 2;
1727 static int dec_xor_r(DisasContext *dc)
1729 TCGv t[2];
1730 int size = memsize_zz(dc);
1731 LOG_DIS("xor.%c $r%u, $r%u\n",
1732 memsize_char(size), dc->op1, dc->op2);
1733 BUG_ON(size != 4); /* xor is dword. */
1734 cris_cc_mask(dc, CC_MASK_NZ);
1735 cris_alu_alloc_temps(dc, size, t);
1736 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1738 cris_alu(dc, CC_OP_XOR, cpu_R[dc->op2], t[0], t[1], 4);
1739 cris_alu_free_temps(dc, size, t);
1740 return 2;
1743 static int dec_bound_r(DisasContext *dc)
1745 TCGv l0;
1746 int size = memsize_zz(dc);
1747 LOG_DIS("bound.%c $r%u, $r%u\n",
1748 memsize_char(size), dc->op1, dc->op2);
1749 cris_cc_mask(dc, CC_MASK_NZ);
1750 l0 = tcg_temp_local_new();
1751 dec_prep_move_r(dc, dc->op1, dc->op2, size, 0, l0);
1752 cris_alu(dc, CC_OP_BOUND, cpu_R[dc->op2], cpu_R[dc->op2], l0, 4);
1753 tcg_temp_free(l0);
1754 return 2;
1757 static int dec_cmp_r(DisasContext *dc)
1759 TCGv t[2];
1760 int size = memsize_zz(dc);
1761 LOG_DIS("cmp.%c $r%u, $r%u\n",
1762 memsize_char(size), dc->op1, dc->op2);
1763 cris_cc_mask(dc, CC_MASK_NZVC);
1764 cris_alu_alloc_temps(dc, size, t);
1765 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1767 cris_alu(dc, CC_OP_CMP, cpu_R[dc->op2], t[0], t[1], size);
1768 cris_alu_free_temps(dc, size, t);
1769 return 2;
1772 static int dec_abs_r(DisasContext *dc)
1774 TCGv t0;
1776 LOG_DIS("abs $r%u, $r%u\n",
1777 dc->op1, dc->op2);
1778 cris_cc_mask(dc, CC_MASK_NZ);
1780 t0 = tcg_temp_new();
1781 tcg_gen_sari_tl(t0, cpu_R[dc->op1], 31);
1782 tcg_gen_xor_tl(cpu_R[dc->op2], cpu_R[dc->op1], t0);
1783 tcg_gen_sub_tl(cpu_R[dc->op2], cpu_R[dc->op2], t0);
1784 tcg_temp_free(t0);
1786 cris_alu(dc, CC_OP_MOVE,
1787 cpu_R[dc->op2], cpu_R[dc->op2], cpu_R[dc->op2], 4);
1788 return 2;
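/*
 * Branch free abs: t0 is 0 for non-negative values and all ones for
 * negative ones, so "xor then subtract t0" either leaves the value
 * untouched or computes its two's complement.
 */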
1791 static int dec_add_r(DisasContext *dc)
1793 TCGv t[2];
1794 int size = memsize_zz(dc);
1795 LOG_DIS("add.%c $r%u, $r%u\n",
1796 memsize_char(size), dc->op1, dc->op2);
1797 cris_cc_mask(dc, CC_MASK_NZVC);
1798 cris_alu_alloc_temps(dc, size, t);
1799 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1801 cris_alu(dc, CC_OP_ADD, cpu_R[dc->op2], t[0], t[1], size);
1802 cris_alu_free_temps(dc, size, t);
1803 return 2;
1806 static int dec_addc_r(DisasContext *dc)
1808 LOG_DIS("addc $r%u, $r%u\n",
1809 dc->op1, dc->op2);
1810 cris_evaluate_flags(dc);
1811 /* Set for this insn. */
1812 dc->flagx_known = 1;
1813 dc->flags_x = X_FLAG;
1815 cris_cc_mask(dc, CC_MASK_NZVC);
1816 cris_alu(dc, CC_OP_ADDC,
1817 cpu_R[dc->op2], cpu_R[dc->op2], cpu_R[dc->op1], 4);
1818 return 2;
1821 static int dec_mcp_r(DisasContext *dc)
1823 LOG_DIS("mcp $p%u, $r%u\n",
1824 dc->op2, dc->op1);
1825 cris_evaluate_flags(dc);
1826 cris_cc_mask(dc, CC_MASK_RNZV);
1827 cris_alu(dc, CC_OP_MCP,
1828 cpu_R[dc->op1], cpu_R[dc->op1], cpu_PR[dc->op2], 4);
1829 return 2;
1832 #if DISAS_CRIS
1833 static char * swapmode_name(int mode, char *modename) {
1834 int i = 0;
1835 if (mode & 8)
1836 modename[i++] = 'n';
1837 if (mode & 4)
1838 modename[i++] = 'w';
1839 if (mode & 2)
1840 modename[i++] = 'b';
1841 if (mode & 1)
1842 modename[i++] = 'r';
1843 modename[i++] = 0;
1844 return modename;
1846 #endif
1848 static int dec_swap_r(DisasContext *dc)
1850 TCGv t0;
1851 #if DISAS_CRIS
 1852     char modename[8]; /* Room for "nwbr" plus the terminating NUL. */
1853 #endif
1854 LOG_DIS("swap%s $r%u\n",
1855 swapmode_name(dc->op2, modename), dc->op1);
1857 cris_cc_mask(dc, CC_MASK_NZ);
1858 t0 = tcg_temp_new();
1859 t_gen_mov_TN_reg(t0, dc->op1);
1860 if (dc->op2 & 8)
1861 tcg_gen_not_tl(t0, t0);
1862 if (dc->op2 & 4)
1863 t_gen_swapw(t0, t0);
1864 if (dc->op2 & 2)
1865 t_gen_swapb(t0, t0);
1866 if (dc->op2 & 1)
1867 t_gen_swapr(t0, t0);
1868 cris_alu(dc, CC_OP_MOVE,
1869 cpu_R[dc->op1], cpu_R[dc->op1], t0, 4);
1870 tcg_temp_free(t0);
1871 return 2;
1874 static int dec_or_r(DisasContext *dc)
1876 TCGv t[2];
1877 int size = memsize_zz(dc);
1878 LOG_DIS("or.%c $r%u, $r%u\n",
1879 memsize_char(size), dc->op1, dc->op2);
1880 cris_cc_mask(dc, CC_MASK_NZ);
1881 cris_alu_alloc_temps(dc, size, t);
1882 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1883 cris_alu(dc, CC_OP_OR, cpu_R[dc->op2], t[0], t[1], size);
1884 cris_alu_free_temps(dc, size, t);
1885 return 2;
1888 static int dec_addi_r(DisasContext *dc)
1890 TCGv t0;
1891 LOG_DIS("addi.%c $r%u, $r%u\n",
1892 memsize_char(memsize_zz(dc)), dc->op2, dc->op1);
1893 cris_cc_mask(dc, 0);
1894 t0 = tcg_temp_new();
1895 tcg_gen_shl_tl(t0, cpu_R[dc->op2], tcg_const_tl(dc->zzsize));
1896 tcg_gen_add_tl(cpu_R[dc->op1], cpu_R[dc->op1], t0);
1897 tcg_temp_free(t0);
1898 return 2;
1901 static int dec_addi_acr(DisasContext *dc)
1903 TCGv t0;
1904 LOG_DIS("addi.%c $r%u, $r%u, $acr\n",
1905 memsize_char(memsize_zz(dc)), dc->op2, dc->op1);
1906 cris_cc_mask(dc, 0);
1907 t0 = tcg_temp_new();
1908 tcg_gen_shl_tl(t0, cpu_R[dc->op2], tcg_const_tl(dc->zzsize));
1909 tcg_gen_add_tl(cpu_R[R_ACR], cpu_R[dc->op1], t0);
1910 tcg_temp_free(t0);
1911 return 2;
1914 static int dec_neg_r(DisasContext *dc)
1916 TCGv t[2];
1917 int size = memsize_zz(dc);
1918 LOG_DIS("neg.%c $r%u, $r%u\n",
1919 memsize_char(size), dc->op1, dc->op2);
1920 cris_cc_mask(dc, CC_MASK_NZVC);
1921 cris_alu_alloc_temps(dc, size, t);
1922 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1924 cris_alu(dc, CC_OP_NEG, cpu_R[dc->op2], t[0], t[1], size);
1925 cris_alu_free_temps(dc, size, t);
1926 return 2;
1929 static int dec_btst_r(DisasContext *dc)
1931 LOG_DIS("btst $r%u, $r%u\n",
1932 dc->op1, dc->op2);
1933 cris_cc_mask(dc, CC_MASK_NZ);
1934 cris_evaluate_flags(dc);
1935 gen_helper_btst(cpu_PR[PR_CCS], cpu_R[dc->op2],
1936 cpu_R[dc->op1], cpu_PR[PR_CCS]);
1937 cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op2],
1938 cpu_R[dc->op2], cpu_R[dc->op2], 4);
1939 cris_update_cc_op(dc, CC_OP_FLAGS, 4);
1940 dc->flags_uptodate = 1;
1941 return 2;
1944 static int dec_sub_r(DisasContext *dc)
1946 TCGv t[2];
1947 int size = memsize_zz(dc);
1948 LOG_DIS("sub.%c $r%u, $r%u\n",
1949 memsize_char(size), dc->op1, dc->op2);
1950 cris_cc_mask(dc, CC_MASK_NZVC);
1951 cris_alu_alloc_temps(dc, size, t);
1952 dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
1953 cris_alu(dc, CC_OP_SUB, cpu_R[dc->op2], t[0], t[1], size);
1954 cris_alu_free_temps(dc, size, t);
1955 return 2;
1958 /* Zero extension. From size to dword. */
1959 static int dec_movu_r(DisasContext *dc)
1961 TCGv t0;
1962 int size = memsize_z(dc);
1963 LOG_DIS("movu.%c $r%u, $r%u\n",
1964 memsize_char(size),
1965 dc->op1, dc->op2);
1967 cris_cc_mask(dc, CC_MASK_NZ);
1968 t0 = tcg_temp_new();
1969 dec_prep_move_r(dc, dc->op1, dc->op2, size, 0, t0);
1970 cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
1971 tcg_temp_free(t0);
1972 return 2;
1975 /* Sign extension. From size to dword. */
1976 static int dec_movs_r(DisasContext *dc)
1978 TCGv t0;
1979 int size = memsize_z(dc);
1980 LOG_DIS("movs.%c $r%u, $r%u\n",
1981 memsize_char(size),
1982 dc->op1, dc->op2);
1984 cris_cc_mask(dc, CC_MASK_NZ);
1985 t0 = tcg_temp_new();
1986 /* Size can only be qi or hi. */
1987 t_gen_sext(t0, cpu_R[dc->op1], size);
1988 cris_alu(dc, CC_OP_MOVE,
1989 cpu_R[dc->op2], cpu_R[dc->op1], t0, 4);
1990 tcg_temp_free(t0);
1991 return 2;
1994 /* zero extension. From size to dword. */
1995 static int dec_addu_r(DisasContext *dc)
1997 TCGv t0;
1998 int size = memsize_z(dc);
1999 LOG_DIS("addu.%c $r%u, $r%u\n",
2000 memsize_char(size),
2001 dc->op1, dc->op2);
2003 cris_cc_mask(dc, CC_MASK_NZVC);
2004 t0 = tcg_temp_new();
2005 /* Size can only be qi or hi. */
2006 t_gen_zext(t0, cpu_R[dc->op1], size);
2007 cris_alu(dc, CC_OP_ADD,
2008 cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
2009 tcg_temp_free(t0);
2010 return 2;
2013 /* Sign extension. From size to dword. */
2014 static int dec_adds_r(DisasContext *dc)
2016 TCGv t0;
2017 int size = memsize_z(dc);
2018 LOG_DIS("adds.%c $r%u, $r%u\n",
2019 memsize_char(size),
2020 dc->op1, dc->op2);
2022 cris_cc_mask(dc, CC_MASK_NZVC);
2023 t0 = tcg_temp_new();
2024 /* Size can only be qi or hi. */
2025 t_gen_sext(t0, cpu_R[dc->op1], size);
2026 cris_alu(dc, CC_OP_ADD,
2027 cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
2028 tcg_temp_free(t0);
2029 return 2;
2032 /* Zero extension. From size to dword. */
2033 static int dec_subu_r(DisasContext *dc)
2035 TCGv t0;
2036 int size = memsize_z(dc);
2037 LOG_DIS("subu.%c $r%u, $r%u\n",
2038 memsize_char(size),
2039 dc->op1, dc->op2);
2041 cris_cc_mask(dc, CC_MASK_NZVC);
2042 t0 = tcg_temp_new();
2043 /* Size can only be qi or hi. */
2044 t_gen_zext(t0, cpu_R[dc->op1], size);
2045 cris_alu(dc, CC_OP_SUB,
2046 cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
2047 tcg_temp_free(t0);
2048 return 2;
2051 /* Sign extension. From size to dword. */
2052 static int dec_subs_r(DisasContext *dc)
2054 TCGv t0;
2055 int size = memsize_z(dc);
2056 LOG_DIS("subs.%c $r%u, $r%u\n",
2057 memsize_char(size),
2058 dc->op1, dc->op2);
2060 cris_cc_mask(dc, CC_MASK_NZVC);
2061 t0 = tcg_temp_new();
2062 /* Size can only be qi or hi. */
2063 t_gen_sext(t0, cpu_R[dc->op1], size);
2064 cris_alu(dc, CC_OP_SUB,
2065 cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
2066 tcg_temp_free(t0);
2067 return 2;
2070 static int dec_setclrf(DisasContext *dc)
2072 uint32_t flags;
2073 int set = (~dc->opcode >> 2) & 1;
2076 flags = (EXTRACT_FIELD(dc->ir, 12, 15) << 4)
2077 | EXTRACT_FIELD(dc->ir, 0, 3);
2078 if (set && flags == 0) {
2079 LOG_DIS("nop\n");
2080 return 2;
2081 } else if (!set && (flags & 0x20)) {
2082 LOG_DIS("di\n");
2084 else {
2085 LOG_DIS("%sf %x\n",
2086 set ? "set" : "clr",
2087 flags);
2090 /* User space is not allowed to touch these. Silently ignore. */
2091 if (dc->tb_flags & U_FLAG) {
2092 flags &= ~(S_FLAG | I_FLAG | U_FLAG);
2095 if (flags & X_FLAG) {
2096 dc->flagx_known = 1;
2097 if (set)
2098 dc->flags_x = X_FLAG;
2099 else
2100 dc->flags_x = 0;
 2103     /* Break the TB if any of the SPI flags change. */
2104 if (flags & (P_FLAG | S_FLAG)) {
2105 tcg_gen_movi_tl(env_pc, dc->pc + 2);
2106 dc->is_jmp = DISAS_UPDATE;
2107 dc->cpustate_changed = 1;
2110 /* For the I flag, only act on posedge. */
2111 if ((flags & I_FLAG)) {
2112 tcg_gen_movi_tl(env_pc, dc->pc + 2);
2113 dc->is_jmp = DISAS_UPDATE;
2114 dc->cpustate_changed = 1;
2118 /* Simply decode the flags. */
2119 cris_evaluate_flags (dc);
2120 cris_update_cc_op(dc, CC_OP_FLAGS, 4);
2121 cris_update_cc_x(dc);
2122 tcg_gen_movi_tl(cc_op, dc->cc_op);
2124 if (set) {
2125 if (!(dc->tb_flags & U_FLAG) && (flags & U_FLAG)) {
2126 /* Enter user mode. */
2127 t_gen_mov_env_TN(ksp, cpu_R[R_SP]);
2128 tcg_gen_mov_tl(cpu_R[R_SP], cpu_PR[PR_USP]);
2129 dc->cpustate_changed = 1;
2131 tcg_gen_ori_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], flags);
2133 else
2134 tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~flags);
2136 dc->flags_uptodate = 1;
2137 dc->clear_x = 0;
2138 return 2;
2141 static int dec_move_rs(DisasContext *dc)
2143 LOG_DIS("move $r%u, $s%u\n", dc->op1, dc->op2);
2144 cris_cc_mask(dc, 0);
2145 gen_helper_movl_sreg_reg(tcg_const_tl(dc->op2), tcg_const_tl(dc->op1));
2146 return 2;
2148 static int dec_move_sr(DisasContext *dc)
2150 LOG_DIS("move $s%u, $r%u\n", dc->op2, dc->op1);
2151 cris_cc_mask(dc, 0);
2152 gen_helper_movl_reg_sreg(tcg_const_tl(dc->op1), tcg_const_tl(dc->op2));
2153 return 2;
2156 static int dec_move_rp(DisasContext *dc)
2158 TCGv t[2];
2159 LOG_DIS("move $r%u, $p%u\n", dc->op1, dc->op2);
2160 cris_cc_mask(dc, 0);
2162 t[0] = tcg_temp_new();
2163 if (dc->op2 == PR_CCS) {
2164 cris_evaluate_flags(dc);
2165 t_gen_mov_TN_reg(t[0], dc->op1);
2166 if (dc->tb_flags & U_FLAG) {
2167 t[1] = tcg_temp_new();
2168 /* User space is not allowed to touch all flags. */
2169 tcg_gen_andi_tl(t[0], t[0], 0x39f);
2170 tcg_gen_andi_tl(t[1], cpu_PR[PR_CCS], ~0x39f);
2171 tcg_gen_or_tl(t[0], t[1], t[0]);
2172 tcg_temp_free(t[1]);
2175 else
2176 t_gen_mov_TN_reg(t[0], dc->op1);
2178 t_gen_mov_preg_TN(dc, dc->op2, t[0]);
2179 if (dc->op2 == PR_CCS) {
2180 cris_update_cc_op(dc, CC_OP_FLAGS, 4);
2181 dc->flags_uptodate = 1;
2183 tcg_temp_free(t[0]);
2184 return 2;
2186 static int dec_move_pr(DisasContext *dc)
2188 TCGv t0;
2189 LOG_DIS("move $p%u, $r%u\n", dc->op2, dc->op1);
2190 cris_cc_mask(dc, 0);
2192 if (dc->op2 == PR_CCS)
2193 cris_evaluate_flags(dc);
2195 if (dc->op2 == PR_DZ) {
2196 tcg_gen_movi_tl(cpu_R[dc->op1], 0);
2197 } else {
2198 t0 = tcg_temp_new();
2199 t_gen_mov_TN_preg(t0, dc->op2);
2200 cris_alu(dc, CC_OP_MOVE,
2201 cpu_R[dc->op1], cpu_R[dc->op1], t0,
2202 preg_sizes[dc->op2]);
2203 tcg_temp_free(t0);
2205 return 2;
2208 static int dec_move_mr(DisasContext *dc)
2210 int memsize = memsize_zz(dc);
2211 int insn_len;
2212 LOG_DIS("move.%c [$r%u%s, $r%u\n",
2213 memsize_char(memsize),
2214 dc->op1, dc->postinc ? "+]" : "]",
2215 dc->op2);
2217 if (memsize == 4) {
2218 insn_len = dec_prep_move_m(dc, 0, 4, cpu_R[dc->op2]);
2219 cris_cc_mask(dc, CC_MASK_NZ);
2220 cris_update_cc_op(dc, CC_OP_MOVE, 4);
2221 cris_update_cc_x(dc);
2222 cris_update_result(dc, cpu_R[dc->op2]);
2224 else {
2225 TCGv t0;
2227 t0 = tcg_temp_new();
2228 insn_len = dec_prep_move_m(dc, 0, memsize, t0);
2229 cris_cc_mask(dc, CC_MASK_NZ);
2230 cris_alu(dc, CC_OP_MOVE,
2231 cpu_R[dc->op2], cpu_R[dc->op2], t0, memsize);
2232 tcg_temp_free(t0);
2234 do_postinc(dc, memsize);
2235 return insn_len;
2238 static inline void cris_alu_m_alloc_temps(TCGv *t)
2240 t[0] = tcg_temp_new();
2241 t[1] = tcg_temp_new();
2244 static inline void cris_alu_m_free_temps(TCGv *t)
2246 tcg_temp_free(t[0]);
2247 tcg_temp_free(t[1]);
2250 static int dec_movs_m(DisasContext *dc)
2252 TCGv t[2];
2253 int memsize = memsize_z(dc);
2254 int insn_len;
2255 LOG_DIS("movs.%c [$r%u%s, $r%u\n",
2256 memsize_char(memsize),
2257 dc->op1, dc->postinc ? "+]" : "]",
2258 dc->op2);
2260 cris_alu_m_alloc_temps(t);
2261 /* sign extend. */
2262 insn_len = dec_prep_alu_m(dc, 1, memsize, t[0], t[1]);
2263 cris_cc_mask(dc, CC_MASK_NZ);
2264 cris_alu(dc, CC_OP_MOVE,
2265 cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
2266 do_postinc(dc, memsize);
2267 cris_alu_m_free_temps(t);
2268 return insn_len;
2271 static int dec_addu_m(DisasContext *dc)
2273 TCGv t[2];
2274 int memsize = memsize_z(dc);
2275 int insn_len;
2276 LOG_DIS("addu.%c [$r%u%s, $r%u\n",
2277 memsize_char(memsize),
2278 dc->op1, dc->postinc ? "+]" : "]",
2279 dc->op2);
2281 cris_alu_m_alloc_temps(t);
 2282     /* zero extend. */
2283 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2284 cris_cc_mask(dc, CC_MASK_NZVC);
2285 cris_alu(dc, CC_OP_ADD,
2286 cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
2287 do_postinc(dc, memsize);
2288 cris_alu_m_free_temps(t);
2289 return insn_len;
2292 static int dec_adds_m(DisasContext *dc)
2294 TCGv t[2];
2295 int memsize = memsize_z(dc);
2296 int insn_len;
2297 LOG_DIS("adds.%c [$r%u%s, $r%u\n",
2298 memsize_char(memsize),
2299 dc->op1, dc->postinc ? "+]" : "]",
2300 dc->op2);
2302 cris_alu_m_alloc_temps(t);
2303 /* sign extend. */
2304 insn_len = dec_prep_alu_m(dc, 1, memsize, t[0], t[1]);
2305 cris_cc_mask(dc, CC_MASK_NZVC);
2306 cris_alu(dc, CC_OP_ADD, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
2307 do_postinc(dc, memsize);
2308 cris_alu_m_free_temps(t);
2309 return insn_len;
2312 static int dec_subu_m(DisasContext *dc)
2314 TCGv t[2];
2315 int memsize = memsize_z(dc);
2316 int insn_len;
2317 LOG_DIS("subu.%c [$r%u%s, $r%u\n",
2318 memsize_char(memsize),
2319 dc->op1, dc->postinc ? "+]" : "]",
2320 dc->op2);
2322 cris_alu_m_alloc_temps(t);
 2323     /* zero extend. */
2324 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2325 cris_cc_mask(dc, CC_MASK_NZVC);
2326 cris_alu(dc, CC_OP_SUB, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
2327 do_postinc(dc, memsize);
2328 cris_alu_m_free_temps(t);
2329 return insn_len;
2332 static int dec_subs_m(DisasContext *dc)
2334 TCGv t[2];
2335 int memsize = memsize_z(dc);
2336 int insn_len;
2337 LOG_DIS("subs.%c [$r%u%s, $r%u\n",
2338 memsize_char(memsize),
2339 dc->op1, dc->postinc ? "+]" : "]",
2340 dc->op2);
2342 cris_alu_m_alloc_temps(t);
2343 /* sign extend. */
2344 insn_len = dec_prep_alu_m(dc, 1, memsize, t[0], t[1]);
2345 cris_cc_mask(dc, CC_MASK_NZVC);
2346 cris_alu(dc, CC_OP_SUB, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
2347 do_postinc(dc, memsize);
2348 cris_alu_m_free_temps(t);
2349 return insn_len;
2352 static int dec_movu_m(DisasContext *dc)
2354 TCGv t[2];
2355 int memsize = memsize_z(dc);
2356 int insn_len;
2358 LOG_DIS("movu.%c [$r%u%s, $r%u\n",
2359 memsize_char(memsize),
2360 dc->op1, dc->postinc ? "+]" : "]",
2361 dc->op2);
2363 cris_alu_m_alloc_temps(t);
2364 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2365 cris_cc_mask(dc, CC_MASK_NZ);
2366 cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
2367 do_postinc(dc, memsize);
2368 cris_alu_m_free_temps(t);
2369 return insn_len;
2372 static int dec_cmpu_m(DisasContext *dc)
2374 TCGv t[2];
2375 int memsize = memsize_z(dc);
2376 int insn_len;
2377 LOG_DIS("cmpu.%c [$r%u%s, $r%u\n",
2378 memsize_char(memsize),
2379 dc->op1, dc->postinc ? "+]" : "]",
2380 dc->op2);
2382 cris_alu_m_alloc_temps(t);
2383 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2384 cris_cc_mask(dc, CC_MASK_NZVC);
2385 cris_alu(dc, CC_OP_CMP, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
2386 do_postinc(dc, memsize);
2387 cris_alu_m_free_temps(t);
2388 return insn_len;
2391 static int dec_cmps_m(DisasContext *dc)
2393 TCGv t[2];
2394 int memsize = memsize_z(dc);
2395 int insn_len;
2396 LOG_DIS("cmps.%c [$r%u%s, $r%u\n",
2397 memsize_char(memsize),
2398 dc->op1, dc->postinc ? "+]" : "]",
2399 dc->op2);
2401 cris_alu_m_alloc_temps(t);
2402 insn_len = dec_prep_alu_m(dc, 1, memsize, t[0], t[1]);
2403 cris_cc_mask(dc, CC_MASK_NZVC);
2404 cris_alu(dc, CC_OP_CMP,
2405 cpu_R[dc->op2], cpu_R[dc->op2], t[1],
2406 memsize_zz(dc));
2407 do_postinc(dc, memsize);
2408 cris_alu_m_free_temps(t);
2409 return insn_len;
2412 static int dec_cmp_m(DisasContext *dc)
2414 TCGv t[2];
2415 int memsize = memsize_zz(dc);
2416 int insn_len;
2417 LOG_DIS("cmp.%c [$r%u%s, $r%u\n",
2418 memsize_char(memsize),
2419 dc->op1, dc->postinc ? "+]" : "]",
2420 dc->op2);
2422 cris_alu_m_alloc_temps(t);
2423 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2424 cris_cc_mask(dc, CC_MASK_NZVC);
2425 cris_alu(dc, CC_OP_CMP,
2426 cpu_R[dc->op2], cpu_R[dc->op2], t[1],
2427 memsize_zz(dc));
2428 do_postinc(dc, memsize);
2429 cris_alu_m_free_temps(t);
2430 return insn_len;
2433 static int dec_test_m(DisasContext *dc)
2435 TCGv t[2];
2436 int memsize = memsize_zz(dc);
2437 int insn_len;
2438 LOG_DIS("test.%c [$r%u%s] op2=%x\n",
2439 memsize_char(memsize),
2440 dc->op1, dc->postinc ? "+]" : "]",
2441 dc->op2);
2443 cris_evaluate_flags(dc);
2445 cris_alu_m_alloc_temps(t);
2446 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2447 cris_cc_mask(dc, CC_MASK_NZ);
2448 tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~3);
2450 cris_alu(dc, CC_OP_CMP,
2451 cpu_R[dc->op2], t[1], tcg_const_tl(0), memsize_zz(dc));
2452 do_postinc(dc, memsize);
2453 cris_alu_m_free_temps(t);
2454 return insn_len;
2457 static int dec_and_m(DisasContext *dc)
2459 TCGv t[2];
2460 int memsize = memsize_zz(dc);
2461 int insn_len;
2462 LOG_DIS("and.%c [$r%u%s, $r%u\n",
2463 memsize_char(memsize),
2464 dc->op1, dc->postinc ? "+]" : "]",
2465 dc->op2);
2467 cris_alu_m_alloc_temps(t);
2468 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2469 cris_cc_mask(dc, CC_MASK_NZ);
2470 cris_alu(dc, CC_OP_AND, cpu_R[dc->op2], t[0], t[1], memsize_zz(dc));
2471 do_postinc(dc, memsize);
2472 cris_alu_m_free_temps(t);
2473 return insn_len;
2476 static int dec_add_m(DisasContext *dc)
2478 TCGv t[2];
2479 int memsize = memsize_zz(dc);
2480 int insn_len;
2481 LOG_DIS("add.%c [$r%u%s, $r%u\n",
2482 memsize_char(memsize),
2483 dc->op1, dc->postinc ? "+]" : "]",
2484 dc->op2);
2486 cris_alu_m_alloc_temps(t);
2487 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2488 cris_cc_mask(dc, CC_MASK_NZVC);
2489 cris_alu(dc, CC_OP_ADD,
2490 cpu_R[dc->op2], t[0], t[1], memsize_zz(dc));
2491 do_postinc(dc, memsize);
2492 cris_alu_m_free_temps(t);
2493 return insn_len;
2496 static int dec_addo_m(DisasContext *dc)
2498 TCGv t[2];
2499 int memsize = memsize_zz(dc);
2500 int insn_len;
2501 LOG_DIS("add.%c [$r%u%s, $r%u\n",
2502 memsize_char(memsize),
2503 dc->op1, dc->postinc ? "+]" : "]",
2504 dc->op2);
2506 cris_alu_m_alloc_temps(t);
2507 insn_len = dec_prep_alu_m(dc, 1, memsize, t[0], t[1]);
2508 cris_cc_mask(dc, 0);
2509 cris_alu(dc, CC_OP_ADD, cpu_R[R_ACR], t[0], t[1], 4);
2510 do_postinc(dc, memsize);
2511 cris_alu_m_free_temps(t);
2512 return insn_len;
2515 static int dec_bound_m(DisasContext *dc)
2517 TCGv l[2];
2518 int memsize = memsize_zz(dc);
2519 int insn_len;
2520 LOG_DIS("bound.%c [$r%u%s, $r%u\n",
2521 memsize_char(memsize),
2522 dc->op1, dc->postinc ? "+]" : "]",
2523 dc->op2);
2525 l[0] = tcg_temp_local_new();
2526 l[1] = tcg_temp_local_new();
2527 insn_len = dec_prep_alu_m(dc, 0, memsize, l[0], l[1]);
2528 cris_cc_mask(dc, CC_MASK_NZ);
2529 cris_alu(dc, CC_OP_BOUND, cpu_R[dc->op2], l[0], l[1], 4);
2530 do_postinc(dc, memsize);
2531 tcg_temp_free(l[0]);
2532 tcg_temp_free(l[1]);
2533 return insn_len;
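/*
 * dec_bound_m above uses tcg_temp_local_new() rather than the plain
 * temporaries used elsewhere: the bound operation is emitted with an
 * internal branch, and ordinary TCG temporaries do not keep their values
 * across branch/label boundaries.
 */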
2536 static int dec_addc_mr(DisasContext *dc)
2538 TCGv t[2];
2539 int insn_len = 2;
2540 LOG_DIS("addc [$r%u%s, $r%u\n",
2541 dc->op1, dc->postinc ? "+]" : "]",
2542 dc->op2);
2544 cris_evaluate_flags(dc);
2546 /* The X flag is known to be set for this insn. */
2547 dc->flagx_known = 1;
2548 dc->flags_x = X_FLAG;
2550 cris_alu_m_alloc_temps(t);
2551 insn_len = dec_prep_alu_m(dc, 0, 4, t[0], t[1]);
2552 cris_cc_mask(dc, CC_MASK_NZVC);
2553 cris_alu(dc, CC_OP_ADDC, cpu_R[dc->op2], t[0], t[1], 4);
2554 do_postinc(dc, 4);
2555 cris_alu_m_free_temps(t);
2556 return insn_len;
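/*
 * With the X flag forced on, CC_OP_ADDC folds the current carry bit from
 * $ccs into the addition.  In plain C the operation amounts to the
 * following sketch ("carry" standing in for the C flag; illustrative
 * helper, not used by the translator):
 */
static inline uint32_t addc_sketch(uint32_t a, uint32_t b, uint32_t carry)
{
    return a + b + (carry & 1);
}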
2559 static int dec_sub_m(DisasContext *dc)
2561 TCGv t[2];
2562 int memsize = memsize_zz(dc);
2563 int insn_len;
2564 LOG_DIS("sub.%c [$r%u%s, $r%u ir=%x zz=%x\n",
2565 memsize_char(memsize),
2566 dc->op1, dc->postinc ? "+]" : "]",
2567 dc->op2, dc->ir, dc->zzsize);
2569 cris_alu_m_alloc_temps(t);
2570 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2571 cris_cc_mask(dc, CC_MASK_NZVC);
2572 cris_alu(dc, CC_OP_SUB, cpu_R[dc->op2], t[0], t[1], memsize);
2573 do_postinc(dc, memsize);
2574 cris_alu_m_free_temps(t);
2575 return insn_len;
2578 static int dec_or_m(DisasContext *dc)
2580 TCGv t[2];
2581 int memsize = memsize_zz(dc);
2582 int insn_len;
2583 LOG_DIS("or.%c [$r%u%s, $r%u pc=%x\n",
2584 memsize_char(memsize),
2585 dc->op1, dc->postinc ? "+]" : "]",
2586 dc->op2, dc->pc);
2588 cris_alu_m_alloc_temps(t);
2589 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2590 cris_cc_mask(dc, CC_MASK_NZ);
2591 cris_alu(dc, CC_OP_OR,
2592 cpu_R[dc->op2], t[0], t[1], memsize_zz(dc));
2593 do_postinc(dc, memsize);
2594 cris_alu_m_free_temps(t);
2595 return insn_len;
2598 static int dec_move_mp(DisasContext *dc)
2600 TCGv t[2];
2601 int memsize = memsize_zz(dc);
2602 int insn_len = 2;
2604 LOG_DIS("move.%c [$r%u%s, $p%u\n",
2605 memsize_char(memsize),
2606 dc->op1,
2607 dc->postinc ? "+]" : "]",
2608 dc->op2);
2610 cris_alu_m_alloc_temps(t);
2611 insn_len = dec_prep_alu_m(dc, 0, memsize, t[0], t[1]);
2612 cris_cc_mask(dc, 0);
2613 if (dc->op2 == PR_CCS) {
2614 cris_evaluate_flags(dc);
2615 if (dc->tb_flags & U_FLAG) {
2616 /* User space is not allowed to touch all flags. */
2617 tcg_gen_andi_tl(t[1], t[1], 0x39f);
2618 tcg_gen_andi_tl(t[0], cpu_PR[PR_CCS], ~0x39f);
2619 tcg_gen_or_tl(t[1], t[0], t[1]);
2623 t_gen_mov_preg_TN(dc, dc->op2, t[1]);
2625 do_postinc(dc, memsize);
2626 cris_alu_m_free_temps(t);
2627 return insn_len;
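/*
 * The user-mode path above is a read-modify-write of $ccs: only the bits
 * in 0x39f may come from the loaded value, everything else is kept from
 * the current $ccs.  In plain C (illustrative sketch of the same merge):
 */
static inline uint32_t user_ccs_write_sketch(uint32_t ccs, uint32_t loaded)
{
    const uint32_t user_bits = 0x39f;  /* mask used in dec_move_mp() above */

    return (ccs & ~user_bits) | (loaded & user_bits);
}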
2630 static int dec_move_pm(DisasContext *dc)
2632 TCGv t0;
2633 int memsize;
2635 memsize = preg_sizes[dc->op2];
2637 LOG_DIS("move.%c $p%u, [$r%u%s\n",
2638 memsize_char(memsize),
2639 dc->op2, dc->op1, dc->postinc ? "+]" : "]");
2641 /* Prepare the store: address from $r[op1], value read from $p[op2]. */
2642 if (dc->op2 == PR_CCS)
2643 cris_evaluate_flags(dc);
2644 t0 = tcg_temp_new();
2645 t_gen_mov_TN_preg(t0, dc->op2);
2646 cris_flush_cc_state(dc);
2647 gen_store(dc, cpu_R[dc->op1], t0, memsize);
2648 tcg_temp_free(t0);
2650 cris_cc_mask(dc, 0);
2651 if (dc->postinc)
2652 tcg_gen_addi_tl(cpu_R[dc->op1], cpu_R[dc->op1], memsize);
2653 return 2;
2656 static int dec_movem_mr(DisasContext *dc)
2658 TCGv_i64 tmp[16];
2659 TCGv tmp32;
2660 TCGv addr;
2661 int i;
2662 int nr = dc->op2 + 1;
2664 LOG_DIS("movem [$r%u%s, $r%u\n", dc->op1,
2665 dc->postinc ? "+]" : "]", dc->op2);
2667 addr = tcg_temp_new();
2668 /* There are probably better ways of doing this. */
2669 cris_flush_cc_state(dc);
2670 for (i = 0; i < (nr >> 1); i++) {
2671 tmp[i] = tcg_temp_new_i64();
2672 tcg_gen_addi_tl(addr, cpu_R[dc->op1], i * 8);
2673 gen_load64(dc, tmp[i], addr);
2675 if (nr & 1) {
2676 tmp32 = tcg_temp_new_i32();
2677 tcg_gen_addi_tl(addr, cpu_R[dc->op1], i * 8);
2678 gen_load(dc, tmp32, addr, 4, 0);
2679 } else
2680 TCGV_UNUSED(tmp32);
2681 tcg_temp_free(addr);
2683 for (i = 0; i < (nr >> 1); i++) {
2684 tcg_gen_trunc_i64_i32(cpu_R[i * 2], tmp[i]);
2685 tcg_gen_shri_i64(tmp[i], tmp[i], 32);
2686 tcg_gen_trunc_i64_i32(cpu_R[i * 2 + 1], tmp[i]);
2687 tcg_temp_free_i64(tmp[i]);
2689 if (nr & 1) {
2690 tcg_gen_mov_tl(cpu_R[dc->op2], tmp32);
2691 tcg_temp_free(tmp32);
2694 /* writeback the updated pointer value. */
2695 if (dc->postinc)
2696 tcg_gen_addi_tl(cpu_R[dc->op1], cpu_R[dc->op1], nr * 4);
2698 /* gen_load might want to evaluate the previous insn's flags. */
2699 cris_cc_mask(dc, 0);
2700 return 2;
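/*
 * Each 64-bit load in dec_movem_mr above fills a register pair: the low
 * half of the loaded value goes to the even register and the high half to
 * the following odd one.  Plain C sketch of the split (illustrative
 * helper, not used by the translator):
 */
static inline void movem_split_sketch(uint64_t v, uint32_t *lo, uint32_t *hi)
{
    *lo = (uint32_t)v;          /* tcg_gen_trunc_i64_i32          */
    *hi = (uint32_t)(v >> 32);  /* shift right by 32, then trunc  */
}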
2703 static int dec_movem_rm(DisasContext *dc)
2705 TCGv tmp;
2706 TCGv addr;
2707 int i;
2709 LOG_DIS("movem $r%u, [$r%u%s\n", dc->op2, dc->op1,
2710 dc->postinc ? "+]" : "]");
2712 cris_flush_cc_state(dc);
2714 tmp = tcg_temp_new();
2715 addr = tcg_temp_new();
2716 tcg_gen_movi_tl(tmp, 4);
2717 tcg_gen_mov_tl(addr, cpu_R[dc->op1]);
2718 for (i = 0; i <= dc->op2; i++) {
2719 /* Perform the store at the current address. */
2720 /* Then advance the address by 4. */
2721 gen_store(dc, addr, cpu_R[i], 4);
2722 tcg_gen_add_tl(addr, addr, tmp);
2724 if (dc->postinc)
2725 tcg_gen_mov_tl(cpu_R[dc->op1], addr);
2726 cris_cc_mask(dc, 0);
2727 tcg_temp_free(tmp);
2728 tcg_temp_free(addr);
2729 return 2;
2732 static int dec_move_rm(DisasContext *dc)
2734 int memsize;
2736 memsize = memsize_zz(dc);
2738 LOG_DIS("move.%c $r%u, [$r%u]\n",
2739 memsize_char(memsize), dc->op2, dc->op1);
2741 /* prepare store. */
2742 cris_flush_cc_state(dc);
2743 gen_store(dc, cpu_R[dc->op1], cpu_R[dc->op2], memsize);
2745 if (dc->postinc)
2746 tcg_gen_addi_tl(cpu_R[dc->op1], cpu_R[dc->op1], memsize);
2747 cris_cc_mask(dc, 0);
2748 return 2;
2751 static int dec_lapcq(DisasContext *dc)
2753 LOG_DIS("lapcq %x, $r%u\n",
2754 dc->pc + dc->op1*2, dc->op2);
2755 cris_cc_mask(dc, 0);
2756 tcg_gen_movi_tl(cpu_R[dc->op2], dc->pc + dc->op1 * 2);
2757 return 2;
2760 static int dec_lapc_im(DisasContext *dc)
2762 unsigned int rd;
2763 int32_t imm;
2764 int32_t pc;
2766 rd = dc->op2;
2768 cris_cc_mask(dc, 0);
2769 imm = cris_fetch(dc, dc->pc + 2, 4, 0);
2770 LOG_DIS("lapc 0x%x, $r%u\n", imm + dc->pc, dc->op2);
2772 pc = dc->pc;
2773 pc += imm;
2774 tcg_gen_movi_tl(cpu_R[rd], pc);
2775 return 6;
2778 /* Jump to special reg. */
2779 static int dec_jump_p(DisasContext *dc)
2781 LOG_DIS("jump $p%u\n", dc->op2);
2783 if (dc->op2 == PR_CCS)
2784 cris_evaluate_flags(dc);
2785 t_gen_mov_TN_preg(env_btarget, dc->op2);
2786 /* rete will often have low bit set to indicate delayslot. */
2787 tcg_gen_andi_tl(env_btarget, env_btarget, ~1);
2788 cris_cc_mask(dc, 0);
2789 cris_prepare_jmp(dc, JMP_INDIRECT);
2790 return 2;
2793 /* Jump and save. */
2794 static int dec_jas_r(DisasContext *dc)
2796 LOG_DIS("jas $r%u, $p%u\n", dc->op1, dc->op2);
2797 cris_cc_mask(dc, 0);
2798 /* Store the return address in Pd. */
2799 tcg_gen_mov_tl(env_btarget, cpu_R[dc->op1]);
2800 if (dc->op2 > 15)
2801 abort();
2802 t_gen_mov_preg_TN(dc, dc->op2, tcg_const_tl(dc->pc + 4));
2804 cris_prepare_jmp(dc, JMP_INDIRECT);
2805 return 2;
2808 static int dec_jas_im(DisasContext *dc)
2810 uint32_t imm;
2812 imm = cris_fetch(dc, dc->pc + 2, 4, 0);
2814 LOG_DIS("jas 0x%x\n", imm);
2815 cris_cc_mask(dc, 0);
2816 /* Store the return address in Pd. */
2817 t_gen_mov_preg_TN(dc, dc->op2, tcg_const_tl(dc->pc + 8));
2819 dc->jmp_pc = imm;
2820 cris_prepare_jmp(dc, JMP_DIRECT);
2821 return 6;
2824 static int dec_jasc_im(DisasContext *dc)
2826 uint32_t imm;
2828 imm = cris_fetch(dc, dc->pc + 2, 4, 0);
2830 LOG_DIS("jasc 0x%x\n", imm);
2831 cris_cc_mask(dc, 0);
2832 /* Store the return address in Pd. */
2833 t_gen_mov_preg_TN(dc, dc->op2, tcg_const_tl(dc->pc + 8 + 4));
2835 dc->jmp_pc = imm;
2836 cris_prepare_jmp(dc, JMP_DIRECT);
2837 return 6;
2840 static int dec_jasc_r(DisasContext *dc)
2842 LOG_DIS("jasc_r $r%u, $p%u\n", dc->op1, dc->op2);
2843 cris_cc_mask(dc, 0);
2844 /* Store the return address in Pd. */
2845 tcg_gen_mov_tl(env_btarget, cpu_R[dc->op1]);
2846 t_gen_mov_preg_TN(dc, dc->op2, tcg_const_tl(dc->pc + 4 + 4));
2847 cris_prepare_jmp(dc, JMP_INDIRECT);
2848 return 2;
2851 static int dec_bcc_im(DisasContext *dc)
2853 int32_t offset;
2854 uint32_t cond = dc->op2;
2856 offset = cris_fetch(dc, dc->pc + 2, 2, 1);
2858 LOG_DIS("b%s %d pc=%x dst=%x\n",
2859 cc_name(cond), offset,
2860 dc->pc, dc->pc + offset);
2862 cris_cc_mask(dc, 0);
2863 /* op2 holds the condition-code. */
2864 cris_prepare_cc_branch (dc, offset, cond);
2865 return 4;
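/*
 * The 16-bit displacement above is fetched sign-extended (last cris_fetch
 * argument), so conditional branches reach both forwards and backwards.
 * Equivalent plain C for the extension (illustrative sketch):
 */
static inline int32_t bcc_offset_sketch(uint32_t halfword)
{
    return (int32_t)(int16_t)(halfword & 0xffff);
}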
2868 static int dec_bas_im(DisasContext *dc)
2870 int32_t simm;
2873 simm = cris_fetch(dc, dc->pc + 2, 4, 0);
2875 LOG_DIS("bas 0x%x, $p%u\n", dc->pc + simm, dc->op2);
2876 cris_cc_mask(dc, 0);
2877 /* Store the return address in Pd. */
2878 t_gen_mov_preg_TN(dc, dc->op2, tcg_const_tl(dc->pc + 8));
2880 dc->jmp_pc = dc->pc + simm;
2881 cris_prepare_jmp(dc, JMP_DIRECT);
2882 return 6;
2885 static int dec_basc_im(DisasContext *dc)
2887 int32_t simm;
2888 simm = cris_fetch(dc, dc->pc + 2, 4, 0);
2890 LOG_DIS("basc 0x%x, $p%u\n", dc->pc + simm, dc->op2);
2891 cris_cc_mask(dc, 0);
2892 /* Store the return address in Pd. */
2893 t_gen_mov_preg_TN(dc, dc->op2, tcg_const_tl(dc->pc + 12));
2895 dc->jmp_pc = dc->pc + simm;
2896 cris_prepare_jmp(dc, JMP_DIRECT);
2897 return 6;
2900 static int dec_rfe_etc(DisasContext *dc)
2902 cris_cc_mask(dc, 0);
2904 if (dc->op2 == 15) {
2905 t_gen_mov_env_TN(halted, tcg_const_tl(1));
2906 tcg_gen_movi_tl(env_pc, dc->pc + 2);
2907 t_gen_raise_exception(EXCP_HLT);
2908 return 2;
2911 switch (dc->op2 & 7) {
2912 case 2:
2913 /* rfe. */
2914 LOG_DIS("rfe\n");
2915 cris_evaluate_flags(dc);
2916 gen_helper_rfe();
2917 dc->is_jmp = DISAS_UPDATE;
2918 break;
2919 case 5:
2920 /* rfn. */
2921 LOG_DIS("rfn\n");
2922 cris_evaluate_flags(dc);
2923 gen_helper_rfn();
2924 dc->is_jmp = DISAS_UPDATE;
2925 break;
2926 case 6:
2927 LOG_DIS("break %d\n", dc->op1);
2928 cris_evaluate_flags (dc);
2929 /* break. */
2930 tcg_gen_movi_tl(env_pc, dc->pc + 2);
2932 /* Breaks start at 16 in the exception vector. */
2933 t_gen_mov_env_TN(trap_vector,
2934 tcg_const_tl(dc->op1 + 16));
2935 t_gen_raise_exception(EXCP_BREAK);
2936 dc->is_jmp = DISAS_UPDATE;
2937 break;
2938 default:
2939 printf ("op2=%x\n", dc->op2);
2940 BUG();
2941 break;
2944 return 2;
2947 static int dec_ftag_fidx_d_m(DisasContext *dc)
2949 return 2;
2952 static int dec_ftag_fidx_i_m(DisasContext *dc)
2954 return 2;
2957 static int dec_null(DisasContext *dc)
2959 printf ("unknown insn pc=%x opc=%x op1=%x op2=%x\n",
2960 dc->pc, dc->opcode, dc->op1, dc->op2);
2961 fflush(NULL);
2962 BUG();
2963 return 2;
2966 static struct decoder_info {
2967 struct {
2968 uint32_t bits;
2969 uint32_t mask;
2971 int (*dec)(DisasContext *dc);
2972 } decinfo[] = {
2973 /* Order matters here. */
2974 {DEC_MOVEQ, dec_moveq},
2975 {DEC_BTSTQ, dec_btstq},
2976 {DEC_CMPQ, dec_cmpq},
2977 {DEC_ADDOQ, dec_addoq},
2978 {DEC_ADDQ, dec_addq},
2979 {DEC_SUBQ, dec_subq},
2980 {DEC_ANDQ, dec_andq},
2981 {DEC_ORQ, dec_orq},
2982 {DEC_ASRQ, dec_asrq},
2983 {DEC_LSLQ, dec_lslq},
2984 {DEC_LSRQ, dec_lsrq},
2985 {DEC_BCCQ, dec_bccq},
2987 {DEC_BCC_IM, dec_bcc_im},
2988 {DEC_JAS_IM, dec_jas_im},
2989 {DEC_JAS_R, dec_jas_r},
2990 {DEC_JASC_IM, dec_jasc_im},
2991 {DEC_JASC_R, dec_jasc_r},
2992 {DEC_BAS_IM, dec_bas_im},
2993 {DEC_BASC_IM, dec_basc_im},
2994 {DEC_JUMP_P, dec_jump_p},
2995 {DEC_LAPC_IM, dec_lapc_im},
2996 {DEC_LAPCQ, dec_lapcq},
2998 {DEC_RFE_ETC, dec_rfe_etc},
2999 {DEC_ADDC_MR, dec_addc_mr},
3001 {DEC_MOVE_MP, dec_move_mp},
3002 {DEC_MOVE_PM, dec_move_pm},
3003 {DEC_MOVEM_MR, dec_movem_mr},
3004 {DEC_MOVEM_RM, dec_movem_rm},
3005 {DEC_MOVE_PR, dec_move_pr},
3006 {DEC_SCC_R, dec_scc_r},
3007 {DEC_SETF, dec_setclrf},
3008 {DEC_CLEARF, dec_setclrf},
3010 {DEC_MOVE_SR, dec_move_sr},
3011 {DEC_MOVE_RP, dec_move_rp},
3012 {DEC_SWAP_R, dec_swap_r},
3013 {DEC_ABS_R, dec_abs_r},
3014 {DEC_LZ_R, dec_lz_r},
3015 {DEC_MOVE_RS, dec_move_rs},
3016 {DEC_BTST_R, dec_btst_r},
3017 {DEC_ADDC_R, dec_addc_r},
3019 {DEC_DSTEP_R, dec_dstep_r},
3020 {DEC_XOR_R, dec_xor_r},
3021 {DEC_MCP_R, dec_mcp_r},
3022 {DEC_CMP_R, dec_cmp_r},
3024 {DEC_ADDI_R, dec_addi_r},
3025 {DEC_ADDI_ACR, dec_addi_acr},
3027 {DEC_ADD_R, dec_add_r},
3028 {DEC_SUB_R, dec_sub_r},
3030 {DEC_ADDU_R, dec_addu_r},
3031 {DEC_ADDS_R, dec_adds_r},
3032 {DEC_SUBU_R, dec_subu_r},
3033 {DEC_SUBS_R, dec_subs_r},
3034 {DEC_LSL_R, dec_lsl_r},
3036 {DEC_AND_R, dec_and_r},
3037 {DEC_OR_R, dec_or_r},
3038 {DEC_BOUND_R, dec_bound_r},
3039 {DEC_ASR_R, dec_asr_r},
3040 {DEC_LSR_R, dec_lsr_r},
3042 {DEC_MOVU_R, dec_movu_r},
3043 {DEC_MOVS_R, dec_movs_r},
3044 {DEC_NEG_R, dec_neg_r},
3045 {DEC_MOVE_R, dec_move_r},
3047 {DEC_FTAG_FIDX_I_M, dec_ftag_fidx_i_m},
3048 {DEC_FTAG_FIDX_D_M, dec_ftag_fidx_d_m},
3050 {DEC_MULS_R, dec_muls_r},
3051 {DEC_MULU_R, dec_mulu_r},
3053 {DEC_ADDU_M, dec_addu_m},
3054 {DEC_ADDS_M, dec_adds_m},
3055 {DEC_SUBU_M, dec_subu_m},
3056 {DEC_SUBS_M, dec_subs_m},
3058 {DEC_CMPU_M, dec_cmpu_m},
3059 {DEC_CMPS_M, dec_cmps_m},
3060 {DEC_MOVU_M, dec_movu_m},
3061 {DEC_MOVS_M, dec_movs_m},
3063 {DEC_CMP_M, dec_cmp_m},
3064 {DEC_ADDO_M, dec_addo_m},
3065 {DEC_BOUND_M, dec_bound_m},
3066 {DEC_ADD_M, dec_add_m},
3067 {DEC_SUB_M, dec_sub_m},
3068 {DEC_AND_M, dec_and_m},
3069 {DEC_OR_M, dec_or_m},
3070 {DEC_MOVE_RM, dec_move_rm},
3071 {DEC_TEST_M, dec_test_m},
3072 {DEC_MOVE_MR, dec_move_mr},
3074 {{0, 0}, dec_null}
3077 static unsigned int crisv32_decoder(DisasContext *dc)
3079 int insn_len = 2;
3080 int i;
3082 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)))
3083 tcg_gen_debug_insn_start(dc->pc);
3085 /* Load a halfword into the instruction register. */
3086 dc->ir = cris_fetch(dc, dc->pc, 2, 0);
3088 /* Now decode it. */
3089 dc->opcode = EXTRACT_FIELD(dc->ir, 4, 11);
3090 dc->op1 = EXTRACT_FIELD(dc->ir, 0, 3);
3091 dc->op2 = EXTRACT_FIELD(dc->ir, 12, 15);
3092 dc->zsize = EXTRACT_FIELD(dc->ir, 4, 4);
3093 dc->zzsize = EXTRACT_FIELD(dc->ir, 4, 5);
3094 dc->postinc = EXTRACT_FIELD(dc->ir, 10, 10);
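/*
 * EXTRACT_FIELD(src, start, end) picks bits start..end inclusive.  For an
 * arbitrary halfword ir = 0x05b2 (just an example value, not a particular
 * insn) the fields above come out as:
 *   op1     = bits 0..3   = 0x2
 *   opcode  = bits 4..11  = 0x5b
 *   op2     = bits 12..15 = 0x0
 *   zsize   = bit 4       = 0x1
 *   zzsize  = bits 4..5   = 0x3
 *   postinc = bit 10      = 0x1
 */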
3096 /* Dispatch through the decoder table; first match wins. */
3097 for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
3098 if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits)
3100 insn_len = decinfo[i].dec(dc);
3101 break;
3105 #if !defined(CONFIG_USER_ONLY)
3106 /* Single-stepping? */
3107 if (dc->tb_flags & S_FLAG) {
3108 int l1;
3110 l1 = gen_new_label();
3111 tcg_gen_brcondi_tl(TCG_COND_NE, cpu_PR[PR_SPC], dc->pc, l1);
3112 /* We treat SPC as a break with an odd trap vector. */
3113 cris_evaluate_flags (dc);
3114 t_gen_mov_env_TN(trap_vector, tcg_const_tl(3));
3115 tcg_gen_movi_tl(env_pc, dc->pc + insn_len);
3116 tcg_gen_movi_tl(cpu_PR[PR_SPC], dc->pc + insn_len);
3117 t_gen_raise_exception(EXCP_BREAK);
3118 gen_set_label(l1);
3120 #endif
3121 return insn_len;
3124 static void check_breakpoint(CPUState *env, DisasContext *dc)
3126 CPUBreakpoint *bp;
3128 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
3129 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
3130 if (bp->pc == dc->pc) {
3131 cris_evaluate_flags (dc);
3132 tcg_gen_movi_tl(env_pc, dc->pc);
3133 t_gen_raise_exception(EXCP_DEBUG);
3134 dc->is_jmp = DISAS_UPDATE;
3140 #include "translate_v10.c"
3143 * Delay slots on QEMU/CRIS.
3145 * If an exception hits on a delayslot, the core will let ERP (the Exception
3146 * Return Pointer) point to the branch (the previous) insn and set the lsb
3147 * to give SW a hint that the exception actually hit on the dslot.
3149 * CRIS expects all PC addresses to be 16-bit aligned. The lsb is ignored by
3150 * the core and any jmp to an odd address will mask off that lsb. It is
3151 * simply there to let SW know there was an exception on a dslot.
3153 * When the software returns from an exception, the branch will re-execute.
3154 * On QEMU care needs to be taken when a branch+delayslot sequence is broken
3155 * and the branch and delayslot don't share pages.
3157 * The TB containing the branch insn will set up env->btarget and evaluate
3158 * env->btaken. When the translation loop exits we will note that the branch
3159 * sequence is broken and let env->dslot be the size of the branch insn (those
3160 * vary in length).
3162 * The TB containing the delayslot will have the PC of its real insn (i.e. no lsb
3163 * set). It will also expect to have env->dslot set up with the size of the
3164 * delay slot so that env->pc - env->dslot points to the branch insn. This TB
3165 * will execute the dslot and take the branch, either to btarget or just one
3166 * insn ahead.
3168 * When exceptions occur, we check for env->dslot in do_interrupt to detect
3169 * broken branch sequences and set up $erp accordingly (i.e. let it point to the
3170 * branch and set the lsb). Then env->dslot gets cleared so that the exception
3171 * handler can enter. When returning from exceptions (jump $erp) the lsb gets
3172 * masked off and we will re-execute the branch insn.
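/*
 * A sketch of the $erp fixup described above, as it conceptually happens on
 * the interrupt path (illustrative only; the real code lives in
 * do_interrupt()):
 *
 *   if (env->dslot) {
 *       env->pregs[PR_ERP] = env->pc - env->dslot;   back to the branch insn
 *       env->pregs[PR_ERP] |= 1;                     lsb marks the dslot
 *       env->dslot = 0;                              let the handler enter
 *   }
 */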
3176 /* generate intermediate code for basic block 'tb'. */
3177 static void
3178 gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb,
3179 int search_pc)
3181 uint16_t *gen_opc_end;
3182 uint32_t pc_start;
3183 unsigned int insn_len;
3184 int j, lj;
3185 struct DisasContext ctx;
3186 struct DisasContext *dc = &ctx;
3187 uint32_t next_page_start;
3188 target_ulong npc;
3189 int num_insns;
3190 int max_insns;
3192 qemu_log_try_set_file(stderr);
3194 if (env->pregs[PR_VR] == 32) {
3195 dc->decoder = crisv32_decoder;
3196 dc->clear_locked_irq = 0;
3197 } else {
3198 dc->decoder = crisv10_decoder;
3199 dc->clear_locked_irq = 1;
3202 /* Odd PC indicates that the branch is re-executing due to an exception in the
3203 * delayslot, like in real hw.
3205 pc_start = tb->pc & ~1;
3206 dc->env = env;
3207 dc->tb = tb;
3209 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
3211 dc->is_jmp = DISAS_NEXT;
3212 dc->ppc = pc_start;
3213 dc->pc = pc_start;
3214 dc->singlestep_enabled = env->singlestep_enabled;
3215 dc->flags_uptodate = 1;
3216 dc->flagx_known = 1;
3217 dc->flags_x = tb->flags & X_FLAG;
3218 dc->cc_x_uptodate = 0;
3219 dc->cc_mask = 0;
3220 dc->update_cc = 0;
3221 dc->clear_prefix = 0;
3223 cris_update_cc_op(dc, CC_OP_FLAGS, 4);
3224 dc->cc_size_uptodate = -1;
3226 /* Decode TB flags. */
3227 dc->tb_flags = tb->flags & (S_FLAG | P_FLAG | U_FLAG \
3228 | X_FLAG | PFIX_FLAG);
3229 dc->delayed_branch = !!(tb->flags & 7);
3230 if (dc->delayed_branch)
3231 dc->jmp = JMP_INDIRECT;
3232 else
3233 dc->jmp = JMP_NOJMP;
3235 dc->cpustate_changed = 0;
3237 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
3238 qemu_log(
3239 "srch=%d pc=%x %x flg=%" PRIx64 " bt=%x ds=%u ccs=%x\n"
3240 "pid=%x usp=%x\n"
3241 "%x.%x.%x.%x\n"
3242 "%x.%x.%x.%x\n"
3243 "%x.%x.%x.%x\n"
3244 "%x.%x.%x.%x\n",
3245 search_pc, dc->pc, dc->ppc,
3246 (uint64_t)tb->flags,
3247 env->btarget, (unsigned)tb->flags & 7,
3248 env->pregs[PR_CCS],
3249 env->pregs[PR_PID], env->pregs[PR_USP],
3250 env->regs[0], env->regs[1], env->regs[2], env->regs[3],
3251 env->regs[4], env->regs[5], env->regs[6], env->regs[7],
3252 env->regs[8], env->regs[9],
3253 env->regs[10], env->regs[11],
3254 env->regs[12], env->regs[13],
3255 env->regs[14], env->regs[15]);
3256 qemu_log("--------------\n");
3257 qemu_log("IN: %s\n", lookup_symbol(pc_start));
3260 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
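/*
 * A TB never crosses a target page: e.g. with 4 KB pages (an example value;
 * the real size is TARGET_PAGE_SIZE) a block starting at pc 0x1ffc gets
 * next_page_start = 0x2000, and the loop below stops before decoding an
 * insn at or beyond that address.
 */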
3261 lj = -1;
3262 num_insns = 0;
3263 max_insns = tb->cflags & CF_COUNT_MASK;
3264 if (max_insns == 0)
3265 max_insns = CF_COUNT_MASK;
3267 gen_icount_start();
3270 check_breakpoint(env, dc);
3272 if (search_pc) {
3273 j = gen_opc_ptr - gen_opc_buf;
3274 if (lj < j) {
3275 lj++;
3276 while (lj < j)
3277 gen_opc_instr_start[lj++] = 0;
3279 if (dc->delayed_branch == 1)
3280 gen_opc_pc[lj] = dc->ppc | 1;
3281 else
3282 gen_opc_pc[lj] = dc->pc;
3283 gen_opc_instr_start[lj] = 1;
3284 gen_opc_icount[lj] = num_insns;
3287 /* Pretty disas. */
3288 LOG_DIS("%8.8x:\t", dc->pc);
3290 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
3291 gen_io_start();
3292 dc->clear_x = 1;
3294 insn_len = dc->decoder(dc);
3295 dc->ppc = dc->pc;
3296 dc->pc += insn_len;
3297 if (dc->clear_x)
3298 cris_clear_x_flag(dc);
3300 num_insns++;
3301 /* Check for delayed branches here. If we do it before
3302 actually generating any host code, the simulator will just
3303 loop doing nothing at this program location. */
3304 if (dc->delayed_branch) {
3305 dc->delayed_branch--;
3306 if (dc->delayed_branch == 0)
3308 if (tb->flags & 7)
3309 t_gen_mov_env_TN(dslot,
3310 tcg_const_tl(0));
3311 if (dc->cpustate_changed || !dc->flagx_known
3312 || (dc->flags_x != (tb->flags & X_FLAG))) {
3313 cris_store_direct_jmp(dc);
3316 if (dc->clear_locked_irq) {
3317 dc->clear_locked_irq = 0;
3318 t_gen_mov_env_TN(locked_irq,
3319 tcg_const_tl(0));
3322 if (dc->jmp == JMP_DIRECT_CC) {
3323 int l1;
3325 l1 = gen_new_label();
3326 cris_evaluate_flags(dc);
3328 /* Conditional jmp. */
3329 tcg_gen_brcondi_tl(TCG_COND_EQ,
3330 env_btaken, 0, l1);
3331 gen_goto_tb(dc, 1, dc->jmp_pc);
3332 gen_set_label(l1);
3333 gen_goto_tb(dc, 0, dc->pc);
3334 dc->is_jmp = DISAS_TB_JUMP;
3335 dc->jmp = JMP_NOJMP;
3336 } else if (dc->jmp == JMP_DIRECT) {
3337 cris_evaluate_flags(dc);
3338 gen_goto_tb(dc, 0, dc->jmp_pc);
3339 dc->is_jmp = DISAS_TB_JUMP;
3340 dc->jmp = JMP_NOJMP;
3341 } else {
3342 t_gen_cc_jmp(env_btarget,
3343 tcg_const_tl(dc->pc));
3344 dc->is_jmp = DISAS_JUMP;
3346 break;
3350 /* If we are re-executing a branch due to exceptions on
3351 delay slots, don't break. */
3352 if (!(tb->pc & 1) && env->singlestep_enabled)
3353 break;
3354 } while (!dc->is_jmp && !dc->cpustate_changed
3355 && gen_opc_ptr < gen_opc_end
3356 && !singlestep
3357 && (dc->pc < next_page_start)
3358 && num_insns < max_insns);
3360 if (dc->clear_locked_irq)
3361 t_gen_mov_env_TN(locked_irq, tcg_const_tl(0));
3363 npc = dc->pc;
3365 if (tb->cflags & CF_LAST_IO)
3366 gen_io_end();
3367 /* Force an update if the per-tb cpu state has changed. */
3368 if (dc->is_jmp == DISAS_NEXT
3369 && (dc->cpustate_changed || !dc->flagx_known
3370 || (dc->flags_x != (tb->flags & X_FLAG)))) {
3371 dc->is_jmp = DISAS_UPDATE;
3372 tcg_gen_movi_tl(env_pc, npc);
3374 /* Broken branch+delayslot sequence. */
3375 if (dc->delayed_branch == 1) {
3376 /* Set env->dslot to the size of the branch insn. */
3377 t_gen_mov_env_TN(dslot, tcg_const_tl(dc->pc - dc->ppc));
3378 cris_store_direct_jmp(dc);
3381 cris_evaluate_flags (dc);
3383 if (unlikely(env->singlestep_enabled)) {
3384 if (dc->is_jmp == DISAS_NEXT)
3385 tcg_gen_movi_tl(env_pc, npc);
3386 t_gen_raise_exception(EXCP_DEBUG);
3387 } else {
3388 switch(dc->is_jmp) {
3389 case DISAS_NEXT:
3390 gen_goto_tb(dc, 1, npc);
3391 break;
3392 default:
3393 case DISAS_JUMP:
3394 case DISAS_UPDATE:
3395 /* indicate that the hash table must be used
3396 to find the next TB */
3397 tcg_gen_exit_tb(0);
3398 break;
3399 case DISAS_SWI:
3400 case DISAS_TB_JUMP:
3401 /* nothing more to generate */
3402 break;
3405 gen_icount_end(tb, num_insns);
3406 *gen_opc_ptr = INDEX_op_end;
3407 if (search_pc) {
3408 j = gen_opc_ptr - gen_opc_buf;
3409 lj++;
3410 while (lj <= j)
3411 gen_opc_instr_start[lj++] = 0;
3412 } else {
3413 tb->size = dc->pc - pc_start;
3414 tb->icount = num_insns;
3417 #ifdef DEBUG_DISAS
3418 #if !DISAS_CRIS
3419 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
3420 log_target_disas(pc_start, dc->pc - pc_start,
3421 dc->env->pregs[PR_VR]);
3422 qemu_log("\nisize=%d osize=%td\n",
3423 dc->pc - pc_start, gen_opc_ptr - gen_opc_buf);
3425 #endif
3426 #endif
3429 void gen_intermediate_code (CPUState *env, struct TranslationBlock *tb)
3431 gen_intermediate_code_internal(env, tb, 0);
3434 void gen_intermediate_code_pc (CPUState *env, struct TranslationBlock *tb)
3436 gen_intermediate_code_internal(env, tb, 1);
3439 void cpu_dump_state (CPUState *env, FILE *f, fprintf_function cpu_fprintf,
3440 int flags)
3442 int i;
3443 uint32_t srs;
3445 if (!env || !f)
3446 return;
3448 cpu_fprintf(f, "PC=%x CCS=%x btaken=%d btarget=%x\n"
3449 "cc_op=%d cc_src=%d cc_dest=%d cc_result=%x cc_mask=%x\n",
3450 env->pc, env->pregs[PR_CCS], env->btaken, env->btarget,
3451 env->cc_op,
3452 env->cc_src, env->cc_dest, env->cc_result, env->cc_mask);
3455 for (i = 0; i < 16; i++) {
3456 cpu_fprintf(f, "%s=%8.8x ",regnames[i], env->regs[i]);
3457 if ((i + 1) % 4 == 0)
3458 cpu_fprintf(f, "\n");
3460 cpu_fprintf(f, "\nspecial regs:\n");
3461 for (i = 0; i < 16; i++) {
3462 cpu_fprintf(f, "%s=%8.8x ", pregnames[i], env->pregs[i]);
3463 if ((i + 1) % 4 == 0)
3464 cpu_fprintf(f, "\n");
3466 srs = env->pregs[PR_SRS];
3467 cpu_fprintf(f, "\nsupport function regs bank %x:\n", srs);
3468 if (srs < 256) {
3469 for (i = 0; i < 16; i++) {
3470 cpu_fprintf(f, "s%2.2d=%8.8x ",
3471 i, env->sregs[srs][i]);
3472 if ((i + 1) % 4 == 0)
3473 cpu_fprintf(f, "\n");
3476 cpu_fprintf(f, "\n\n");
3480 struct
3482 uint32_t vr;
3483 const char *name;
3484 } cris_cores[] = {
3485 {8, "crisv8"},
3486 {9, "crisv9"},
3487 {10, "crisv10"},
3488 {11, "crisv11"},
3489 {32, "crisv32"},
3492 void cris_cpu_list(FILE *f, fprintf_function cpu_fprintf)
3494 unsigned int i;
3496 (*cpu_fprintf)(f, "Available CPUs:\n");
3497 for (i = 0; i < ARRAY_SIZE(cris_cores); i++) {
3498 (*cpu_fprintf)(f, " %s\n", cris_cores[i].name);
3502 static uint32_t vr_by_name(const char *name)
3504 unsigned int i;
3505 for (i = 0; i < ARRAY_SIZE(cris_cores); i++) {
3506 if (strcmp(name, cris_cores[i].name) == 0) {
3507 return cris_cores[i].vr;
3510 return 32;
3513 CPUCRISState *cpu_cris_init (const char *cpu_model)
3515 CPUCRISState *env;
3516 static int tcg_initialized = 0;
3517 int i;
3519 env = qemu_mallocz(sizeof(CPUCRISState));
3521 env->pregs[PR_VR] = vr_by_name(cpu_model);
3522 cpu_exec_init(env);
3523 cpu_reset(env);
3524 qemu_init_vcpu(env);
3526 if (tcg_initialized)
3527 return env;
3529 tcg_initialized = 1;
3531 #define GEN_HELPER 2
3532 #include "helper.h"
3534 if (env->pregs[PR_VR] < 32) {
3535 cpu_crisv10_init(env);
3536 return env;
3540 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
3541 cc_x = tcg_global_mem_new(TCG_AREG0,
3542 offsetof(CPUState, cc_x), "cc_x");
3543 cc_src = tcg_global_mem_new(TCG_AREG0,
3544 offsetof(CPUState, cc_src), "cc_src");
3545 cc_dest = tcg_global_mem_new(TCG_AREG0,
3546 offsetof(CPUState, cc_dest),
3547 "cc_dest");
3548 cc_result = tcg_global_mem_new(TCG_AREG0,
3549 offsetof(CPUState, cc_result),
3550 "cc_result");
3551 cc_op = tcg_global_mem_new(TCG_AREG0,
3552 offsetof(CPUState, cc_op), "cc_op");
3553 cc_size = tcg_global_mem_new(TCG_AREG0,
3554 offsetof(CPUState, cc_size),
3555 "cc_size");
3556 cc_mask = tcg_global_mem_new(TCG_AREG0,
3557 offsetof(CPUState, cc_mask),
3558 "cc_mask");
3560 env_pc = tcg_global_mem_new(TCG_AREG0,
3561 offsetof(CPUState, pc),
3562 "pc");
3563 env_btarget = tcg_global_mem_new(TCG_AREG0,
3564 offsetof(CPUState, btarget),
3565 "btarget");
3566 env_btaken = tcg_global_mem_new(TCG_AREG0,
3567 offsetof(CPUState, btaken),
3568 "btaken");
3569 for (i = 0; i < 16; i++) {
3570 cpu_R[i] = tcg_global_mem_new(TCG_AREG0,
3571 offsetof(CPUState, regs[i]),
3572 regnames[i]);
3574 for (i = 0; i < 16; i++) {
3575 cpu_PR[i] = tcg_global_mem_new(TCG_AREG0,
3576 offsetof(CPUState, pregs[i]),
3577 pregnames[i]);
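/*
 * Each tcg_global_mem_new() call above ties a TCG value to a CPUState
 * field: writes to cpu_R[i] or cpu_PR[i] made during translation become
 * loads and stores of the corresponding offsetof(CPUState, ...) slot
 * relative to TCG_AREG0 at execution time.
 */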
3580 return env;
3583 void cpu_reset (CPUCRISState *env)
3585 uint32_t vr;
3587 if (qemu_loglevel_mask(CPU_LOG_RESET)) {
3588 qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
3589 log_cpu_state(env, 0);
3592 vr = env->pregs[PR_VR];
3593 memset(env, 0, offsetof(CPUCRISState, breakpoints));
3594 env->pregs[PR_VR] = vr;
3595 tlb_flush(env, 1);
3597 #if defined(CONFIG_USER_ONLY)
3598 /* start in user mode with interrupts enabled. */
3599 env->pregs[PR_CCS] |= U_FLAG | I_FLAG | P_FLAG;
3600 #else
3601 cris_mmu_init(env);
3602 env->pregs[PR_CCS] = 0;
3603 #endif
3606 void restore_state_to_opc(CPUState *env, TranslationBlock *tb, int pc_pos)
3608 env->pc = gen_opc_pc[pc_pos];