/*
 *  Alpha emulation cpu translation for qemu.
 *
 *  Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "host-utils.h"
#include "qemu-common.h"

#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
typedef struct DisasContext DisasContext;
struct DisasContext {
    struct TranslationBlock *tb;
    CPUAlphaState *env;
    uint64_t pc;
    int mem_idx;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;
};

/* Return values from translate_one, indicating the state of the TB.
   Note that zero indicates that we are not exiting the TB.  */

typedef enum {
    NO_EXIT,

    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,

    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,

    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,

    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;
/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_st_addr;
static TCGv cpu_lock_value;
static TCGv cpu_unique;
#ifndef CONFIG_USER_ONLY
static TCGv cpu_sysval;
static TCGv cpu_usp;
#endif

static char cpu_reg_names[10*4+21*5 + 10*5+21*6];

#include "gen-icount.h"
static void alpha_translate_init(void)
{
    int i;
    char *p;
    static int done_init = 0;

    if (done_init)
        return;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    p = cpu_reg_names;
    for (i = 0; i < 31; i++) {
        sprintf(p, "ir%d", i);
        cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUState, ir[i]), p);
        p += (i < 10) ? 4 : 5;

        sprintf(p, "fir%d", i);
        cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUState, fir[i]), p);
        p += (i < 10) ? 5 : 6;
    }

    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
                                    offsetof(CPUState, pc), "pc");

    cpu_lock_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUState, lock_addr),
                                           "lock_addr");
    cpu_lock_st_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                              offsetof(CPUState, lock_st_addr),
                                              "lock_st_addr");
    cpu_lock_value = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUState, lock_value),
                                            "lock_value");

    cpu_unique = tcg_global_mem_new_i64(TCG_AREG0,
                                        offsetof(CPUState, unique), "unique");
#ifndef CONFIG_USER_ONLY
    cpu_sysval = tcg_global_mem_new_i64(TCG_AREG0,
                                        offsetof(CPUState, sysval), "sysval");
    cpu_usp = tcg_global_mem_new_i64(TCG_AREG0,
                                     offsetof(CPUState, usp), "usp");
#endif

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"

    done_init = 1;
}
static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}

static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    gen_excp_1(exception, error_code);
    return EXIT_NORETURN;
}

static inline ExitStatus gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}
static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld64(tmp, t1, flags);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld32s(t0, t1, flags);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld64(t0, t1, flags);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}
static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, int fp,
                                int clear)
{
    TCGv addr, va;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear) {
            tcg_gen_andi_i64(addr, addr, ~0x7);
        }
    } else {
        if (clear) {
            disp16 &= ~0x7;
        }
        tcg_gen_movi_i64(addr, disp16);
    }

    va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(addr);
}
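
/* Illustrative sketch (not part of the original file): the effective-address
   computation that gen_load_mem emits as TCG ops above, written as plain host
   C.  The 'clear' flag models LDQ_U/STQ_U, which ignore the low three bits of
   the address; the helper name is hypothetical. */
static inline uint64_t alpha_load_ea_sketch(uint64_t rb_val, int rb,
                                            int32_t disp16, int clear)
{
    /* $31 reads as zero, so with no real base register the address is just
       the sign-extended displacement. */
    uint64_t addr = (rb != 31) ? rb_val + disp16 : (uint64_t)(int64_t)disp16;
    return clear ? (addr & ~(uint64_t)0x7) : addr;
}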
253 static inline void gen_qemu_stf(TCGv t0
, TCGv t1
, int flags
)
255 TCGv_i32 tmp32
= tcg_temp_new_i32();
256 TCGv tmp
= tcg_temp_new();
257 gen_helper_f_to_memory(tmp32
, t0
);
258 tcg_gen_extu_i32_i64(tmp
, tmp32
);
259 tcg_gen_qemu_st32(tmp
, t1
, flags
);
261 tcg_temp_free_i32(tmp32
);
264 static inline void gen_qemu_stg(TCGv t0
, TCGv t1
, int flags
)
266 TCGv tmp
= tcg_temp_new();
267 gen_helper_g_to_memory(tmp
, t0
);
268 tcg_gen_qemu_st64(tmp
, t1
, flags
);
272 static inline void gen_qemu_sts(TCGv t0
, TCGv t1
, int flags
)
274 TCGv_i32 tmp32
= tcg_temp_new_i32();
275 TCGv tmp
= tcg_temp_new();
276 gen_helper_s_to_memory(tmp32
, t0
);
277 tcg_gen_extu_i32_i64(tmp
, tmp32
);
278 tcg_gen_qemu_st32(tmp
, t1
, flags
);
280 tcg_temp_free_i32(tmp32
);
283 static inline void gen_store_mem(DisasContext
*ctx
,
284 void (*tcg_gen_qemu_store
)(TCGv t0
, TCGv t1
,
286 int ra
, int rb
, int32_t disp16
, int fp
,
291 addr
= tcg_temp_new();
293 tcg_gen_addi_i64(addr
, cpu_ir
[rb
], disp16
);
295 tcg_gen_andi_i64(addr
, addr
, ~0x7);
301 tcg_gen_movi_i64(addr
, disp16
);
305 va
= tcg_const_i64(0);
307 va
= (fp
? cpu_fir
[ra
] : cpu_ir
[ra
]);
309 tcg_gen_qemu_store(va
, addr
, ctx
->mem_idx
);
static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                        int32_t disp16, int quad)
{
    TCGv addr;

    if (ra == 31) {
        /* ??? Don't bother storing anything.  The user can't tell
           the difference, since the zero register always reads zero.  */
        return NO_EXIT;
    }

#if defined(CONFIG_USER_ONLY)
    addr = cpu_lock_st_addr;
#else
    addr = tcg_temp_local_new();
#endif

    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
    } else {
        tcg_gen_movi_i64(addr, disp16);
    }

#if defined(CONFIG_USER_ONLY)
    /* ??? This is handled via a complicated version of compare-and-swap
       in the cpu_loop.  Hopefully one day we'll have a real CAS opcode
       in TCG so that this isn't necessary.  */
    return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
#else
    /* ??? In system mode we are never multi-threaded, so CAS can be
       implemented via a non-atomic load-compare-store sequence.  */
    {
        int lab_fail, lab_done;
        TCGv val;

        lab_fail = gen_new_label();
        lab_done = gen_new_label();
        tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);

        val = tcg_temp_new();
        if (quad) {
            tcg_gen_qemu_ld64(val, addr, ctx->mem_idx);
        } else {
            tcg_gen_qemu_ld32s(val, addr, ctx->mem_idx);
        }
        tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);

        if (quad) {
            tcg_gen_qemu_st64(cpu_ir[ra], addr, ctx->mem_idx);
        } else {
            tcg_gen_qemu_st32(cpu_ir[ra], addr, ctx->mem_idx);
        }
        tcg_gen_movi_i64(cpu_ir[ra], 1);
        tcg_gen_br(lab_done);

        gen_set_label(lab_fail);
        tcg_gen_movi_i64(cpu_ir[ra], 0);

        gen_set_label(lab_done);
        tcg_gen_movi_i64(cpu_lock_addr, -1);

        tcg_temp_free(addr);
        return NO_EXIT;
    }
#endif
}
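
/* Illustrative sketch (not part of the translator): the load-locked /
   store-conditional protocol that gen_qemu_ldl_l/ldq_l and
   gen_store_conditional implement with TCG ops, modelled as plain C on the
   architectural state involved (lock_addr/lock_value).  In system mode the
   emitted sequence is deliberately non-atomic, which is only safe because
   this emulation runs single-threaded.  All names here are hypothetical and
   the model covers aligned quadwords only. */
struct lock_state_sketch {
    uint64_t lock_addr;             /* (uint64_t)-1 means "no reservation" */
    uint64_t lock_value;
};

static inline uint64_t ldq_l_sketch(struct lock_state_sketch *s,
                                    const uint64_t *mem, uint64_t addr)
{
    uint64_t val = mem[addr / 8];   /* LDQ_L: load and record a reservation */
    s->lock_addr = addr;
    s->lock_value = val;
    return val;
}

static inline int stq_c_sketch(struct lock_state_sketch *s,
                               uint64_t *mem, uint64_t addr, uint64_t new_val)
{
    int ok = (addr == s->lock_addr && mem[addr / 8] == s->lock_value);
    if (ok) {
        mem[addr / 8] = new_val;    /* STQ_C: store only if still "locked" */
    }
    s->lock_addr = (uint64_t)-1;    /* the reservation is consumed either way */
    return ok;                      /* Ra receives 1 on success, 0 on failure */
}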
static int use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    /* Check for the dest on the same page as the start of the TB.  We
       also want to suppress goto_tb in the case of single-stepping and IO.  */
    return (((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0
            && !ctx->env->singlestep_enabled
            && !(ctx->tb->cflags & CF_LAST_IO));
}
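
/* Illustrative sketch (not used by the translator): the same-page test that
   use_goto_tb performs, on plain integers.  Direct TB chaining patches a jump
   into the generated host code, so it is only allowed when the branch target
   lies in the same guest page as the TB start, and it is also suppressed for
   single-stepping and icount/IO TBs.  The 8K page size below is an assumption
   standing in for the real TARGET_PAGE_MASK. */
static inline int same_page_sketch(uint64_t tb_pc, uint64_t dest)
{
    const uint64_t page_mask = ~(uint64_t)0x1fff;   /* assumed 8K pages */
    return ((tb_pc ^ dest) & page_mask) == 0;
}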
393 static ExitStatus
gen_bdirect(DisasContext
*ctx
, int ra
, int32_t disp
)
395 uint64_t dest
= ctx
->pc
+ (disp
<< 2);
398 tcg_gen_movi_i64(cpu_ir
[ra
], ctx
->pc
);
401 /* Notice branch-to-next; used to initialize RA with the PC. */
404 } else if (use_goto_tb(ctx
, dest
)) {
406 tcg_gen_movi_i64(cpu_pc
, dest
);
407 tcg_gen_exit_tb((tcg_target_long
)ctx
->tb
);
410 tcg_gen_movi_i64(cpu_pc
, dest
);
411 return EXIT_PC_UPDATED
;
415 static ExitStatus
gen_bcond_internal(DisasContext
*ctx
, TCGCond cond
,
416 TCGv cmp
, int32_t disp
)
418 uint64_t dest
= ctx
->pc
+ (disp
<< 2);
419 int lab_true
= gen_new_label();
421 if (use_goto_tb(ctx
, dest
)) {
422 tcg_gen_brcondi_i64(cond
, cmp
, 0, lab_true
);
425 tcg_gen_movi_i64(cpu_pc
, ctx
->pc
);
426 tcg_gen_exit_tb((tcg_target_long
)ctx
->tb
);
428 gen_set_label(lab_true
);
430 tcg_gen_movi_i64(cpu_pc
, dest
);
431 tcg_gen_exit_tb((tcg_target_long
)ctx
->tb
+ 1);
435 int lab_over
= gen_new_label();
437 /* ??? Consider using either
440 movcond pc, cond, 0, tmp, pc
447 The current diamond subgraph surely isn't efficient. */
449 tcg_gen_brcondi_i64(cond
, cmp
, 0, lab_true
);
450 tcg_gen_movi_i64(cpu_pc
, ctx
->pc
);
451 tcg_gen_br(lab_over
);
452 gen_set_label(lab_true
);
453 tcg_gen_movi_i64(cpu_pc
, dest
);
454 gen_set_label(lab_over
);
456 return EXIT_PC_UPDATED
;
460 static ExitStatus
gen_bcond(DisasContext
*ctx
, TCGCond cond
, int ra
,
461 int32_t disp
, int mask
)
465 if (unlikely(ra
== 31)) {
466 cmp_tmp
= tcg_const_i64(0);
468 cmp_tmp
= tcg_temp_new();
470 tcg_gen_andi_i64(cmp_tmp
, cpu_ir
[ra
], 1);
472 tcg_gen_mov_i64(cmp_tmp
, cpu_ir
[ra
]);
476 return gen_bcond_internal(ctx
, cond
, cmp_tmp
, disp
);
/* Fold -0.0 for comparison with COND.  */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}
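
/* Illustrative sketch (not part of the original file): what gen_fold_mzero
   computes, as plain C on the raw 64-bit register image.  Alpha FP branches
   and cmoves test the IEEE sign bit, so -0.0 (0x8000000000000000) has to
   behave like +0.0 for ==, !=, >= and <; the three strategies below mirror
   the three switch arms above.  The function name is hypothetical. */
static inline uint64_t fold_mzero_sketch(int is_eq_ne, int is_ge_lt,
                                         uint64_t src)
{
    const uint64_t mzero = 1ull << 63;

    if (is_eq_ne) {
        return src & (mzero - 1);          /* drop the sign bit entirely */
    }
    if (is_ge_lt) {
        return src != mzero ? src : 0;     /* map -0.0 to +0.0, keep the rest */
    }
    return src;                            /* <= and > already compare right */
}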
511 static ExitStatus
gen_fbcond(DisasContext
*ctx
, TCGCond cond
, int ra
,
516 if (unlikely(ra
== 31)) {
517 /* Very uncommon case, but easier to optimize it to an integer
518 comparison than continuing with the floating point comparison. */
519 return gen_bcond(ctx
, cond
, ra
, disp
, 0);
522 cmp_tmp
= tcg_temp_new();
523 gen_fold_mzero(cond
, cmp_tmp
, cpu_fir
[ra
]);
524 return gen_bcond_internal(ctx
, cond
, cmp_tmp
, disp
);
527 static void gen_cmov(TCGCond cond
, int ra
, int rb
, int rc
,
528 int islit
, uint8_t lit
, int mask
)
530 TCGCond inv_cond
= tcg_invert_cond(cond
);
533 if (unlikely(rc
== 31))
536 l1
= gen_new_label();
540 TCGv tmp
= tcg_temp_new();
541 tcg_gen_andi_i64(tmp
, cpu_ir
[ra
], 1);
542 tcg_gen_brcondi_i64(inv_cond
, tmp
, 0, l1
);
545 tcg_gen_brcondi_i64(inv_cond
, cpu_ir
[ra
], 0, l1
);
547 /* Very uncommon case - Do not bother to optimize. */
548 TCGv tmp
= tcg_const_i64(0);
549 tcg_gen_brcondi_i64(inv_cond
, tmp
, 0, l1
);
554 tcg_gen_movi_i64(cpu_ir
[rc
], lit
);
556 tcg_gen_mov_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
560 static void gen_fcmov(TCGCond cond
, int ra
, int rb
, int rc
)
565 if (unlikely(rc
== 31)) {
569 cmp_tmp
= tcg_temp_new();
570 if (unlikely(ra
== 31)) {
571 tcg_gen_movi_i64(cmp_tmp
, 0);
573 gen_fold_mzero(cond
, cmp_tmp
, cpu_fir
[ra
]);
576 l1
= gen_new_label();
577 tcg_gen_brcondi_i64(tcg_invert_cond(cond
), cmp_tmp
, 0, l1
);
578 tcg_temp_free(cmp_tmp
);
581 tcg_gen_mov_i64(cpu_fir
[rc
], cpu_fir
[rb
]);
583 tcg_gen_movi_i64(cpu_fir
[rc
], 0);
#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */
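
/* Illustrative sketch (not part of the original file): how the 11-bit
   function field of an FP operate instruction splits into the function code
   proper (its low 6 bits, the "fpfn" used by translate_one) and the
   qualifier bits defined above.  Struct and function names are hypothetical. */
struct fp_qual_sketch {
    int fpfn;           /* which operation: ADDS, MULT, CVTTQ, ... */
    int rm;             /* QUAL_RM_MASK field: rounding mode */
    int trap_u_v;       /* QUAL_U / QUAL_V (same bit, meaning depends on output) */
    int trap_i;         /* QUAL_I: inexact detection */
    int trap_s;         /* QUAL_S: software completion */
};

static inline struct fp_qual_sketch decode_fn11_sketch(int fn11)
{
    struct fp_qual_sketch q;

    q.fpfn     = fn11 & 0x3F;
    q.rm       = fn11 & QUAL_RM_MASK;
    q.trap_u_v = fn11 & QUAL_U;
    q.trap_i   = fn11 & QUAL_I;
    q.trap_s   = fn11 & QUAL_S;
    return q;
}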
598 static void gen_qual_roundmode(DisasContext
*ctx
, int fn11
)
602 fn11
&= QUAL_RM_MASK
;
603 if (fn11
== ctx
->tb_rm
) {
608 tmp
= tcg_temp_new_i32();
611 tcg_gen_movi_i32(tmp
, float_round_nearest_even
);
614 tcg_gen_movi_i32(tmp
, float_round_to_zero
);
617 tcg_gen_movi_i32(tmp
, float_round_down
);
620 tcg_gen_ld8u_i32(tmp
, cpu_env
, offsetof(CPUState
, fpcr_dyn_round
));
624 #if defined(CONFIG_SOFTFLOAT_INLINE)
625 /* ??? The "softfloat.h" interface is to call set_float_rounding_mode.
626 With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
627 sets the one field. */
628 tcg_gen_st8_i32(tmp
, cpu_env
,
629 offsetof(CPUState
, fp_status
.float_rounding_mode
));
631 gen_helper_setroundmode(tmp
);
634 tcg_temp_free_i32(tmp
);
637 static void gen_qual_flushzero(DisasContext
*ctx
, int fn11
)
642 if (fn11
== ctx
->tb_ftz
) {
647 tmp
= tcg_temp_new_i32();
649 /* Underflow is enabled, use the FPCR setting. */
650 tcg_gen_ld8u_i32(tmp
, cpu_env
, offsetof(CPUState
, fpcr_flush_to_zero
));
652 /* Underflow is disabled, force flush-to-zero. */
653 tcg_gen_movi_i32(tmp
, 1);
656 #if defined(CONFIG_SOFTFLOAT_INLINE)
657 tcg_gen_st8_i32(tmp
, cpu_env
,
658 offsetof(CPUState
, fp_status
.flush_to_zero
));
660 gen_helper_setflushzero(tmp
);
663 tcg_temp_free_i32(tmp
);
666 static TCGv
gen_ieee_input(int reg
, int fn11
, int is_cmp
)
668 TCGv val
= tcg_temp_new();
670 tcg_gen_movi_i64(val
, 0);
671 } else if (fn11
& QUAL_S
) {
672 gen_helper_ieee_input_s(val
, cpu_fir
[reg
]);
674 gen_helper_ieee_input_cmp(val
, cpu_fir
[reg
]);
676 gen_helper_ieee_input(val
, cpu_fir
[reg
]);
681 static void gen_fp_exc_clear(void)
683 #if defined(CONFIG_SOFTFLOAT_INLINE)
684 TCGv_i32 zero
= tcg_const_i32(0);
685 tcg_gen_st8_i32(zero
, cpu_env
,
686 offsetof(CPUState
, fp_status
.float_exception_flags
));
687 tcg_temp_free_i32(zero
);
689 gen_helper_fp_exc_clear();
693 static void gen_fp_exc_raise_ignore(int rc
, int fn11
, int ignore
)
695 /* ??? We ought to be able to do something with imprecise exceptions.
696 E.g. notice we're still in the trap shadow of something within the
697 TB and do not generate the code to signal the exception; end the TB
698 when an exception is forced to arrive, either by consumption of a
699 register value or TRAPB or EXCB. */
700 TCGv_i32 exc
= tcg_temp_new_i32();
703 #if defined(CONFIG_SOFTFLOAT_INLINE)
704 tcg_gen_ld8u_i32(exc
, cpu_env
,
705 offsetof(CPUState
, fp_status
.float_exception_flags
));
707 gen_helper_fp_exc_get(exc
);
711 tcg_gen_andi_i32(exc
, exc
, ~ignore
);
714 /* ??? Pass in the regno of the destination so that the helper can
715 set EXC_MASK, which contains a bitmask of destination registers
716 that have caused arithmetic traps. A simple userspace emulation
717 does not require this. We do need it for a guest kernel's entArith,
718 or if we were to do something clever with imprecise exceptions. */
719 reg
= tcg_const_i32(rc
+ 32);
722 gen_helper_fp_exc_raise_s(exc
, reg
);
724 gen_helper_fp_exc_raise(exc
, reg
);
727 tcg_temp_free_i32(reg
);
728 tcg_temp_free_i32(exc
);
731 static inline void gen_fp_exc_raise(int rc
, int fn11
)
733 gen_fp_exc_raise_ignore(rc
, fn11
, fn11
& QUAL_I
? 0 : float_flag_inexact
);
static void gen_fcvtlq(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        /* The arithmetic right shift here, plus the sign-extended mask below
           yields a sign-extended result without an explicit ext32s_i64.  */
        tcg_gen_sari_i64(tmp, cpu_fir[rb], 32);
        tcg_gen_shri_i64(cpu_fir[rc], cpu_fir[rb], 29);
        tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rc], 0x3fffffff);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}
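
/* Illustrative sketch (not part of the original file): the bit shuffle that
   gen_fcvtlq emits, as plain C.  A longword held in an FP register keeps bits
   <31:30> of the value at <63:62> and bits <29:0> at <58:29>; CVTLQ gathers
   them back into a sign-extended 64-bit integer.  The arithmetic shift by 32
   combined with the sign-extended 0xc0000000 mask is what makes a separate
   ext32s step unnecessary, as the comment above notes. */
static inline uint64_t fcvtlq_sketch(uint64_t fr)
{
    uint64_t hi = ((int64_t)fr >> 32) & 0xffffffffc0000000ull;  /* sign-extended 0xc0000000 */
    uint64_t lo = (fr >> 29) & 0x3fffffff;
    return hi | lo;
}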
758 static void gen_fcvtql(int rb
, int rc
)
760 if (unlikely(rc
== 31)) {
763 if (unlikely(rb
== 31)) {
764 tcg_gen_movi_i64(cpu_fir
[rc
], 0);
766 TCGv tmp
= tcg_temp_new();
768 tcg_gen_andi_i64(tmp
, cpu_fir
[rb
], 0xC0000000);
769 tcg_gen_andi_i64(cpu_fir
[rc
], cpu_fir
[rb
], 0x3FFFFFFF);
770 tcg_gen_shli_i64(tmp
, tmp
, 32);
771 tcg_gen_shli_i64(cpu_fir
[rc
], cpu_fir
[rc
], 29);
772 tcg_gen_or_i64(cpu_fir
[rc
], cpu_fir
[rc
], tmp
);
778 static void gen_fcvtql_v(DisasContext
*ctx
, int rb
, int rc
)
781 int lab
= gen_new_label();
782 TCGv tmp
= tcg_temp_new();
784 tcg_gen_ext32s_i64(tmp
, cpu_fir
[rb
]);
785 tcg_gen_brcond_i64(TCG_COND_EQ
, tmp
, cpu_fir
[rb
], lab
);
786 gen_excp(ctx
, EXCP_ARITH
, EXC_M_IOV
);
793 #define FARITH2(name) \
794 static inline void glue(gen_f, name)(int rb, int rc) \
796 if (unlikely(rc == 31)) { \
800 gen_helper_ ## name (cpu_fir[rc], cpu_fir[rb]); \
802 TCGv tmp = tcg_const_i64(0); \
803 gen_helper_ ## name (cpu_fir[rc], tmp); \
804 tcg_temp_free(tmp); \
808 /* ??? VAX instruction qualifiers ignored. */
816 static void gen_ieee_arith2(DisasContext
*ctx
, void (*helper
)(TCGv
, TCGv
),
817 int rb
, int rc
, int fn11
)
821 /* ??? This is wrong: the instruction is not a nop, it still may
823 if (unlikely(rc
== 31)) {
827 gen_qual_roundmode(ctx
, fn11
);
828 gen_qual_flushzero(ctx
, fn11
);
831 vb
= gen_ieee_input(rb
, fn11
, 0);
832 helper(cpu_fir
[rc
], vb
);
835 gen_fp_exc_raise(rc
, fn11
);
838 #define IEEE_ARITH2(name) \
839 static inline void glue(gen_f, name)(DisasContext *ctx, \
840 int rb, int rc, int fn11) \
842 gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11); \
849 static void gen_fcvttq(DisasContext
*ctx
, int rb
, int rc
, int fn11
)
854 /* ??? This is wrong: the instruction is not a nop, it still may
856 if (unlikely(rc
== 31)) {
860 /* No need to set flushzero, since we have an integer output. */
862 vb
= gen_ieee_input(rb
, fn11
, 0);
864 /* Almost all integer conversions use cropped rounding, and most
865 also do not have integer overflow enabled. Special case that. */
868 gen_helper_cvttq_c(cpu_fir
[rc
], vb
);
870 case QUAL_V
| QUAL_RM_C
:
871 case QUAL_S
| QUAL_V
| QUAL_RM_C
:
872 ignore
= float_flag_inexact
;
874 case QUAL_S
| QUAL_V
| QUAL_I
| QUAL_RM_C
:
875 gen_helper_cvttq_svic(cpu_fir
[rc
], vb
);
878 gen_qual_roundmode(ctx
, fn11
);
879 gen_helper_cvttq(cpu_fir
[rc
], vb
);
880 ignore
|= (fn11
& QUAL_V
? 0 : float_flag_overflow
);
881 ignore
|= (fn11
& QUAL_I
? 0 : float_flag_inexact
);
886 gen_fp_exc_raise_ignore(rc
, fn11
, ignore
);
889 static void gen_ieee_intcvt(DisasContext
*ctx
, void (*helper
)(TCGv
, TCGv
),
890 int rb
, int rc
, int fn11
)
894 /* ??? This is wrong: the instruction is not a nop, it still may
896 if (unlikely(rc
== 31)) {
900 gen_qual_roundmode(ctx
, fn11
);
903 vb
= tcg_const_i64(0);
908 /* The only exception that can be raised by integer conversion
909 is inexact. Thus we only need to worry about exceptions when
910 inexact handling is requested. */
913 helper(cpu_fir
[rc
], vb
);
914 gen_fp_exc_raise(rc
, fn11
);
916 helper(cpu_fir
[rc
], vb
);
924 #define IEEE_INTCVT(name) \
925 static inline void glue(gen_f, name)(DisasContext *ctx, \
926 int rb, int rc, int fn11) \
928 gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11); \
933 static void gen_cpys_internal(int ra
, int rb
, int rc
, int inv_a
, uint64_t mask
)
938 if (unlikely(rc
== 31)) {
942 vmask
= tcg_const_i64(mask
);
952 va
= tcg_temp_new_i64();
953 tcg_gen_mov_i64(va
, cpu_fir
[ra
]);
955 tcg_gen_andc_i64(va
, vmask
, va
);
957 tcg_gen_and_i64(va
, va
, vmask
);
965 vb
= tcg_temp_new_i64();
966 tcg_gen_andc_i64(vb
, cpu_fir
[rb
], vmask
);
969 switch (za
<< 1 | zb
) {
971 tcg_gen_or_i64(cpu_fir
[rc
], va
, vb
);
974 tcg_gen_mov_i64(cpu_fir
[rc
], va
);
977 tcg_gen_mov_i64(cpu_fir
[rc
], vb
);
980 tcg_gen_movi_i64(cpu_fir
[rc
], 0);
984 tcg_temp_free(vmask
);
993 static inline void gen_fcpys(int ra
, int rb
, int rc
)
995 gen_cpys_internal(ra
, rb
, rc
, 0, 0x8000000000000000ULL
);
998 static inline void gen_fcpysn(int ra
, int rb
, int rc
)
1000 gen_cpys_internal(ra
, rb
, rc
, 1, 0x8000000000000000ULL
);
1003 static inline void gen_fcpyse(int ra
, int rb
, int rc
)
1005 gen_cpys_internal(ra
, rb
, rc
, 0, 0xFFF0000000000000ULL
);
1008 #define FARITH3(name) \
1009 static inline void glue(gen_f, name)(int ra, int rb, int rc) \
1013 if (unlikely(rc == 31)) { \
1017 va = tcg_const_i64(0); \
1022 vb = tcg_const_i64(0); \
1027 gen_helper_ ## name (cpu_fir[rc], va, vb); \
1030 tcg_temp_free(va); \
1033 tcg_temp_free(vb); \
1037 /* ??? VAX instruction qualifiers ignored. */
1050 static void gen_ieee_arith3(DisasContext
*ctx
,
1051 void (*helper
)(TCGv
, TCGv
, TCGv
),
1052 int ra
, int rb
, int rc
, int fn11
)
1056 /* ??? This is wrong: the instruction is not a nop, it still may
1057 raise exceptions. */
1058 if (unlikely(rc
== 31)) {
1062 gen_qual_roundmode(ctx
, fn11
);
1063 gen_qual_flushzero(ctx
, fn11
);
1066 va
= gen_ieee_input(ra
, fn11
, 0);
1067 vb
= gen_ieee_input(rb
, fn11
, 0);
1068 helper(cpu_fir
[rc
], va
, vb
);
1072 gen_fp_exc_raise(rc
, fn11
);
1075 #define IEEE_ARITH3(name) \
1076 static inline void glue(gen_f, name)(DisasContext *ctx, \
1077 int ra, int rb, int rc, int fn11) \
1079 gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11); \
1090 static void gen_ieee_compare(DisasContext
*ctx
,
1091 void (*helper
)(TCGv
, TCGv
, TCGv
),
1092 int ra
, int rb
, int rc
, int fn11
)
1096 /* ??? This is wrong: the instruction is not a nop, it still may
1097 raise exceptions. */
1098 if (unlikely(rc
== 31)) {
1104 va
= gen_ieee_input(ra
, fn11
, 1);
1105 vb
= gen_ieee_input(rb
, fn11
, 1);
1106 helper(cpu_fir
[rc
], va
, vb
);
1110 gen_fp_exc_raise(rc
, fn11
);
1113 #define IEEE_CMP3(name) \
1114 static inline void glue(gen_f, name)(DisasContext *ctx, \
1115 int ra, int rb, int rc, int fn11) \
1117 gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11); \
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1)
            mask |= 0xffull << (i * 8);
    }
    return mask;
}
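
/* Illustrative examples (not part of the translator): a few expansions of
   zapnot_mask, showing why gen_zapnoti below can special-case the byte-mask
   literals 0x01/0x03/0x0f/0xff as ext8u/ext16u/ext32u/mov.  Each set bit i of
   the literal keeps byte i of the operand:
       zapnot_mask(0x01) == 0x00000000000000ff   (low byte  -> ext8u)
       zapnot_mask(0x03) == 0x000000000000ffff   (low word  -> ext16u)
       zapnot_mask(0x0f) == 0x00000000ffffffff   (low long  -> ext32u)
       zapnot_mask(0xff) == 0xffffffffffffffff   (all bytes -> plain mov)  */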
1136 /* Implement zapnot with an immediate operand, which expands to some
1137 form of immediate AND. This is a basic building block in the
1138 definition of many of the other byte manipulation instructions. */
1139 static void gen_zapnoti(TCGv dest
, TCGv src
, uint8_t lit
)
1143 tcg_gen_movi_i64(dest
, 0);
1146 tcg_gen_ext8u_i64(dest
, src
);
1149 tcg_gen_ext16u_i64(dest
, src
);
1152 tcg_gen_ext32u_i64(dest
, src
);
1155 tcg_gen_mov_i64(dest
, src
);
1158 tcg_gen_andi_i64 (dest
, src
, zapnot_mask (lit
));
1163 static inline void gen_zapnot(int ra
, int rb
, int rc
, int islit
, uint8_t lit
)
1165 if (unlikely(rc
== 31))
1167 else if (unlikely(ra
== 31))
1168 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
1170 gen_zapnoti(cpu_ir
[rc
], cpu_ir
[ra
], lit
);
1172 gen_helper_zapnot (cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
1175 static inline void gen_zap(int ra
, int rb
, int rc
, int islit
, uint8_t lit
)
1177 if (unlikely(rc
== 31))
1179 else if (unlikely(ra
== 31))
1180 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
1182 gen_zapnoti(cpu_ir
[rc
], cpu_ir
[ra
], ~lit
);
1184 gen_helper_zap (cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
1188 /* EXTWH, EXTLH, EXTQH */
1189 static void gen_ext_h(int ra
, int rb
, int rc
, int islit
,
1190 uint8_t lit
, uint8_t byte_mask
)
1192 if (unlikely(rc
== 31))
1194 else if (unlikely(ra
== 31))
1195 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
1198 lit
= (64 - (lit
& 7) * 8) & 0x3f;
1199 tcg_gen_shli_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
);
1201 TCGv tmp1
= tcg_temp_new();
1202 tcg_gen_andi_i64(tmp1
, cpu_ir
[rb
], 7);
1203 tcg_gen_shli_i64(tmp1
, tmp1
, 3);
1204 tcg_gen_neg_i64(tmp1
, tmp1
);
1205 tcg_gen_andi_i64(tmp1
, tmp1
, 0x3f);
1206 tcg_gen_shl_i64(cpu_ir
[rc
], cpu_ir
[ra
], tmp1
);
1207 tcg_temp_free(tmp1
);
1209 gen_zapnoti(cpu_ir
[rc
], cpu_ir
[rc
], byte_mask
);
1213 /* EXTBL, EXTWL, EXTLL, EXTQL */
1214 static void gen_ext_l(int ra
, int rb
, int rc
, int islit
,
1215 uint8_t lit
, uint8_t byte_mask
)
1217 if (unlikely(rc
== 31))
1219 else if (unlikely(ra
== 31))
1220 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
1223 tcg_gen_shri_i64(cpu_ir
[rc
], cpu_ir
[ra
], (lit
& 7) * 8);
1225 TCGv tmp
= tcg_temp_new();
1226 tcg_gen_andi_i64(tmp
, cpu_ir
[rb
], 7);
1227 tcg_gen_shli_i64(tmp
, tmp
, 3);
1228 tcg_gen_shr_i64(cpu_ir
[rc
], cpu_ir
[ra
], tmp
);
1231 gen_zapnoti(cpu_ir
[rc
], cpu_ir
[rc
], byte_mask
);
/* INSWH, INSLH, INSQH */
static void gen_ins_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31) || (islit && (lit & 7) == 0))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti (tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            /* Note that we have handled the lit==0 case above.  */
            tcg_gen_shri_i64 (cpu_ir[rc], tmp, 64 - (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();

            /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
               Do this portably by splitting the shift into two parts:
               shift_count-1 and 1.  Arrange for the -1 by using
               ones-complement instead of twos-complement in the negation:
               ~((B & 7) * 8) & 63.  */

            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_not_i64(shift, shift);
            tcg_gen_andi_i64(shift, shift, 0x3f);

            tcg_gen_shr_i64(cpu_ir[rc], tmp, shift);
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[rc], 1);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}
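
/* Illustrative sketch (not part of the original file): the shift-splitting
   trick used above, as plain C.  A right shift by 64 is undefined both in C
   and for the TCG shift ops, so "x >> (64 - 8*(b & 7))" is performed as a
   shift by (~(8*(b & 7)) & 63) followed by one more shift by 1; the one's
   complement supplies the required "minus one".  The helper name is
   hypothetical. */
static inline uint64_t shr_64_minus_sketch(uint64_t x, unsigned b)
{
    unsigned cnt = (~(8 * (b & 7))) & 63;   /* == 63 - 8*(b & 7) */
    return (x >> cnt) >> 1;                 /* total 64 - 8*(b & 7); 0 when b%8 == 0 */
}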
1277 /* INSBL, INSWL, INSLL, INSQL */
1278 static void gen_ins_l(int ra
, int rb
, int rc
, int islit
,
1279 uint8_t lit
, uint8_t byte_mask
)
1281 if (unlikely(rc
== 31))
1283 else if (unlikely(ra
== 31))
1284 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
1286 TCGv tmp
= tcg_temp_new();
1288 /* The instruction description has us left-shift the byte mask
1289 the same number of byte slots as the data and apply the zap
1290 at the end. This is equivalent to simply performing the zap
1291 first and shifting afterward. */
1292 gen_zapnoti (tmp
, cpu_ir
[ra
], byte_mask
);
1295 tcg_gen_shli_i64(cpu_ir
[rc
], tmp
, (lit
& 7) * 8);
1297 TCGv shift
= tcg_temp_new();
1298 tcg_gen_andi_i64(shift
, cpu_ir
[rb
], 7);
1299 tcg_gen_shli_i64(shift
, shift
, 3);
1300 tcg_gen_shl_i64(cpu_ir
[rc
], tmp
, shift
);
1301 tcg_temp_free(shift
);
1307 /* MSKWH, MSKLH, MSKQH */
1308 static void gen_msk_h(int ra
, int rb
, int rc
, int islit
,
1309 uint8_t lit
, uint8_t byte_mask
)
1311 if (unlikely(rc
== 31))
1313 else if (unlikely(ra
== 31))
1314 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
1316 gen_zapnoti (cpu_ir
[rc
], cpu_ir
[ra
], ~((byte_mask
<< (lit
& 7)) >> 8));
1318 TCGv shift
= tcg_temp_new();
1319 TCGv mask
= tcg_temp_new();
1321 /* The instruction description is as above, where the byte_mask
1322 is shifted left, and then we extract bits <15:8>. This can be
1323 emulated with a right-shift on the expanded byte mask. This
1324 requires extra care because for an input <2:0> == 0 we need a
1325 shift of 64 bits in order to generate a zero. This is done by
1326 splitting the shift into two parts, the variable shift - 1
1327 followed by a constant 1 shift. The code we expand below is
1328 equivalent to ~((B & 7) * 8) & 63. */
1330 tcg_gen_andi_i64(shift
, cpu_ir
[rb
], 7);
1331 tcg_gen_shli_i64(shift
, shift
, 3);
1332 tcg_gen_not_i64(shift
, shift
);
1333 tcg_gen_andi_i64(shift
, shift
, 0x3f);
1334 tcg_gen_movi_i64(mask
, zapnot_mask (byte_mask
));
1335 tcg_gen_shr_i64(mask
, mask
, shift
);
1336 tcg_gen_shri_i64(mask
, mask
, 1);
1338 tcg_gen_andc_i64(cpu_ir
[rc
], cpu_ir
[ra
], mask
);
1340 tcg_temp_free(mask
);
1341 tcg_temp_free(shift
);
1345 /* MSKBL, MSKWL, MSKLL, MSKQL */
1346 static void gen_msk_l(int ra
, int rb
, int rc
, int islit
,
1347 uint8_t lit
, uint8_t byte_mask
)
1349 if (unlikely(rc
== 31))
1351 else if (unlikely(ra
== 31))
1352 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
1354 gen_zapnoti (cpu_ir
[rc
], cpu_ir
[ra
], ~(byte_mask
<< (lit
& 7)));
1356 TCGv shift
= tcg_temp_new();
1357 TCGv mask
= tcg_temp_new();
1359 tcg_gen_andi_i64(shift
, cpu_ir
[rb
], 7);
1360 tcg_gen_shli_i64(shift
, shift
, 3);
1361 tcg_gen_movi_i64(mask
, zapnot_mask (byte_mask
));
1362 tcg_gen_shl_i64(mask
, mask
, shift
);
1364 tcg_gen_andc_i64(cpu_ir
[rc
], cpu_ir
[ra
], mask
);
1366 tcg_temp_free(mask
);
1367 tcg_temp_free(shift
);
1371 /* Code to call arith3 helpers */
1372 #define ARITH3(name) \
1373 static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,\
1376 if (unlikely(rc == 31)) \
1381 TCGv tmp = tcg_const_i64(lit); \
1382 gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp); \
1383 tcg_temp_free(tmp); \
1385 gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \
1387 TCGv tmp1 = tcg_const_i64(0); \
1389 TCGv tmp2 = tcg_const_i64(lit); \
1390 gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2); \
1391 tcg_temp_free(tmp2); \
1393 gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]); \
1394 tcg_temp_free(tmp1); \
1415 #define MVIOP2(name) \
1416 static inline void glue(gen_, name)(int rb, int rc) \
1418 if (unlikely(rc == 31)) \
1420 if (unlikely(rb == 31)) \
1421 tcg_gen_movi_i64(cpu_ir[rc], 0); \
1423 gen_helper_ ## name (cpu_ir[rc], cpu_ir[rb]); \
1430 static void gen_cmp(TCGCond cond
, int ra
, int rb
, int rc
,
1431 int islit
, uint8_t lit
)
1435 if (unlikely(rc
== 31)) {
1440 va
= tcg_const_i64(0);
1445 vb
= tcg_const_i64(lit
);
1450 tcg_gen_setcond_i64(cond
, cpu_ir
[rc
], va
, vb
);
1460 static void gen_rx(int ra
, int set
)
1465 tcg_gen_ld8u_i64(cpu_ir
[ra
], cpu_env
, offsetof(CPUState
, intr_flag
));
1468 tmp
= tcg_const_i32(set
);
1469 tcg_gen_st8_i32(tmp
, cpu_env
, offsetof(CPUState
, intr_flag
));
1470 tcg_temp_free_i32(tmp
);
1473 static ExitStatus
gen_call_pal(DisasContext
*ctx
, int palcode
)
1475 /* We're emulating OSF/1 PALcode. Many of these are trivial access
1476 to internal cpu registers. */
1478 /* Unprivileged PAL call */
1479 if (palcode
>= 0x80 && palcode
< 0xC0) {
1483 /* No-op inside QEMU. */
1487 tcg_gen_mov_i64(cpu_ir
[IR_V0
], cpu_unique
);
1491 tcg_gen_mov_i64(cpu_unique
, cpu_ir
[IR_A0
]);
1494 return gen_excp(ctx
, EXCP_CALL_PAL
, palcode
& 0xbf);
1499 #ifndef CONFIG_USER_ONLY
1500 /* Privileged PAL code */
1501 if (palcode
< 0x40 && (ctx
->tb
->flags
& TB_FLAGS_USER_MODE
) == 0) {
1505 /* No-op inside QEMU. */
1509 /* No-op inside QEMU. */
1513 tcg_gen_st_i64(cpu_ir
[IR_A0
], cpu_env
, offsetof(CPUState
, vptptr
));
1517 tcg_gen_mov_i64(cpu_sysval
, cpu_ir
[IR_A0
]);
1521 tcg_gen_mov_i64(cpu_ir
[IR_V0
], cpu_sysval
);
1528 /* Note that we already know we're in kernel mode, so we know
1529 that PS only contains the 3 IPL bits. */
1530 tcg_gen_ld8u_i64(cpu_ir
[IR_V0
], cpu_env
, offsetof(CPUState
, ps
));
1532 /* But make sure and store only the 3 IPL bits from the user. */
1533 tmp
= tcg_temp_new();
1534 tcg_gen_andi_i64(tmp
, cpu_ir
[IR_A0
], PS_INT_MASK
);
1535 tcg_gen_st8_i64(tmp
, cpu_env
, offsetof(CPUState
, ps
));
1542 tcg_gen_ld8u_i64(cpu_ir
[IR_V0
], cpu_env
, offsetof(CPUState
, ps
));
1546 tcg_gen_mov_i64(cpu_usp
, cpu_ir
[IR_A0
]);
1550 tcg_gen_mov_i64(cpu_ir
[IR_V0
], cpu_usp
);
1554 tcg_gen_ld32s_i64(cpu_ir
[IR_V0
], cpu_env
,
1555 offsetof(CPUState
, cpu_index
));
1559 return gen_excp(ctx
, EXCP_CALL_PAL
, palcode
& 0x3f);
1565 return gen_invalid(ctx
);
#ifndef CONFIG_USER_ONLY

#define PR_BYTE         0x100000
#define PR_LONG         0x200000

static int cpu_pr_data(int pr)
{
    switch (pr) {
    case  0: return offsetof(CPUAlphaState, ps) | PR_BYTE;
    case  1: return offsetof(CPUAlphaState, fen) | PR_BYTE;
    case  2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case  3: return offsetof(CPUAlphaState, trap_arg0);
    case  4: return offsetof(CPUAlphaState, trap_arg1);
    case  5: return offsetof(CPUAlphaState, trap_arg2);
    case  6: return offsetof(CPUAlphaState, exc_addr);
    case  7: return offsetof(CPUAlphaState, palbr);
    case  8: return offsetof(CPUAlphaState, ptbr);
    case  9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

    case 32 ... 39:
        return offsetof(CPUAlphaState, shadow[pr - 32]);
    case 40 ... 63:
        return offsetof(CPUAlphaState, scratch[pr - 40]);
    }
    return 0;
}
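
/* Illustrative sketch (not part of the translator): how a caller is expected
   to interpret cpu_pr_data's return value, exactly as gen_mfpr below does
   with TCG loads.  The low bits are an offset into the CPU state structure;
   PR_BYTE/PR_LONG select the access width for registers kept in sub-64-bit
   fields, and 0 means read-zero/write-ignore.  The function name is
   hypothetical. */
static inline uint64_t read_pr_sketch(const CPUAlphaState *env, int pr)
{
    int data = cpu_pr_data(pr);
    const char *base = (const char *)env;

    if (data == 0) {
        return 0;
    } else if (data & PR_BYTE) {
        return *(const uint8_t *)(base + (data & ~PR_BYTE));
    } else if (data & PR_LONG) {
        return (uint64_t)(int64_t)*(const int32_t *)(base + (data & ~PR_LONG));
    }
    return *(const uint64_t *)(base + data);
}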
1598 static void gen_mfpr(int ra
, int regno
)
1600 int data
= cpu_pr_data(regno
);
1602 /* In our emulated PALcode, these processor registers have no
1603 side effects from reading. */
1608 /* The basic registers are data only, and unknown registers
1609 are read-zero, write-ignore. */
1611 tcg_gen_movi_i64(cpu_ir
[ra
], 0);
1612 } else if (data
& PR_BYTE
) {
1613 tcg_gen_ld8u_i64(cpu_ir
[ra
], cpu_env
, data
& ~PR_BYTE
);
1614 } else if (data
& PR_LONG
) {
1615 tcg_gen_ld32s_i64(cpu_ir
[ra
], cpu_env
, data
& ~PR_LONG
);
1617 tcg_gen_ld_i64(cpu_ir
[ra
], cpu_env
, data
);
1621 static void gen_mtpr(int rb
, int regno
)
1626 tmp
= tcg_const_i64(0);
1631 /* These two register numbers perform a TLB cache flush. Thankfully we
1632 can only do this inside PALmode, which means that the current basic
1633 block cannot be affected by the change in mappings. */
1637 } else if (regno
== 254) {
1639 gen_helper_tbis(tmp
);
1641 /* The basic registers are data only, and unknown registers
1642 are read-zero, write-ignore. */
1643 int data
= cpu_pr_data(regno
);
1645 if (data
& PR_BYTE
) {
1646 tcg_gen_st8_i64(tmp
, cpu_env
, data
& ~PR_BYTE
);
1647 } else if (data
& PR_LONG
) {
1648 tcg_gen_st32_i64(tmp
, cpu_env
, data
& ~PR_LONG
);
1650 tcg_gen_st_i64(tmp
, cpu_env
, data
);
1659 #endif /* !USER_ONLY*/
static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
{
    uint32_t palcode;
    int32_t disp21, disp16, disp12;
    uint16_t fn11;
    uint8_t opc, ra, rb, rc, fpfn, fn7, fn2, islit, real_islit;
    uint8_t lit;
    ExitStatus ret;

    /* Decode all instruction fields */
    opc = insn >> 26;
    ra = (insn >> 21) & 0x1F;
    rb = (insn >> 16) & 0x1F;
    rc = insn & 0x1F;
    real_islit = islit = (insn >> 12) & 1;
    if (rb == 31 && !islit) {
        islit = 1;
        lit = 0;
    } else
        lit = (insn >> 13) & 0xFF;
    palcode = insn & 0x03FFFFFF;
    disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
    disp16 = (int16_t)(insn & 0x0000FFFF);
    disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
    fn11 = (insn >> 5) & 0x000007FF;
    fpfn = fn11 & 0x3F;
    fn7 = (insn >> 5) & 0x0000007F;
    fn2 = (insn >> 5) & 0x00000003;
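
    /* Illustrative note (not in the original file): disp21 and disp12 above
       use the standard "shift the field up to the sign position, then
       arithmetic-shift it back down" idiom to sign-extend a bitfield.  For a
       field of WIDTH bits taken from the low end of 'field':

           signed_value = (int32_t)(field << (32 - WIDTH)) >> (32 - WIDTH);

       so WIDTH=21 reproduces disp21 (the branch displacement) and WIDTH=12
       reproduces disp12 (the HW_LD/HW_ST displacement); disp16 gets the same
       effect more simply through the int16_t cast. */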
1689 LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
1690 opc
, ra
, rb
, rc
, disp16
);
1696 ret
= gen_call_pal(ctx
, palcode
);
1721 if (likely(ra
!= 31)) {
1723 tcg_gen_addi_i64(cpu_ir
[ra
], cpu_ir
[rb
], disp16
);
1725 tcg_gen_movi_i64(cpu_ir
[ra
], disp16
);
1730 if (likely(ra
!= 31)) {
1732 tcg_gen_addi_i64(cpu_ir
[ra
], cpu_ir
[rb
], disp16
<< 16);
1734 tcg_gen_movi_i64(cpu_ir
[ra
], disp16
<< 16);
1739 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_BWX
) {
1740 gen_load_mem(ctx
, &tcg_gen_qemu_ld8u
, ra
, rb
, disp16
, 0, 0);
1746 gen_load_mem(ctx
, &tcg_gen_qemu_ld64
, ra
, rb
, disp16
, 0, 1);
1750 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_BWX
) {
1751 gen_load_mem(ctx
, &tcg_gen_qemu_ld16u
, ra
, rb
, disp16
, 0, 0);
1757 gen_store_mem(ctx
, &tcg_gen_qemu_st16
, ra
, rb
, disp16
, 0, 0);
1761 gen_store_mem(ctx
, &tcg_gen_qemu_st8
, ra
, rb
, disp16
, 0, 0);
1765 gen_store_mem(ctx
, &tcg_gen_qemu_st64
, ra
, rb
, disp16
, 0, 1);
1771 if (likely(rc
!= 31)) {
1774 tcg_gen_addi_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
);
1775 tcg_gen_ext32s_i64(cpu_ir
[rc
], cpu_ir
[rc
]);
1777 tcg_gen_add_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
1778 tcg_gen_ext32s_i64(cpu_ir
[rc
], cpu_ir
[rc
]);
1782 tcg_gen_movi_i64(cpu_ir
[rc
], lit
);
1784 tcg_gen_ext32s_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1790 if (likely(rc
!= 31)) {
1792 TCGv tmp
= tcg_temp_new();
1793 tcg_gen_shli_i64(tmp
, cpu_ir
[ra
], 2);
1795 tcg_gen_addi_i64(tmp
, tmp
, lit
);
1797 tcg_gen_add_i64(tmp
, tmp
, cpu_ir
[rb
]);
1798 tcg_gen_ext32s_i64(cpu_ir
[rc
], tmp
);
1802 tcg_gen_movi_i64(cpu_ir
[rc
], lit
);
1804 tcg_gen_ext32s_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1810 if (likely(rc
!= 31)) {
1813 tcg_gen_subi_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
);
1815 tcg_gen_sub_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
1816 tcg_gen_ext32s_i64(cpu_ir
[rc
], cpu_ir
[rc
]);
1819 tcg_gen_movi_i64(cpu_ir
[rc
], -lit
);
1821 tcg_gen_neg_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1822 tcg_gen_ext32s_i64(cpu_ir
[rc
], cpu_ir
[rc
]);
1828 if (likely(rc
!= 31)) {
1830 TCGv tmp
= tcg_temp_new();
1831 tcg_gen_shli_i64(tmp
, cpu_ir
[ra
], 2);
1833 tcg_gen_subi_i64(tmp
, tmp
, lit
);
1835 tcg_gen_sub_i64(tmp
, tmp
, cpu_ir
[rb
]);
1836 tcg_gen_ext32s_i64(cpu_ir
[rc
], tmp
);
1840 tcg_gen_movi_i64(cpu_ir
[rc
], -lit
);
1842 tcg_gen_neg_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1843 tcg_gen_ext32s_i64(cpu_ir
[rc
], cpu_ir
[rc
]);
1850 gen_cmpbge(ra
, rb
, rc
, islit
, lit
);
1854 if (likely(rc
!= 31)) {
1856 TCGv tmp
= tcg_temp_new();
1857 tcg_gen_shli_i64(tmp
, cpu_ir
[ra
], 3);
1859 tcg_gen_addi_i64(tmp
, tmp
, lit
);
1861 tcg_gen_add_i64(tmp
, tmp
, cpu_ir
[rb
]);
1862 tcg_gen_ext32s_i64(cpu_ir
[rc
], tmp
);
1866 tcg_gen_movi_i64(cpu_ir
[rc
], lit
);
1868 tcg_gen_ext32s_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1874 if (likely(rc
!= 31)) {
1876 TCGv tmp
= tcg_temp_new();
1877 tcg_gen_shli_i64(tmp
, cpu_ir
[ra
], 3);
1879 tcg_gen_subi_i64(tmp
, tmp
, lit
);
1881 tcg_gen_sub_i64(tmp
, tmp
, cpu_ir
[rb
]);
1882 tcg_gen_ext32s_i64(cpu_ir
[rc
], tmp
);
1886 tcg_gen_movi_i64(cpu_ir
[rc
], -lit
);
1888 tcg_gen_neg_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1889 tcg_gen_ext32s_i64(cpu_ir
[rc
], cpu_ir
[rc
]);
1896 gen_cmp(TCG_COND_LTU
, ra
, rb
, rc
, islit
, lit
);
1900 if (likely(rc
!= 31)) {
1903 tcg_gen_addi_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
);
1905 tcg_gen_add_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
1908 tcg_gen_movi_i64(cpu_ir
[rc
], lit
);
1910 tcg_gen_mov_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1916 if (likely(rc
!= 31)) {
1918 TCGv tmp
= tcg_temp_new();
1919 tcg_gen_shli_i64(tmp
, cpu_ir
[ra
], 2);
1921 tcg_gen_addi_i64(cpu_ir
[rc
], tmp
, lit
);
1923 tcg_gen_add_i64(cpu_ir
[rc
], tmp
, cpu_ir
[rb
]);
1927 tcg_gen_movi_i64(cpu_ir
[rc
], lit
);
1929 tcg_gen_mov_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1935 if (likely(rc
!= 31)) {
1938 tcg_gen_subi_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
);
1940 tcg_gen_sub_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
1943 tcg_gen_movi_i64(cpu_ir
[rc
], -lit
);
1945 tcg_gen_neg_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1951 if (likely(rc
!= 31)) {
1953 TCGv tmp
= tcg_temp_new();
1954 tcg_gen_shli_i64(tmp
, cpu_ir
[ra
], 2);
1956 tcg_gen_subi_i64(cpu_ir
[rc
], tmp
, lit
);
1958 tcg_gen_sub_i64(cpu_ir
[rc
], tmp
, cpu_ir
[rb
]);
1962 tcg_gen_movi_i64(cpu_ir
[rc
], -lit
);
1964 tcg_gen_neg_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1970 gen_cmp(TCG_COND_EQ
, ra
, rb
, rc
, islit
, lit
);
1974 if (likely(rc
!= 31)) {
1976 TCGv tmp
= tcg_temp_new();
1977 tcg_gen_shli_i64(tmp
, cpu_ir
[ra
], 3);
1979 tcg_gen_addi_i64(cpu_ir
[rc
], tmp
, lit
);
1981 tcg_gen_add_i64(cpu_ir
[rc
], tmp
, cpu_ir
[rb
]);
1985 tcg_gen_movi_i64(cpu_ir
[rc
], lit
);
1987 tcg_gen_mov_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1993 if (likely(rc
!= 31)) {
1995 TCGv tmp
= tcg_temp_new();
1996 tcg_gen_shli_i64(tmp
, cpu_ir
[ra
], 3);
1998 tcg_gen_subi_i64(cpu_ir
[rc
], tmp
, lit
);
2000 tcg_gen_sub_i64(cpu_ir
[rc
], tmp
, cpu_ir
[rb
]);
2004 tcg_gen_movi_i64(cpu_ir
[rc
], -lit
);
2006 tcg_gen_neg_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
2012 gen_cmp(TCG_COND_LEU
, ra
, rb
, rc
, islit
, lit
);
2016 gen_addlv(ra
, rb
, rc
, islit
, lit
);
2020 gen_sublv(ra
, rb
, rc
, islit
, lit
);
2024 gen_cmp(TCG_COND_LT
, ra
, rb
, rc
, islit
, lit
);
2028 gen_addqv(ra
, rb
, rc
, islit
, lit
);
2032 gen_subqv(ra
, rb
, rc
, islit
, lit
);
2036 gen_cmp(TCG_COND_LE
, ra
, rb
, rc
, islit
, lit
);
2046 if (likely(rc
!= 31)) {
2048 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
2050 tcg_gen_andi_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
);
2052 tcg_gen_and_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
2057 if (likely(rc
!= 31)) {
2060 tcg_gen_andi_i64(cpu_ir
[rc
], cpu_ir
[ra
], ~lit
);
2062 tcg_gen_andc_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
2064 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
2069 gen_cmov(TCG_COND_NE
, ra
, rb
, rc
, islit
, lit
, 1);
2073 gen_cmov(TCG_COND_EQ
, ra
, rb
, rc
, islit
, lit
, 1);
2077 if (likely(rc
!= 31)) {
2080 tcg_gen_ori_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
);
2082 tcg_gen_or_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
2085 tcg_gen_movi_i64(cpu_ir
[rc
], lit
);
2087 tcg_gen_mov_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
2093 gen_cmov(TCG_COND_EQ
, ra
, rb
, rc
, islit
, lit
, 0);
2097 gen_cmov(TCG_COND_NE
, ra
, rb
, rc
, islit
, lit
, 0);
2101 if (likely(rc
!= 31)) {
2104 tcg_gen_ori_i64(cpu_ir
[rc
], cpu_ir
[ra
], ~lit
);
2106 tcg_gen_orc_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
2109 tcg_gen_movi_i64(cpu_ir
[rc
], ~lit
);
2111 tcg_gen_not_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
2117 if (likely(rc
!= 31)) {
2120 tcg_gen_xori_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
);
2122 tcg_gen_xor_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
2125 tcg_gen_movi_i64(cpu_ir
[rc
], lit
);
2127 tcg_gen_mov_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
2133 gen_cmov(TCG_COND_LT
, ra
, rb
, rc
, islit
, lit
, 0);
2137 gen_cmov(TCG_COND_GE
, ra
, rb
, rc
, islit
, lit
, 0);
2141 if (likely(rc
!= 31)) {
2144 tcg_gen_xori_i64(cpu_ir
[rc
], cpu_ir
[ra
], ~lit
);
2146 tcg_gen_eqv_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
2149 tcg_gen_movi_i64(cpu_ir
[rc
], ~lit
);
2151 tcg_gen_not_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
2157 if (likely(rc
!= 31)) {
2158 uint64_t amask
= ctx
->tb
->flags
>> TB_FLAGS_AMASK_SHIFT
;
2161 tcg_gen_movi_i64(cpu_ir
[rc
], lit
& ~amask
);
2163 tcg_gen_andi_i64(cpu_ir
[rc
], cpu_ir
[rb
], ~amask
);
2169 gen_cmov(TCG_COND_LE
, ra
, rb
, rc
, islit
, lit
, 0);
2173 gen_cmov(TCG_COND_GT
, ra
, rb
, rc
, islit
, lit
, 0);
2178 tcg_gen_movi_i64(cpu_ir
[rc
], ctx
->env
->implver
);
2188 gen_msk_l(ra
, rb
, rc
, islit
, lit
, 0x01);
2192 gen_ext_l(ra
, rb
, rc
, islit
, lit
, 0x01);
2196 gen_ins_l(ra
, rb
, rc
, islit
, lit
, 0x01);
2200 gen_msk_l(ra
, rb
, rc
, islit
, lit
, 0x03);
2204 gen_ext_l(ra
, rb
, rc
, islit
, lit
, 0x03);
2208 gen_ins_l(ra
, rb
, rc
, islit
, lit
, 0x03);
2212 gen_msk_l(ra
, rb
, rc
, islit
, lit
, 0x0f);
2216 gen_ext_l(ra
, rb
, rc
, islit
, lit
, 0x0f);
2220 gen_ins_l(ra
, rb
, rc
, islit
, lit
, 0x0f);
2224 gen_zap(ra
, rb
, rc
, islit
, lit
);
2228 gen_zapnot(ra
, rb
, rc
, islit
, lit
);
2232 gen_msk_l(ra
, rb
, rc
, islit
, lit
, 0xff);
2236 if (likely(rc
!= 31)) {
2239 tcg_gen_shri_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
& 0x3f);
2241 TCGv shift
= tcg_temp_new();
2242 tcg_gen_andi_i64(shift
, cpu_ir
[rb
], 0x3f);
2243 tcg_gen_shr_i64(cpu_ir
[rc
], cpu_ir
[ra
], shift
);
2244 tcg_temp_free(shift
);
2247 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
2252 gen_ext_l(ra
, rb
, rc
, islit
, lit
, 0xff);
2256 if (likely(rc
!= 31)) {
2259 tcg_gen_shli_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
& 0x3f);
2261 TCGv shift
= tcg_temp_new();
2262 tcg_gen_andi_i64(shift
, cpu_ir
[rb
], 0x3f);
2263 tcg_gen_shl_i64(cpu_ir
[rc
], cpu_ir
[ra
], shift
);
2264 tcg_temp_free(shift
);
2267 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
2272 gen_ins_l(ra
, rb
, rc
, islit
, lit
, 0xff);
2276 if (likely(rc
!= 31)) {
2279 tcg_gen_sari_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
& 0x3f);
2281 TCGv shift
= tcg_temp_new();
2282 tcg_gen_andi_i64(shift
, cpu_ir
[rb
], 0x3f);
2283 tcg_gen_sar_i64(cpu_ir
[rc
], cpu_ir
[ra
], shift
);
2284 tcg_temp_free(shift
);
2287 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
2292 gen_msk_h(ra
, rb
, rc
, islit
, lit
, 0x03);
2296 gen_ins_h(ra
, rb
, rc
, islit
, lit
, 0x03);
2300 gen_ext_h(ra
, rb
, rc
, islit
, lit
, 0x03);
2304 gen_msk_h(ra
, rb
, rc
, islit
, lit
, 0x0f);
2308 gen_ins_h(ra
, rb
, rc
, islit
, lit
, 0x0f);
2312 gen_ext_h(ra
, rb
, rc
, islit
, lit
, 0x0f);
2316 gen_msk_h(ra
, rb
, rc
, islit
, lit
, 0xff);
2320 gen_ins_h(ra
, rb
, rc
, islit
, lit
, 0xff);
2324 gen_ext_h(ra
, rb
, rc
, islit
, lit
, 0xff);
2334 if (likely(rc
!= 31)) {
2336 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
2339 tcg_gen_muli_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
);
2341 tcg_gen_mul_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
2342 tcg_gen_ext32s_i64(cpu_ir
[rc
], cpu_ir
[rc
]);
2348 if (likely(rc
!= 31)) {
2350 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
2352 tcg_gen_muli_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
);
2354 tcg_gen_mul_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
2359 gen_umulh(ra
, rb
, rc
, islit
, lit
);
2363 gen_mullv(ra
, rb
, rc
, islit
, lit
);
2367 gen_mulqv(ra
, rb
, rc
, islit
, lit
);
2374 switch (fpfn
) { /* fn11 & 0x3F */
2377 if ((ctx
->tb
->flags
& TB_FLAGS_AMASK_FIX
) == 0) {
2380 if (likely(rc
!= 31)) {
2382 TCGv_i32 tmp
= tcg_temp_new_i32();
2383 tcg_gen_trunc_i64_i32(tmp
, cpu_ir
[ra
]);
2384 gen_helper_memory_to_s(cpu_fir
[rc
], tmp
);
2385 tcg_temp_free_i32(tmp
);
2387 tcg_gen_movi_i64(cpu_fir
[rc
], 0);
2392 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_FIX
) {
2399 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_FIX
) {
2400 gen_fsqrts(ctx
, rb
, rc
, fn11
);
2406 if ((ctx
->tb
->flags
& TB_FLAGS_AMASK_FIX
) == 0) {
2409 if (likely(rc
!= 31)) {
2411 TCGv_i32 tmp
= tcg_temp_new_i32();
2412 tcg_gen_trunc_i64_i32(tmp
, cpu_ir
[ra
]);
2413 gen_helper_memory_to_f(cpu_fir
[rc
], tmp
);
2414 tcg_temp_free_i32(tmp
);
2416 tcg_gen_movi_i64(cpu_fir
[rc
], 0);
2421 if ((ctx
->tb
->flags
& TB_FLAGS_AMASK_FIX
) == 0) {
2424 if (likely(rc
!= 31)) {
2426 tcg_gen_mov_i64(cpu_fir
[rc
], cpu_ir
[ra
]);
2428 tcg_gen_movi_i64(cpu_fir
[rc
], 0);
2433 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_FIX
) {
2440 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_FIX
) {
2441 gen_fsqrtt(ctx
, rb
, rc
, fn11
);
2450 /* VAX floating point */
2451 /* XXX: rounding mode and trap are ignored (!) */
2452 switch (fpfn
) { /* fn11 & 0x3F */
2455 gen_faddf(ra
, rb
, rc
);
2459 gen_fsubf(ra
, rb
, rc
);
2463 gen_fmulf(ra
, rb
, rc
);
2467 gen_fdivf(ra
, rb
, rc
);
2479 gen_faddg(ra
, rb
, rc
);
2483 gen_fsubg(ra
, rb
, rc
);
2487 gen_fmulg(ra
, rb
, rc
);
2491 gen_fdivg(ra
, rb
, rc
);
2495 gen_fcmpgeq(ra
, rb
, rc
);
2499 gen_fcmpglt(ra
, rb
, rc
);
2503 gen_fcmpgle(ra
, rb
, rc
);
2534 /* IEEE floating-point */
2535 switch (fpfn
) { /* fn11 & 0x3F */
2538 gen_fadds(ctx
, ra
, rb
, rc
, fn11
);
2542 gen_fsubs(ctx
, ra
, rb
, rc
, fn11
);
2546 gen_fmuls(ctx
, ra
, rb
, rc
, fn11
);
2550 gen_fdivs(ctx
, ra
, rb
, rc
, fn11
);
2554 gen_faddt(ctx
, ra
, rb
, rc
, fn11
);
2558 gen_fsubt(ctx
, ra
, rb
, rc
, fn11
);
2562 gen_fmult(ctx
, ra
, rb
, rc
, fn11
);
2566 gen_fdivt(ctx
, ra
, rb
, rc
, fn11
);
2570 gen_fcmptun(ctx
, ra
, rb
, rc
, fn11
);
2574 gen_fcmpteq(ctx
, ra
, rb
, rc
, fn11
);
2578 gen_fcmptlt(ctx
, ra
, rb
, rc
, fn11
);
2582 gen_fcmptle(ctx
, ra
, rb
, rc
, fn11
);
2585 if (fn11
== 0x2AC || fn11
== 0x6AC) {
2587 gen_fcvtst(ctx
, rb
, rc
, fn11
);
2590 gen_fcvtts(ctx
, rb
, rc
, fn11
);
2595 gen_fcvttq(ctx
, rb
, rc
, fn11
);
2599 gen_fcvtqs(ctx
, rb
, rc
, fn11
);
2603 gen_fcvtqt(ctx
, rb
, rc
, fn11
);
2616 if (likely(rc
!= 31)) {
2620 tcg_gen_movi_i64(cpu_fir
[rc
], 0);
2622 tcg_gen_mov_i64(cpu_fir
[rc
], cpu_fir
[ra
]);
2625 gen_fcpys(ra
, rb
, rc
);
2631 gen_fcpysn(ra
, rb
, rc
);
2635 gen_fcpyse(ra
, rb
, rc
);
2639 if (likely(ra
!= 31))
2640 gen_helper_store_fpcr(cpu_fir
[ra
]);
2642 TCGv tmp
= tcg_const_i64(0);
2643 gen_helper_store_fpcr(tmp
);
2649 if (likely(ra
!= 31))
2650 gen_helper_load_fpcr(cpu_fir
[ra
]);
2654 gen_fcmov(TCG_COND_EQ
, ra
, rb
, rc
);
2658 gen_fcmov(TCG_COND_NE
, ra
, rb
, rc
);
2662 gen_fcmov(TCG_COND_LT
, ra
, rb
, rc
);
2666 gen_fcmov(TCG_COND_GE
, ra
, rb
, rc
);
2670 gen_fcmov(TCG_COND_LE
, ra
, rb
, rc
);
2674 gen_fcmov(TCG_COND_GT
, ra
, rb
, rc
);
2684 /* ??? I'm pretty sure there's nothing that /sv needs to do that
2685 /v doesn't do. The only thing I can think is that /sv is a
2686 valid instruction merely for completeness in the ISA. */
2687 gen_fcvtql_v(ctx
, rb
, rc
);
2694 switch ((uint16_t)disp16
) {
2722 gen_helper_load_pcc(cpu_ir
[ra
]);
2744 /* HW_MFPR (PALcode) */
2745 #ifndef CONFIG_USER_ONLY
2746 if (ctx
->tb
->flags
& TB_FLAGS_PAL_MODE
) {
2747 gen_mfpr(ra
, insn
& 0xffff);
2753 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
2754 prediction stack action, which of course we don't implement. */
2756 tcg_gen_andi_i64(cpu_pc
, cpu_ir
[rb
], ~3);
2758 tcg_gen_movi_i64(cpu_pc
, 0);
2761 tcg_gen_movi_i64(cpu_ir
[ra
], ctx
->pc
);
2763 ret
= EXIT_PC_UPDATED
;
2766 /* HW_LD (PALcode) */
2767 #ifndef CONFIG_USER_ONLY
2768 if (ctx
->tb
->flags
& TB_FLAGS_PAL_MODE
) {
2775 addr
= tcg_temp_new();
2777 tcg_gen_addi_i64(addr
, cpu_ir
[rb
], disp12
);
2779 tcg_gen_movi_i64(addr
, disp12
);
2780 switch ((insn
>> 12) & 0xF) {
2782 /* Longword physical access (hw_ldl/p) */
2783 gen_helper_ldl_phys(cpu_ir
[ra
], addr
);
2786 /* Quadword physical access (hw_ldq/p) */
2787 gen_helper_ldq_phys(cpu_ir
[ra
], addr
);
2790 /* Longword physical access with lock (hw_ldl_l/p) */
2791 gen_helper_ldl_l_phys(cpu_ir
[ra
], addr
);
2794 /* Quadword physical access with lock (hw_ldq_l/p) */
2795 gen_helper_ldq_l_phys(cpu_ir
[ra
], addr
);
2798 /* Longword virtual PTE fetch (hw_ldl/v) */
2801 /* Quadword virtual PTE fetch (hw_ldq/v) */
2805 /* Invalid */
2808 /* Invalid */
2811 /* Longword virtual access (hw_ldl) */
2814 /* Quadword virtual access (hw_ldq) */
2817 /* Longword virtual access with protection check (hw_ldl/w) */
2818 tcg_gen_qemu_ld32s(cpu_ir
[ra
], addr
, MMU_KERNEL_IDX
);
2821 /* Quadword virtual access with protection check (hw_ldq/w) */
2822 tcg_gen_qemu_ld64(cpu_ir
[ra
], addr
, MMU_KERNEL_IDX
);
2825 /* Longword virtual access with alt access mode (hw_ldl/a)*/
2828 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2831 /* Longword virtual access with alternate access mode and
2832 protection checks (hw_ldl/wa) */
2833 tcg_gen_qemu_ld32s(cpu_ir
[ra
], addr
, MMU_USER_IDX
);
2836 /* Quadword virtual access with alternate access mode and
2837 protection checks (hw_ldq/wa) */
2838 tcg_gen_qemu_ld64(cpu_ir
[ra
], addr
, MMU_USER_IDX
);
2841 tcg_temp_free(addr
);
2850 if ((ctx
->tb
->flags
& TB_FLAGS_AMASK_BWX
) == 0) {
2853 if (likely(rc
!= 31)) {
2855 tcg_gen_movi_i64(cpu_ir
[rc
], (int64_t)((int8_t)lit
));
2857 tcg_gen_ext8s_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
2862 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_BWX
) {
2863 if (likely(rc
!= 31)) {
2865 tcg_gen_movi_i64(cpu_ir
[rc
], (int64_t)((int16_t)lit
));
2867 tcg_gen_ext16s_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
2875 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_CIX
) {
2876 if (likely(rc
!= 31)) {
2878 tcg_gen_movi_i64(cpu_ir
[rc
], ctpop64(lit
));
2880 gen_helper_ctpop(cpu_ir
[rc
], cpu_ir
[rb
]);
2888 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
2889 gen_perr(ra
, rb
, rc
, islit
, lit
);
2895 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_CIX
) {
2896 if (likely(rc
!= 31)) {
2898 tcg_gen_movi_i64(cpu_ir
[rc
], clz64(lit
));
2900 gen_helper_ctlz(cpu_ir
[rc
], cpu_ir
[rb
]);
2908 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_CIX
) {
2909 if (likely(rc
!= 31)) {
2911 tcg_gen_movi_i64(cpu_ir
[rc
], ctz64(lit
));
2913 gen_helper_cttz(cpu_ir
[rc
], cpu_ir
[rb
]);
2921 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
2922 if (real_islit
|| ra
!= 31) {
2931 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
2932 if (real_islit
|| ra
!= 31) {
2941 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
2942 if (real_islit
|| ra
!= 31) {
2951 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
2952 if (real_islit
|| ra
!= 31) {
2961 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
2962 gen_minsb8(ra
, rb
, rc
, islit
, lit
);
2968 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
2969 gen_minsw4(ra
, rb
, rc
, islit
, lit
);
2975 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
2976 gen_minub8(ra
, rb
, rc
, islit
, lit
);
2982 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
2983 gen_minuw4(ra
, rb
, rc
, islit
, lit
);
2989 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
2990 gen_maxub8(ra
, rb
, rc
, islit
, lit
);
2996 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
2997 gen_maxuw4(ra
, rb
, rc
, islit
, lit
);
3003 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
3004 gen_maxsb8(ra
, rb
, rc
, islit
, lit
);
3010 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
3011 gen_maxsw4(ra
, rb
, rc
, islit
, lit
);
3017 if ((ctx
->tb
->flags
& TB_FLAGS_AMASK_FIX
) == 0) {
3020 if (likely(rc
!= 31)) {
3022 tcg_gen_mov_i64(cpu_ir
[rc
], cpu_fir
[ra
]);
3024 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
3029 if ((ctx
->tb
->flags
& TB_FLAGS_AMASK_FIX
) == 0) {
3033 TCGv_i32 tmp1
= tcg_temp_new_i32();
3035 gen_helper_s_to_memory(tmp1
, cpu_fir
[ra
]);
3037 TCGv tmp2
= tcg_const_i64(0);
3038 gen_helper_s_to_memory(tmp1
, tmp2
);
3039 tcg_temp_free(tmp2
);
3041 tcg_gen_ext_i32_i64(cpu_ir
[rc
], tmp1
);
3042 tcg_temp_free_i32(tmp1
);
3050 /* HW_MTPR (PALcode) */
3051 #ifndef CONFIG_USER_ONLY
3052 if (ctx
->tb
->flags
& TB_FLAGS_PAL_MODE
) {
3053 gen_mtpr(rb
, insn
& 0xffff);
3059 /* HW_RET (PALcode) */
3060 #ifndef CONFIG_USER_ONLY
3061 if (ctx
->tb
->flags
& TB_FLAGS_PAL_MODE
) {
3063 /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
3064 address from EXC_ADDR. This turns out to be useful for our
3065 emulation PALcode, so continue to accept it. */
3066 TCGv tmp
= tcg_temp_new();
3067 tcg_gen_ld_i64(tmp
, cpu_env
, offsetof(CPUState
, exc_addr
));
3068 gen_helper_hw_ret(tmp
);
3071 gen_helper_hw_ret(cpu_ir
[rb
]);
3073 ret
= EXIT_PC_UPDATED
;
3079 /* HW_ST (PALcode) */
3080 #ifndef CONFIG_USER_ONLY
3081 if (ctx
->tb
->flags
& TB_FLAGS_PAL_MODE
) {
3083 addr
= tcg_temp_new();
3085 tcg_gen_addi_i64(addr
, cpu_ir
[rb
], disp12
);
3087 tcg_gen_movi_i64(addr
, disp12
);
3091 val
= tcg_temp_new();
3092 tcg_gen_movi_i64(val
, 0);
3094 switch ((insn
>> 12) & 0xF) {
3096 /* Longword physical access */
3097 gen_helper_stl_phys(addr
, val
);
3100 /* Quadword physical access */
3101 gen_helper_stq_phys(addr
, val
);
3104 /* Longword physical access with lock */
3105 gen_helper_stl_c_phys(val
, addr
, val
);
3108 /* Quadword physical access with lock */
3109 gen_helper_stq_c_phys(val
, addr
, val
);
3112 /* Longword virtual access */
3115 /* Quadword virtual access */
3136 /* Longword virtual access with alternate access mode */
3139 /* Quadword virtual access with alternate access mode */
3150 tcg_temp_free(addr
);
3157 gen_load_mem(ctx
, &gen_qemu_ldf
, ra
, rb
, disp16
, 1, 0);
3161 gen_load_mem(ctx
, &gen_qemu_ldg
, ra
, rb
, disp16
, 1, 0);
3165 gen_load_mem(ctx
, &gen_qemu_lds
, ra
, rb
, disp16
, 1, 0);
3169 gen_load_mem(ctx
, &tcg_gen_qemu_ld64
, ra
, rb
, disp16
, 1, 0);
3173 gen_store_mem(ctx
, &gen_qemu_stf
, ra
, rb
, disp16
, 1, 0);
3177 gen_store_mem(ctx
, &gen_qemu_stg
, ra
, rb
, disp16
, 1, 0);
3181 gen_store_mem(ctx
, &gen_qemu_sts
, ra
, rb
, disp16
, 1, 0);
3185 gen_store_mem(ctx
, &tcg_gen_qemu_st64
, ra
, rb
, disp16
, 1, 0);
3189 gen_load_mem(ctx
, &tcg_gen_qemu_ld32s
, ra
, rb
, disp16
, 0, 0);
3193 gen_load_mem(ctx
, &tcg_gen_qemu_ld64
, ra
, rb
, disp16
, 0, 0);
3197 gen_load_mem(ctx
, &gen_qemu_ldl_l
, ra
, rb
, disp16
, 0, 0);
3201 gen_load_mem(ctx
, &gen_qemu_ldq_l
, ra
, rb
, disp16
, 0, 0);
3205 gen_store_mem(ctx
, &tcg_gen_qemu_st32
, ra
, rb
, disp16
, 0, 0);
3209 gen_store_mem(ctx
, &tcg_gen_qemu_st64
, ra
, rb
, disp16
, 0, 0);
3213 ret
= gen_store_conditional(ctx
, ra
, rb
, disp16
, 0);
3217 ret
= gen_store_conditional(ctx
, ra
, rb
, disp16
, 1);
3221 ret
= gen_bdirect(ctx
, ra
, disp21
);
3223 case 0x31: /* FBEQ */
3224 ret
= gen_fbcond(ctx
, TCG_COND_EQ
, ra
, disp21
);
3226 case 0x32: /* FBLT */
3227 ret
= gen_fbcond(ctx
, TCG_COND_LT
, ra
, disp21
);
3229 case 0x33: /* FBLE */
3230 ret
= gen_fbcond(ctx
, TCG_COND_LE
, ra
, disp21
);
3234 ret
= gen_bdirect(ctx
, ra
, disp21
);
3236 case 0x35: /* FBNE */
3237 ret
= gen_fbcond(ctx
, TCG_COND_NE
, ra
, disp21
);
3239 case 0x36: /* FBGE */
3240 ret
= gen_fbcond(ctx
, TCG_COND_GE
, ra
, disp21
);
3242 case 0x37: /* FBGT */
3243 ret
= gen_fbcond(ctx
, TCG_COND_GT
, ra
, disp21
);
3247 ret
= gen_bcond(ctx
, TCG_COND_EQ
, ra
, disp21
, 1);
3251 ret
= gen_bcond(ctx
, TCG_COND_EQ
, ra
, disp21
, 0);
3255 ret
= gen_bcond(ctx
, TCG_COND_LT
, ra
, disp21
, 0);
3259 ret
= gen_bcond(ctx
, TCG_COND_LE
, ra
, disp21
, 0);
3263 ret
= gen_bcond(ctx
, TCG_COND_NE
, ra
, disp21
, 1);
3267 ret
= gen_bcond(ctx
, TCG_COND_NE
, ra
, disp21
, 0);
3271 ret
= gen_bcond(ctx
, TCG_COND_GE
, ra
, disp21
, 0);
3275 ret
= gen_bcond(ctx
, TCG_COND_GT
, ra
, disp21
, 0);
3278 ret
= gen_invalid(ctx
);
3285 static inline void gen_intermediate_code_internal(CPUState
*env
,
3286 TranslationBlock
*tb
,
3289 DisasContext ctx
, *ctxp
= &ctx
;
3290 target_ulong pc_start
;
3292 uint16_t *gen_opc_end
;
3300 gen_opc_end
= gen_opc_buf
+ OPC_MAX_SIZE
;
3305 ctx
.mem_idx
= cpu_mmu_index(env
);
3307 /* ??? Every TB begins with unset rounding mode, to be initialized on
3308 the first fp insn of the TB. Alternately we could define a proper
3309 default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
3310 to reset the FP_STATUS to that default at the end of any TB that
3311 changes the default. We could even (gasp) dynamically figure out
3312 what default would be most efficient given the running program. */
3314 /* Similarly for flush-to-zero. */
3318 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
3320 max_insns
= CF_COUNT_MASK
;
3324 if (unlikely(!QTAILQ_EMPTY(&env
->breakpoints
))) {
3325 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
3326 if (bp
->pc
== ctx
.pc
) {
3327 gen_excp(&ctx
, EXCP_DEBUG
, 0);
3333 j
= gen_opc_ptr
- gen_opc_buf
;
3337 gen_opc_instr_start
[lj
++] = 0;
3339 gen_opc_pc
[lj
] = ctx
.pc
;
3340 gen_opc_instr_start
[lj
] = 1;
3341 gen_opc_icount
[lj
] = num_insns
;
3343 if (num_insns
+ 1 == max_insns
&& (tb
->cflags
& CF_LAST_IO
))
3345 insn
= ldl_code(ctx
.pc
);
3348 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP
))) {
3349 tcg_gen_debug_insn_start(ctx
.pc
);
3353 ret
= translate_one(ctxp
, insn
);
3355 /* If we reach a page boundary, are single stepping,
3356 or exhaust instruction count, stop generation. */
3358 && ((ctx
.pc
& (TARGET_PAGE_SIZE
- 1)) == 0
3359 || gen_opc_ptr
>= gen_opc_end
3360 || num_insns
>= max_insns
3362 || env
->singlestep_enabled
)) {
3363 ret
= EXIT_PC_STALE
;
3365 } while (ret
== NO_EXIT
);
3367 if (tb
->cflags
& CF_LAST_IO
) {
3376 tcg_gen_movi_i64(cpu_pc
, ctx
.pc
);
3378 case EXIT_PC_UPDATED
:
3379 if (env
->singlestep_enabled
) {
3380 gen_excp_1(EXCP_DEBUG
, 0);
3389 gen_icount_end(tb
, num_insns
);
3390 *gen_opc_ptr
= INDEX_op_end
;
3392 j
= gen_opc_ptr
- gen_opc_buf
;
3395 gen_opc_instr_start
[lj
++] = 0;
3397 tb
->size
= ctx
.pc
- pc_start
;
3398 tb
->icount
= num_insns
;
3402 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
3403 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
3404 log_target_disas(pc_start
, ctx
.pc
- pc_start
, 1);
3410 void gen_intermediate_code (CPUState
*env
, struct TranslationBlock
*tb
)
3412 gen_intermediate_code_internal(env
, tb
, 0);
3415 void gen_intermediate_code_pc (CPUState
*env
, struct TranslationBlock
*tb
)
3417 gen_intermediate_code_internal(env
, tb
, 1);
3425 static const struct cpu_def_t cpu_defs
[] = {
3426 { "ev4", IMPLVER_2106x
, 0 },
3427 { "ev5", IMPLVER_21164
, 0 },
3428 { "ev56", IMPLVER_21164
, AMASK_BWX
},
3429 { "pca56", IMPLVER_21164
, AMASK_BWX
| AMASK_MVI
},
3430 { "ev6", IMPLVER_21264
, AMASK_BWX
| AMASK_FIX
| AMASK_MVI
| AMASK_TRAP
},
3431 { "ev67", IMPLVER_21264
, (AMASK_BWX
| AMASK_FIX
| AMASK_CIX
3432 | AMASK_MVI
| AMASK_TRAP
| AMASK_PREFETCH
), },
3433 { "ev68", IMPLVER_21264
, (AMASK_BWX
| AMASK_FIX
| AMASK_CIX
3434 | AMASK_MVI
| AMASK_TRAP
| AMASK_PREFETCH
), },
3435 { "21064", IMPLVER_2106x
, 0 },
3436 { "21164", IMPLVER_21164
, 0 },
3437 { "21164a", IMPLVER_21164
, AMASK_BWX
},
3438 { "21164pc", IMPLVER_21164
, AMASK_BWX
| AMASK_MVI
},
3439 { "21264", IMPLVER_21264
, AMASK_BWX
| AMASK_FIX
| AMASK_MVI
| AMASK_TRAP
},
3440 { "21264a", IMPLVER_21264
, (AMASK_BWX
| AMASK_FIX
| AMASK_CIX
3441 | AMASK_MVI
| AMASK_TRAP
| AMASK_PREFETCH
), }
3444 CPUAlphaState
* cpu_alpha_init (const char *cpu_model
)
3447 int implver
, amask
, i
, max
;
3449 env
= qemu_mallocz(sizeof(CPUAlphaState
));
3451 alpha_translate_init();
3454 /* Default to ev67; no reason not to emulate insns by default. */
3455 implver
= IMPLVER_21264
;
3456 amask
= (AMASK_BWX
| AMASK_FIX
| AMASK_CIX
| AMASK_MVI
3457 | AMASK_TRAP
| AMASK_PREFETCH
);
3459 max
= ARRAY_SIZE(cpu_defs
);
3460 for (i
= 0; i
< max
; i
++) {
3461 if (strcmp (cpu_model
, cpu_defs
[i
].name
) == 0) {
3462 implver
= cpu_defs
[i
].implver
;
3463 amask
= cpu_defs
[i
].amask
;
3467 env
->implver
= implver
;
3470 #if defined (CONFIG_USER_ONLY)
3471 env
->ps
= PS_USER_MODE
;
3472 cpu_alpha_store_fpcr(env
, (FPCR_INVD
| FPCR_DZED
| FPCR_OVFD
3473 | FPCR_UNFD
| FPCR_INED
| FPCR_DNOD
));
3475 env
->lock_addr
= -1;
3478 qemu_init_vcpu(env
);
void restore_state_to_opc(CPUState *env, TranslationBlock *tb, int pc_pos)
{
    env->pc = gen_opc_pc[pc_pos];
}
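
/* Illustrative note (not part of the original file): restore_state_to_opc is
   the consumer of the gen_opc_pc[] entries recorded per guest instruction in
   the translation loop above.  When an exception is raised out of generated
   code, QEMU retranslates the TB, locates the index of the faulting op
   (pc_pos), and this hook maps it back to the guest PC so that env->pc is
   precise again.  In sketch form, with hypothetical names:

       guest_pc_for_insn[n] = ctx.pc;            // recorded while translating
       ...
       env->pc = guest_pc_for_insn[pc_pos];      // replayed here on restore
*/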