/*
 * Alpha emulation cpu translation for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "sysemu/cpus.h"
#include "sysemu/cpu-timers.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
typedef struct DisasContext DisasContext;
struct DisasContext {
    DisasContextBase base;

#ifndef CONFIG_USER_ONLY
    uint64_t palbr;
#endif
    uint32_t tbflags;
    int mem_idx;

    /* implver and amask values for this CPU.  */
    int implver;
    int amask;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;

    /* The set of registers active in the current context.  */
    TCGv *ir;

    /* Temporaries for $31 and $f31 as source and destination.  */
    TCGv zero;
    TCGv sink;
};

/* Target-specific return values from translate_one, indicating the
   state of the TB.  Note that DISAS_NEXT indicates that we are not
   exiting the TB.  */
#define DISAS_PC_UPDATED_NOCHAIN  DISAS_TARGET_0
#define DISAS_PC_UPDATED          DISAS_TARGET_1
#define DISAS_PC_STALE            DISAS_TARGET_2
/* global register indexes */
static TCGv cpu_std_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_value;

#ifndef CONFIG_USER_ONLY
static TCGv cpu_pal_ir[31];
#endif

#include "exec/gen-icount.h"
void alpha_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUAlphaState, V) }

    typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        DEF_VAR(pc),
        DEF_VAR(lock_addr),
        DEF_VAR(lock_value),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char greg_names[31][4] = {
        "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
        "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
        "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
        "t10", "t11", "ra", "t12", "at", "gp", "sp"
    };
    static const char freg_names[31][4] = {
        "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
        "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
        "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
        "f24", "f25", "f26", "f27", "f28", "f29", "f30"
    };
#ifndef CONFIG_USER_ONLY
    static const char shadow_names[8][8] = {
        "pal_t7", "pal_s0", "pal_s1", "pal_s2",
        "pal_s3", "pal_s4", "pal_s5", "pal_t11"
    };
#endif

    int i;

    for (i = 0; i < 31; i++) {
        cpu_std_ir[i] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState, ir[i]),
                                               greg_names[i]);
    }

    for (i = 0; i < 31; i++) {
        cpu_fir[i] = tcg_global_mem_new_i64(cpu_env,
                                            offsetof(CPUAlphaState, fir[i]),
                                            freg_names[i]);
    }

#ifndef CONFIG_USER_ONLY
    memcpy(cpu_pal_ir, cpu_std_ir, sizeof(cpu_pal_ir));
    for (i = 0; i < 8; i++) {
        int r = (i == 7 ? 25 : i + 8);
        cpu_pal_ir[r] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState,
                                                        shadow[i]),
                                               shadow_names[i]);
    }
#endif

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new_i64(cpu_env, v->ofs, v->name);
    }
}
static TCGv load_zero(DisasContext *ctx)
{
    if (!ctx->zero) {
        ctx->zero = tcg_constant_i64(0);
    }
    return ctx->zero;
}

static TCGv dest_sink(DisasContext *ctx)
{
    if (!ctx->sink) {
        ctx->sink = tcg_temp_new();
    }
    return ctx->sink;
}

static void free_context_temps(DisasContext *ctx)
{
    if (ctx->sink) {
        tcg_gen_discard_i64(ctx->sink);
        tcg_temp_free(ctx->sink);
        ctx->sink = NULL;
    }
}
static TCGv load_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
                         uint8_t lit, bool islit)
{
    if (islit) {
        return tcg_constant_i64(lit);
    } else if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return dest_sink(ctx);
    }
}

static TCGv load_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return dest_sink(ctx);
    }
}
static int get_flag_ofs(unsigned shift)
{
    int ofs = offsetof(CPUAlphaState, flags);
#ifdef HOST_WORDS_BIGENDIAN
    ofs += 3 - (shift / 8);
#else
    ofs += shift / 8;
#endif
    return ofs;
}
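/*
 * Example: a flag byte with shift == 8 lives at offset flags + 1 on a
 * little-endian host but at flags + (3 - 1) on a big-endian host, so the
 * byte-wide loads and stores below reach the same logical flag either way.
 */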
static void ld_flag_byte(TCGv val, unsigned shift)
{
    tcg_gen_ld8u_i64(val, cpu_env, get_flag_ofs(shift));
}

static void st_flag_byte(TCGv val, unsigned shift)
{
    tcg_gen_st8_i64(val, cpu_env, get_flag_ofs(shift));
}
static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_constant_i32(exception);
    tmp2 = tcg_constant_i32(error_code);
    gen_helper_excp(cpu_env, tmp1, tmp2);
}

static DisasJumpType gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
    gen_excp_1(exception, error_code);
    return DISAS_NORETURN;
}

static inline DisasJumpType gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}
static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld_i64(tmp, t1, flags, MO_LEQ);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LESL);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LEQ);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}
static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, bool fp,
                                bool clear)
{
    TCGv tmp, addr, va;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? cpu_fir[ra] : ctx->ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}
static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st_i64(tmp, t1, flags, MO_LEQ);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}
static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, bool fp,
                                 bool clear)
{
    TCGv tmp, addr, va;

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? load_fpr(ctx, ra) : load_gpr(ctx, ra));
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}
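/*
 * LDx_L/STx_C note: the store-conditional below is modelled with a
 * compare-and-swap against the (cpu_lock_addr, cpu_lock_value) pair
 * remembered by the last load-locked.  RA is written with 1 only when
 * the cmpxchg actually observed the remembered value, otherwise 0.
 */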
static DisasJumpType gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                           int32_t disp16, int mem_idx,
                                           MemOp op)
{
    TCGLabel *lab_fail, *lab_done;
    TCGv addr, val;

    addr = tcg_temp_new_i64();
    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
    free_context_temps(ctx);

    lab_fail = gen_new_label();
    lab_done = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);
    tcg_temp_free_i64(addr);

    val = tcg_temp_new_i64();
    tcg_gen_atomic_cmpxchg_i64(val, cpu_lock_addr, cpu_lock_value,
                               load_gpr(ctx, ra), mem_idx, op);
    free_context_temps(ctx);

    if (ra != 31) {
        tcg_gen_setcond_i64(TCG_COND_EQ, ctx->ir[ra], val, cpu_lock_value);
    }
    tcg_temp_free_i64(val);
    tcg_gen_br(lab_done);

    gen_set_label(lab_fail);
    if (ra != 31) {
        tcg_gen_movi_i64(ctx->ir[ra], 0);
    }

    gen_set_label(lab_done);
    tcg_gen_movi_i64(cpu_lock_addr, -1);
    return DISAS_NEXT;
}
static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    return translator_use_goto_tb(&ctx->base, dest);
}
static DisasJumpType gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->base.pc_next + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(ctx->ir[ra], ctx->base.pc_next);
    }

    /* Notice branch-to-next; used to initialize RA with the PC.  */
    if (disp == 0) {
        return DISAS_NEXT;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb(ctx->base.tb, 0);
        return DISAS_NORETURN;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return DISAS_PC_UPDATED;
    }
}
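/*
 * Conditional branches test CMP against zero.  When the target may be
 * chained to, both arms become goto_tb exits; otherwise cpu_pc is chosen
 * with a movcond between the branch target and the fall-through address
 * and the TB ends with an indirect jump.
 */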
static DisasJumpType gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                        TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->base.pc_next + (disp << 2);
    TCGLabel *lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
        tcg_gen_exit_tb(ctx->base.tb, 0);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb(ctx->base.tb, 1);

        return DISAS_NORETURN;
    } else {
        TCGv_i64 z = load_zero(ctx);
        TCGv_i64 d = tcg_constant_i64(dest);
        TCGv_i64 p = tcg_constant_i64(ctx->base.pc_next);

        tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);
        return DISAS_PC_UPDATED;
    }
}
static DisasJumpType gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                               int32_t disp, int mask)
{
    if (mask) {
        TCGv tmp = tcg_temp_new();
        DisasJumpType ret;

        tcg_gen_andi_i64(tmp, load_gpr(ctx, ra), 1);
        ret = gen_bcond_internal(ctx, cond, tmp, disp);
        tcg_temp_free(tmp);
        return ret;
    }
    return gen_bcond_internal(ctx, cond, load_gpr(ctx, ra), disp);
}
/* Fold -0.0 for comparison with COND.  */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}
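/*
 * In the >= / < case above, the setcond/neg/and sequence computes
 * dest = (src != 0x8000000000000000ull ? src : 0), i.e. it rewrites -0.0
 * as +0.0 and leaves every other bit pattern untouched, so the following
 * integer comparison against zero gives the IEEE answer.
 */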
static DisasJumpType gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                                int32_t disp)
{
    TCGv cmp_tmp = tcg_temp_new();
    DisasJumpType ret;

    gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra));
    ret = gen_bcond_internal(ctx, cond, cmp_tmp, disp);
    tcg_temp_free(cmp_tmp);
    return ret;
}
static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc)
{
    TCGv va, vb, z;

    z = load_zero(ctx);
    vb = load_fpr(ctx, rb);
    va = tcg_temp_new();
    gen_fold_mzero(cond, va, load_fpr(ctx, ra));

    tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc));

    tcg_temp_free(va);
}
#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */
static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp)
{
    TCGv val;

    if (unlikely(reg == 31)) {
        val = load_zero(ctx);
    } else {
        val = cpu_fir[reg];
        if ((fn11 & QUAL_S) == 0) {
            if (is_cmp) {
                gen_helper_ieee_input_cmp(cpu_env, val);
            } else {
                gen_helper_ieee_input(cpu_env, val);
            }
        } else {
#ifndef CONFIG_USER_ONLY
            /* In system mode, raise exceptions for denormals like real
               hardware.  In user mode, proceed as if the OS completion
               handler is handling the denormal as per spec.  */
            gen_helper_ieee_input_s(cpu_env, val);
#endif
        }
    }
    return val;
}
static void gen_fp_exc_raise(int rc, int fn11)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */

    TCGv_i32 reg, ign;
    uint32_t ignore = 0;

    if (!(fn11 & QUAL_U)) {
        /* Note that QUAL_U == QUAL_V, so ignore either.  */
        ignore |= FPCR_UNF | FPCR_IOV;
    }
    if (!(fn11 & QUAL_I)) {
        ignore |= FPCR_INE;
    }
    ign = tcg_constant_i32(ignore);

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_constant_i32(rc + 32);
    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(cpu_env, ign, reg);
    } else {
        gen_helper_fp_exc_raise(cpu_env, ign, reg);
    }
}
static void gen_cvtlq(TCGv vc, TCGv vb)
{
    TCGv tmp = tcg_temp_new();

    /* The arithmetic right shift here, plus the sign-extended mask below
       yields a sign-extended result without an explicit ext32s_i64.  */
    tcg_gen_shri_i64(tmp, vb, 29);
    tcg_gen_sari_i64(vc, vb, 32);
    tcg_gen_deposit_i64(vc, vc, tmp, 0, 30);

    tcg_temp_free(tmp);
}
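/*
 * Layout note: a longword held in FP register format keeps its two high
 * bits at <63:62> and the low 30 bits at <58:29>; the pair of shifts plus
 * the 30-bit deposit above reassembles the sign-extended 32-bit integer
 * in VC.
 */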
static void gen_ieee_arith2(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    vb = gen_ieee_input(ctx, rb, fn11, 0);
    helper(dest_fpr(ctx, rc), cpu_env, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)
static void gen_cvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb, vc;

    /* No need to set flushzero, since we have an integer output.  */
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);

    /* Almost all integer conversions use cropped rounding;
       special case that.  */
    if ((fn11 & QUAL_RM_MASK) == QUAL_RM_C) {
        gen_helper_cvttq_c(vc, cpu_env, vb);
    } else {
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(vc, cpu_env, vb);
    }
    gen_fp_exc_raise(rc, fn11);
}
static void gen_ieee_intcvt(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb, vc;

    gen_qual_roundmode(ctx, fn11);
    vb = load_fpr(ctx, rb);
    vc = dest_fpr(ctx, rc);

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        helper(vc, cpu_env, vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(vc, cpu_env, vb);
    }
}

#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)
static void gen_cpy_mask(TCGv vc, TCGv va, TCGv vb, bool inv_a, uint64_t mask)
{
    TCGv vmask = tcg_constant_i64(mask);
    TCGv tmp = tcg_temp_new_i64();

    if (inv_a) {
        tcg_gen_andc_i64(tmp, vmask, va);
    } else {
        tcg_gen_and_i64(tmp, va, vmask);
    }

    tcg_gen_andc_i64(vc, vb, vmask);
    tcg_gen_or_i64(vc, vc, tmp);

    tcg_temp_free(tmp);
}
static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    va = gen_ieee_input(ctx, ra, fn11, 0);
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)
static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    va = gen_ieee_input(ctx, ra, fn11, 1);
    vb = gen_ieee_input(ctx, rb, fn11, 1);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1) {
            mask |= 0xffull << (i * 8);
        }
    }
    return mask;
}
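/*
 * Example: zapnot_mask(0x01) == 0xff and zapnot_mask(0x0f) ==
 * 0x00000000ffffffffull, which is why ZAPNOT with literals 0x01, 0x03
 * and 0x0f reduces to 8-, 16- and 32-bit zero-extension below.
 */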
/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit & 0xff) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}
/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (64 - lit * 8) & 0x3f;
        int len = cto32(byte_mask) * 8;
        if (pos < len) {
            tcg_gen_deposit_z_i64(vc, va, pos, len - pos);
        } else {
            tcg_gen_movi_i64(vc, 0);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3);
        tcg_gen_neg_i64(tmp, tmp);
        tcg_gen_andi_i64(tmp, tmp, 0x3f);
        tcg_gen_shl_i64(vc, va, tmp);
        tcg_temp_free(tmp);
    }
    gen_zapnoti(vc, vc, byte_mask);
}
/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos + len >= 64) {
            len = 64 - pos;
        }
        tcg_gen_extract_i64(vc, va, pos, len);
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(tmp, tmp, 3);
        tcg_gen_shr_i64(vc, va, tmp);
        gen_zapnoti(vc, vc, byte_mask);
        tcg_temp_free(tmp);
    }
}
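/*
 * Example: EXTWL with literal 2 and byte_mask 0x03 yields pos = 16 and
 * len = 16, i.e. a single extract of bits <31:16> of RA zero-extended
 * into RC.
 */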
/* INSWH, INSLH, INSQH */
static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = 64 - (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos < len) {
            tcg_gen_extract_i64(vc, va, pos, len - pos);
        } else {
            tcg_gen_movi_i64(vc, 0);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        TCGv shift = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, va, byte_mask);

        /* If (B & 7) == 0, we need to shift by 64 and leave a zero.  Do this
           portably by splitting the shift into two parts: shift_count-1 and 1.
           Arrange for the -1 by using ones-complement instead of
           twos-complement in the negation: ~(B * 8) & 63.  */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);

        tcg_gen_shr_i64(vc, tmp, shift);
        tcg_gen_shri_i64(vc, vc, 1);
        tcg_temp_free(shift);
        tcg_temp_free(tmp);
    }
}
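/*
 * Example of the split shift: for B & 7 == 2 the required right shift is
 * 64 - 16 = 48; ~(2 * 8) & 63 == 47 and the extra constant shift of 1
 * supplies the missing bit, while B & 7 == 0 gives 63 + 1 == 64 and thus
 * a correct zero rather than an undefined 64-bit shift.
 */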
/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos + len > 64) {
            len = 64 - pos;
        }
        tcg_gen_deposit_z_i64(vc, va, pos, len);
    } else {
        TCGv tmp = tcg_temp_new();
        TCGv shift = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, va, byte_mask);

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_shl_i64(vc, tmp, shift);
        tcg_temp_free(shift);
        tcg_temp_free(tmp);
    }
}
/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~(B * 8) & 63.  */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
static void gen_rx(DisasContext *ctx, int ra, int set)
{
    if (ra != 31) {
        ld_flag_byte(ctx->ir[ra], ENV_FLAG_RX_SHIFT);
    }

    st_flag_byte(tcg_constant_i64(set), ENV_FLAG_RX_SHIFT);
}
static DisasJumpType gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode.  Many of these are trivial access
       to internal cpu registers.  */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        /* No-op inside QEMU.  */
        tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                       offsetof(CPUAlphaState, unique));
        tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                       offsetof(CPUAlphaState, unique));

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tbflags & ENV_FLAG_PS_USER) == 0) {
        /* No-op inside QEMU.  */
        /* No-op inside QEMU.  */
        tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                       offsetof(CPUAlphaState, vptptr));
        tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                       offsetof(CPUAlphaState, sysval));
        tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                       offsetof(CPUAlphaState, sysval));

        /* Note that we already know we're in kernel mode, so we know
           that PS only contains the 3 IPL bits.  */
        ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT);

        /* But make sure and store only the 3 IPL bits from the user.  */
        TCGv tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, ctx->ir[IR_A0], PS_INT_MASK);
        st_flag_byte(tmp, ENV_FLAG_PS_SHIFT);

        /* Allow interrupts to be recognized right away.  */
        tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
        return DISAS_PC_UPDATED_NOCHAIN;

        ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT);

        tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                       offsetof(CPUAlphaState, usp));
        tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                       offsetof(CPUAlphaState, usp));

        tcg_gen_ld32s_i64(ctx->ir[IR_V0], cpu_env,
            -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));

        tcg_gen_st_i32(tcg_constant_i32(1), cpu_env,
                       -offsetof(AlphaCPU, env) +
                       offsetof(CPUState, halted));
        tcg_gen_movi_i64(ctx->ir[IR_V0], 0);
        return gen_excp(ctx, EXCP_HALTED, 0);

    return gen_invalid(ctx);

#ifdef CONFIG_USER_ONLY
    return gen_excp(ctx, EXCP_CALL_PAL, palcode);
        TCGv tmp = tcg_temp_new();
        uint64_t exc_addr = ctx->base.pc_next;
        uint64_t entry = ctx->palbr;

        if (ctx->tbflags & ENV_FLAG_PAL_MODE) {
            tcg_gen_movi_i64(tmp, 1);
            st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT);

        tcg_gen_movi_i64(tmp, exc_addr);
        tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));

        entry += (palcode & 0x80
                  ? 0x2000 + (palcode - 0x80) * 64
                  : 0x1000 + palcode * 64);

        tcg_gen_movi_i64(cpu_pc, entry);
        return DISAS_PC_UPDATED;
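/*
 * Example of the entry-point arithmetic above: an unprivileged CALL_PAL
 * 0x83 vectors to palbr + 0x2000 + 3 * 64, while a privileged palcode
 * such as 0x02 vectors to palbr + 0x1000 + 2 * 64.
 */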
#ifndef CONFIG_USER_ONLY

#define PR_LONG         0x200000

static int cpu_pr_data(int pr)
{
    switch (pr) {
    case  2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case  3: return offsetof(CPUAlphaState, trap_arg0);
    case  4: return offsetof(CPUAlphaState, trap_arg1);
    case  5: return offsetof(CPUAlphaState, trap_arg2);
    case  6: return offsetof(CPUAlphaState, exc_addr);
    case  7: return offsetof(CPUAlphaState, palbr);
    case  8: return offsetof(CPUAlphaState, ptbr);
    case  9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

    case 40 ... 63:
        return offsetof(CPUAlphaState, scratch[pr - 40]);

    case 251:
        return offsetof(CPUAlphaState, alarm_expire);
    }
    return 0;
}
static DisasJumpType gen_mfpr(DisasContext *ctx, TCGv va, int regno)
{
    void (*helper)(TCGv);

        /* Accessing the "non-shadow" general registers.  */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(va, cpu_std_ir[regno]);

    case 250: /* WALLTIME */
        helper = gen_helper_get_walltime;
    case 249: /* VMTIME */
        helper = gen_helper_get_vmtime;
        if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
            return DISAS_PC_STALE;

        ld_flag_byte(va, ENV_FLAG_PS_SHIFT);
        ld_flag_byte(va, ENV_FLAG_FEN_SHIFT);

        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
            tcg_gen_movi_i64(va, 0);
        } else if (data & PR_LONG) {
            tcg_gen_ld32s_i64(va, cpu_env, data & ~PR_LONG);
            tcg_gen_ld_i64(va, cpu_env, data);
static DisasJumpType gen_mtpr(DisasContext *ctx, TCGv vb, int regno)
{
    DisasJumpType ret = DISAS_NEXT;

        gen_helper_tbia(cpu_env);
        gen_helper_tbis(cpu_env, vb);

        tcg_gen_st_i32(tcg_constant_i32(1), cpu_env,
            -offsetof(AlphaCPU, env) + offsetof(CPUState, halted));
        return gen_excp(ctx, EXCP_HALTED, 0);

        gen_helper_halt(vb);
        return DISAS_PC_STALE;

        if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
            ret = DISAS_PC_STALE;
        gen_helper_set_alarm(cpu_env, vb);

        tcg_gen_st_i64(vb, cpu_env, offsetof(CPUAlphaState, palbr));
        /* Changing the PAL base register implies un-chaining all of the TBs
           that ended with a CALL_PAL.  Since the base register usually only
           changes during boot, flushing everything works well.  */
        gen_helper_tb_flush(cpu_env);
        return DISAS_PC_STALE;

        /* Accessing the "non-shadow" general registers.  */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(cpu_std_ir[regno], vb);

        st_flag_byte(vb, ENV_FLAG_PS_SHIFT);
        st_flag_byte(vb, ENV_FLAG_FEN_SHIFT);

        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data & PR_LONG) {
            tcg_gen_st32_i64(vb, cpu_env, data & ~PR_LONG);
            tcg_gen_st_i64(vb, cpu_env, data);
#endif /* !USER_ONLY */

#define REQUIRE_NO_LIT                          \
    do {                                        \
        if (real_islit) {                       \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_AMASK(FLAG)                     \
    do {                                        \
        if ((ctx->amask & AMASK_##FLAG) == 0) { \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_TB_FLAG(FLAG)                   \
    do {                                        \
        if ((ctx->tbflags & (FLAG)) == 0) {     \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_REG_31(WHICH)                   \
    do {                                        \
        if (WHICH != 31) {                      \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_FEN                             \
    do {                                        \
        if (!(ctx->tbflags & ENV_FLAG_FEN)) {   \
            goto invalid_opc;                   \
        }                                       \
    } while (0)
static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
{
    int32_t disp21, disp16, disp12 __attribute__((unused));
    uint8_t opc, ra, rb, rc, fpfn, fn7, lit;
    bool islit, real_islit;
    TCGv va, vb, vc, tmp, tmp2;

    /* Decode all instruction fields */
    opc = extract32(insn, 26, 6);
    ra = extract32(insn, 21, 5);
    rb = extract32(insn, 16, 5);
    rc = extract32(insn, 0, 5);
    real_islit = islit = extract32(insn, 12, 1);
    lit = extract32(insn, 13, 8);

    disp21 = sextract32(insn, 0, 21);
    disp16 = sextract32(insn, 0, 16);
    disp12 = sextract32(insn, 0, 12);

    fn11 = extract32(insn, 5, 11);
    fpfn = extract32(insn, 5, 6);
    fn7 = extract32(insn, 5, 7);

    if (rb == 31 && !islit) {

        ret = gen_call_pal(ctx, insn & 0x03ffffff);
        disp16 = (uint32_t)disp16 << 16;

        va = dest_gpr(ctx, ra);
        /* It's worth special-casing immediate loads.  */
        tcg_gen_movi_i64(va, disp16);
        tcg_gen_addi_i64(va, load_gpr(ctx, rb), disp16);

        gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
        gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
        gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
        gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);

        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);

        /* Special case ADDL as SEXTL.  */
        tcg_gen_ext32s_i64(vc, vb);
        /* Special case SUBQ as NEGQ.  */
        tcg_gen_neg_i64(vc, vb);

        va = load_gpr(ctx, ra);

        tcg_gen_add_i64(vc, va, vb);
        tcg_gen_ext32s_i64(vc, vc);

        tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, va, 2);
        tcg_gen_add_i64(tmp, tmp, vb);
        tcg_gen_ext32s_i64(vc, tmp);

        tcg_gen_sub_i64(vc, va, vb);
        tcg_gen_ext32s_i64(vc, vc);

        tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, va, 2);
        tcg_gen_sub_i64(tmp, tmp, vb);
        tcg_gen_ext32s_i64(vc, tmp);

        /* Special case 0 >= X as X == 0.  */
        gen_helper_cmpbe0(vc, vb);
        gen_helper_cmpbge(vc, va, vb);

        tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, va, 3);
        tcg_gen_add_i64(tmp, tmp, vb);
        tcg_gen_ext32s_i64(vc, tmp);

        tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, va, 3);
        tcg_gen_sub_i64(tmp, tmp, vb);
        tcg_gen_ext32s_i64(vc, tmp);

        tcg_gen_setcond_i64(TCG_COND_LTU, vc, va, vb);

        tcg_gen_add_i64(vc, va, vb);

        tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, va, 2);
        tcg_gen_add_i64(vc, tmp, vb);

        tcg_gen_sub_i64(vc, va, vb);

        tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, va, 2);
        tcg_gen_sub_i64(vc, tmp, vb);

        tcg_gen_setcond_i64(TCG_COND_EQ, vc, va, vb);

        tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, va, 3);
        tcg_gen_add_i64(vc, tmp, vb);

        tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, va, 3);
        tcg_gen_sub_i64(vc, tmp, vb);

        tcg_gen_setcond_i64(TCG_COND_LEU, vc, va, vb);

        tmp = tcg_temp_new();
        tcg_gen_ext32s_i64(tmp, va);
        tcg_gen_ext32s_i64(vc, vb);
        tcg_gen_add_i64(tmp, tmp, vc);
        tcg_gen_ext32s_i64(vc, tmp);
        gen_helper_check_overflow(cpu_env, vc, tmp);

        tmp = tcg_temp_new();
        tcg_gen_ext32s_i64(tmp, va);
        tcg_gen_ext32s_i64(vc, vb);
        tcg_gen_sub_i64(tmp, tmp, vc);
        tcg_gen_ext32s_i64(vc, tmp);
        gen_helper_check_overflow(cpu_env, vc, tmp);

        tcg_gen_setcond_i64(TCG_COND_LT, vc, va, vb);

        tmp = tcg_temp_new();
        tmp2 = tcg_temp_new();
        tcg_gen_eqv_i64(tmp, va, vb);
        tcg_gen_mov_i64(tmp2, va);
        tcg_gen_add_i64(vc, va, vb);
        tcg_gen_xor_i64(tmp2, tmp2, vc);
        tcg_gen_and_i64(tmp, tmp, tmp2);
        tcg_gen_shri_i64(tmp, tmp, 63);
        tcg_gen_movi_i64(tmp2, 0);
        gen_helper_check_overflow(cpu_env, tmp, tmp2);
        tcg_temp_free(tmp2);

        tmp = tcg_temp_new();
        tmp2 = tcg_temp_new();
        tcg_gen_xor_i64(tmp, va, vb);
        tcg_gen_mov_i64(tmp2, va);
        tcg_gen_sub_i64(vc, va, vb);
        tcg_gen_xor_i64(tmp2, tmp2, vc);
        tcg_gen_and_i64(tmp, tmp, tmp2);
        tcg_gen_shri_i64(tmp, tmp, 63);
        tcg_gen_movi_i64(tmp2, 0);
        gen_helper_check_overflow(cpu_env, tmp, tmp2);
        tcg_temp_free(tmp2);

        tcg_gen_setcond_i64(TCG_COND_LE, vc, va, vb);
        /* Special case BIS as NOP.  */
        /* Special case BIS as MOV.  */
        vc = dest_gpr(ctx, rc);
        tcg_gen_movi_i64(vc, lit);
        tcg_gen_mov_i64(vc, load_gpr(ctx, rb));

        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);

        if (fn7 == 0x28 && ra == 31) {
            /* Special case ORNOT as NOT.  */
            tcg_gen_not_i64(vc, vb);

        va = load_gpr(ctx, ra);

        tcg_gen_and_i64(vc, va, vb);
        tcg_gen_andc_i64(vc, va, vb);

        tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, va, 1);
        tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp, load_zero(ctx),
                            vb, load_gpr(ctx, rc));

        tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, va, 1);
        tcg_gen_movcond_i64(TCG_COND_EQ, vc, tmp, load_zero(ctx),
                            vb, load_gpr(ctx, rc));

        tcg_gen_or_i64(vc, va, vb);

        tcg_gen_movcond_i64(TCG_COND_EQ, vc, va, load_zero(ctx),
                            vb, load_gpr(ctx, rc));
        tcg_gen_movcond_i64(TCG_COND_NE, vc, va, load_zero(ctx),
                            vb, load_gpr(ctx, rc));

        tcg_gen_orc_i64(vc, va, vb);
        tcg_gen_xor_i64(vc, va, vb);

        tcg_gen_movcond_i64(TCG_COND_LT, vc, va, load_zero(ctx),
                            vb, load_gpr(ctx, rc));
        tcg_gen_movcond_i64(TCG_COND_GE, vc, va, load_zero(ctx),
                            vb, load_gpr(ctx, rc));

        tcg_gen_eqv_i64(vc, va, vb);

        tcg_gen_andi_i64(vc, vb, ~ctx->amask);

        tcg_gen_movcond_i64(TCG_COND_LE, vc, va, load_zero(ctx),
                            vb, load_gpr(ctx, rc));
        tcg_gen_movcond_i64(TCG_COND_GT, vc, va, load_zero(ctx),
                            vb, load_gpr(ctx, rc));

        tcg_gen_movi_i64(vc, ctx->implver);
        vc = dest_gpr(ctx, rc);
        va = load_gpr(ctx, ra);

        gen_msk_l(ctx, vc, va, rb, islit, lit, 0x01);
        gen_ext_l(ctx, vc, va, rb, islit, lit, 0x01);
        gen_ins_l(ctx, vc, va, rb, islit, lit, 0x01);
        gen_msk_l(ctx, vc, va, rb, islit, lit, 0x03);
        gen_ext_l(ctx, vc, va, rb, islit, lit, 0x03);
        gen_ins_l(ctx, vc, va, rb, islit, lit, 0x03);
        gen_msk_l(ctx, vc, va, rb, islit, lit, 0x0f);
        gen_ext_l(ctx, vc, va, rb, islit, lit, 0x0f);
        gen_ins_l(ctx, vc, va, rb, islit, lit, 0x0f);

        gen_zapnoti(vc, va, ~lit);
        gen_helper_zap(vc, va, load_gpr(ctx, rb));

        gen_zapnoti(vc, va, lit);
        gen_helper_zapnot(vc, va, load_gpr(ctx, rb));

        gen_msk_l(ctx, vc, va, rb, islit, lit, 0xff);

        tcg_gen_shri_i64(vc, va, lit & 0x3f);
        tmp = tcg_temp_new();
        vb = load_gpr(ctx, rb);
        tcg_gen_andi_i64(tmp, vb, 0x3f);
        tcg_gen_shr_i64(vc, va, tmp);

        gen_ext_l(ctx, vc, va, rb, islit, lit, 0xff);

        tcg_gen_shli_i64(vc, va, lit & 0x3f);
        tmp = tcg_temp_new();
        vb = load_gpr(ctx, rb);
        tcg_gen_andi_i64(tmp, vb, 0x3f);
        tcg_gen_shl_i64(vc, va, tmp);

        gen_ins_l(ctx, vc, va, rb, islit, lit, 0xff);

        tcg_gen_sari_i64(vc, va, lit & 0x3f);
        tmp = tcg_temp_new();
        vb = load_gpr(ctx, rb);
        tcg_gen_andi_i64(tmp, vb, 0x3f);
        tcg_gen_sar_i64(vc, va, tmp);

        gen_msk_h(ctx, vc, va, rb, islit, lit, 0x03);
        gen_ins_h(ctx, vc, va, rb, islit, lit, 0x03);
        gen_ext_h(ctx, vc, va, rb, islit, lit, 0x03);
        gen_msk_h(ctx, vc, va, rb, islit, lit, 0x0f);
        gen_ins_h(ctx, vc, va, rb, islit, lit, 0x0f);
        gen_ext_h(ctx, vc, va, rb, islit, lit, 0x0f);
        gen_msk_h(ctx, vc, va, rb, islit, lit, 0xff);
        gen_ins_h(ctx, vc, va, rb, islit, lit, 0xff);
        gen_ext_h(ctx, vc, va, rb, islit, lit, 0xff);
        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);
        va = load_gpr(ctx, ra);

        tcg_gen_mul_i64(vc, va, vb);
        tcg_gen_ext32s_i64(vc, vc);

        tcg_gen_mul_i64(vc, va, vb);

        tmp = tcg_temp_new();
        tcg_gen_mulu2_i64(tmp, vc, va, vb);

        tmp = tcg_temp_new();
        tcg_gen_ext32s_i64(tmp, va);
        tcg_gen_ext32s_i64(vc, vb);
        tcg_gen_mul_i64(tmp, tmp, vc);
        tcg_gen_ext32s_i64(vc, tmp);
        gen_helper_check_overflow(cpu_env, vc, tmp);

        tmp = tcg_temp_new();
        tmp2 = tcg_temp_new();
        tcg_gen_muls2_i64(vc, tmp, va, vb);
        tcg_gen_sari_i64(tmp2, vc, 63);
        gen_helper_check_overflow(cpu_env, tmp, tmp2);
        tcg_temp_free(tmp2);
        vc = dest_fpr(ctx, rc);
        switch (fpfn) { /* fn11 & 0x3F */

            t32 = tcg_temp_new_i32();
            va = load_gpr(ctx, ra);
            tcg_gen_extrl_i64_i32(t32, va);
            gen_helper_memory_to_s(vc, t32);
            tcg_temp_free_i32(t32);

            vb = load_fpr(ctx, rb);
            gen_helper_sqrtf(vc, cpu_env, vb);

            gen_sqrts(ctx, rb, rc, fn11);

            t32 = tcg_temp_new_i32();
            va = load_gpr(ctx, ra);
            tcg_gen_extrl_i64_i32(t32, va);
            gen_helper_memory_to_f(vc, t32);
            tcg_temp_free_i32(t32);

            va = load_gpr(ctx, ra);
            tcg_gen_mov_i64(vc, va);

            vb = load_fpr(ctx, rb);
            gen_helper_sqrtg(vc, cpu_env, vb);

            gen_sqrtt(ctx, rb, rc, fn11);

        /* VAX floating point */
        /* XXX: rounding mode and trap are ignored (!) */
        vc = dest_fpr(ctx, rc);
        vb = load_fpr(ctx, rb);
        va = load_fpr(ctx, ra);
        switch (fpfn) { /* fn11 & 0x3F */

            gen_helper_addf(vc, cpu_env, va, vb);
            gen_helper_subf(vc, cpu_env, va, vb);
            gen_helper_mulf(vc, cpu_env, va, vb);
            gen_helper_divf(vc, cpu_env, va, vb);

            gen_helper_addg(vc, cpu_env, va, vb);
            gen_helper_subg(vc, cpu_env, va, vb);
            gen_helper_mulg(vc, cpu_env, va, vb);
            gen_helper_divg(vc, cpu_env, va, vb);

            gen_helper_cmpgeq(vc, cpu_env, va, vb);
            gen_helper_cmpglt(vc, cpu_env, va, vb);
            gen_helper_cmpgle(vc, cpu_env, va, vb);

            gen_helper_cvtgf(vc, cpu_env, vb);
            gen_helper_cvtgq(vc, cpu_env, vb);
            gen_helper_cvtqf(vc, cpu_env, vb);
            gen_helper_cvtqg(vc, cpu_env, vb);
        /* IEEE floating-point */
        switch (fpfn) { /* fn11 & 0x3F */

            gen_adds(ctx, ra, rb, rc, fn11);
            gen_subs(ctx, ra, rb, rc, fn11);
            gen_muls(ctx, ra, rb, rc, fn11);
            gen_divs(ctx, ra, rb, rc, fn11);

            gen_addt(ctx, ra, rb, rc, fn11);
            gen_subt(ctx, ra, rb, rc, fn11);
            gen_mult(ctx, ra, rb, rc, fn11);
            gen_divt(ctx, ra, rb, rc, fn11);

            gen_cmptun(ctx, ra, rb, rc, fn11);
            gen_cmpteq(ctx, ra, rb, rc, fn11);
            gen_cmptlt(ctx, ra, rb, rc, fn11);
            gen_cmptle(ctx, ra, rb, rc, fn11);

            if (fn11 == 0x2AC || fn11 == 0x6AC) {
                gen_cvtst(ctx, rb, rc, fn11);
                gen_cvtts(ctx, rb, rc, fn11);

            gen_cvttq(ctx, rb, rc, fn11);
            gen_cvtqs(ctx, rb, rc, fn11);
            gen_cvtqt(ctx, rb, rc, fn11);
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);

            /* Special case CPYS as FNOP.  */
            vc = dest_fpr(ctx, rc);
            va = load_fpr(ctx, ra);
            /* Special case CPYS as FMOV.  */
            tcg_gen_mov_i64(vc, va);
            vb = load_fpr(ctx, rb);
            gen_cpy_mask(vc, va, vb, 0, 0x8000000000000000ULL);

            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            va = load_fpr(ctx, ra);
            gen_cpy_mask(vc, va, vb, 1, 0x8000000000000000ULL);

            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            va = load_fpr(ctx, ra);
            gen_cpy_mask(vc, va, vb, 0, 0xFFF0000000000000ULL);

            va = load_fpr(ctx, ra);
            gen_helper_store_fpcr(cpu_env, va);
            if (ctx->tb_rm == QUAL_RM_D) {
                /* Re-do the copy of the rounding mode to fp_status
                   the next time we use dynamic rounding.  */

            va = dest_fpr(ctx, ra);
            gen_helper_load_fpcr(va, cpu_env);

            gen_fcmov(ctx, TCG_COND_EQ, ra, rb, rc);
            gen_fcmov(ctx, TCG_COND_NE, ra, rb, rc);
            gen_fcmov(ctx, TCG_COND_LT, ra, rb, rc);
            gen_fcmov(ctx, TCG_COND_GE, ra, rb, rc);
            gen_fcmov(ctx, TCG_COND_LE, ra, rb, rc);
            gen_fcmov(ctx, TCG_COND_GT, ra, rb, rc);

        case 0x030: /* CVTQL */
        case 0x130: /* CVTQL/V */
        case 0x530: /* CVTQL/SV */
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            gen_helper_cvtql(vc, cpu_env, vb);
            gen_fp_exc_raise(rc, fn11);

        switch ((uint16_t)disp16) {

            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
            tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);

            va = dest_gpr(ctx, ra);
            if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
                gen_helper_load_pcc(va, cpu_env);
                ret = DISAS_PC_STALE;
                gen_helper_load_pcc(va, cpu_env);
        /* HW_MFPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
        va = dest_gpr(ctx, ra);
        ret = gen_mfpr(ctx, va, insn & 0xffff);

        /* JMP, JSR, RET, JSR_COROUTINE.  These only differ by the branch
           prediction stack action, which of course we don't implement.  */
        vb = load_gpr(ctx, rb);
        tcg_gen_andi_i64(cpu_pc, vb, ~3);
        tcg_gen_movi_i64(ctx->ir[ra], ctx->base.pc_next);
        ret = DISAS_PC_UPDATED;

        /* HW_LD (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
            TCGv addr = tcg_temp_new();
            vb = load_gpr(ctx, rb);
            va = dest_gpr(ctx, ra);

            tcg_gen_addi_i64(addr, vb, disp12);
            switch ((insn >> 12) & 0xF) {
                /* Longword physical access (hw_ldl/p) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL);
                /* Quadword physical access (hw_ldq/p) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEQ);
                /* Longword physical access with lock (hw_ldl_l/p) */
                gen_qemu_ldl_l(va, addr, MMU_PHYS_IDX);
                /* Quadword physical access with lock (hw_ldq_l/p) */
                gen_qemu_ldq_l(va, addr, MMU_PHYS_IDX);
                /* Longword virtual PTE fetch (hw_ldl/v) */
                /* Quadword virtual PTE fetch (hw_ldq/v) */
                /* Longword virtual access (hw_ldl) */
                /* Quadword virtual access (hw_ldq) */
                /* Longword virtual access with protection check (hw_ldl/w) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LESL);
                /* Quadword virtual access with protection check (hw_ldq/w) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LEQ);
                /* Longword virtual access with alt access mode (hw_ldl/a) */
                /* Quadword virtual access with alt access mode (hw_ldq/a) */
                /* Longword virtual access with alternate access mode and
                   protection checks (hw_ldl/wa) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LESL);
                /* Quadword virtual access with alternate access mode and
                   protection checks (hw_ldq/wa) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEQ);
            tcg_temp_free(addr);
        vc = dest_gpr(ctx, rc);

            va = load_fpr(ctx, ra);
            tcg_gen_mov_i64(vc, va);
        } else if (fn7 == 0x78) {

            t32 = tcg_temp_new_i32();
            va = load_fpr(ctx, ra);
            gen_helper_s_to_memory(t32, va);
            tcg_gen_ext_i32_i64(vc, t32);
            tcg_temp_free_i32(t32);

        vb = load_gpr_lit(ctx, rb, lit, islit);

            tcg_gen_ext8s_i64(vc, vb);
            tcg_gen_ext16s_i64(vc, vb);
            tcg_gen_ctpop_i64(vc, vb);

            va = load_gpr(ctx, ra);
            gen_helper_perr(vc, va, vb);

            tcg_gen_clzi_i64(vc, vb, 64);
            tcg_gen_ctzi_i64(vc, vb, 64);

            gen_helper_unpkbw(vc, vb);
            gen_helper_unpkbl(vc, vb);
            gen_helper_pkwb(vc, vb);
            gen_helper_pklb(vc, vb);

            va = load_gpr(ctx, ra);
            gen_helper_minsb8(vc, va, vb);
            va = load_gpr(ctx, ra);
            gen_helper_minsw4(vc, va, vb);
            va = load_gpr(ctx, ra);
            gen_helper_minub8(vc, va, vb);
            va = load_gpr(ctx, ra);
            gen_helper_minuw4(vc, va, vb);
            va = load_gpr(ctx, ra);
            gen_helper_maxub8(vc, va, vb);
            va = load_gpr(ctx, ra);
            gen_helper_maxuw4(vc, va, vb);
            va = load_gpr(ctx, ra);
            gen_helper_maxsb8(vc, va, vb);
            va = load_gpr(ctx, ra);
            gen_helper_maxsw4(vc, va, vb);
        /* HW_MTPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
        vb = load_gpr(ctx, rb);
        ret = gen_mtpr(ctx, vb, insn & 0xffff);

        /* HW_RET (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
            /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
               address from EXC_ADDR.  This turns out to be useful for our
               emulation PALcode, so continue to accept it.  */
            vb = dest_sink(ctx);
            tcg_gen_ld_i64(vb, cpu_env, offsetof(CPUAlphaState, exc_addr));
            vb = load_gpr(ctx, rb);
        tcg_gen_movi_i64(cpu_lock_addr, -1);
        st_flag_byte(load_zero(ctx), ENV_FLAG_RX_SHIFT);
        tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, vb, 1);
        st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT);
        tcg_gen_andi_i64(cpu_pc, vb, ~3);
        /* Allow interrupts to be recognized right away.  */
        ret = DISAS_PC_UPDATED_NOCHAIN;
        /* HW_ST (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
            switch ((insn >> 12) & 0xF) {
                /* Longword physical access */
                va = load_gpr(ctx, ra);
                vb = load_gpr(ctx, rb);
                tmp = tcg_temp_new();
                tcg_gen_addi_i64(tmp, vb, disp12);
                tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LESL);

                /* Quadword physical access */
                va = load_gpr(ctx, ra);
                vb = load_gpr(ctx, rb);
                tmp = tcg_temp_new();
                tcg_gen_addi_i64(tmp, vb, disp12);
                tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEQ);

                /* Longword physical access with lock */
                ret = gen_store_conditional(ctx, ra, rb, disp12,
                                            MMU_PHYS_IDX, MO_LESL);
                /* Quadword physical access with lock */
                ret = gen_store_conditional(ctx, ra, rb, disp12,
                                            MMU_PHYS_IDX, MO_LEQ);

                /* Longword virtual access */
                /* Quadword virtual access */
                /* Longword virtual access with alternate access mode */
                /* Quadword virtual access with alternate access mode */
        gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
        gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
        gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
        gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
        gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
        gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);

        gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
        gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
        gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
        gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);

        ret = gen_store_conditional(ctx, ra, rb, disp16,
                                    ctx->mem_idx, MO_LESL);
        ret = gen_store_conditional(ctx, ra, rb, disp16,
                                    ctx->mem_idx, MO_LEQ);

        ret = gen_bdirect(ctx, ra, disp21);
    case 0x31: /* FBEQ */
        ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
    case 0x32: /* FBLT */
        ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
    case 0x33: /* FBLE */
        ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);

        ret = gen_bdirect(ctx, ra, disp21);
    case 0x35: /* FBNE */
        ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
    case 0x36: /* FBGE */
        ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
    case 0x37: /* FBGT */
        ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);

        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
        ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
        ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
        ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
        ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);

    invalid_opc:
        ret = gen_invalid(ctx);
        ret = gen_excp(ctx, EXCP_FEN, 0);
    }

    return ret;
}
static void alpha_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUAlphaState *env = cpu->env_ptr;
    int64_t bound;

    ctx->tbflags = ctx->base.tb->flags;
    ctx->mem_idx = cpu_mmu_index(env, false);
    ctx->implver = env->implver;
    ctx->amask = env->amask;

#ifdef CONFIG_USER_ONLY
    ctx->ir = cpu_std_ir;
#else
    ctx->palbr = env->palbr;
    ctx->ir = (ctx->tbflags & ENV_FLAG_PAL_MODE ? cpu_pal_ir : cpu_std_ir);
#endif

    /* ??? Every TB begins with unset rounding mode, to be initialized on
       the first fp insn of the TB.  Alternately we could define a proper
       default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
       to reset the FP_STATUS to that default at the end of any TB that
       changes the default.  We could even (gasp) dynamically figure out
       what default would be most efficient given the running program.  */
    ctx->tb_rm = -1;
    /* Similarly for flush-to-zero.  */
    ctx->tb_ftz = -1;

    ctx->zero = NULL;
    ctx->sink = NULL;

    /* Bound the number of insns to execute to those left on the page.  */
    bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
    ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
}
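/*
 * Example of the page bound above: if pc_first is 16 bytes below the end
 * of its page, -(pc_first | TARGET_PAGE_MASK) is 16, so at most four
 * 4-byte insns are translated before the TB must end at the page boundary.
 */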
static void alpha_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}

static void alpha_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    tcg_gen_insn_start(dcbase->pc_next);
}
static void alpha_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUAlphaState *env = cpu->env_ptr;
    uint32_t insn = translator_ldl(env, ctx->base.pc_next);

    ctx->base.pc_next += 4;
    ctx->base.is_jmp = translate_one(ctx, insn);

    free_context_temps(ctx);
    translator_loop_temp_check(&ctx->base);
}
static void alpha_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    switch (ctx->base.is_jmp) {
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
        if (use_goto_tb(ctx, ctx->base.pc_next)) {
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
            tcg_gen_exit_tb(ctx->base.tb, 0);
        }
        /* FALLTHRU */
    case DISAS_PC_STALE:
        tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
        /* FALLTHRU */
    case DISAS_PC_UPDATED:
        if (!ctx->base.singlestep_enabled) {
            tcg_gen_lookup_and_goto_ptr();
            break;
        }
        /* FALLTHRU */
    case DISAS_PC_UPDATED_NOCHAIN:
        if (ctx->base.singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG, 0);
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
        break;
    default:
        g_assert_not_reached();
    }
}
static void alpha_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
{
    qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
    log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size);
}
= {
3025 .init_disas_context
= alpha_tr_init_disas_context
,
3026 .tb_start
= alpha_tr_tb_start
,
3027 .insn_start
= alpha_tr_insn_start
,
3028 .translate_insn
= alpha_tr_translate_insn
,
3029 .tb_stop
= alpha_tr_tb_stop
,
3030 .disas_log
= alpha_tr_disas_log
,
3033 void gen_intermediate_code(CPUState
*cpu
, TranslationBlock
*tb
, int max_insns
)
3036 translator_loop(&alpha_tr_ops
, &dc
.base
, cpu
, tb
, max_insns
);
3039 void restore_state_to_opc(CPUAlphaState
*env
, TranslationBlock
*tb
,