/*
 * Alpha emulation cpu translation for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "sysemu/cpus.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"

#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
typedef struct DisasContext DisasContext;
struct DisasContext {
    DisasContextBase base;

#ifdef CONFIG_USER_ONLY
    MemOp unalign;
#else
    uint64_t palbr;
#endif
    uint32_t tbflags;
    int mem_idx;

    /* implver and amask values for this CPU.  */
    int implver;
    int amask;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;

    /* The set of registers active in the current context.  */
    TCGv *ir;

    /* Temporaries for $31 and $f31 as source and destination.  */
    TCGv zero;
    TCGv sink;
};

#ifdef CONFIG_USER_ONLY
#define UNALIGN(C)  (C)->unalign
#else
#define UNALIGN(C)  MO_ALIGN
#endif

/* Target-specific return values from translate_one, indicating the
   state of the TB.  Note that DISAS_NEXT indicates that we are not
   exiting the TB.  */
#define DISAS_PC_UPDATED_NOCHAIN  DISAS_TARGET_0
#define DISAS_PC_UPDATED          DISAS_TARGET_1
#define DISAS_PC_STALE            DISAS_TARGET_2
/* global register indexes */
static TCGv cpu_std_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_value;

#ifndef CONFIG_USER_ONLY
static TCGv cpu_pal_ir[31];
#endif

#include "exec/gen-icount.h"
void alpha_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUAlphaState, V) }

    typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        DEF_VAR(pc),
        DEF_VAR(lock_addr),
        DEF_VAR(lock_value),
    };

#undef DEF_VAR
    /* Use the symbolic register names that match the disassembler.  */
    static const char greg_names[31][4] = {
        "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
        "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
        "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
        "t10", "t11", "ra", "t12", "at", "gp", "sp"
    };
    static const char freg_names[31][4] = {
        "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
        "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
        "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
        "f24", "f25", "f26", "f27", "f28", "f29", "f30"
    };
#ifndef CONFIG_USER_ONLY
    static const char shadow_names[8][8] = {
        "pal_t7", "pal_s0", "pal_s1", "pal_s2",
        "pal_s3", "pal_s4", "pal_s5", "pal_t11"
    };
#endif
    int i;

    for (i = 0; i < 31; i++) {
        cpu_std_ir[i] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState, ir[i]),
                                               greg_names[i]);
    }

    for (i = 0; i < 31; i++) {
        cpu_fir[i] = tcg_global_mem_new_i64(cpu_env,
                                            offsetof(CPUAlphaState, fir[i]),
                                            freg_names[i]);
    }

#ifndef CONFIG_USER_ONLY
    memcpy(cpu_pal_ir, cpu_std_ir, sizeof(cpu_pal_ir));
    for (i = 0; i < 8; i++) {
        int r = (i == 7 ? 25 : i + 8);
        cpu_pal_ir[r] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState,
                                                        shadow[i]),
                                               shadow_names[i]);
    }
#endif

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new_i64(cpu_env, v->ofs, v->name);
    }
}
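/*
 * Illustration (added, not part of the original source): with the DEF_VAR
 * macro above, an entry such as DEF_VAR(lock_addr) expands to
 *
 *     { &cpu_lock_addr, "lock_addr", offsetof(CPUAlphaState, lock_addr) },
 *
 * so the final loop over `vars` registers each scalar global (pc,
 * lock_addr, lock_value) as a named TCG global backed by its
 * CPUAlphaState field, just like the per-register arrays above.
 */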
static TCGv load_zero(DisasContext *ctx)
{
    if (!ctx->zero) {
        ctx->zero = tcg_constant_i64(0);
    }
    return ctx->zero;
}

static TCGv dest_sink(DisasContext *ctx)
{
    if (!ctx->sink) {
        ctx->sink = tcg_temp_new();
    }
    return ctx->sink;
}

static void free_context_temps(DisasContext *ctx)
{
    if (ctx->sink) {
        tcg_gen_discard_i64(ctx->sink);
        ctx->sink = NULL;
    }
}
static TCGv load_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
                         uint8_t lit, bool islit)
{
    if (islit) {
        return tcg_constant_i64(lit);
    } else if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return dest_sink(ctx);
    }
}

static TCGv load_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return dest_sink(ctx);
    }
}
static int get_flag_ofs(unsigned shift)
{
    int ofs = offsetof(CPUAlphaState, flags);
#if HOST_BIG_ENDIAN
    ofs += 3 - (shift / 8);
#else
    ofs += shift / 8;
#endif
    return ofs;
}

static void ld_flag_byte(TCGv val, unsigned shift)
{
    tcg_gen_ld8u_i64(val, cpu_env, get_flag_ofs(shift));
}

static void st_flag_byte(TCGv val, unsigned shift)
{
    tcg_gen_st8_i64(val, cpu_env, get_flag_ofs(shift));
}
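/*
 * Worked example (illustrative): env->flags packs several one-byte flags
 * into a single 32-bit word, and the ENV_FLAG_*_SHIFT constants give each
 * flag's bit position.  get_flag_ofs() turns that bit position into a byte
 * offset, so a flag whose shift is 8 lives in byte 1 of the word; the
 * "3 - (shift / 8)" form indexes the same byte from the other end on
 * big-endian hosts, keeping the 8-bit load/store on the right byte.
 */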
static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_constant_i32(exception);
    tmp2 = tcg_constant_i32(error_code);
    gen_helper_excp(cpu_env, tmp1, tmp2);
}

static DisasJumpType gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
    gen_excp_1(exception, error_code);
    return DISAS_NORETURN;
}

static inline DisasJumpType gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}
276 static void gen_ldf(DisasContext
*ctx
, TCGv dest
, TCGv addr
)
278 TCGv_i32 tmp32
= tcg_temp_new_i32();
279 tcg_gen_qemu_ld_i32(tmp32
, addr
, ctx
->mem_idx
, MO_LEUL
| UNALIGN(ctx
));
280 gen_helper_memory_to_f(dest
, tmp32
);
283 static void gen_ldg(DisasContext
*ctx
, TCGv dest
, TCGv addr
)
285 TCGv tmp
= tcg_temp_new();
286 tcg_gen_qemu_ld_i64(tmp
, addr
, ctx
->mem_idx
, MO_LEUQ
| UNALIGN(ctx
));
287 gen_helper_memory_to_g(dest
, tmp
);
290 static void gen_lds(DisasContext
*ctx
, TCGv dest
, TCGv addr
)
292 TCGv_i32 tmp32
= tcg_temp_new_i32();
293 tcg_gen_qemu_ld_i32(tmp32
, addr
, ctx
->mem_idx
, MO_LEUL
| UNALIGN(ctx
));
294 gen_helper_memory_to_s(dest
, tmp32
);
297 static void gen_ldt(DisasContext
*ctx
, TCGv dest
, TCGv addr
)
299 tcg_gen_qemu_ld_i64(dest
, addr
, ctx
->mem_idx
, MO_LEUQ
| UNALIGN(ctx
));
302 static void gen_load_fp(DisasContext
*ctx
, int ra
, int rb
, int32_t disp16
,
303 void (*func
)(DisasContext
*, TCGv
, TCGv
))
305 /* Loads to $f31 are prefetches, which we can treat as nops. */
306 if (likely(ra
!= 31)) {
307 TCGv addr
= tcg_temp_new();
308 tcg_gen_addi_i64(addr
, load_gpr(ctx
, rb
), disp16
);
309 func(ctx
, cpu_fir
[ra
], addr
);
313 static void gen_load_int(DisasContext
*ctx
, int ra
, int rb
, int32_t disp16
,
314 MemOp op
, bool clear
, bool locked
)
318 /* LDQ_U with ra $31 is UNOP. Other various loads are forms of
319 prefetches, which we can treat as nops. No worries about
320 missed exceptions here. */
321 if (unlikely(ra
== 31)) {
325 addr
= tcg_temp_new();
326 tcg_gen_addi_i64(addr
, load_gpr(ctx
, rb
), disp16
);
328 tcg_gen_andi_i64(addr
, addr
, ~0x7);
329 } else if (!locked
) {
334 tcg_gen_qemu_ld_i64(dest
, addr
, ctx
->mem_idx
, op
);
337 tcg_gen_mov_i64(cpu_lock_addr
, addr
);
338 tcg_gen_mov_i64(cpu_lock_value
, dest
);
342 static void gen_stf(DisasContext
*ctx
, TCGv src
, TCGv addr
)
344 TCGv_i32 tmp32
= tcg_temp_new_i32();
345 gen_helper_f_to_memory(tmp32
, addr
);
346 tcg_gen_qemu_st_i32(tmp32
, addr
, ctx
->mem_idx
, MO_LEUL
| UNALIGN(ctx
));
349 static void gen_stg(DisasContext
*ctx
, TCGv src
, TCGv addr
)
351 TCGv tmp
= tcg_temp_new();
352 gen_helper_g_to_memory(tmp
, src
);
353 tcg_gen_qemu_st_i64(tmp
, addr
, ctx
->mem_idx
, MO_LEUQ
| UNALIGN(ctx
));
356 static void gen_sts(DisasContext
*ctx
, TCGv src
, TCGv addr
)
358 TCGv_i32 tmp32
= tcg_temp_new_i32();
359 gen_helper_s_to_memory(tmp32
, src
);
360 tcg_gen_qemu_st_i32(tmp32
, addr
, ctx
->mem_idx
, MO_LEUL
| UNALIGN(ctx
));
363 static void gen_stt(DisasContext
*ctx
, TCGv src
, TCGv addr
)
365 tcg_gen_qemu_st_i64(src
, addr
, ctx
->mem_idx
, MO_LEUQ
| UNALIGN(ctx
));
368 static void gen_store_fp(DisasContext
*ctx
, int ra
, int rb
, int32_t disp16
,
369 void (*func
)(DisasContext
*, TCGv
, TCGv
))
371 TCGv addr
= tcg_temp_new();
372 tcg_gen_addi_i64(addr
, load_gpr(ctx
, rb
), disp16
);
373 func(ctx
, load_fpr(ctx
, ra
), addr
);
376 static void gen_store_int(DisasContext
*ctx
, int ra
, int rb
, int32_t disp16
,
377 MemOp op
, bool clear
)
381 addr
= tcg_temp_new();
382 tcg_gen_addi_i64(addr
, load_gpr(ctx
, rb
), disp16
);
384 tcg_gen_andi_i64(addr
, addr
, ~0x7);
389 src
= load_gpr(ctx
, ra
);
390 tcg_gen_qemu_st_i64(src
, addr
, ctx
->mem_idx
, op
);
393 static DisasJumpType
gen_store_conditional(DisasContext
*ctx
, int ra
, int rb
,
394 int32_t disp16
, int mem_idx
,
397 TCGLabel
*lab_fail
, *lab_done
;
400 addr
= tcg_temp_new_i64();
401 tcg_gen_addi_i64(addr
, load_gpr(ctx
, rb
), disp16
);
402 free_context_temps(ctx
);
404 lab_fail
= gen_new_label();
405 lab_done
= gen_new_label();
406 tcg_gen_brcond_i64(TCG_COND_NE
, addr
, cpu_lock_addr
, lab_fail
);
408 val
= tcg_temp_new_i64();
409 tcg_gen_atomic_cmpxchg_i64(val
, cpu_lock_addr
, cpu_lock_value
,
410 load_gpr(ctx
, ra
), mem_idx
, op
);
411 free_context_temps(ctx
);
414 tcg_gen_setcond_i64(TCG_COND_EQ
, ctx
->ir
[ra
], val
, cpu_lock_value
);
416 tcg_gen_br(lab_done
);
418 gen_set_label(lab_fail
);
420 tcg_gen_movi_i64(ctx
->ir
[ra
], 0);
423 gen_set_label(lab_done
);
424 tcg_gen_movi_i64(cpu_lock_addr
, -1);
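    /*
     * Note (added commentary): this models Alpha's LDx_L/STx_C pair with a
     * compare-and-swap.  The store-conditional succeeds only if the address
     * still matches cpu_lock_addr and memory still holds the value that was
     * loaded-locked (cpu_lock_value); ra then receives 1 on success and 0 on
     * failure, and the lock address is invalidated either way.  A sketch of
     * the guest sequence being modelled:
     *
     *     ldq_l  t0, 0(a0)      # load-locked: remember address and value
     *     addq   t0, 1, t0
     *     stq_c  t0, 0(a0)      # store-conditional: t0 := 1 or 0
     *     beq    t0, retry
     */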
428 static bool use_goto_tb(DisasContext
*ctx
, uint64_t dest
)
430 return translator_use_goto_tb(&ctx
->base
, dest
);
433 static DisasJumpType
gen_bdirect(DisasContext
*ctx
, int ra
, int32_t disp
)
435 uint64_t dest
= ctx
->base
.pc_next
+ (disp
<< 2);
438 tcg_gen_movi_i64(ctx
->ir
[ra
], ctx
->base
.pc_next
);
441 /* Notice branch-to-next; used to initialize RA with the PC. */
444 } else if (use_goto_tb(ctx
, dest
)) {
446 tcg_gen_movi_i64(cpu_pc
, dest
);
447 tcg_gen_exit_tb(ctx
->base
.tb
, 0);
448 return DISAS_NORETURN
;
450 tcg_gen_movi_i64(cpu_pc
, dest
);
451 return DISAS_PC_UPDATED
;
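    /*
     * Worked example (illustrative): direct branch displacements are counted
     * in instructions, so dest = pc_next + (disp << 2).  A BSR with disp == 0
     * therefore "branches" to the next instruction and only serves to load RA
     * with the return PC, which is the branch-to-next case noticed above.
     */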
455 static DisasJumpType
gen_bcond_internal(DisasContext
*ctx
, TCGCond cond
,
456 TCGv cmp
, int32_t disp
)
458 uint64_t dest
= ctx
->base
.pc_next
+ (disp
<< 2);
459 TCGLabel
*lab_true
= gen_new_label();
461 if (use_goto_tb(ctx
, dest
)) {
462 tcg_gen_brcondi_i64(cond
, cmp
, 0, lab_true
);
465 tcg_gen_movi_i64(cpu_pc
, ctx
->base
.pc_next
);
466 tcg_gen_exit_tb(ctx
->base
.tb
, 0);
468 gen_set_label(lab_true
);
470 tcg_gen_movi_i64(cpu_pc
, dest
);
471 tcg_gen_exit_tb(ctx
->base
.tb
, 1);
473 return DISAS_NORETURN
;
475 TCGv_i64 z
= load_zero(ctx
);
476 TCGv_i64 d
= tcg_constant_i64(dest
);
477 TCGv_i64 p
= tcg_constant_i64(ctx
->base
.pc_next
);
479 tcg_gen_movcond_i64(cond
, cpu_pc
, cmp
, z
, d
, p
);
480 return DISAS_PC_UPDATED
;
484 static DisasJumpType
gen_bcond(DisasContext
*ctx
, TCGCond cond
, int ra
,
485 int32_t disp
, int mask
)
488 TCGv tmp
= tcg_temp_new();
491 tcg_gen_andi_i64(tmp
, load_gpr(ctx
, ra
), 1);
492 ret
= gen_bcond_internal(ctx
, cond
, tmp
, disp
);
495 return gen_bcond_internal(ctx
, cond
, load_gpr(ctx
, ra
), disp
);
/* Fold -0.0 for comparison with COND.  */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        g_assert_not_reached();
    }
}
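/*
 * Worked example (illustrative): for a >= / < style test the sequence above
 * computes dest = (src != 0x8000000000000000) ? src : 0, i.e. the -0.0
 * pattern (only the sign bit set) is rewritten to +0.0 before the signed
 * integer comparison, while every other encoding passes through unchanged.
 * The setcond yields 0 or 1, the negation turns that into an all-zeros or
 * all-ones mask, and the AND applies it.
 */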
530 static DisasJumpType
gen_fbcond(DisasContext
*ctx
, TCGCond cond
, int ra
,
533 TCGv cmp_tmp
= tcg_temp_new();
536 gen_fold_mzero(cond
, cmp_tmp
, load_fpr(ctx
, ra
));
537 ret
= gen_bcond_internal(ctx
, cond
, cmp_tmp
, disp
);
541 static void gen_fcmov(DisasContext
*ctx
, TCGCond cond
, int ra
, int rb
, int rc
)
546 vb
= load_fpr(ctx
, rb
);
548 gen_fold_mzero(cond
, va
, load_fpr(ctx
, ra
));
550 tcg_gen_movcond_i64(cond
, dest_fpr(ctx
, rc
), va
, z
, vb
, load_fpr(ctx
, rc
));
553 #define QUAL_RM_N 0x080 /* Round mode nearest even */
554 #define QUAL_RM_C 0x000 /* Round mode chopped */
555 #define QUAL_RM_M 0x040 /* Round mode minus infinity */
556 #define QUAL_RM_D 0x0c0 /* Round mode dynamic */
557 #define QUAL_RM_MASK 0x0c0
559 #define QUAL_U 0x100 /* Underflow enable (fp output) */
560 #define QUAL_V 0x100 /* Overflow enable (int output) */
561 #define QUAL_S 0x400 /* Software completion enable */
562 #define QUAL_I 0x200 /* Inexact detection enable */
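/*
 * Illustration (added): fn11 is the 11-bit function field of an FP operate
 * instruction; its upper bits are the qualifiers decoded above.  For
 * example, a hypothetical fn11 value of 0x5A0 would decode as
 * QUAL_S | QUAL_U (software completion plus underflow enable) with
 * (fn11 & QUAL_RM_MASK) == QUAL_RM_N (round to nearest even), leaving the
 * low six bits as the actual operation.
 */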
564 static void gen_qual_roundmode(DisasContext
*ctx
, int fn11
)
568 fn11
&= QUAL_RM_MASK
;
569 if (fn11
== ctx
->tb_rm
) {
574 tmp
= tcg_temp_new_i32();
577 tcg_gen_movi_i32(tmp
, float_round_nearest_even
);
580 tcg_gen_movi_i32(tmp
, float_round_to_zero
);
583 tcg_gen_movi_i32(tmp
, float_round_down
);
586 tcg_gen_ld8u_i32(tmp
, cpu_env
,
587 offsetof(CPUAlphaState
, fpcr_dyn_round
));
591 #if defined(CONFIG_SOFTFLOAT_INLINE)
592 /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
593 With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
594 sets the one field. */
595 tcg_gen_st8_i32(tmp
, cpu_env
,
596 offsetof(CPUAlphaState
, fp_status
.float_rounding_mode
));
598 gen_helper_setroundmode(tmp
);
602 static void gen_qual_flushzero(DisasContext
*ctx
, int fn11
)
607 if (fn11
== ctx
->tb_ftz
) {
612 tmp
= tcg_temp_new_i32();
614 /* Underflow is enabled, use the FPCR setting. */
615 tcg_gen_ld8u_i32(tmp
, cpu_env
,
616 offsetof(CPUAlphaState
, fpcr_flush_to_zero
));
618 /* Underflow is disabled, force flush-to-zero. */
619 tcg_gen_movi_i32(tmp
, 1);
622 #if defined(CONFIG_SOFTFLOAT_INLINE)
623 tcg_gen_st8_i32(tmp
, cpu_env
,
624 offsetof(CPUAlphaState
, fp_status
.flush_to_zero
));
626 gen_helper_setflushzero(tmp
);
630 static TCGv
gen_ieee_input(DisasContext
*ctx
, int reg
, int fn11
, int is_cmp
)
634 if (unlikely(reg
== 31)) {
635 val
= load_zero(ctx
);
638 if ((fn11
& QUAL_S
) == 0) {
640 gen_helper_ieee_input_cmp(cpu_env
, val
);
642 gen_helper_ieee_input(cpu_env
, val
);
645 #ifndef CONFIG_USER_ONLY
646 /* In system mode, raise exceptions for denormals like real
647 hardware. In user mode, proceed as if the OS completion
648 handler is handling the denormal as per spec. */
649 gen_helper_ieee_input_s(cpu_env
, val
);
656 static void gen_fp_exc_raise(int rc
, int fn11
)
658 /* ??? We ought to be able to do something with imprecise exceptions.
659 E.g. notice we're still in the trap shadow of something within the
660 TB and do not generate the code to signal the exception; end the TB
661 when an exception is forced to arrive, either by consumption of a
662 register value or TRAPB or EXCB. */
666 if (!(fn11
& QUAL_U
)) {
667 /* Note that QUAL_U == QUAL_V, so ignore either. */
668 ignore
|= FPCR_UNF
| FPCR_IOV
;
670 if (!(fn11
& QUAL_I
)) {
673 ign
= tcg_constant_i32(ignore
);
675 /* ??? Pass in the regno of the destination so that the helper can
676 set EXC_MASK, which contains a bitmask of destination registers
677 that have caused arithmetic traps. A simple userspace emulation
678 does not require this. We do need it for a guest kernel's entArith,
679 or if we were to do something clever with imprecise exceptions. */
680 reg
= tcg_constant_i32(rc
+ 32);
682 gen_helper_fp_exc_raise_s(cpu_env
, ign
, reg
);
684 gen_helper_fp_exc_raise(cpu_env
, ign
, reg
);
static void gen_cvtlq(TCGv vc, TCGv vb)
{
    TCGv tmp = tcg_temp_new();

    /* The arithmetic right shift here, plus the sign-extended mask below
       yields a sign-extended result without an explicit ext32s_i64.  */
    tcg_gen_shri_i64(tmp, vb, 29);
    tcg_gen_sari_i64(vc, vb, 32);
    tcg_gen_deposit_i64(vc, vc, tmp, 0, 30);
}
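/*
 * Worked example (illustrative): a longword held in an FP register uses the
 * spread-out register format: integer bits <31:30> sit in register bits
 * <63:62> and integer bits <29:0> sit in register bits <58:29>.  The shift
 * right by 29 brings the low 30 bits back to <29:0>, the arithmetic shift
 * right by 32 brings the top two bits down to <31:30> with sign extension,
 * and the deposit merges the two, giving the sign-extended 64-bit result.
 */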
699 static void gen_ieee_arith2(DisasContext
*ctx
,
700 void (*helper
)(TCGv
, TCGv_ptr
, TCGv
),
701 int rb
, int rc
, int fn11
)
705 gen_qual_roundmode(ctx
, fn11
);
706 gen_qual_flushzero(ctx
, fn11
);
708 vb
= gen_ieee_input(ctx
, rb
, fn11
, 0);
709 helper(dest_fpr(ctx
, rc
), cpu_env
, vb
);
711 gen_fp_exc_raise(rc
, fn11
);
714 #define IEEE_ARITH2(name) \
715 static inline void glue(gen_, name)(DisasContext *ctx, \
716 int rb, int rc, int fn11) \
718 gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11); \
725 static void gen_cvttq(DisasContext
*ctx
, int rb
, int rc
, int fn11
)
729 /* No need to set flushzero, since we have an integer output. */
730 vb
= gen_ieee_input(ctx
, rb
, fn11
, 0);
731 vc
= dest_fpr(ctx
, rc
);
733 /* Almost all integer conversions use cropped rounding;
734 special case that. */
735 if ((fn11
& QUAL_RM_MASK
) == QUAL_RM_C
) {
736 gen_helper_cvttq_c(vc
, cpu_env
, vb
);
738 gen_qual_roundmode(ctx
, fn11
);
739 gen_helper_cvttq(vc
, cpu_env
, vb
);
741 gen_fp_exc_raise(rc
, fn11
);
744 static void gen_ieee_intcvt(DisasContext
*ctx
,
745 void (*helper
)(TCGv
, TCGv_ptr
, TCGv
),
746 int rb
, int rc
, int fn11
)
750 gen_qual_roundmode(ctx
, fn11
);
751 vb
= load_fpr(ctx
, rb
);
752 vc
= dest_fpr(ctx
, rc
);
754 /* The only exception that can be raised by integer conversion
755 is inexact. Thus we only need to worry about exceptions when
756 inexact handling is requested. */
758 helper(vc
, cpu_env
, vb
);
759 gen_fp_exc_raise(rc
, fn11
);
761 helper(vc
, cpu_env
, vb
);
765 #define IEEE_INTCVT(name) \
766 static inline void glue(gen_, name)(DisasContext *ctx, \
767 int rb, int rc, int fn11) \
769 gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11); \
774 static void gen_cpy_mask(TCGv vc
, TCGv va
, TCGv vb
, bool inv_a
, uint64_t mask
)
776 TCGv vmask
= tcg_constant_i64(mask
);
777 TCGv tmp
= tcg_temp_new_i64();
780 tcg_gen_andc_i64(tmp
, vmask
, va
);
782 tcg_gen_and_i64(tmp
, va
, vmask
);
785 tcg_gen_andc_i64(vc
, vb
, vmask
);
786 tcg_gen_or_i64(vc
, vc
, tmp
);
789 static void gen_ieee_arith3(DisasContext
*ctx
,
790 void (*helper
)(TCGv
, TCGv_ptr
, TCGv
, TCGv
),
791 int ra
, int rb
, int rc
, int fn11
)
795 gen_qual_roundmode(ctx
, fn11
);
796 gen_qual_flushzero(ctx
, fn11
);
798 va
= gen_ieee_input(ctx
, ra
, fn11
, 0);
799 vb
= gen_ieee_input(ctx
, rb
, fn11
, 0);
800 vc
= dest_fpr(ctx
, rc
);
801 helper(vc
, cpu_env
, va
, vb
);
803 gen_fp_exc_raise(rc
, fn11
);
806 #define IEEE_ARITH3(name) \
807 static inline void glue(gen_, name)(DisasContext *ctx, \
808 int ra, int rb, int rc, int fn11) \
810 gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11); \
821 static void gen_ieee_compare(DisasContext
*ctx
,
822 void (*helper
)(TCGv
, TCGv_ptr
, TCGv
, TCGv
),
823 int ra
, int rb
, int rc
, int fn11
)
827 va
= gen_ieee_input(ctx
, ra
, fn11
, 1);
828 vb
= gen_ieee_input(ctx
, rb
, fn11
, 1);
829 vc
= dest_fpr(ctx
, rc
);
830 helper(vc
, cpu_env
, va
, vb
);
832 gen_fp_exc_raise(rc
, fn11
);
835 #define IEEE_CMP3(name) \
836 static inline void glue(gen_, name)(DisasContext *ctx, \
837 int ra, int rb, int rc, int fn11) \
839 gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11); \
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1) {
            mask |= 0xffull << (i * 8);
        }
    }
    return mask;
}

/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}
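/*
 * Worked examples (illustrative): zapnot_mask(0x01) == 0x00000000000000ff,
 * zapnot_mask(0x03) == 0x000000000000ffff, zapnot_mask(0x0f) ==
 * 0x00000000ffffffff and zapnot_mask(0xff) == ~0ull, which is why those
 * literals are special-cased above as 8-, 16- and 32-bit zero-extensions
 * or a plain move rather than an explicit AND.
 */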
886 /* EXTWH, EXTLH, EXTQH */
887 static void gen_ext_h(DisasContext
*ctx
, TCGv vc
, TCGv va
, int rb
, bool islit
,
888 uint8_t lit
, uint8_t byte_mask
)
891 int pos
= (64 - lit
* 8) & 0x3f;
892 int len
= cto32(byte_mask
) * 8;
894 tcg_gen_deposit_z_i64(vc
, va
, pos
, len
- pos
);
896 tcg_gen_movi_i64(vc
, 0);
899 TCGv tmp
= tcg_temp_new();
900 tcg_gen_shli_i64(tmp
, load_gpr(ctx
, rb
), 3);
901 tcg_gen_neg_i64(tmp
, tmp
);
902 tcg_gen_andi_i64(tmp
, tmp
, 0x3f);
903 tcg_gen_shl_i64(vc
, va
, tmp
);
905 gen_zapnoti(vc
, vc
, byte_mask
);
908 /* EXTBL, EXTWL, EXTLL, EXTQL */
909 static void gen_ext_l(DisasContext
*ctx
, TCGv vc
, TCGv va
, int rb
, bool islit
,
910 uint8_t lit
, uint8_t byte_mask
)
913 int pos
= (lit
& 7) * 8;
914 int len
= cto32(byte_mask
) * 8;
915 if (pos
+ len
>= 64) {
918 tcg_gen_extract_i64(vc
, va
, pos
, len
);
920 TCGv tmp
= tcg_temp_new();
921 tcg_gen_andi_i64(tmp
, load_gpr(ctx
, rb
), 7);
922 tcg_gen_shli_i64(tmp
, tmp
, 3);
923 tcg_gen_shr_i64(vc
, va
, tmp
);
924 gen_zapnoti(vc
, vc
, byte_mask
);
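        /*
         * Worked example (illustrative): EXTBL (byte_mask 0x01) with a
         * register shift amount computes
         * vc = zapnot(va >> ((rb & 7) * 8), 0x01), i.e. it right-shifts by
         * the byte offset held in rb<2:0> and keeps only the low byte; the
         * immediate form above folds the shift and the zap into a single
         * extract when the field does not run off the end of the quadword.
         */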
928 /* INSWH, INSLH, INSQH */
929 static void gen_ins_h(DisasContext
*ctx
, TCGv vc
, TCGv va
, int rb
, bool islit
,
930 uint8_t lit
, uint8_t byte_mask
)
933 int pos
= 64 - (lit
& 7) * 8;
934 int len
= cto32(byte_mask
) * 8;
936 tcg_gen_extract_i64(vc
, va
, pos
, len
- pos
);
938 tcg_gen_movi_i64(vc
, 0);
941 TCGv tmp
= tcg_temp_new();
942 TCGv shift
= tcg_temp_new();
        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, va, byte_mask);

        /* If (B & 7) == 0, we need to shift by 64 and leave a zero.  Do this
           portably by splitting the shift into two parts: shift_count-1 and 1.
           Arrange for the -1 by using ones-complement instead of
           twos-complement in the negation: ~(B * 8) & 63.  */
        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);

        tcg_gen_shr_i64(vc, tmp, shift);
        tcg_gen_shri_i64(vc, vc, 1);
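        /*
         * Worked example (illustrative): for INSQH with rb<2:0> == 0 the
         * field inserted into the high part is empty, so the result must be
         * zero.  The required shift count is 64, which a single
         * tcg_gen_shr_i64 cannot express; splitting it as
         * (~(B * 8) & 63) == 63 followed by a constant shift of 1 gives the
         * same total of 64 and portably produces the zero.  For
         * rb<2:0> == 3 the two shifts total 39 + 1 = 40, matching the
         * architectural 64 - 24 right shift.
         */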
964 /* INSBL, INSWL, INSLL, INSQL */
965 static void gen_ins_l(DisasContext
*ctx
, TCGv vc
, TCGv va
, int rb
, bool islit
,
966 uint8_t lit
, uint8_t byte_mask
)
969 int pos
= (lit
& 7) * 8;
970 int len
= cto32(byte_mask
) * 8;
971 if (pos
+ len
> 64) {
974 tcg_gen_deposit_z_i64(vc
, va
, pos
, len
);
976 TCGv tmp
= tcg_temp_new();
977 TCGv shift
= tcg_temp_new();
        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
983 gen_zapnoti(tmp
, va
, byte_mask
);
985 tcg_gen_andi_i64(shift
, load_gpr(ctx
, rb
), 7);
986 tcg_gen_shli_i64(shift
, shift
, 3);
987 tcg_gen_shl_i64(vc
, tmp
, shift
);
991 /* MSKWH, MSKLH, MSKQH */
992 static void gen_msk_h(DisasContext
*ctx
, TCGv vc
, TCGv va
, int rb
, bool islit
,
993 uint8_t lit
, uint8_t byte_mask
)
996 gen_zapnoti(vc
, va
, ~((byte_mask
<< (lit
& 7)) >> 8));
998 TCGv shift
= tcg_temp_new();
999 TCGv mask
= tcg_temp_new();
1001 /* The instruction description is as above, where the byte_mask
1002 is shifted left, and then we extract bits <15:8>. This can be
1003 emulated with a right-shift on the expanded byte mask. This
1004 requires extra care because for an input <2:0> == 0 we need a
1005 shift of 64 bits in order to generate a zero. This is done by
1006 splitting the shift into two parts, the variable shift - 1
1007 followed by a constant 1 shift. The code we expand below is
1008 equivalent to ~(B * 8) & 63. */
1010 tcg_gen_shli_i64(shift
, load_gpr(ctx
, rb
), 3);
1011 tcg_gen_not_i64(shift
, shift
);
1012 tcg_gen_andi_i64(shift
, shift
, 0x3f);
1013 tcg_gen_movi_i64(mask
, zapnot_mask (byte_mask
));
1014 tcg_gen_shr_i64(mask
, mask
, shift
);
1015 tcg_gen_shri_i64(mask
, mask
, 1);
1017 tcg_gen_andc_i64(vc
, va
, mask
);
1021 /* MSKBL, MSKWL, MSKLL, MSKQL */
1022 static void gen_msk_l(DisasContext
*ctx
, TCGv vc
, TCGv va
, int rb
, bool islit
,
1023 uint8_t lit
, uint8_t byte_mask
)
1026 gen_zapnoti(vc
, va
, ~(byte_mask
<< (lit
& 7)));
1028 TCGv shift
= tcg_temp_new();
1029 TCGv mask
= tcg_temp_new();
1031 tcg_gen_andi_i64(shift
, load_gpr(ctx
, rb
), 7);
1032 tcg_gen_shli_i64(shift
, shift
, 3);
1033 tcg_gen_movi_i64(mask
, zapnot_mask(byte_mask
));
1034 tcg_gen_shl_i64(mask
, mask
, shift
);
1036 tcg_gen_andc_i64(vc
, va
, mask
);
1040 static void gen_rx(DisasContext
*ctx
, int ra
, int set
)
1043 ld_flag_byte(ctx
->ir
[ra
], ENV_FLAG_RX_SHIFT
);
1046 st_flag_byte(tcg_constant_i64(set
), ENV_FLAG_RX_SHIFT
);
1049 static DisasJumpType
gen_call_pal(DisasContext
*ctx
, int palcode
)
1051 /* We're emulating OSF/1 PALcode. Many of these are trivial access
1052 to internal cpu registers. */
1054 /* Unprivileged PAL call */
1055 if (palcode
>= 0x80 && palcode
< 0xC0) {
1059 /* No-op inside QEMU. */
1063 tcg_gen_ld_i64(ctx
->ir
[IR_V0
], cpu_env
,
1064 offsetof(CPUAlphaState
, unique
));
1068 tcg_gen_st_i64(ctx
->ir
[IR_A0
], cpu_env
,
1069 offsetof(CPUAlphaState
, unique
));
1078 #ifndef CONFIG_USER_ONLY
1079 /* Privileged PAL code */
1080 if (palcode
< 0x40 && (ctx
->tbflags
& ENV_FLAG_PS_USER
) == 0) {
1084 /* No-op inside QEMU. */
1088 /* No-op inside QEMU. */
1092 tcg_gen_st_i64(ctx
->ir
[IR_A0
], cpu_env
,
1093 offsetof(CPUAlphaState
, vptptr
));
1097 tcg_gen_st_i64(ctx
->ir
[IR_A0
], cpu_env
,
1098 offsetof(CPUAlphaState
, sysval
));
1102 tcg_gen_ld_i64(ctx
->ir
[IR_V0
], cpu_env
,
1103 offsetof(CPUAlphaState
, sysval
));
1108 /* Note that we already know we're in kernel mode, so we know
1109 that PS only contains the 3 IPL bits. */
1110 ld_flag_byte(ctx
->ir
[IR_V0
], ENV_FLAG_PS_SHIFT
);
1112 /* But make sure and store only the 3 IPL bits from the user. */
1114 TCGv tmp
= tcg_temp_new();
1115 tcg_gen_andi_i64(tmp
, ctx
->ir
[IR_A0
], PS_INT_MASK
);
1116 st_flag_byte(tmp
, ENV_FLAG_PS_SHIFT
);
1119 /* Allow interrupts to be recognized right away. */
1120 tcg_gen_movi_i64(cpu_pc
, ctx
->base
.pc_next
);
1121 return DISAS_PC_UPDATED_NOCHAIN
;
1125 ld_flag_byte(ctx
->ir
[IR_V0
], ENV_FLAG_PS_SHIFT
);
1130 tcg_gen_st_i64(ctx
->ir
[IR_A0
], cpu_env
,
1131 offsetof(CPUAlphaState
, usp
));
1135 tcg_gen_ld_i64(ctx
->ir
[IR_V0
], cpu_env
,
1136 offsetof(CPUAlphaState
, usp
));
1140 tcg_gen_ld32s_i64(ctx
->ir
[IR_V0
], cpu_env
,
1141 -offsetof(AlphaCPU
, env
) + offsetof(CPUState
, cpu_index
));
1146 tcg_gen_st_i32(tcg_constant_i32(1), cpu_env
,
1147 -offsetof(AlphaCPU
, env
) +
1148 offsetof(CPUState
, halted
));
1149 tcg_gen_movi_i64(ctx
->ir
[IR_V0
], 0);
1150 return gen_excp(ctx
, EXCP_HALTED
, 0);
1159 return gen_invalid(ctx
);
1162 #ifdef CONFIG_USER_ONLY
1163 return gen_excp(ctx
, EXCP_CALL_PAL
, palcode
);
1166 TCGv tmp
= tcg_temp_new();
1167 uint64_t exc_addr
= ctx
->base
.pc_next
;
1168 uint64_t entry
= ctx
->palbr
;
1170 if (ctx
->tbflags
& ENV_FLAG_PAL_MODE
) {
1173 tcg_gen_movi_i64(tmp
, 1);
1174 st_flag_byte(tmp
, ENV_FLAG_PAL_SHIFT
);
1177 tcg_gen_movi_i64(tmp
, exc_addr
);
1178 tcg_gen_st_i64(tmp
, cpu_env
, offsetof(CPUAlphaState
, exc_addr
));
1180 entry
+= (palcode
& 0x80
1181 ? 0x2000 + (palcode
- 0x80) * 64
1182 : 0x1000 + palcode
* 64);
1184 tcg_gen_movi_i64(cpu_pc
, entry
);
1185 return DISAS_PC_UPDATED
;
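    /*
     * Worked example (illustrative): entry points in the PALcode image are
     * spaced 64 bytes apart, with privileged calls starting at offset 0x1000
     * and unprivileged calls (0x80 and up) at offset 0x2000.  So CALL_PAL
     * 0x83 vectors to palbr + 0x2000 + (0x83 - 0x80) * 64 = palbr + 0x20c0,
     * while CALL_PAL 0x02 vectors to palbr + 0x1000 + 0x80.
     */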
1190 #ifndef CONFIG_USER_ONLY
1192 #define PR_LONG 0x200000
1194 static int cpu_pr_data(int pr
)
1197 case 2: return offsetof(CPUAlphaState
, pcc_ofs
) | PR_LONG
;
1198 case 3: return offsetof(CPUAlphaState
, trap_arg0
);
1199 case 4: return offsetof(CPUAlphaState
, trap_arg1
);
1200 case 5: return offsetof(CPUAlphaState
, trap_arg2
);
1201 case 6: return offsetof(CPUAlphaState
, exc_addr
);
1202 case 7: return offsetof(CPUAlphaState
, palbr
);
1203 case 8: return offsetof(CPUAlphaState
, ptbr
);
1204 case 9: return offsetof(CPUAlphaState
, vptptr
);
1205 case 10: return offsetof(CPUAlphaState
, unique
);
1206 case 11: return offsetof(CPUAlphaState
, sysval
);
1207 case 12: return offsetof(CPUAlphaState
, usp
);
1210 return offsetof(CPUAlphaState
, scratch
[pr
- 40]);
1213 return offsetof(CPUAlphaState
, alarm_expire
);
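    /*
     * Note (added commentary): cpu_pr_data maps an IPR number onto an offset
     * within CPUAlphaState; registers that are only 32 bits wide have the
     * PR_LONG bit (0x200000) OR'ed into the offset.  gen_mfpr and gen_mtpr
     * strip that bit again and switch between 32-bit (ld32s/st32) and 64-bit
     * (ld/st) accesses accordingly, e.g. pcc_ofs above is a 32-bit field.
     */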
1218 static DisasJumpType
gen_mfpr(DisasContext
*ctx
, TCGv va
, int regno
)
1220 void (*helper
)(TCGv
);
1225 /* Accessing the "non-shadow" general registers. */
1226 regno
= regno
== 39 ? 25 : regno
- 32 + 8;
1227 tcg_gen_mov_i64(va
, cpu_std_ir
[regno
]);
1230 case 250: /* WALLTIME */
1231 helper
= gen_helper_get_walltime
;
1233 case 249: /* VMTIME */
1234 helper
= gen_helper_get_vmtime
;
1236 if (tb_cflags(ctx
->base
.tb
) & CF_USE_ICOUNT
) {
1239 return DISAS_PC_STALE
;
1246 ld_flag_byte(va
, ENV_FLAG_PS_SHIFT
);
1249 ld_flag_byte(va
, ENV_FLAG_FEN_SHIFT
);
1253 /* The basic registers are data only, and unknown registers
1254 are read-zero, write-ignore. */
1255 data
= cpu_pr_data(regno
);
1257 tcg_gen_movi_i64(va
, 0);
1258 } else if (data
& PR_LONG
) {
1259 tcg_gen_ld32s_i64(va
, cpu_env
, data
& ~PR_LONG
);
1261 tcg_gen_ld_i64(va
, cpu_env
, data
);
1269 static DisasJumpType
gen_mtpr(DisasContext
*ctx
, TCGv vb
, int regno
)
1272 DisasJumpType ret
= DISAS_NEXT
;
1277 gen_helper_tbia(cpu_env
);
1282 gen_helper_tbis(cpu_env
, vb
);
1287 tcg_gen_st_i32(tcg_constant_i32(1), cpu_env
,
1288 -offsetof(AlphaCPU
, env
) + offsetof(CPUState
, halted
));
1289 return gen_excp(ctx
, EXCP_HALTED
, 0);
1293 gen_helper_halt(vb
);
1294 return DISAS_PC_STALE
;
1298 if (tb_cflags(ctx
->base
.tb
) & CF_USE_ICOUNT
) {
1300 ret
= DISAS_PC_STALE
;
1302 gen_helper_set_alarm(cpu_env
, vb
);
1307 tcg_gen_st_i64(vb
, cpu_env
, offsetof(CPUAlphaState
, palbr
));
1308 /* Changing the PAL base register implies un-chaining all of the TBs
1309 that ended with a CALL_PAL. Since the base register usually only
1310 changes during boot, flushing everything works well. */
1311 gen_helper_tb_flush(cpu_env
);
1312 return DISAS_PC_STALE
;
1315 /* Accessing the "non-shadow" general registers. */
1316 regno
= regno
== 39 ? 25 : regno
- 32 + 8;
1317 tcg_gen_mov_i64(cpu_std_ir
[regno
], vb
);
1321 st_flag_byte(vb
, ENV_FLAG_PS_SHIFT
);
1324 st_flag_byte(vb
, ENV_FLAG_FEN_SHIFT
);
1328 /* The basic registers are data only, and unknown registers
1329 are read-zero, write-ignore. */
1330 data
= cpu_pr_data(regno
);
1332 if (data
& PR_LONG
) {
1333 tcg_gen_st32_i64(vb
, cpu_env
, data
& ~PR_LONG
);
1335 tcg_gen_st_i64(vb
, cpu_env
, data
);
1343 #endif /* !USER_ONLY*/
1345 #define REQUIRE_NO_LIT \
1352 #define REQUIRE_AMASK(FLAG) \
1354 if ((ctx->amask & AMASK_##FLAG) == 0) { \
1359 #define REQUIRE_TB_FLAG(FLAG) \
1361 if ((ctx->tbflags & (FLAG)) == 0) { \
1366 #define REQUIRE_REG_31(WHICH) \
1368 if (WHICH != 31) { \
1373 #define REQUIRE_FEN \
1375 if (!(ctx->tbflags & ENV_FLAG_FEN)) { \
1380 static DisasJumpType
translate_one(DisasContext
*ctx
, uint32_t insn
)
1382 int32_t disp21
, disp16
, disp12
__attribute__((unused
));
1384 uint8_t opc
, ra
, rb
, rc
, fpfn
, fn7
, lit
;
1385 bool islit
, real_islit
;
1386 TCGv va
, vb
, vc
, tmp
, tmp2
;
1390 /* Decode all instruction fields */
1391 opc
= extract32(insn
, 26, 6);
1392 ra
= extract32(insn
, 21, 5);
1393 rb
= extract32(insn
, 16, 5);
1394 rc
= extract32(insn
, 0, 5);
1395 real_islit
= islit
= extract32(insn
, 12, 1);
1396 lit
= extract32(insn
, 13, 8);
1398 disp21
= sextract32(insn
, 0, 21);
1399 disp16
= sextract32(insn
, 0, 16);
1400 disp12
= sextract32(insn
, 0, 12);
1402 fn11
= extract32(insn
, 5, 11);
1403 fpfn
= extract32(insn
, 5, 6);
1404 fn7
= extract32(insn
, 5, 7);
1406 if (rb
== 31 && !islit
) {
1415 ret
= gen_call_pal(ctx
, insn
& 0x03ffffff);
1441 disp16
= (uint32_t)disp16
<< 16;
1445 va
= dest_gpr(ctx
, ra
);
1446 /* It's worth special-casing immediate loads. */
1448 tcg_gen_movi_i64(va
, disp16
);
1450 tcg_gen_addi_i64(va
, load_gpr(ctx
, rb
), disp16
);
1457 gen_load_int(ctx
, ra
, rb
, disp16
, MO_UB
, 0, 0);
1461 gen_load_int(ctx
, ra
, rb
, disp16
, MO_LEUQ
, 1, 0);
1466 gen_load_int(ctx
, ra
, rb
, disp16
, MO_LEUW
, 0, 0);
1471 gen_store_int(ctx
, ra
, rb
, disp16
, MO_LEUW
, 0);
1476 gen_store_int(ctx
, ra
, rb
, disp16
, MO_UB
, 0);
1480 gen_store_int(ctx
, ra
, rb
, disp16
, MO_LEUQ
, 1);
1484 vc
= dest_gpr(ctx
, rc
);
1485 vb
= load_gpr_lit(ctx
, rb
, lit
, islit
);
1489 /* Special case ADDL as SEXTL. */
1490 tcg_gen_ext32s_i64(vc
, vb
);
1494 /* Special case SUBQ as NEGQ. */
1495 tcg_gen_neg_i64(vc
, vb
);
1500 va
= load_gpr(ctx
, ra
);
1504 tcg_gen_add_i64(vc
, va
, vb
);
1505 tcg_gen_ext32s_i64(vc
, vc
);
1509 tmp
= tcg_temp_new();
1510 tcg_gen_shli_i64(tmp
, va
, 2);
1511 tcg_gen_add_i64(tmp
, tmp
, vb
);
1512 tcg_gen_ext32s_i64(vc
, tmp
);
1516 tcg_gen_sub_i64(vc
, va
, vb
);
1517 tcg_gen_ext32s_i64(vc
, vc
);
1521 tmp
= tcg_temp_new();
1522 tcg_gen_shli_i64(tmp
, va
, 2);
1523 tcg_gen_sub_i64(tmp
, tmp
, vb
);
1524 tcg_gen_ext32s_i64(vc
, tmp
);
1529 /* Special case 0 >= X as X == 0. */
1530 gen_helper_cmpbe0(vc
, vb
);
1532 gen_helper_cmpbge(vc
, va
, vb
);
1537 tmp
= tcg_temp_new();
1538 tcg_gen_shli_i64(tmp
, va
, 3);
1539 tcg_gen_add_i64(tmp
, tmp
, vb
);
1540 tcg_gen_ext32s_i64(vc
, tmp
);
1544 tmp
= tcg_temp_new();
1545 tcg_gen_shli_i64(tmp
, va
, 3);
1546 tcg_gen_sub_i64(tmp
, tmp
, vb
);
1547 tcg_gen_ext32s_i64(vc
, tmp
);
1551 tcg_gen_setcond_i64(TCG_COND_LTU
, vc
, va
, vb
);
1555 tcg_gen_add_i64(vc
, va
, vb
);
1559 tmp
= tcg_temp_new();
1560 tcg_gen_shli_i64(tmp
, va
, 2);
1561 tcg_gen_add_i64(vc
, tmp
, vb
);
1565 tcg_gen_sub_i64(vc
, va
, vb
);
1569 tmp
= tcg_temp_new();
1570 tcg_gen_shli_i64(tmp
, va
, 2);
1571 tcg_gen_sub_i64(vc
, tmp
, vb
);
1575 tcg_gen_setcond_i64(TCG_COND_EQ
, vc
, va
, vb
);
1579 tmp
= tcg_temp_new();
1580 tcg_gen_shli_i64(tmp
, va
, 3);
1581 tcg_gen_add_i64(vc
, tmp
, vb
);
1585 tmp
= tcg_temp_new();
1586 tcg_gen_shli_i64(tmp
, va
, 3);
1587 tcg_gen_sub_i64(vc
, tmp
, vb
);
1591 tcg_gen_setcond_i64(TCG_COND_LEU
, vc
, va
, vb
);
1595 tmp
= tcg_temp_new();
1596 tcg_gen_ext32s_i64(tmp
, va
);
1597 tcg_gen_ext32s_i64(vc
, vb
);
1598 tcg_gen_add_i64(tmp
, tmp
, vc
);
1599 tcg_gen_ext32s_i64(vc
, tmp
);
1600 gen_helper_check_overflow(cpu_env
, vc
, tmp
);
1604 tmp
= tcg_temp_new();
1605 tcg_gen_ext32s_i64(tmp
, va
);
1606 tcg_gen_ext32s_i64(vc
, vb
);
1607 tcg_gen_sub_i64(tmp
, tmp
, vc
);
1608 tcg_gen_ext32s_i64(vc
, tmp
);
1609 gen_helper_check_overflow(cpu_env
, vc
, tmp
);
1613 tcg_gen_setcond_i64(TCG_COND_LT
, vc
, va
, vb
);
1617 tmp
= tcg_temp_new();
1618 tmp2
= tcg_temp_new();
1619 tcg_gen_eqv_i64(tmp
, va
, vb
);
1620 tcg_gen_mov_i64(tmp2
, va
);
1621 tcg_gen_add_i64(vc
, va
, vb
);
1622 tcg_gen_xor_i64(tmp2
, tmp2
, vc
);
1623 tcg_gen_and_i64(tmp
, tmp
, tmp2
);
1624 tcg_gen_shri_i64(tmp
, tmp
, 63);
1625 tcg_gen_movi_i64(tmp2
, 0);
1626 gen_helper_check_overflow(cpu_env
, tmp
, tmp2
);
1630 tmp
= tcg_temp_new();
1631 tmp2
= tcg_temp_new();
1632 tcg_gen_xor_i64(tmp
, va
, vb
);
1633 tcg_gen_mov_i64(tmp2
, va
);
1634 tcg_gen_sub_i64(vc
, va
, vb
);
1635 tcg_gen_xor_i64(tmp2
, tmp2
, vc
);
1636 tcg_gen_and_i64(tmp
, tmp
, tmp2
);
1637 tcg_gen_shri_i64(tmp
, tmp
, 63);
1638 tcg_gen_movi_i64(tmp2
, 0);
1639 gen_helper_check_overflow(cpu_env
, tmp
, tmp2
);
1643 tcg_gen_setcond_i64(TCG_COND_LE
, vc
, va
, vb
);
1653 /* Special case BIS as NOP. */
1657 /* Special case BIS as MOV. */
1658 vc
= dest_gpr(ctx
, rc
);
1660 tcg_gen_movi_i64(vc
, lit
);
1662 tcg_gen_mov_i64(vc
, load_gpr(ctx
, rb
));
1668 vc
= dest_gpr(ctx
, rc
);
1669 vb
= load_gpr_lit(ctx
, rb
, lit
, islit
);
1671 if (fn7
== 0x28 && ra
== 31) {
1672 /* Special case ORNOT as NOT. */
1673 tcg_gen_not_i64(vc
, vb
);
1677 va
= load_gpr(ctx
, ra
);
1681 tcg_gen_and_i64(vc
, va
, vb
);
1685 tcg_gen_andc_i64(vc
, va
, vb
);
1689 tmp
= tcg_temp_new();
1690 tcg_gen_andi_i64(tmp
, va
, 1);
1691 tcg_gen_movcond_i64(TCG_COND_NE
, vc
, tmp
, load_zero(ctx
),
1692 vb
, load_gpr(ctx
, rc
));
1696 tmp
= tcg_temp_new();
1697 tcg_gen_andi_i64(tmp
, va
, 1);
1698 tcg_gen_movcond_i64(TCG_COND_EQ
, vc
, tmp
, load_zero(ctx
),
1699 vb
, load_gpr(ctx
, rc
));
1703 tcg_gen_or_i64(vc
, va
, vb
);
1707 tcg_gen_movcond_i64(TCG_COND_EQ
, vc
, va
, load_zero(ctx
),
1708 vb
, load_gpr(ctx
, rc
));
1712 tcg_gen_movcond_i64(TCG_COND_NE
, vc
, va
, load_zero(ctx
),
1713 vb
, load_gpr(ctx
, rc
));
1717 tcg_gen_orc_i64(vc
, va
, vb
);
1721 tcg_gen_xor_i64(vc
, va
, vb
);
1725 tcg_gen_movcond_i64(TCG_COND_LT
, vc
, va
, load_zero(ctx
),
1726 vb
, load_gpr(ctx
, rc
));
1730 tcg_gen_movcond_i64(TCG_COND_GE
, vc
, va
, load_zero(ctx
),
1731 vb
, load_gpr(ctx
, rc
));
1735 tcg_gen_eqv_i64(vc
, va
, vb
);
1740 tcg_gen_andi_i64(vc
, vb
, ~ctx
->amask
);
1744 tcg_gen_movcond_i64(TCG_COND_LE
, vc
, va
, load_zero(ctx
),
1745 vb
, load_gpr(ctx
, rc
));
1749 tcg_gen_movcond_i64(TCG_COND_GT
, vc
, va
, load_zero(ctx
),
1750 vb
, load_gpr(ctx
, rc
));
1755 tcg_gen_movi_i64(vc
, ctx
->implver
);
1763 vc
= dest_gpr(ctx
, rc
);
1764 va
= load_gpr(ctx
, ra
);
1768 gen_msk_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x01);
1772 gen_ext_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x01);
1776 gen_ins_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x01);
1780 gen_msk_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1784 gen_ext_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1788 gen_ins_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1792 gen_msk_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1796 gen_ext_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1800 gen_ins_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1805 gen_zapnoti(vc
, va
, ~lit
);
1807 gen_helper_zap(vc
, va
, load_gpr(ctx
, rb
));
1813 gen_zapnoti(vc
, va
, lit
);
1815 gen_helper_zapnot(vc
, va
, load_gpr(ctx
, rb
));
1820 gen_msk_l(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1825 tcg_gen_shri_i64(vc
, va
, lit
& 0x3f);
1827 tmp
= tcg_temp_new();
1828 vb
= load_gpr(ctx
, rb
);
1829 tcg_gen_andi_i64(tmp
, vb
, 0x3f);
1830 tcg_gen_shr_i64(vc
, va
, tmp
);
1835 gen_ext_l(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1840 tcg_gen_shli_i64(vc
, va
, lit
& 0x3f);
1842 tmp
= tcg_temp_new();
1843 vb
= load_gpr(ctx
, rb
);
1844 tcg_gen_andi_i64(tmp
, vb
, 0x3f);
1845 tcg_gen_shl_i64(vc
, va
, tmp
);
1850 gen_ins_l(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1855 tcg_gen_sari_i64(vc
, va
, lit
& 0x3f);
1857 tmp
= tcg_temp_new();
1858 vb
= load_gpr(ctx
, rb
);
1859 tcg_gen_andi_i64(tmp
, vb
, 0x3f);
1860 tcg_gen_sar_i64(vc
, va
, tmp
);
1865 gen_msk_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1869 gen_ins_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1873 gen_ext_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1877 gen_msk_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1881 gen_ins_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1885 gen_ext_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1889 gen_msk_h(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1893 gen_ins_h(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1897 gen_ext_h(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1905 vc
= dest_gpr(ctx
, rc
);
1906 vb
= load_gpr_lit(ctx
, rb
, lit
, islit
);
1907 va
= load_gpr(ctx
, ra
);
1911 tcg_gen_mul_i64(vc
, va
, vb
);
1912 tcg_gen_ext32s_i64(vc
, vc
);
1916 tcg_gen_mul_i64(vc
, va
, vb
);
1920 tmp
= tcg_temp_new();
1921 tcg_gen_mulu2_i64(tmp
, vc
, va
, vb
);
1925 tmp
= tcg_temp_new();
1926 tcg_gen_ext32s_i64(tmp
, va
);
1927 tcg_gen_ext32s_i64(vc
, vb
);
1928 tcg_gen_mul_i64(tmp
, tmp
, vc
);
1929 tcg_gen_ext32s_i64(vc
, tmp
);
1930 gen_helper_check_overflow(cpu_env
, vc
, tmp
);
1934 tmp
= tcg_temp_new();
1935 tmp2
= tcg_temp_new();
1936 tcg_gen_muls2_i64(vc
, tmp
, va
, vb
);
1937 tcg_gen_sari_i64(tmp2
, vc
, 63);
1938 gen_helper_check_overflow(cpu_env
, tmp
, tmp2
);
1947 vc
= dest_fpr(ctx
, rc
);
1948 switch (fpfn
) { /* fn11 & 0x3F */
1953 t32
= tcg_temp_new_i32();
1954 va
= load_gpr(ctx
, ra
);
1955 tcg_gen_extrl_i64_i32(t32
, va
);
1956 gen_helper_memory_to_s(vc
, t32
);
1962 vb
= load_fpr(ctx
, rb
);
1963 gen_helper_sqrtf(vc
, cpu_env
, vb
);
1969 gen_sqrts(ctx
, rb
, rc
, fn11
);
1975 t32
= tcg_temp_new_i32();
1976 va
= load_gpr(ctx
, ra
);
1977 tcg_gen_extrl_i64_i32(t32
, va
);
1978 gen_helper_memory_to_f(vc
, t32
);
1984 va
= load_gpr(ctx
, ra
);
1985 tcg_gen_mov_i64(vc
, va
);
1991 vb
= load_fpr(ctx
, rb
);
1992 gen_helper_sqrtg(vc
, cpu_env
, vb
);
1998 gen_sqrtt(ctx
, rb
, rc
, fn11
);
2006 /* VAX floating point */
2007 /* XXX: rounding mode and trap are ignored (!) */
2008 vc
= dest_fpr(ctx
, rc
);
2009 vb
= load_fpr(ctx
, rb
);
2010 va
= load_fpr(ctx
, ra
);
2011 switch (fpfn
) { /* fn11 & 0x3F */
2015 gen_helper_addf(vc
, cpu_env
, va
, vb
);
2020 gen_helper_subf(vc
, cpu_env
, va
, vb
);
2025 gen_helper_mulf(vc
, cpu_env
, va
, vb
);
2030 gen_helper_divf(vc
, cpu_env
, va
, vb
);
2039 gen_helper_addg(vc
, cpu_env
, va
, vb
);
2044 gen_helper_subg(vc
, cpu_env
, va
, vb
);
2049 gen_helper_mulg(vc
, cpu_env
, va
, vb
);
2054 gen_helper_divg(vc
, cpu_env
, va
, vb
);
2059 gen_helper_cmpgeq(vc
, cpu_env
, va
, vb
);
2064 gen_helper_cmpglt(vc
, cpu_env
, va
, vb
);
2069 gen_helper_cmpgle(vc
, cpu_env
, va
, vb
);
2075 gen_helper_cvtgf(vc
, cpu_env
, vb
);
2085 gen_helper_cvtgq(vc
, cpu_env
, vb
);
2091 gen_helper_cvtqf(vc
, cpu_env
, vb
);
2097 gen_helper_cvtqg(vc
, cpu_env
, vb
);
2105 /* IEEE floating-point */
2106 switch (fpfn
) { /* fn11 & 0x3F */
2110 gen_adds(ctx
, ra
, rb
, rc
, fn11
);
2115 gen_subs(ctx
, ra
, rb
, rc
, fn11
);
2120 gen_muls(ctx
, ra
, rb
, rc
, fn11
);
2125 gen_divs(ctx
, ra
, rb
, rc
, fn11
);
2130 gen_addt(ctx
, ra
, rb
, rc
, fn11
);
2135 gen_subt(ctx
, ra
, rb
, rc
, fn11
);
2140 gen_mult(ctx
, ra
, rb
, rc
, fn11
);
2145 gen_divt(ctx
, ra
, rb
, rc
, fn11
);
2150 gen_cmptun(ctx
, ra
, rb
, rc
, fn11
);
2155 gen_cmpteq(ctx
, ra
, rb
, rc
, fn11
);
2160 gen_cmptlt(ctx
, ra
, rb
, rc
, fn11
);
2165 gen_cmptle(ctx
, ra
, rb
, rc
, fn11
);
2170 if (fn11
== 0x2AC || fn11
== 0x6AC) {
2172 gen_cvtst(ctx
, rb
, rc
, fn11
);
2175 gen_cvtts(ctx
, rb
, rc
, fn11
);
2182 gen_cvttq(ctx
, rb
, rc
, fn11
);
2188 gen_cvtqs(ctx
, rb
, rc
, fn11
);
2194 gen_cvtqt(ctx
, rb
, rc
, fn11
);
2207 vc
= dest_fpr(ctx
, rc
);
2208 vb
= load_fpr(ctx
, rb
);
2215 /* Special case CPYS as FNOP. */
2217 vc
= dest_fpr(ctx
, rc
);
2218 va
= load_fpr(ctx
, ra
);
2220 /* Special case CPYS as FMOV. */
2221 tcg_gen_mov_i64(vc
, va
);
2223 vb
= load_fpr(ctx
, rb
);
2224 gen_cpy_mask(vc
, va
, vb
, 0, 0x8000000000000000ULL
);
2231 vc
= dest_fpr(ctx
, rc
);
2232 vb
= load_fpr(ctx
, rb
);
2233 va
= load_fpr(ctx
, ra
);
2234 gen_cpy_mask(vc
, va
, vb
, 1, 0x8000000000000000ULL
);
2239 vc
= dest_fpr(ctx
, rc
);
2240 vb
= load_fpr(ctx
, rb
);
2241 va
= load_fpr(ctx
, ra
);
2242 gen_cpy_mask(vc
, va
, vb
, 0, 0xFFF0000000000000ULL
);
2247 va
= load_fpr(ctx
, ra
);
2248 gen_helper_store_fpcr(cpu_env
, va
);
2249 if (ctx
->tb_rm
== QUAL_RM_D
) {
2250 /* Re-do the copy of the rounding mode to fp_status
2251 the next time we use dynamic rounding. */
2258 va
= dest_fpr(ctx
, ra
);
2259 gen_helper_load_fpcr(va
, cpu_env
);
2264 gen_fcmov(ctx
, TCG_COND_EQ
, ra
, rb
, rc
);
2269 gen_fcmov(ctx
, TCG_COND_NE
, ra
, rb
, rc
);
2274 gen_fcmov(ctx
, TCG_COND_LT
, ra
, rb
, rc
);
2279 gen_fcmov(ctx
, TCG_COND_GE
, ra
, rb
, rc
);
2284 gen_fcmov(ctx
, TCG_COND_LE
, ra
, rb
, rc
);
2289 gen_fcmov(ctx
, TCG_COND_GT
, ra
, rb
, rc
);
2291 case 0x030: /* CVTQL */
2292 case 0x130: /* CVTQL/V */
2293 case 0x530: /* CVTQL/SV */
2296 vc
= dest_fpr(ctx
, rc
);
2297 vb
= load_fpr(ctx
, rb
);
2298 gen_helper_cvtql(vc
, cpu_env
, vb
);
2299 gen_fp_exc_raise(rc
, fn11
);
2307 switch ((uint16_t)disp16
) {
2318 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
2322 tcg_gen_mb(TCG_MO_ST_ST
| TCG_BAR_SC
);
2334 va
= dest_gpr(ctx
, ra
);
2335 if (tb_cflags(ctx
->base
.tb
) & CF_USE_ICOUNT
) {
2337 gen_helper_load_pcc(va
, cpu_env
);
2338 ret
= DISAS_PC_STALE
;
2340 gen_helper_load_pcc(va
, cpu_env
);
2368 /* HW_MFPR (PALcode) */
2369 #ifndef CONFIG_USER_ONLY
2370 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE
);
2371 va
= dest_gpr(ctx
, ra
);
2372 ret
= gen_mfpr(ctx
, va
, insn
& 0xffff);
2379 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
2380 prediction stack action, which of course we don't implement. */
2381 vb
= load_gpr(ctx
, rb
);
2382 tcg_gen_andi_i64(cpu_pc
, vb
, ~3);
2384 tcg_gen_movi_i64(ctx
->ir
[ra
], ctx
->base
.pc_next
);
2386 ret
= DISAS_PC_UPDATED
;
2390 /* HW_LD (PALcode) */
2391 #ifndef CONFIG_USER_ONLY
2392 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE
);
2394 TCGv addr
= tcg_temp_new();
2395 vb
= load_gpr(ctx
, rb
);
2396 va
= dest_gpr(ctx
, ra
);
2398 tcg_gen_addi_i64(addr
, vb
, disp12
);
2399 switch ((insn
>> 12) & 0xF) {
2401 /* Longword physical access (hw_ldl/p) */
2402 tcg_gen_qemu_ld_i64(va
, addr
, MMU_PHYS_IDX
, MO_LESL
);
2405 /* Quadword physical access (hw_ldq/p) */
2406 tcg_gen_qemu_ld_i64(va
, addr
, MMU_PHYS_IDX
, MO_LEUQ
);
2409 /* Longword physical access with lock (hw_ldl_l/p) */
2410 tcg_gen_qemu_ld_i64(va
, addr
, MMU_PHYS_IDX
, MO_LESL
);
2411 tcg_gen_mov_i64(cpu_lock_addr
, addr
);
2412 tcg_gen_mov_i64(cpu_lock_value
, va
);
2415 /* Quadword physical access with lock (hw_ldq_l/p) */
2416 tcg_gen_qemu_ld_i64(va
, addr
, MMU_PHYS_IDX
, MO_LEUQ
);
2417 tcg_gen_mov_i64(cpu_lock_addr
, addr
);
2418 tcg_gen_mov_i64(cpu_lock_value
, va
);
2421 /* Longword virtual PTE fetch (hw_ldl/v) */
2424 /* Quadword virtual PTE fetch (hw_ldq/v) */
2434 /* Longword virtual access (hw_ldl) */
2437 /* Quadword virtual access (hw_ldq) */
2440 /* Longword virtual access with protection check (hw_ldl/w) */
2441 tcg_gen_qemu_ld_i64(va
, addr
, MMU_KERNEL_IDX
, MO_LESL
);
2444 /* Quadword virtual access with protection check (hw_ldq/w) */
2445 tcg_gen_qemu_ld_i64(va
, addr
, MMU_KERNEL_IDX
, MO_LEUQ
);
2448 /* Longword virtual access with alt access mode (hw_ldl/a)*/
2451 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2454 /* Longword virtual access with alternate access mode and
2455 protection checks (hw_ldl/wa) */
2456 tcg_gen_qemu_ld_i64(va
, addr
, MMU_USER_IDX
, MO_LESL
);
2459 /* Quadword virtual access with alternate access mode and
2460 protection checks (hw_ldq/wa) */
2461 tcg_gen_qemu_ld_i64(va
, addr
, MMU_USER_IDX
, MO_LEUQ
);
2471 vc
= dest_gpr(ctx
, rc
);
2476 va
= load_fpr(ctx
, ra
);
2477 tcg_gen_mov_i64(vc
, va
);
2479 } else if (fn7
== 0x78) {
2483 t32
= tcg_temp_new_i32();
2484 va
= load_fpr(ctx
, ra
);
2485 gen_helper_s_to_memory(t32
, va
);
2486 tcg_gen_ext_i32_i64(vc
, t32
);
2490 vb
= load_gpr_lit(ctx
, rb
, lit
, islit
);
2496 tcg_gen_ext8s_i64(vc
, vb
);
2502 tcg_gen_ext16s_i64(vc
, vb
);
2509 tcg_gen_ctpop_i64(vc
, vb
);
2515 va
= load_gpr(ctx
, ra
);
2516 gen_helper_perr(vc
, va
, vb
);
2523 tcg_gen_clzi_i64(vc
, vb
, 64);
2530 tcg_gen_ctzi_i64(vc
, vb
, 64);
2537 gen_helper_unpkbw(vc
, vb
);
2544 gen_helper_unpkbl(vc
, vb
);
2551 gen_helper_pkwb(vc
, vb
);
2558 gen_helper_pklb(vc
, vb
);
2563 va
= load_gpr(ctx
, ra
);
2564 gen_helper_minsb8(vc
, va
, vb
);
2569 va
= load_gpr(ctx
, ra
);
2570 gen_helper_minsw4(vc
, va
, vb
);
2575 va
= load_gpr(ctx
, ra
);
2576 gen_helper_minub8(vc
, va
, vb
);
2581 va
= load_gpr(ctx
, ra
);
2582 gen_helper_minuw4(vc
, va
, vb
);
2587 va
= load_gpr(ctx
, ra
);
2588 gen_helper_maxub8(vc
, va
, vb
);
2593 va
= load_gpr(ctx
, ra
);
2594 gen_helper_maxuw4(vc
, va
, vb
);
2599 va
= load_gpr(ctx
, ra
);
2600 gen_helper_maxsb8(vc
, va
, vb
);
2605 va
= load_gpr(ctx
, ra
);
2606 gen_helper_maxsw4(vc
, va
, vb
);
2614 /* HW_MTPR (PALcode) */
2615 #ifndef CONFIG_USER_ONLY
2616 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE
);
2617 vb
= load_gpr(ctx
, rb
);
2618 ret
= gen_mtpr(ctx
, vb
, insn
& 0xffff);
2625 /* HW_RET (PALcode) */
2626 #ifndef CONFIG_USER_ONLY
2627 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE
);
2629 /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
2630 address from EXC_ADDR. This turns out to be useful for our
2631 emulation PALcode, so continue to accept it. */
2632 vb
= dest_sink(ctx
);
2633 tcg_gen_ld_i64(vb
, cpu_env
, offsetof(CPUAlphaState
, exc_addr
));
2635 vb
= load_gpr(ctx
, rb
);
2637 tcg_gen_movi_i64(cpu_lock_addr
, -1);
2638 st_flag_byte(load_zero(ctx
), ENV_FLAG_RX_SHIFT
);
2639 tmp
= tcg_temp_new();
2640 tcg_gen_andi_i64(tmp
, vb
, 1);
2641 st_flag_byte(tmp
, ENV_FLAG_PAL_SHIFT
);
2642 tcg_gen_andi_i64(cpu_pc
, vb
, ~3);
2643 /* Allow interrupts to be recognized right away. */
2644 ret
= DISAS_PC_UPDATED_NOCHAIN
;
2651 /* HW_ST (PALcode) */
2652 #ifndef CONFIG_USER_ONLY
2653 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE
);
2655 switch ((insn
>> 12) & 0xF) {
2657 /* Longword physical access */
2658 va
= load_gpr(ctx
, ra
);
2659 vb
= load_gpr(ctx
, rb
);
2660 tmp
= tcg_temp_new();
2661 tcg_gen_addi_i64(tmp
, vb
, disp12
);
2662 tcg_gen_qemu_st_i64(va
, tmp
, MMU_PHYS_IDX
, MO_LESL
);
2665 /* Quadword physical access */
2666 va
= load_gpr(ctx
, ra
);
2667 vb
= load_gpr(ctx
, rb
);
2668 tmp
= tcg_temp_new();
2669 tcg_gen_addi_i64(tmp
, vb
, disp12
);
2670 tcg_gen_qemu_st_i64(va
, tmp
, MMU_PHYS_IDX
, MO_LEUQ
);
2673 /* Longword physical access with lock */
2674 ret
= gen_store_conditional(ctx
, ra
, rb
, disp12
,
2675 MMU_PHYS_IDX
, MO_LESL
);
2678 /* Quadword physical access with lock */
2679 ret
= gen_store_conditional(ctx
, ra
, rb
, disp12
,
2680 MMU_PHYS_IDX
, MO_LEUQ
);
2683 /* Longword virtual access */
2686 /* Quadword virtual access */
2707 /* Longword virtual access with alternate access mode */
2710 /* Quadword virtual access with alternate access mode */
2727 gen_load_fp(ctx
, ra
, rb
, disp16
, gen_ldf
);
2732 gen_load_fp(ctx
, ra
, rb
, disp16
, gen_ldg
);
2737 gen_load_fp(ctx
, ra
, rb
, disp16
, gen_lds
);
2742 gen_load_fp(ctx
, ra
, rb
, disp16
, gen_ldt
);
2747 gen_store_fp(ctx
, ra
, rb
, disp16
, gen_stf
);
2752 gen_store_fp(ctx
, ra
, rb
, disp16
, gen_stg
);
2757 gen_store_fp(ctx
, ra
, rb
, disp16
, gen_sts
);
2762 gen_store_fp(ctx
, ra
, rb
, disp16
, gen_stt
);
2766 gen_load_int(ctx
, ra
, rb
, disp16
, MO_LESL
, 0, 0);
2770 gen_load_int(ctx
, ra
, rb
, disp16
, MO_LEUQ
, 0, 0);
2774 gen_load_int(ctx
, ra
, rb
, disp16
, MO_LESL
, 0, 1);
2778 gen_load_int(ctx
, ra
, rb
, disp16
, MO_LEUQ
, 0, 1);
2782 gen_store_int(ctx
, ra
, rb
, disp16
, MO_LEUL
, 0);
2786 gen_store_int(ctx
, ra
, rb
, disp16
, MO_LEUQ
, 0);
2790 ret
= gen_store_conditional(ctx
, ra
, rb
, disp16
,
2791 ctx
->mem_idx
, MO_LESL
);
2795 ret
= gen_store_conditional(ctx
, ra
, rb
, disp16
,
2796 ctx
->mem_idx
, MO_LEUQ
);
2800 ret
= gen_bdirect(ctx
, ra
, disp21
);
2802 case 0x31: /* FBEQ */
2804 ret
= gen_fbcond(ctx
, TCG_COND_EQ
, ra
, disp21
);
2806 case 0x32: /* FBLT */
2808 ret
= gen_fbcond(ctx
, TCG_COND_LT
, ra
, disp21
);
2810 case 0x33: /* FBLE */
2812 ret
= gen_fbcond(ctx
, TCG_COND_LE
, ra
, disp21
);
2816 ret
= gen_bdirect(ctx
, ra
, disp21
);
2818 case 0x35: /* FBNE */
2820 ret
= gen_fbcond(ctx
, TCG_COND_NE
, ra
, disp21
);
2822 case 0x36: /* FBGE */
2824 ret
= gen_fbcond(ctx
, TCG_COND_GE
, ra
, disp21
);
2826 case 0x37: /* FBGT */
2828 ret
= gen_fbcond(ctx
, TCG_COND_GT
, ra
, disp21
);
2832 ret
= gen_bcond(ctx
, TCG_COND_EQ
, ra
, disp21
, 1);
2836 ret
= gen_bcond(ctx
, TCG_COND_EQ
, ra
, disp21
, 0);
2840 ret
= gen_bcond(ctx
, TCG_COND_LT
, ra
, disp21
, 0);
2844 ret
= gen_bcond(ctx
, TCG_COND_LE
, ra
, disp21
, 0);
2848 ret
= gen_bcond(ctx
, TCG_COND_NE
, ra
, disp21
, 1);
2852 ret
= gen_bcond(ctx
, TCG_COND_NE
, ra
, disp21
, 0);
2856 ret
= gen_bcond(ctx
, TCG_COND_GE
, ra
, disp21
, 0);
2860 ret
= gen_bcond(ctx
, TCG_COND_GT
, ra
, disp21
, 0);
2863 ret
= gen_invalid(ctx
);
2866 ret
= gen_excp(ctx
, EXCP_FEN
, 0);
2873 static void alpha_tr_init_disas_context(DisasContextBase
*dcbase
, CPUState
*cpu
)
2875 DisasContext
*ctx
= container_of(dcbase
, DisasContext
, base
);
2876 CPUAlphaState
*env
= cpu
->env_ptr
;
2879 ctx
->tbflags
= ctx
->base
.tb
->flags
;
2880 ctx
->mem_idx
= cpu_mmu_index(env
, false);
2881 ctx
->implver
= env
->implver
;
2882 ctx
->amask
= env
->amask
;
2884 #ifdef CONFIG_USER_ONLY
2885 ctx
->ir
= cpu_std_ir
;
2886 ctx
->unalign
= (ctx
->tbflags
& TB_FLAG_UNALIGN
? MO_UNALN
: MO_ALIGN
);
2888 ctx
->palbr
= env
->palbr
;
2889 ctx
->ir
= (ctx
->tbflags
& ENV_FLAG_PAL_MODE
? cpu_pal_ir
: cpu_std_ir
);
    /* ??? Every TB begins with unset rounding mode, to be initialized on
       the first fp insn of the TB.  Alternately we could define a proper
       default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
       to reset the FP_STATUS to that default at the end of any TB that
       changes the default.  We could even (gasp) dynamically figure out
       what default would be most efficient given the running program.  */
2899 /* Similarly for flush-to-zero. */
2905 /* Bound the number of insns to execute to those left on the page. */
2906 bound
= -(ctx
->base
.pc_first
| TARGET_PAGE_MASK
) / 4;
2907 ctx
->base
.max_insns
= MIN(ctx
->base
.max_insns
, bound
);
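    /*
     * Worked example (illustrative): Alpha uses 8 KiB target pages, so
     * TARGET_PAGE_MASK is ~0x1fff.  For pc_first == 0x1200e000 the
     * expression -(pc_first | TARGET_PAGE_MASK) is 0x2000 and bound is
     * 0x800 instructions (a whole page), while for pc_first == 0x1200fff8
     * it is 8, i.e. only the 2 remaining instructions on the page.
     */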
2910 static void alpha_tr_tb_start(DisasContextBase
*db
, CPUState
*cpu
)
2914 static void alpha_tr_insn_start(DisasContextBase
*dcbase
, CPUState
*cpu
)
2916 tcg_gen_insn_start(dcbase
->pc_next
);
2919 static void alpha_tr_translate_insn(DisasContextBase
*dcbase
, CPUState
*cpu
)
2921 DisasContext
*ctx
= container_of(dcbase
, DisasContext
, base
);
2922 CPUAlphaState
*env
= cpu
->env_ptr
;
2923 uint32_t insn
= translator_ldl(env
, &ctx
->base
, ctx
->base
.pc_next
);
2925 ctx
->base
.pc_next
+= 4;
2926 ctx
->base
.is_jmp
= translate_one(ctx
, insn
);
2928 free_context_temps(ctx
);
2931 static void alpha_tr_tb_stop(DisasContextBase
*dcbase
, CPUState
*cpu
)
2933 DisasContext
*ctx
= container_of(dcbase
, DisasContext
, base
);
2935 switch (ctx
->base
.is_jmp
) {
2936 case DISAS_NORETURN
:
2938 case DISAS_TOO_MANY
:
2939 if (use_goto_tb(ctx
, ctx
->base
.pc_next
)) {
2941 tcg_gen_movi_i64(cpu_pc
, ctx
->base
.pc_next
);
2942 tcg_gen_exit_tb(ctx
->base
.tb
, 0);
2945 case DISAS_PC_STALE
:
2946 tcg_gen_movi_i64(cpu_pc
, ctx
->base
.pc_next
);
2948 case DISAS_PC_UPDATED
:
2949 tcg_gen_lookup_and_goto_ptr();
2951 case DISAS_PC_UPDATED_NOCHAIN
:
2952 tcg_gen_exit_tb(NULL
, 0);
2955 g_assert_not_reached();
static void alpha_tr_disas_log(const DisasContextBase *dcbase,
                               CPUState *cpu, FILE *logfile)
{
    fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
    target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size);
}

static const TranslatorOps alpha_tr_ops = {
    .init_disas_context = alpha_tr_init_disas_context,
    .tb_start           = alpha_tr_tb_start,
    .insn_start         = alpha_tr_insn_start,
    .translate_insn     = alpha_tr_translate_insn,
    .tb_stop            = alpha_tr_tb_stop,
    .disas_log          = alpha_tr_disas_log,
};

void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext dc;
    translator_loop(cpu, tb, max_insns, pc, host_pc, &alpha_tr_ops, &dc.base);
}