/*
   Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
   Copyright (C) 2003-2005 Fabrice Bellard

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "disas/disas.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/translator.h"
#define DYNAMIC_PC  1 /* dynamic pc value */
#define JUMP_PC     2 /* dynamic pc value which takes only two values
                         according to jump_pc[T2] */

#define DISAS_EXIT  DISAS_TARGET_0
/* global register indexes */
static TCGv_ptr cpu_regwptr;
static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
static TCGv_i32 cpu_cc_op;
static TCGv_i32 cpu_psr;
static TCGv cpu_fsr, cpu_pc, cpu_npc;
static TCGv cpu_regs[32];
static TCGv cpu_y;
#ifndef CONFIG_USER_ONLY
static TCGv cpu_tbr;
#endif
static TCGv cpu_cond;
#ifdef TARGET_SPARC64
static TCGv_i32 cpu_xcc, cpu_fprs;
static TCGv cpu_gsr;
static TCGv cpu_tick_cmpr, cpu_stick_cmpr, cpu_hstick_cmpr;
static TCGv cpu_hintp, cpu_htba, cpu_hver, cpu_ssr, cpu_ver;
#endif
/* Floating point registers */
static TCGv_i64 cpu_fpr[TARGET_DPREGS];
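/*
 * The 32 single-precision registers %f0..%f31 are packed two per element
 * of cpu_fpr[], so the gen_load_fpr_F()/gen_store_fpr_F() helpers below
 * pick the high or low 32-bit half of cpu_fpr[reg / 2] based on reg & 1.
 */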
#include "exec/gen-icount.h"
typedef struct DisasContext {
    DisasContextBase base;
    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */
    target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
    int mem_idx;
    bool fpu_enabled;
    bool address_mask_32bit;
#ifndef CONFIG_USER_ONLY
    bool supervisor;
#ifdef TARGET_SPARC64
    bool hypervisor;
#endif
#endif

    uint32_t cc_op;  /* current CC operation */
    sparc_def_t *def;
    TCGv_i32 t32[3];
    TCGv ttl[5];
    int n_t32;
    int n_ttl;
#ifdef TARGET_SPARC64
    int fprs_dirty;
#endif
} DisasContext;
// This function uses non-native bit order
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This function uses the order in the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

#define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
#define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
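/*
 * Example: GET_FIELD(insn, 3, 6) shifts right by 31 - 6 = 25 and masks with
 * (1 << 4) - 1, i.e. it returns instruction bits <28:25> -- the condition
 * field that do_branch() below extracts from Bicc/FBfcc opcodes.  With the
 * manuals' numbering, GET_FIELD_SP(insn, 25, 27) returns bits <27:25>, as
 * used by do_branch_reg().
 */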
#ifdef TARGET_SPARC64
#define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
#define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
#else
#define DFPREG(r) (r & 0x1e)
#define QFPREG(r) (r & 0x1c)
#endif

#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f
static int sign_extend(int x, int len)
{
    len = 32 - len;
    return (x << len) >> len;
}

#define IS_IMM (insn & (1 << 13))
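/*
 * Example: sign_extend(0x1fff, 13) == -1.  This is how the 13-bit simm13
 * immediate selected by IS_IMM (bit 13 of the instruction word) is widened
 * to a signed value via the GET_FIELDs/GET_FIELD_SPs macros above.
 */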
static inline TCGv_i32 get_temp_i32(DisasContext *dc)
{
    TCGv_i32 t;
    assert(dc->n_t32 < ARRAY_SIZE(dc->t32));
    dc->t32[dc->n_t32++] = t = tcg_temp_new_i32();
    return t;
}
static inline TCGv get_temp_tl(DisasContext *dc)
{
    TCGv t;
    assert(dc->n_ttl < ARRAY_SIZE(dc->ttl));
    dc->ttl[dc->n_ttl++] = t = tcg_temp_new();
    return t;
}
static inline void gen_update_fprs_dirty(DisasContext *dc, int rd)
{
#if defined(TARGET_SPARC64)
    int bit = (rd < 32) ? 1 : 2;

    /* If we know we've already set this bit within the TB,
       we can avoid setting it again.  */
    if (!(dc->fprs_dirty & bit)) {
        dc->fprs_dirty |= bit;
        tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
    }
#endif
}
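/*
 * The two bit values correspond to FPRS.DL (lower half of the FP register
 * file dirty, rd < 32) and FPRS.DU (upper half dirty).  Caching them in
 * dc->fprs_dirty avoids emitting a redundant OR into cpu_fprs for every
 * FP register write within the same translation block.
 */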
/* floating point registers moves */
static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
#if TCG_TARGET_REG_BITS == 32
    if (src & 1) {
        return TCGV_LOW(cpu_fpr[src / 2]);
    } else {
        return TCGV_HIGH(cpu_fpr[src / 2]);
    }
#else
    TCGv_i32 ret = get_temp_i32(dc);
    if (src & 1) {
        tcg_gen_extrl_i64_i32(ret, cpu_fpr[src / 2]);
    } else {
        tcg_gen_extrh_i64_i32(ret, cpu_fpr[src / 2]);
    }
    return ret;
#endif
}
static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
#if TCG_TARGET_REG_BITS == 32
    if (dst & 1) {
        tcg_gen_mov_i32(TCGV_LOW(cpu_fpr[dst / 2]), v);
    } else {
        tcg_gen_mov_i32(TCGV_HIGH(cpu_fpr[dst / 2]), v);
    }
#else
    TCGv_i64 t = (TCGv_i64)v;
    tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
                        (dst & 1 ? 0 : 32), 32);
#endif
    gen_update_fprs_dirty(dc, dst);
}
static TCGv_i32 gen_dest_fpr_F(DisasContext *dc)
{
    return get_temp_i32(dc);
}

static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
{
    src = DFPREG(src);
    return cpu_fpr[src / 2];
}

static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    dst = DFPREG(dst);
    tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
    gen_update_fprs_dirty(dc, dst);
}

static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
{
    return cpu_fpr[DFPREG(dst) / 2];
}
static void gen_op_load_fpr_QT0(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_load_fpr_QT1(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_store_QT0_fpr(unsigned int dst)
{
    tcg_gen_ld_i64(cpu_fpr[dst / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}
static void gen_store_fpr_Q(DisasContext *dc, unsigned int dst,
                            TCGv_i64 v1, TCGv_i64 v2)
{
    dst = QFPREG(dst);

    tcg_gen_mov_i64(cpu_fpr[dst / 2], v1);
    tcg_gen_mov_i64(cpu_fpr[dst / 2 + 1], v2);
    gen_update_fprs_dirty(dc, dst);
}
#ifdef TARGET_SPARC64
static TCGv_i64 gen_load_fpr_Q0(DisasContext *dc, unsigned int src)
{
    src = QFPREG(src);
    return cpu_fpr[src / 2];
}

static TCGv_i64 gen_load_fpr_Q1(DisasContext *dc, unsigned int src)
{
    src = QFPREG(src);
    return cpu_fpr[src / 2 + 1];
}

static void gen_move_Q(DisasContext *dc, unsigned int rd, unsigned int rs)
{
    rd = QFPREG(rd);
    rs = QFPREG(rs);

    tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
    tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
    gen_update_fprs_dirty(dc, rd);
}
#endif
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
#ifdef TARGET_SPARC64
#define hypervisor(dc) 0
#endif
#else
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->hypervisor)
#define supervisor(dc) (dc->supervisor | dc->hypervisor)
#else
#define supervisor(dc) (dc->supervisor)
#endif
#endif

#ifdef TARGET_SPARC64
#ifndef TARGET_ABI32
#define AM_CHECK(dc) ((dc)->address_mask_32bit)
#else
#define AM_CHECK(dc) (1)
#endif
#endif
static inline void gen_address_mask(DisasContext *dc, TCGv addr)
{
#ifdef TARGET_SPARC64
    if (AM_CHECK(dc)) {
        tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
    }
#endif
}
static inline TCGv gen_load_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        TCGv t = get_temp_tl(dc);
        tcg_gen_movi_tl(t, 0);
        return t;
    }
}
static inline void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
{
    if (reg > 0) {
        assert(reg < 32);
        tcg_gen_mov_tl(cpu_regs[reg], v);
    }
}
static inline TCGv gen_dest_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        return get_temp_tl(dc);
    }
}
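/*
 * Integer register 0 is the hard-wired %g0: gen_load_gpr() returns a fresh
 * zero temporary for it, gen_store_gpr() silently drops writes to it, and
 * gen_dest_gpr() hands out a scratch temporary so an instruction targeting
 * %g0 can still compute (and then discard) its result.
 */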
static inline bool use_goto_tb(DisasContext *s, target_ulong pc,
                               target_ulong npc)
{
    if (unlikely(s->base.singlestep_enabled || singlestep)) {
        return false;
    }

#ifndef CONFIG_USER_ONLY
    return (pc & TARGET_PAGE_MASK) == (s->base.tb->pc & TARGET_PAGE_MASK) &&
           (npc & TARGET_PAGE_MASK) == (s->base.tb->pc & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
static inline void gen_goto_tb(DisasContext *s, int tb_num,
                               target_ulong pc, target_ulong npc)
{
    if (use_goto_tb(s, pc, npc))  {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(s->base.tb, tb_num);
    } else {
        /* jump to another page: currently not optimized */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(NULL, 0);
    }
}
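/*
 * tcg_gen_goto_tb()/tcg_gen_exit_tb(tb, tb_num) chain this TB directly to
 * its successor, which is only safe while the successor lies on the same
 * guest page (and we are not single-stepping); that is exactly what
 * use_goto_tb() checks with the TARGET_PAGE_MASK comparison above.
 */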
static inline void gen_mov_reg_N(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_NEG_SHIFT, 1);
}

static inline void gen_mov_reg_Z(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_ZERO_SHIFT, 1);
}

static inline void gen_mov_reg_V(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_OVF_SHIFT, 1);
}

static inline void gen_mov_reg_C(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_CARRY_SHIFT, 1);
}
static inline void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
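/*
 * cpu_cc_src, cpu_cc_src2 and cpu_cc_dst, together with cpu_cc_op,
 * implement lazy condition-code evaluation: gen_op_add_cc() only records
 * the operands and the result, and the real PSR/CCR flags are materialised
 * on demand by gen_helper_compute_psr() (see update_psr()) or by the
 * CC_OP-specific shortcuts in gen_compare().
 */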
static TCGv_i32 gen_add32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous add: (dst < src)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_dst);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src);
#else
    cc_src1_32 = cpu_cc_dst;
    cc_src2_32 = cpu_cc_src;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(cc_src1_32);
    tcg_temp_free_i32(cc_src2_32);
#endif

    return carry_32;
}
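/*
 * Example: 0xffffffff + 0x00000001 wraps to 0x00000000, and the unsigned
 * test dst < src (0 < 0xffffffff) is true exactly when the 32-bit addition
 * carried out, which is the bit recovered into carry_32 here.
 */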
static TCGv_i32 gen_sub32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous borrow: (src1 < src2)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_src);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src2);
#else
    cc_src1_32 = cpu_cc_src;
    cc_src2_32 = cpu_cc_src2;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(cc_src1_32);
    tcg_temp_free_i32(cc_src2_32);
#endif

    return carry_32;
}
static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain ADD.  */
        if (update_cc) {
            gen_op_add_cc(dst, src1, src2);
        } else {
            tcg_gen_add_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               an ADD2 opcode.  We discard the low part of the output.
               Ideally we'd combine this operation with the add that
               generated the carry in the first place.  */
            carry = tcg_temp_new();
            tcg_gen_add2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            tcg_temp_free(carry);
            goto add_done;
        }
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, carry);

    tcg_temp_free_i32(carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(carry);
#endif

 add_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADDX);
        dc->cc_op = CC_OP_ADDX;
    }
}
static inline void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain SUB.  */
        if (update_cc) {
            gen_op_sub_cc(dst, src1, src2);
        } else {
            tcg_gen_sub_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               a SUB2 opcode.  We discard the low part of the output.
               Ideally we'd combine this operation with the add that
               generated the carry in the first place.  */
            carry = tcg_temp_new();
            tcg_gen_sub2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            tcg_temp_free(carry);
            goto sub_done;
        }
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, carry);

    tcg_temp_free_i32(carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(carry);
#endif

 sub_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUBX);
        dc->cc_op = CC_OP_SUBX;
    }
}
static inline void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv r_temp, zero, t0;

    r_temp = tcg_temp_new();
    t0 = tcg_temp_new();

    zero = tcg_const_tl(0);
    tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
    tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
    tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
    tcg_gen_movcond_tl(TCG_COND_EQ, cpu_cc_src2, r_temp, zero,
                       zero, cpu_cc_src2);
    tcg_temp_free(zero);

    // env->y = (b2 << 31) | (env->y >> 1);
    tcg_gen_extract_tl(t0, cpu_y, 1, 31);
    tcg_gen_deposit_tl(cpu_y, t0, cpu_cc_src, 31, 1);

    gen_mov_reg_N(t0, cpu_psr);
    gen_mov_reg_V(r_temp, cpu_psr);
    tcg_gen_xor_tl(t0, t0, r_temp);
    tcg_temp_free(r_temp);

    // T0 = (b1 << 31) | (T0 >> 1);
    tcg_gen_shli_tl(t0, t0, 31);
    tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
    tcg_temp_free(t0);

    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);

    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
static inline void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_temp_free(t0);
    tcg_temp_free(t1);

    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}
static inline void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}

static inline void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}
692 static inline void gen_op_eval_ba(TCGv dst
)
694 tcg_gen_movi_tl(dst
, 1);
698 static inline void gen_op_eval_be(TCGv dst
, TCGv_i32 src
)
700 gen_mov_reg_Z(dst
, src
);
704 static inline void gen_op_eval_ble(TCGv dst
, TCGv_i32 src
)
706 TCGv t0
= tcg_temp_new();
707 gen_mov_reg_N(t0
, src
);
708 gen_mov_reg_V(dst
, src
);
709 tcg_gen_xor_tl(dst
, dst
, t0
);
710 gen_mov_reg_Z(t0
, src
);
711 tcg_gen_or_tl(dst
, dst
, t0
);
716 static inline void gen_op_eval_bl(TCGv dst
, TCGv_i32 src
)
718 TCGv t0
= tcg_temp_new();
719 gen_mov_reg_V(t0
, src
);
720 gen_mov_reg_N(dst
, src
);
721 tcg_gen_xor_tl(dst
, dst
, t0
);
726 static inline void gen_op_eval_bleu(TCGv dst
, TCGv_i32 src
)
728 TCGv t0
= tcg_temp_new();
729 gen_mov_reg_Z(t0
, src
);
730 gen_mov_reg_C(dst
, src
);
731 tcg_gen_or_tl(dst
, dst
, t0
);
736 static inline void gen_op_eval_bcs(TCGv dst
, TCGv_i32 src
)
738 gen_mov_reg_C(dst
, src
);
742 static inline void gen_op_eval_bvs(TCGv dst
, TCGv_i32 src
)
744 gen_mov_reg_V(dst
, src
);
748 static inline void gen_op_eval_bn(TCGv dst
)
750 tcg_gen_movi_tl(dst
, 0);
754 static inline void gen_op_eval_bneg(TCGv dst
, TCGv_i32 src
)
756 gen_mov_reg_N(dst
, src
);
760 static inline void gen_op_eval_bne(TCGv dst
, TCGv_i32 src
)
762 gen_mov_reg_Z(dst
, src
);
763 tcg_gen_xori_tl(dst
, dst
, 0x1);
767 static inline void gen_op_eval_bg(TCGv dst
, TCGv_i32 src
)
769 gen_op_eval_ble(dst
, src
);
770 tcg_gen_xori_tl(dst
, dst
, 0x1);
774 static inline void gen_op_eval_bge(TCGv dst
, TCGv_i32 src
)
776 gen_op_eval_bl(dst
, src
);
777 tcg_gen_xori_tl(dst
, dst
, 0x1);
781 static inline void gen_op_eval_bgu(TCGv dst
, TCGv_i32 src
)
783 gen_op_eval_bleu(dst
, src
);
784 tcg_gen_xori_tl(dst
, dst
, 0x1);
788 static inline void gen_op_eval_bcc(TCGv dst
, TCGv_i32 src
)
790 gen_mov_reg_C(dst
, src
);
791 tcg_gen_xori_tl(dst
, dst
, 0x1);
795 static inline void gen_op_eval_bpos(TCGv dst
, TCGv_i32 src
)
797 gen_mov_reg_N(dst
, src
);
798 tcg_gen_xori_tl(dst
, dst
, 0x1);
802 static inline void gen_op_eval_bvc(TCGv dst
, TCGv_i32 src
)
804 gen_mov_reg_V(dst
, src
);
805 tcg_gen_xori_tl(dst
, dst
, 0x1);
/*
   FPSR bit field FCC1 | FCC0:
    0 =
    1 <
    2 >
    3 unordered
*/
815 static inline void gen_mov_reg_FCC0(TCGv reg
, TCGv src
,
816 unsigned int fcc_offset
)
818 tcg_gen_shri_tl(reg
, src
, FSR_FCC0_SHIFT
+ fcc_offset
);
819 tcg_gen_andi_tl(reg
, reg
, 0x1);
822 static inline void gen_mov_reg_FCC1(TCGv reg
, TCGv src
,
823 unsigned int fcc_offset
)
825 tcg_gen_shri_tl(reg
, src
, FSR_FCC1_SHIFT
+ fcc_offset
);
826 tcg_gen_andi_tl(reg
, reg
, 0x1);
830 static inline void gen_op_eval_fbne(TCGv dst
, TCGv src
,
831 unsigned int fcc_offset
)
833 TCGv t0
= tcg_temp_new();
834 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
835 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
836 tcg_gen_or_tl(dst
, dst
, t0
);
840 // 1 or 2: FCC0 ^ FCC1
841 static inline void gen_op_eval_fblg(TCGv dst
, TCGv src
,
842 unsigned int fcc_offset
)
844 TCGv t0
= tcg_temp_new();
845 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
846 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
847 tcg_gen_xor_tl(dst
, dst
, t0
);
852 static inline void gen_op_eval_fbul(TCGv dst
, TCGv src
,
853 unsigned int fcc_offset
)
855 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
859 static inline void gen_op_eval_fbl(TCGv dst
, TCGv src
,
860 unsigned int fcc_offset
)
862 TCGv t0
= tcg_temp_new();
863 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
864 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
865 tcg_gen_andc_tl(dst
, dst
, t0
);
870 static inline void gen_op_eval_fbug(TCGv dst
, TCGv src
,
871 unsigned int fcc_offset
)
873 gen_mov_reg_FCC1(dst
, src
, fcc_offset
);
877 static inline void gen_op_eval_fbg(TCGv dst
, TCGv src
,
878 unsigned int fcc_offset
)
880 TCGv t0
= tcg_temp_new();
881 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
882 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
883 tcg_gen_andc_tl(dst
, t0
, dst
);
888 static inline void gen_op_eval_fbu(TCGv dst
, TCGv src
,
889 unsigned int fcc_offset
)
891 TCGv t0
= tcg_temp_new();
892 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
893 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
894 tcg_gen_and_tl(dst
, dst
, t0
);
899 static inline void gen_op_eval_fbe(TCGv dst
, TCGv src
,
900 unsigned int fcc_offset
)
902 TCGv t0
= tcg_temp_new();
903 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
904 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
905 tcg_gen_or_tl(dst
, dst
, t0
);
906 tcg_gen_xori_tl(dst
, dst
, 0x1);
910 // 0 or 3: !(FCC0 ^ FCC1)
911 static inline void gen_op_eval_fbue(TCGv dst
, TCGv src
,
912 unsigned int fcc_offset
)
914 TCGv t0
= tcg_temp_new();
915 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
916 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
917 tcg_gen_xor_tl(dst
, dst
, t0
);
918 tcg_gen_xori_tl(dst
, dst
, 0x1);
923 static inline void gen_op_eval_fbge(TCGv dst
, TCGv src
,
924 unsigned int fcc_offset
)
926 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
927 tcg_gen_xori_tl(dst
, dst
, 0x1);
930 // !1: !(FCC0 & !FCC1)
931 static inline void gen_op_eval_fbuge(TCGv dst
, TCGv src
,
932 unsigned int fcc_offset
)
934 TCGv t0
= tcg_temp_new();
935 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
936 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
937 tcg_gen_andc_tl(dst
, dst
, t0
);
938 tcg_gen_xori_tl(dst
, dst
, 0x1);
943 static inline void gen_op_eval_fble(TCGv dst
, TCGv src
,
944 unsigned int fcc_offset
)
946 gen_mov_reg_FCC1(dst
, src
, fcc_offset
);
947 tcg_gen_xori_tl(dst
, dst
, 0x1);
950 // !2: !(!FCC0 & FCC1)
951 static inline void gen_op_eval_fbule(TCGv dst
, TCGv src
,
952 unsigned int fcc_offset
)
954 TCGv t0
= tcg_temp_new();
955 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
956 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
957 tcg_gen_andc_tl(dst
, t0
, dst
);
958 tcg_gen_xori_tl(dst
, dst
, 0x1);
962 // !3: !(FCC0 & FCC1)
963 static inline void gen_op_eval_fbo(TCGv dst
, TCGv src
,
964 unsigned int fcc_offset
)
966 TCGv t0
= tcg_temp_new();
967 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
968 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
969 tcg_gen_and_tl(dst
, dst
, t0
);
970 tcg_gen_xori_tl(dst
, dst
, 0x1);
974 static inline void gen_branch2(DisasContext
*dc
, target_ulong pc1
,
975 target_ulong pc2
, TCGv r_cond
)
977 TCGLabel
*l1
= gen_new_label();
979 tcg_gen_brcondi_tl(TCG_COND_EQ
, r_cond
, 0, l1
);
981 gen_goto_tb(dc
, 0, pc1
, pc1
+ 4);
984 gen_goto_tb(dc
, 1, pc2
, pc2
+ 4);
987 static void gen_branch_a(DisasContext
*dc
, target_ulong pc1
)
989 TCGLabel
*l1
= gen_new_label();
990 target_ulong npc
= dc
->npc
;
992 tcg_gen_brcondi_tl(TCG_COND_EQ
, cpu_cond
, 0, l1
);
994 gen_goto_tb(dc
, 0, npc
, pc1
);
997 gen_goto_tb(dc
, 1, npc
+ 4, npc
+ 8);
999 dc
->base
.is_jmp
= DISAS_NORETURN
;
1002 static void gen_branch_n(DisasContext
*dc
, target_ulong pc1
)
1004 target_ulong npc
= dc
->npc
;
1006 if (likely(npc
!= DYNAMIC_PC
)) {
1008 dc
->jump_pc
[0] = pc1
;
1009 dc
->jump_pc
[1] = npc
+ 4;
1014 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1016 tcg_gen_addi_tl(cpu_npc
, cpu_npc
, 4);
1017 t
= tcg_const_tl(pc1
);
1018 z
= tcg_const_tl(0);
1019 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_npc
, cpu_cond
, z
, t
, cpu_npc
);
1023 dc
->pc
= DYNAMIC_PC
;
1027 static inline void gen_generic_branch(DisasContext
*dc
)
1029 TCGv npc0
= tcg_const_tl(dc
->jump_pc
[0]);
1030 TCGv npc1
= tcg_const_tl(dc
->jump_pc
[1]);
1031 TCGv zero
= tcg_const_tl(0);
1033 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_npc
, cpu_cond
, zero
, npc0
, npc1
);
1035 tcg_temp_free(npc0
);
1036 tcg_temp_free(npc1
);
1037 tcg_temp_free(zero
);
1040 /* call this function before using the condition register as it may
1041 have been set for a jump */
1042 static inline void flush_cond(DisasContext
*dc
)
1044 if (dc
->npc
== JUMP_PC
) {
1045 gen_generic_branch(dc
);
1046 dc
->npc
= DYNAMIC_PC
;
1050 static inline void save_npc(DisasContext
*dc
)
1052 if (dc
->npc
== JUMP_PC
) {
1053 gen_generic_branch(dc
);
1054 dc
->npc
= DYNAMIC_PC
;
1055 } else if (dc
->npc
!= DYNAMIC_PC
) {
1056 tcg_gen_movi_tl(cpu_npc
, dc
->npc
);
1060 static inline void update_psr(DisasContext
*dc
)
1062 if (dc
->cc_op
!= CC_OP_FLAGS
) {
1063 dc
->cc_op
= CC_OP_FLAGS
;
1064 gen_helper_compute_psr(cpu_env
);
1068 static inline void save_state(DisasContext
*dc
)
1070 tcg_gen_movi_tl(cpu_pc
, dc
->pc
);
1074 static void gen_exception(DisasContext
*dc
, int which
)
1079 t
= tcg_const_i32(which
);
1080 gen_helper_raise_exception(cpu_env
, t
);
1081 tcg_temp_free_i32(t
);
1082 dc
->base
.is_jmp
= DISAS_NORETURN
;
1085 static void gen_check_align(TCGv addr
, int mask
)
1087 TCGv_i32 r_mask
= tcg_const_i32(mask
);
1088 gen_helper_check_align(cpu_env
, addr
, r_mask
);
1089 tcg_temp_free_i32(r_mask
);
1092 static inline void gen_mov_pc_npc(DisasContext
*dc
)
1094 if (dc
->npc
== JUMP_PC
) {
1095 gen_generic_branch(dc
);
1096 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1097 dc
->pc
= DYNAMIC_PC
;
1098 } else if (dc
->npc
== DYNAMIC_PC
) {
1099 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1100 dc
->pc
= DYNAMIC_PC
;
1106 static inline void gen_op_next_insn(void)
1108 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1109 tcg_gen_addi_tl(cpu_npc
, cpu_npc
, 4);
1112 static void free_compare(DisasCompare
*cmp
)
1115 tcg_temp_free(cmp
->c1
);
1118 tcg_temp_free(cmp
->c2
);
1122 static void gen_compare(DisasCompare
*cmp
, bool xcc
, unsigned int cond
,
1125 static int subcc_cond
[16] = {
1141 -1, /* no overflow */
1144 static int logic_cond
[16] = {
1146 TCG_COND_EQ
, /* eq: Z */
1147 TCG_COND_LE
, /* le: Z | (N ^ V) -> Z | N */
1148 TCG_COND_LT
, /* lt: N ^ V -> N */
1149 TCG_COND_EQ
, /* leu: C | Z -> Z */
1150 TCG_COND_NEVER
, /* ltu: C -> 0 */
1151 TCG_COND_LT
, /* neg: N */
1152 TCG_COND_NEVER
, /* vs: V -> 0 */
1154 TCG_COND_NE
, /* ne: !Z */
1155 TCG_COND_GT
, /* gt: !(Z | (N ^ V)) -> !(Z | N) */
1156 TCG_COND_GE
, /* ge: !(N ^ V) -> !N */
1157 TCG_COND_NE
, /* gtu: !(C | Z) -> !Z */
1158 TCG_COND_ALWAYS
, /* geu: !C -> 1 */
1159 TCG_COND_GE
, /* pos: !N */
1160 TCG_COND_ALWAYS
, /* vc: !V -> 1 */
1166 #ifdef TARGET_SPARC64
1176 switch (dc
->cc_op
) {
1178 cmp
->cond
= logic_cond
[cond
];
1180 cmp
->is_bool
= false;
1182 cmp
->c2
= tcg_const_tl(0);
1183 #ifdef TARGET_SPARC64
1186 cmp
->c1
= tcg_temp_new();
1187 tcg_gen_ext32s_tl(cmp
->c1
, cpu_cc_dst
);
1192 cmp
->c1
= cpu_cc_dst
;
1199 cmp
->cond
= (cond
== 6 ? TCG_COND_LT
: TCG_COND_GE
);
1200 goto do_compare_dst_0
;
1202 case 7: /* overflow */
1203 case 15: /* !overflow */
1207 cmp
->cond
= subcc_cond
[cond
];
1208 cmp
->is_bool
= false;
1209 #ifdef TARGET_SPARC64
1211 /* Note that sign-extension works for unsigned compares as
1212 long as both operands are sign-extended. */
1213 cmp
->g1
= cmp
->g2
= false;
1214 cmp
->c1
= tcg_temp_new();
1215 cmp
->c2
= tcg_temp_new();
1216 tcg_gen_ext32s_tl(cmp
->c1
, cpu_cc_src
);
1217 tcg_gen_ext32s_tl(cmp
->c2
, cpu_cc_src2
);
1221 cmp
->g1
= cmp
->g2
= true;
1222 cmp
->c1
= cpu_cc_src
;
1223 cmp
->c2
= cpu_cc_src2
;
1230 gen_helper_compute_psr(cpu_env
);
1231 dc
->cc_op
= CC_OP_FLAGS
;
1235 /* We're going to generate a boolean result. */
1236 cmp
->cond
= TCG_COND_NE
;
1237 cmp
->is_bool
= true;
1238 cmp
->g1
= cmp
->g2
= false;
1239 cmp
->c1
= r_dst
= tcg_temp_new();
1240 cmp
->c2
= tcg_const_tl(0);
1244 gen_op_eval_bn(r_dst
);
1247 gen_op_eval_be(r_dst
, r_src
);
1250 gen_op_eval_ble(r_dst
, r_src
);
1253 gen_op_eval_bl(r_dst
, r_src
);
1256 gen_op_eval_bleu(r_dst
, r_src
);
1259 gen_op_eval_bcs(r_dst
, r_src
);
1262 gen_op_eval_bneg(r_dst
, r_src
);
1265 gen_op_eval_bvs(r_dst
, r_src
);
1268 gen_op_eval_ba(r_dst
);
1271 gen_op_eval_bne(r_dst
, r_src
);
1274 gen_op_eval_bg(r_dst
, r_src
);
1277 gen_op_eval_bge(r_dst
, r_src
);
1280 gen_op_eval_bgu(r_dst
, r_src
);
1283 gen_op_eval_bcc(r_dst
, r_src
);
1286 gen_op_eval_bpos(r_dst
, r_src
);
1289 gen_op_eval_bvc(r_dst
, r_src
);
1296 static void gen_fcompare(DisasCompare
*cmp
, unsigned int cc
, unsigned int cond
)
1298 unsigned int offset
;
1301 /* For now we still generate a straight boolean result. */
1302 cmp
->cond
= TCG_COND_NE
;
1303 cmp
->is_bool
= true;
1304 cmp
->g1
= cmp
->g2
= false;
1305 cmp
->c1
= r_dst
= tcg_temp_new();
1306 cmp
->c2
= tcg_const_tl(0);
1326 gen_op_eval_bn(r_dst
);
1329 gen_op_eval_fbne(r_dst
, cpu_fsr
, offset
);
1332 gen_op_eval_fblg(r_dst
, cpu_fsr
, offset
);
1335 gen_op_eval_fbul(r_dst
, cpu_fsr
, offset
);
1338 gen_op_eval_fbl(r_dst
, cpu_fsr
, offset
);
1341 gen_op_eval_fbug(r_dst
, cpu_fsr
, offset
);
1344 gen_op_eval_fbg(r_dst
, cpu_fsr
, offset
);
1347 gen_op_eval_fbu(r_dst
, cpu_fsr
, offset
);
1350 gen_op_eval_ba(r_dst
);
1353 gen_op_eval_fbe(r_dst
, cpu_fsr
, offset
);
1356 gen_op_eval_fbue(r_dst
, cpu_fsr
, offset
);
1359 gen_op_eval_fbge(r_dst
, cpu_fsr
, offset
);
1362 gen_op_eval_fbuge(r_dst
, cpu_fsr
, offset
);
1365 gen_op_eval_fble(r_dst
, cpu_fsr
, offset
);
1368 gen_op_eval_fbule(r_dst
, cpu_fsr
, offset
);
1371 gen_op_eval_fbo(r_dst
, cpu_fsr
, offset
);
1376 static void gen_cond(TCGv r_dst
, unsigned int cc
, unsigned int cond
,
1380 gen_compare(&cmp
, cc
, cond
, dc
);
1382 /* The interface is to return a boolean in r_dst. */
1384 tcg_gen_mov_tl(r_dst
, cmp
.c1
);
1386 tcg_gen_setcond_tl(cmp
.cond
, r_dst
, cmp
.c1
, cmp
.c2
);
1392 static void gen_fcond(TCGv r_dst
, unsigned int cc
, unsigned int cond
)
1395 gen_fcompare(&cmp
, cc
, cond
);
1397 /* The interface is to return a boolean in r_dst. */
1399 tcg_gen_mov_tl(r_dst
, cmp
.c1
);
1401 tcg_gen_setcond_tl(cmp
.cond
, r_dst
, cmp
.c1
, cmp
.c2
);
1407 #ifdef TARGET_SPARC64
1409 static const int gen_tcg_cond_reg
[8] = {
1420 static void gen_compare_reg(DisasCompare
*cmp
, int cond
, TCGv r_src
)
1422 cmp
->cond
= tcg_invert_cond(gen_tcg_cond_reg
[cond
]);
1423 cmp
->is_bool
= false;
1427 cmp
->c2
= tcg_const_tl(0);
1430 static inline void gen_cond_reg(TCGv r_dst
, int cond
, TCGv r_src
)
1433 gen_compare_reg(&cmp
, cond
, r_src
);
1435 /* The interface is to return a boolean in r_dst. */
1436 tcg_gen_setcond_tl(cmp
.cond
, r_dst
, cmp
.c1
, cmp
.c2
);
1442 static void do_branch(DisasContext
*dc
, int32_t offset
, uint32_t insn
, int cc
)
1444 unsigned int cond
= GET_FIELD(insn
, 3, 6), a
= (insn
& (1 << 29));
1445 target_ulong target
= dc
->pc
+ offset
;
1447 #ifdef TARGET_SPARC64
1448 if (unlikely(AM_CHECK(dc
))) {
1449 target
&= 0xffffffffULL
;
1453 /* unconditional not taken */
1455 dc
->pc
= dc
->npc
+ 4;
1456 dc
->npc
= dc
->pc
+ 4;
1459 dc
->npc
= dc
->pc
+ 4;
1461 } else if (cond
== 0x8) {
1462 /* unconditional taken */
1465 dc
->npc
= dc
->pc
+ 4;
1469 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1473 gen_cond(cpu_cond
, cc
, cond
, dc
);
1475 gen_branch_a(dc
, target
);
1477 gen_branch_n(dc
, target
);
1482 static void do_fbranch(DisasContext
*dc
, int32_t offset
, uint32_t insn
, int cc
)
1484 unsigned int cond
= GET_FIELD(insn
, 3, 6), a
= (insn
& (1 << 29));
1485 target_ulong target
= dc
->pc
+ offset
;
1487 #ifdef TARGET_SPARC64
1488 if (unlikely(AM_CHECK(dc
))) {
1489 target
&= 0xffffffffULL
;
1493 /* unconditional not taken */
1495 dc
->pc
= dc
->npc
+ 4;
1496 dc
->npc
= dc
->pc
+ 4;
1499 dc
->npc
= dc
->pc
+ 4;
1501 } else if (cond
== 0x8) {
1502 /* unconditional taken */
1505 dc
->npc
= dc
->pc
+ 4;
1509 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1513 gen_fcond(cpu_cond
, cc
, cond
);
1515 gen_branch_a(dc
, target
);
1517 gen_branch_n(dc
, target
);
1522 #ifdef TARGET_SPARC64
1523 static void do_branch_reg(DisasContext
*dc
, int32_t offset
, uint32_t insn
,
1526 unsigned int cond
= GET_FIELD_SP(insn
, 25, 27), a
= (insn
& (1 << 29));
1527 target_ulong target
= dc
->pc
+ offset
;
1529 if (unlikely(AM_CHECK(dc
))) {
1530 target
&= 0xffffffffULL
;
1533 gen_cond_reg(cpu_cond
, cond
, r_reg
);
1535 gen_branch_a(dc
, target
);
1537 gen_branch_n(dc
, target
);
1541 static inline void gen_op_fcmps(int fccno
, TCGv_i32 r_rs1
, TCGv_i32 r_rs2
)
1545 gen_helper_fcmps(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1548 gen_helper_fcmps_fcc1(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1551 gen_helper_fcmps_fcc2(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1554 gen_helper_fcmps_fcc3(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1559 static inline void gen_op_fcmpd(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1563 gen_helper_fcmpd(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1566 gen_helper_fcmpd_fcc1(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1569 gen_helper_fcmpd_fcc2(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1572 gen_helper_fcmpd_fcc3(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1577 static inline void gen_op_fcmpq(int fccno
)
1581 gen_helper_fcmpq(cpu_fsr
, cpu_env
);
1584 gen_helper_fcmpq_fcc1(cpu_fsr
, cpu_env
);
1587 gen_helper_fcmpq_fcc2(cpu_fsr
, cpu_env
);
1590 gen_helper_fcmpq_fcc3(cpu_fsr
, cpu_env
);
1595 static inline void gen_op_fcmpes(int fccno
, TCGv_i32 r_rs1
, TCGv_i32 r_rs2
)
1599 gen_helper_fcmpes(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1602 gen_helper_fcmpes_fcc1(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1605 gen_helper_fcmpes_fcc2(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1608 gen_helper_fcmpes_fcc3(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1613 static inline void gen_op_fcmped(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1617 gen_helper_fcmped(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1620 gen_helper_fcmped_fcc1(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1623 gen_helper_fcmped_fcc2(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1626 gen_helper_fcmped_fcc3(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1631 static inline void gen_op_fcmpeq(int fccno
)
1635 gen_helper_fcmpeq(cpu_fsr
, cpu_env
);
1638 gen_helper_fcmpeq_fcc1(cpu_fsr
, cpu_env
);
1641 gen_helper_fcmpeq_fcc2(cpu_fsr
, cpu_env
);
1644 gen_helper_fcmpeq_fcc3(cpu_fsr
, cpu_env
);
1651 static inline void gen_op_fcmps(int fccno
, TCGv r_rs1
, TCGv r_rs2
)
1653 gen_helper_fcmps(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1656 static inline void gen_op_fcmpd(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1658 gen_helper_fcmpd(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1661 static inline void gen_op_fcmpq(int fccno
)
1663 gen_helper_fcmpq(cpu_fsr
, cpu_env
);
1666 static inline void gen_op_fcmpes(int fccno
, TCGv r_rs1
, TCGv r_rs2
)
1668 gen_helper_fcmpes(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1671 static inline void gen_op_fcmped(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1673 gen_helper_fcmped(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1676 static inline void gen_op_fcmpeq(int fccno
)
1678 gen_helper_fcmpeq(cpu_fsr
, cpu_env
);
1682 static void gen_op_fpexception_im(DisasContext
*dc
, int fsr_flags
)
1684 tcg_gen_andi_tl(cpu_fsr
, cpu_fsr
, FSR_FTT_NMASK
);
1685 tcg_gen_ori_tl(cpu_fsr
, cpu_fsr
, fsr_flags
);
1686 gen_exception(dc
, TT_FP_EXCP
);
1689 static int gen_trap_ifnofpu(DisasContext
*dc
)
1691 #if !defined(CONFIG_USER_ONLY)
1692 if (!dc
->fpu_enabled
) {
1693 gen_exception(dc
, TT_NFPU_INSN
);
1700 static inline void gen_op_clear_ieee_excp_and_FTT(void)
1702 tcg_gen_andi_tl(cpu_fsr
, cpu_fsr
, FSR_FTT_CEXC_NMASK
);
1705 static inline void gen_fop_FF(DisasContext
*dc
, int rd
, int rs
,
1706 void (*gen
)(TCGv_i32
, TCGv_ptr
, TCGv_i32
))
1710 src
= gen_load_fpr_F(dc
, rs
);
1711 dst
= gen_dest_fpr_F(dc
);
1713 gen(dst
, cpu_env
, src
);
1714 gen_helper_check_ieee_exceptions(cpu_fsr
, cpu_env
);
1716 gen_store_fpr_F(dc
, rd
, dst
);
1719 static inline void gen_ne_fop_FF(DisasContext
*dc
, int rd
, int rs
,
1720 void (*gen
)(TCGv_i32
, TCGv_i32
))
1724 src
= gen_load_fpr_F(dc
, rs
);
1725 dst
= gen_dest_fpr_F(dc
);
1729 gen_store_fpr_F(dc
, rd
, dst
);
1732 static inline void gen_fop_FFF(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1733 void (*gen
)(TCGv_i32
, TCGv_ptr
, TCGv_i32
, TCGv_i32
))
1735 TCGv_i32 dst
, src1
, src2
;
1737 src1
= gen_load_fpr_F(dc
, rs1
);
1738 src2
= gen_load_fpr_F(dc
, rs2
);
1739 dst
= gen_dest_fpr_F(dc
);
1741 gen(dst
, cpu_env
, src1
, src2
);
1742 gen_helper_check_ieee_exceptions(cpu_fsr
, cpu_env
);
1744 gen_store_fpr_F(dc
, rd
, dst
);
1747 #ifdef TARGET_SPARC64
1748 static inline void gen_ne_fop_FFF(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1749 void (*gen
)(TCGv_i32
, TCGv_i32
, TCGv_i32
))
1751 TCGv_i32 dst
, src1
, src2
;
1753 src1
= gen_load_fpr_F(dc
, rs1
);
1754 src2
= gen_load_fpr_F(dc
, rs2
);
1755 dst
= gen_dest_fpr_F(dc
);
1757 gen(dst
, src1
, src2
);
1759 gen_store_fpr_F(dc
, rd
, dst
);
1763 static inline void gen_fop_DD(DisasContext
*dc
, int rd
, int rs
,
1764 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i64
))
1768 src
= gen_load_fpr_D(dc
, rs
);
1769 dst
= gen_dest_fpr_D(dc
, rd
);
1771 gen(dst
, cpu_env
, src
);
1772 gen_helper_check_ieee_exceptions(cpu_fsr
, cpu_env
);
1774 gen_store_fpr_D(dc
, rd
, dst
);
1777 #ifdef TARGET_SPARC64
1778 static inline void gen_ne_fop_DD(DisasContext
*dc
, int rd
, int rs
,
1779 void (*gen
)(TCGv_i64
, TCGv_i64
))
1783 src
= gen_load_fpr_D(dc
, rs
);
1784 dst
= gen_dest_fpr_D(dc
, rd
);
1788 gen_store_fpr_D(dc
, rd
, dst
);
1792 static inline void gen_fop_DDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1793 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i64
, TCGv_i64
))
1795 TCGv_i64 dst
, src1
, src2
;
1797 src1
= gen_load_fpr_D(dc
, rs1
);
1798 src2
= gen_load_fpr_D(dc
, rs2
);
1799 dst
= gen_dest_fpr_D(dc
, rd
);
1801 gen(dst
, cpu_env
, src1
, src2
);
1802 gen_helper_check_ieee_exceptions(cpu_fsr
, cpu_env
);
1804 gen_store_fpr_D(dc
, rd
, dst
);
1807 #ifdef TARGET_SPARC64
1808 static inline void gen_ne_fop_DDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1809 void (*gen
)(TCGv_i64
, TCGv_i64
, TCGv_i64
))
1811 TCGv_i64 dst
, src1
, src2
;
1813 src1
= gen_load_fpr_D(dc
, rs1
);
1814 src2
= gen_load_fpr_D(dc
, rs2
);
1815 dst
= gen_dest_fpr_D(dc
, rd
);
1817 gen(dst
, src1
, src2
);
1819 gen_store_fpr_D(dc
, rd
, dst
);
1822 static inline void gen_gsr_fop_DDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1823 void (*gen
)(TCGv_i64
, TCGv_i64
, TCGv_i64
, TCGv_i64
))
1825 TCGv_i64 dst
, src1
, src2
;
1827 src1
= gen_load_fpr_D(dc
, rs1
);
1828 src2
= gen_load_fpr_D(dc
, rs2
);
1829 dst
= gen_dest_fpr_D(dc
, rd
);
1831 gen(dst
, cpu_gsr
, src1
, src2
);
1833 gen_store_fpr_D(dc
, rd
, dst
);
1836 static inline void gen_ne_fop_DDDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1837 void (*gen
)(TCGv_i64
, TCGv_i64
, TCGv_i64
, TCGv_i64
))
1839 TCGv_i64 dst
, src0
, src1
, src2
;
1841 src1
= gen_load_fpr_D(dc
, rs1
);
1842 src2
= gen_load_fpr_D(dc
, rs2
);
1843 src0
= gen_load_fpr_D(dc
, rd
);
1844 dst
= gen_dest_fpr_D(dc
, rd
);
1846 gen(dst
, src0
, src1
, src2
);
1848 gen_store_fpr_D(dc
, rd
, dst
);
1852 static inline void gen_fop_QQ(DisasContext
*dc
, int rd
, int rs
,
1853 void (*gen
)(TCGv_ptr
))
1855 gen_op_load_fpr_QT1(QFPREG(rs
));
1858 gen_helper_check_ieee_exceptions(cpu_fsr
, cpu_env
);
1860 gen_op_store_QT0_fpr(QFPREG(rd
));
1861 gen_update_fprs_dirty(dc
, QFPREG(rd
));
1864 #ifdef TARGET_SPARC64
1865 static inline void gen_ne_fop_QQ(DisasContext
*dc
, int rd
, int rs
,
1866 void (*gen
)(TCGv_ptr
))
1868 gen_op_load_fpr_QT1(QFPREG(rs
));
1872 gen_op_store_QT0_fpr(QFPREG(rd
));
1873 gen_update_fprs_dirty(dc
, QFPREG(rd
));
1877 static inline void gen_fop_QQQ(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1878 void (*gen
)(TCGv_ptr
))
1880 gen_op_load_fpr_QT0(QFPREG(rs1
));
1881 gen_op_load_fpr_QT1(QFPREG(rs2
));
1884 gen_helper_check_ieee_exceptions(cpu_fsr
, cpu_env
);
1886 gen_op_store_QT0_fpr(QFPREG(rd
));
1887 gen_update_fprs_dirty(dc
, QFPREG(rd
));
1890 static inline void gen_fop_DFF(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1891 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i32
, TCGv_i32
))
1894 TCGv_i32 src1
, src2
;
1896 src1
= gen_load_fpr_F(dc
, rs1
);
1897 src2
= gen_load_fpr_F(dc
, rs2
);
1898 dst
= gen_dest_fpr_D(dc
, rd
);
1900 gen(dst
, cpu_env
, src1
, src2
);
1901 gen_helper_check_ieee_exceptions(cpu_fsr
, cpu_env
);
1903 gen_store_fpr_D(dc
, rd
, dst
);
1906 static inline void gen_fop_QDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1907 void (*gen
)(TCGv_ptr
, TCGv_i64
, TCGv_i64
))
1909 TCGv_i64 src1
, src2
;
1911 src1
= gen_load_fpr_D(dc
, rs1
);
1912 src2
= gen_load_fpr_D(dc
, rs2
);
1914 gen(cpu_env
, src1
, src2
);
1915 gen_helper_check_ieee_exceptions(cpu_fsr
, cpu_env
);
1917 gen_op_store_QT0_fpr(QFPREG(rd
));
1918 gen_update_fprs_dirty(dc
, QFPREG(rd
));
1921 #ifdef TARGET_SPARC64
1922 static inline void gen_fop_DF(DisasContext
*dc
, int rd
, int rs
,
1923 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i32
))
1928 src
= gen_load_fpr_F(dc
, rs
);
1929 dst
= gen_dest_fpr_D(dc
, rd
);
1931 gen(dst
, cpu_env
, src
);
1932 gen_helper_check_ieee_exceptions(cpu_fsr
, cpu_env
);
1934 gen_store_fpr_D(dc
, rd
, dst
);
1938 static inline void gen_ne_fop_DF(DisasContext
*dc
, int rd
, int rs
,
1939 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i32
))
1944 src
= gen_load_fpr_F(dc
, rs
);
1945 dst
= gen_dest_fpr_D(dc
, rd
);
1947 gen(dst
, cpu_env
, src
);
1949 gen_store_fpr_D(dc
, rd
, dst
);
1952 static inline void gen_fop_FD(DisasContext
*dc
, int rd
, int rs
,
1953 void (*gen
)(TCGv_i32
, TCGv_ptr
, TCGv_i64
))
1958 src
= gen_load_fpr_D(dc
, rs
);
1959 dst
= gen_dest_fpr_F(dc
);
1961 gen(dst
, cpu_env
, src
);
1962 gen_helper_check_ieee_exceptions(cpu_fsr
, cpu_env
);
1964 gen_store_fpr_F(dc
, rd
, dst
);
1967 static inline void gen_fop_FQ(DisasContext
*dc
, int rd
, int rs
,
1968 void (*gen
)(TCGv_i32
, TCGv_ptr
))
1972 gen_op_load_fpr_QT1(QFPREG(rs
));
1973 dst
= gen_dest_fpr_F(dc
);
1976 gen_helper_check_ieee_exceptions(cpu_fsr
, cpu_env
);
1978 gen_store_fpr_F(dc
, rd
, dst
);
1981 static inline void gen_fop_DQ(DisasContext
*dc
, int rd
, int rs
,
1982 void (*gen
)(TCGv_i64
, TCGv_ptr
))
1986 gen_op_load_fpr_QT1(QFPREG(rs
));
1987 dst
= gen_dest_fpr_D(dc
, rd
);
1990 gen_helper_check_ieee_exceptions(cpu_fsr
, cpu_env
);
1992 gen_store_fpr_D(dc
, rd
, dst
);
1995 static inline void gen_ne_fop_QF(DisasContext
*dc
, int rd
, int rs
,
1996 void (*gen
)(TCGv_ptr
, TCGv_i32
))
2000 src
= gen_load_fpr_F(dc
, rs
);
2004 gen_op_store_QT0_fpr(QFPREG(rd
));
2005 gen_update_fprs_dirty(dc
, QFPREG(rd
));
2008 static inline void gen_ne_fop_QD(DisasContext
*dc
, int rd
, int rs
,
2009 void (*gen
)(TCGv_ptr
, TCGv_i64
))
2013 src
= gen_load_fpr_D(dc
, rs
);
2017 gen_op_store_QT0_fpr(QFPREG(rd
));
2018 gen_update_fprs_dirty(dc
, QFPREG(rd
));
2021 static void gen_swap(DisasContext
*dc
, TCGv dst
, TCGv src
,
2022 TCGv addr
, int mmu_idx
, MemOp memop
)
2024 gen_address_mask(dc
, addr
);
2025 tcg_gen_atomic_xchg_tl(dst
, addr
, src
, mmu_idx
, memop
);
2028 static void gen_ldstub(DisasContext
*dc
, TCGv dst
, TCGv addr
, int mmu_idx
)
2030 TCGv m1
= tcg_const_tl(0xff);
2031 gen_address_mask(dc
, addr
);
2032 tcg_gen_atomic_xchg_tl(dst
, addr
, m1
, mmu_idx
, MO_UB
);
2037 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
2056 static DisasASI
get_asi(DisasContext
*dc
, int insn
, MemOp memop
)
2058 int asi
= GET_FIELD(insn
, 19, 26);
2059 ASIType type
= GET_ASI_HELPER
;
2060 int mem_idx
= dc
->mem_idx
;
2062 #ifndef TARGET_SPARC64
2063 /* Before v9, all asis are immediate and privileged. */
2065 gen_exception(dc
, TT_ILL_INSN
);
2066 type
= GET_ASI_EXCP
;
2067 } else if (supervisor(dc
)
2068 /* Note that LEON accepts ASI_USERDATA in user mode, for
2069 use with CASA. Also note that previous versions of
2070 QEMU allowed (and old versions of gcc emitted) ASI_P
2071 for LEON, which is incorrect. */
2072 || (asi
== ASI_USERDATA
2073 && (dc
->def
->features
& CPU_FEATURE_CASA
))) {
2075 case ASI_USERDATA
: /* User data access */
2076 mem_idx
= MMU_USER_IDX
;
2077 type
= GET_ASI_DIRECT
;
2079 case ASI_KERNELDATA
: /* Supervisor data access */
2080 mem_idx
= MMU_KERNEL_IDX
;
2081 type
= GET_ASI_DIRECT
;
2083 case ASI_M_BYPASS
: /* MMU passthrough */
2084 case ASI_LEON_BYPASS
: /* LEON MMU passthrough */
2085 mem_idx
= MMU_PHYS_IDX
;
2086 type
= GET_ASI_DIRECT
;
2088 case ASI_M_BCOPY
: /* Block copy, sta access */
2089 mem_idx
= MMU_KERNEL_IDX
;
2090 type
= GET_ASI_BCOPY
;
2092 case ASI_M_BFILL
: /* Block fill, stda access */
2093 mem_idx
= MMU_KERNEL_IDX
;
2094 type
= GET_ASI_BFILL
;
2098 /* MMU_PHYS_IDX is used when the MMU is disabled to passthrough the
2099 * permissions check in get_physical_address(..).
2101 mem_idx
= (dc
->mem_idx
== MMU_PHYS_IDX
) ? MMU_PHYS_IDX
: mem_idx
;
2103 gen_exception(dc
, TT_PRIV_INSN
);
2104 type
= GET_ASI_EXCP
;
2110 /* With v9, all asis below 0x80 are privileged. */
2111 /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
2112 down that bit into DisasContext. For the moment that's ok,
2113 since the direct implementations below doesn't have any ASIs
2114 in the restricted [0x30, 0x7f] range, and the check will be
2115 done properly in the helper. */
2116 if (!supervisor(dc
) && asi
< 0x80) {
2117 gen_exception(dc
, TT_PRIV_ACT
);
2118 type
= GET_ASI_EXCP
;
2121 case ASI_REAL
: /* Bypass */
2122 case ASI_REAL_IO
: /* Bypass, non-cacheable */
2123 case ASI_REAL_L
: /* Bypass LE */
2124 case ASI_REAL_IO_L
: /* Bypass, non-cacheable LE */
2125 case ASI_TWINX_REAL
: /* Real address, twinx */
2126 case ASI_TWINX_REAL_L
: /* Real address, twinx, LE */
2127 case ASI_QUAD_LDD_PHYS
:
2128 case ASI_QUAD_LDD_PHYS_L
:
2129 mem_idx
= MMU_PHYS_IDX
;
2131 case ASI_N
: /* Nucleus */
2132 case ASI_NL
: /* Nucleus LE */
2135 case ASI_NUCLEUS_QUAD_LDD
:
2136 case ASI_NUCLEUS_QUAD_LDD_L
:
2137 if (hypervisor(dc
)) {
2138 mem_idx
= MMU_PHYS_IDX
;
2140 mem_idx
= MMU_NUCLEUS_IDX
;
2143 case ASI_AIUP
: /* As if user primary */
2144 case ASI_AIUPL
: /* As if user primary LE */
2145 case ASI_TWINX_AIUP
:
2146 case ASI_TWINX_AIUP_L
:
2147 case ASI_BLK_AIUP_4V
:
2148 case ASI_BLK_AIUP_L_4V
:
2151 mem_idx
= MMU_USER_IDX
;
2153 case ASI_AIUS
: /* As if user secondary */
2154 case ASI_AIUSL
: /* As if user secondary LE */
2155 case ASI_TWINX_AIUS
:
2156 case ASI_TWINX_AIUS_L
:
2157 case ASI_BLK_AIUS_4V
:
2158 case ASI_BLK_AIUS_L_4V
:
2161 mem_idx
= MMU_USER_SECONDARY_IDX
;
2163 case ASI_S
: /* Secondary */
2164 case ASI_SL
: /* Secondary LE */
2167 case ASI_BLK_COMMIT_S
:
2174 if (mem_idx
== MMU_USER_IDX
) {
2175 mem_idx
= MMU_USER_SECONDARY_IDX
;
2176 } else if (mem_idx
== MMU_KERNEL_IDX
) {
2177 mem_idx
= MMU_KERNEL_SECONDARY_IDX
;
2180 case ASI_P
: /* Primary */
2181 case ASI_PL
: /* Primary LE */
2184 case ASI_BLK_COMMIT_P
:
2208 type
= GET_ASI_DIRECT
;
2210 case ASI_TWINX_REAL
:
2211 case ASI_TWINX_REAL_L
:
2214 case ASI_TWINX_AIUP
:
2215 case ASI_TWINX_AIUP_L
:
2216 case ASI_TWINX_AIUS
:
2217 case ASI_TWINX_AIUS_L
:
2222 case ASI_QUAD_LDD_PHYS
:
2223 case ASI_QUAD_LDD_PHYS_L
:
2224 case ASI_NUCLEUS_QUAD_LDD
:
2225 case ASI_NUCLEUS_QUAD_LDD_L
:
2226 type
= GET_ASI_DTWINX
;
2228 case ASI_BLK_COMMIT_P
:
2229 case ASI_BLK_COMMIT_S
:
2230 case ASI_BLK_AIUP_4V
:
2231 case ASI_BLK_AIUP_L_4V
:
2234 case ASI_BLK_AIUS_4V
:
2235 case ASI_BLK_AIUS_L_4V
:
2242 type
= GET_ASI_BLOCK
;
2249 type
= GET_ASI_SHORT
;
2256 type
= GET_ASI_SHORT
;
2259 /* The little-endian asis all have bit 3 set. */
2266 return (DisasASI
){ type
, asi
, mem_idx
, memop
};
2269 static void gen_ld_asi(DisasContext
*dc
, TCGv dst
, TCGv addr
,
2270 int insn
, MemOp memop
)
2272 DisasASI da
= get_asi(dc
, insn
, memop
);
2277 case GET_ASI_DTWINX
: /* Reserved for ldda. */
2278 gen_exception(dc
, TT_ILL_INSN
);
2280 case GET_ASI_DIRECT
:
2281 gen_address_mask(dc
, addr
);
2282 tcg_gen_qemu_ld_tl(dst
, addr
, da
.mem_idx
, da
.memop
);
2286 TCGv_i32 r_asi
= tcg_const_i32(da
.asi
);
2287 TCGv_i32 r_mop
= tcg_const_i32(memop
);
2290 #ifdef TARGET_SPARC64
2291 gen_helper_ld_asi(dst
, cpu_env
, addr
, r_asi
, r_mop
);
2294 TCGv_i64 t64
= tcg_temp_new_i64();
2295 gen_helper_ld_asi(t64
, cpu_env
, addr
, r_asi
, r_mop
);
2296 tcg_gen_trunc_i64_tl(dst
, t64
);
2297 tcg_temp_free_i64(t64
);
2300 tcg_temp_free_i32(r_mop
);
2301 tcg_temp_free_i32(r_asi
);
2307 static void gen_st_asi(DisasContext
*dc
, TCGv src
, TCGv addr
,
2308 int insn
, MemOp memop
)
2310 DisasASI da
= get_asi(dc
, insn
, memop
);
2315 case GET_ASI_DTWINX
: /* Reserved for stda. */
2316 #ifndef TARGET_SPARC64
2317 gen_exception(dc
, TT_ILL_INSN
);
2320 if (!(dc
->def
->features
& CPU_FEATURE_HYPV
)) {
2321 /* Pre OpenSPARC CPUs don't have these */
2322 gen_exception(dc
, TT_ILL_INSN
);
2325 /* in OpenSPARC T1+ CPUs TWINX ASIs in store instructions
2326 * are ST_BLKINIT_ ASIs */
2329 case GET_ASI_DIRECT
:
2330 gen_address_mask(dc
, addr
);
2331 tcg_gen_qemu_st_tl(src
, addr
, da
.mem_idx
, da
.memop
);
2333 #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
2335 /* Copy 32 bytes from the address in SRC to ADDR. */
2336 /* ??? The original qemu code suggests 4-byte alignment, dropping
2337 the low bits, but the only place I can see this used is in the
2338 Linux kernel with 32 byte alignment, which would make more sense
2339 as a cacheline-style operation. */
2341 TCGv saddr
= tcg_temp_new();
2342 TCGv daddr
= tcg_temp_new();
2343 TCGv four
= tcg_const_tl(4);
2344 TCGv_i32 tmp
= tcg_temp_new_i32();
2347 tcg_gen_andi_tl(saddr
, src
, -4);
2348 tcg_gen_andi_tl(daddr
, addr
, -4);
2349 for (i
= 0; i
< 32; i
+= 4) {
2350 /* Since the loads and stores are paired, allow the
2351 copy to happen in the host endianness. */
2352 tcg_gen_qemu_ld_i32(tmp
, saddr
, da
.mem_idx
, MO_UL
);
2353 tcg_gen_qemu_st_i32(tmp
, daddr
, da
.mem_idx
, MO_UL
);
2354 tcg_gen_add_tl(saddr
, saddr
, four
);
2355 tcg_gen_add_tl(daddr
, daddr
, four
);
2358 tcg_temp_free(saddr
);
2359 tcg_temp_free(daddr
);
2360 tcg_temp_free(four
);
2361 tcg_temp_free_i32(tmp
);
2367 TCGv_i32 r_asi
= tcg_const_i32(da
.asi
);
2368 TCGv_i32 r_mop
= tcg_const_i32(memop
& MO_SIZE
);
2371 #ifdef TARGET_SPARC64
2372 gen_helper_st_asi(cpu_env
, addr
, src
, r_asi
, r_mop
);
2375 TCGv_i64 t64
= tcg_temp_new_i64();
2376 tcg_gen_extu_tl_i64(t64
, src
);
2377 gen_helper_st_asi(cpu_env
, addr
, t64
, r_asi
, r_mop
);
2378 tcg_temp_free_i64(t64
);
2381 tcg_temp_free_i32(r_mop
);
2382 tcg_temp_free_i32(r_asi
);
2384 /* A write to a TLB register may alter page maps. End the TB. */
2385 dc
->npc
= DYNAMIC_PC
;
2391 static void gen_swap_asi(DisasContext
*dc
, TCGv dst
, TCGv src
,
2392 TCGv addr
, int insn
)
2394 DisasASI da
= get_asi(dc
, insn
, MO_TEUL
);
2399 case GET_ASI_DIRECT
:
2400 gen_swap(dc
, dst
, src
, addr
, da
.mem_idx
, da
.memop
);
2403 /* ??? Should be DAE_invalid_asi. */
2404 gen_exception(dc
, TT_DATA_ACCESS
);
2409 static void gen_cas_asi(DisasContext
*dc
, TCGv addr
, TCGv cmpv
,
2412 DisasASI da
= get_asi(dc
, insn
, MO_TEUL
);
2418 case GET_ASI_DIRECT
:
2419 oldv
= tcg_temp_new();
2420 tcg_gen_atomic_cmpxchg_tl(oldv
, addr
, cmpv
, gen_load_gpr(dc
, rd
),
2421 da
.mem_idx
, da
.memop
);
2422 gen_store_gpr(dc
, rd
, oldv
);
2423 tcg_temp_free(oldv
);
2426 /* ??? Should be DAE_invalid_asi. */
2427 gen_exception(dc
, TT_DATA_ACCESS
);
2432 static void gen_ldstub_asi(DisasContext
*dc
, TCGv dst
, TCGv addr
, int insn
)
2434 DisasASI da
= get_asi(dc
, insn
, MO_UB
);
2439 case GET_ASI_DIRECT
:
2440 gen_ldstub(dc
, dst
, addr
, da
.mem_idx
);
2443 /* ??? In theory, this should be raise DAE_invalid_asi.
2444 But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1. */
2445 if (tb_cflags(dc
->base
.tb
) & CF_PARALLEL
) {
2446 gen_helper_exit_atomic(cpu_env
);
2448 TCGv_i32 r_asi
= tcg_const_i32(da
.asi
);
2449 TCGv_i32 r_mop
= tcg_const_i32(MO_UB
);
2453 t64
= tcg_temp_new_i64();
2454 gen_helper_ld_asi(t64
, cpu_env
, addr
, r_asi
, r_mop
);
2456 s64
= tcg_const_i64(0xff);
2457 gen_helper_st_asi(cpu_env
, addr
, s64
, r_asi
, r_mop
);
2458 tcg_temp_free_i64(s64
);
2459 tcg_temp_free_i32(r_mop
);
2460 tcg_temp_free_i32(r_asi
);
2462 tcg_gen_trunc_i64_tl(dst
, t64
);
2463 tcg_temp_free_i64(t64
);
2466 dc
->npc
= DYNAMIC_PC
;
2473 #ifdef TARGET_SPARC64
2474 static void gen_ldf_asi(DisasContext
*dc
, TCGv addr
,
2475 int insn
, int size
, int rd
)
2477 DisasASI da
= get_asi(dc
, insn
, (size
== 4 ? MO_TEUL
: MO_TEQ
));
2485 case GET_ASI_DIRECT
:
2486 gen_address_mask(dc
, addr
);
2489 d32
= gen_dest_fpr_F(dc
);
2490 tcg_gen_qemu_ld_i32(d32
, addr
, da
.mem_idx
, da
.memop
);
2491 gen_store_fpr_F(dc
, rd
, d32
);
2494 tcg_gen_qemu_ld_i64(cpu_fpr
[rd
/ 2], addr
, da
.mem_idx
,
2495 da
.memop
| MO_ALIGN_4
);
2498 d64
= tcg_temp_new_i64();
2499 tcg_gen_qemu_ld_i64(d64
, addr
, da
.mem_idx
, da
.memop
| MO_ALIGN_4
);
2500 tcg_gen_addi_tl(addr
, addr
, 8);
2501 tcg_gen_qemu_ld_i64(cpu_fpr
[rd
/2+1], addr
, da
.mem_idx
,
2502 da
.memop
| MO_ALIGN_4
);
2503 tcg_gen_mov_i64(cpu_fpr
[rd
/ 2], d64
);
2504 tcg_temp_free_i64(d64
);
2507 g_assert_not_reached();
2512 /* Valid for lddfa on aligned registers only. */
2513 if (size
== 8 && (rd
& 7) == 0) {
2518 gen_address_mask(dc
, addr
);
2520 /* The first operation checks required alignment. */
2521 memop
= da
.memop
| MO_ALIGN_64
;
2522 eight
= tcg_const_tl(8);
2523 for (i
= 0; ; ++i
) {
2524 tcg_gen_qemu_ld_i64(cpu_fpr
[rd
/ 2 + i
], addr
,
2529 tcg_gen_add_tl(addr
, addr
, eight
);
2532 tcg_temp_free(eight
);
2534 gen_exception(dc
, TT_ILL_INSN
);
2539 /* Valid for lddfa only. */
2541 gen_address_mask(dc
, addr
);
2542 tcg_gen_qemu_ld_i64(cpu_fpr
[rd
/ 2], addr
, da
.mem_idx
, da
.memop
);
2544 gen_exception(dc
, TT_ILL_INSN
);
2550 TCGv_i32 r_asi
= tcg_const_i32(da
.asi
);
2551 TCGv_i32 r_mop
= tcg_const_i32(da
.memop
);
2554 /* According to the table in the UA2011 manual, the only
2555 other asis that are valid for ldfa/lddfa/ldqfa are
2556 the NO_FAULT asis. We still need a helper for these,
2557 but we can just use the integer asi helper for them. */
2560 d64
= tcg_temp_new_i64();
2561 gen_helper_ld_asi(d64
, cpu_env
, addr
, r_asi
, r_mop
);
2562 d32
= gen_dest_fpr_F(dc
);
2563 tcg_gen_extrl_i64_i32(d32
, d64
);
2564 tcg_temp_free_i64(d64
);
2565 gen_store_fpr_F(dc
, rd
, d32
);
2568 gen_helper_ld_asi(cpu_fpr
[rd
/ 2], cpu_env
, addr
, r_asi
, r_mop
);
2571 d64
= tcg_temp_new_i64();
2572 gen_helper_ld_asi(d64
, cpu_env
, addr
, r_asi
, r_mop
);
2573 tcg_gen_addi_tl(addr
, addr
, 8);
2574 gen_helper_ld_asi(cpu_fpr
[rd
/2+1], cpu_env
, addr
, r_asi
, r_mop
);
2575 tcg_gen_mov_i64(cpu_fpr
[rd
/ 2], d64
);
2576 tcg_temp_free_i64(d64
);
2579 g_assert_not_reached();
2581 tcg_temp_free_i32(r_mop
);
2582 tcg_temp_free_i32(r_asi
);
2588 static void gen_stf_asi(DisasContext
*dc
, TCGv addr
,
2589 int insn
, int size
, int rd
)
2591 DisasASI da
= get_asi(dc
, insn
, (size
== 4 ? MO_TEUL
: MO_TEQ
));
2598 case GET_ASI_DIRECT
:
2599 gen_address_mask(dc
, addr
);
2602 d32
= gen_load_fpr_F(dc
, rd
);
2603 tcg_gen_qemu_st_i32(d32
, addr
, da
.mem_idx
, da
.memop
);
2606 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/ 2], addr
, da
.mem_idx
,
2607 da
.memop
| MO_ALIGN_4
);
2610 /* Only 4-byte alignment required. However, it is legal for the
2611 cpu to signal the alignment fault, and the OS trap handler is
2612 required to fix it up. Requiring 16-byte alignment here avoids
2613 having to probe the second page before performing the first
2615 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/ 2], addr
, da
.mem_idx
,
2616 da
.memop
| MO_ALIGN_16
);
2617 tcg_gen_addi_tl(addr
, addr
, 8);
2618 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/2+1], addr
, da
.mem_idx
, da
.memop
);
2621 g_assert_not_reached();
2626 /* Valid for stdfa on aligned registers only. */
2627 if (size
== 8 && (rd
& 7) == 0) {
2632 gen_address_mask(dc
, addr
);
2634 /* The first operation checks required alignment. */
2635 memop
= da
.memop
| MO_ALIGN_64
;
2636 eight
= tcg_const_tl(8);
2637 for (i
= 0; ; ++i
) {
2638 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/ 2 + i
], addr
,
2643 tcg_gen_add_tl(addr
, addr
, eight
);
2646 tcg_temp_free(eight
);
2648 gen_exception(dc
, TT_ILL_INSN
);
2653 /* Valid for stdfa only. */
2655 gen_address_mask(dc
, addr
);
2656 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/ 2], addr
, da
.mem_idx
, da
.memop
);
2658 gen_exception(dc
, TT_ILL_INSN
);
2663 /* According to the table in the UA2011 manual, the only
2664 other asis that are valid for ldfa/lddfa/ldqfa are
2665 the PST* asis, which aren't currently handled. */
2666 gen_exception(dc
, TT_ILL_INSN
);
2671 static void gen_ldda_asi(DisasContext
*dc
, TCGv addr
, int insn
, int rd
)
2673 DisasASI da
= get_asi(dc
, insn
, MO_TEQ
);
2674 TCGv_i64 hi
= gen_dest_gpr(dc
, rd
);
2675 TCGv_i64 lo
= gen_dest_gpr(dc
, rd
+ 1);
2681 case GET_ASI_DTWINX
:
2682 gen_address_mask(dc
, addr
);
2683 tcg_gen_qemu_ld_i64(hi
, addr
, da
.mem_idx
, da
.memop
| MO_ALIGN_16
);
2684 tcg_gen_addi_tl(addr
, addr
, 8);
2685 tcg_gen_qemu_ld_i64(lo
, addr
, da
.mem_idx
, da
.memop
);
2688 case GET_ASI_DIRECT
:
2690 TCGv_i64 tmp
= tcg_temp_new_i64();
2692 gen_address_mask(dc
, addr
);
2693 tcg_gen_qemu_ld_i64(tmp
, addr
, da
.mem_idx
, da
.memop
);
2695 /* Note that LE ldda acts as if each 32-bit register
2696 result is byte swapped. Having just performed one
2697 64-bit bswap, we need now to swap the writebacks. */
2698 if ((da
.memop
& MO_BSWAP
) == MO_TE
) {
2699 tcg_gen_extr32_i64(lo
, hi
, tmp
);
2701 tcg_gen_extr32_i64(hi
, lo
, tmp
);
2703 tcg_temp_free_i64(tmp
);
2708 /* ??? In theory we've handled all of the ASIs that are valid
2709 for ldda, and this should raise DAE_invalid_asi. However,
2710 real hardware allows others. This can be seen with e.g.
2711 FreeBSD 10.3 wrt ASI_IC_TAG. */
2713 TCGv_i32 r_asi
= tcg_const_i32(da
.asi
);
2714 TCGv_i32 r_mop
= tcg_const_i32(da
.memop
);
2715 TCGv_i64 tmp
= tcg_temp_new_i64();
2718 gen_helper_ld_asi(tmp
, cpu_env
, addr
, r_asi
, r_mop
);
2719 tcg_temp_free_i32(r_asi
);
2720 tcg_temp_free_i32(r_mop
);
2723 if ((da
.memop
& MO_BSWAP
) == MO_TE
) {
2724 tcg_gen_extr32_i64(lo
, hi
, tmp
);
2726 tcg_gen_extr32_i64(hi
, lo
, tmp
);
2728 tcg_temp_free_i64(tmp
);
2733 gen_store_gpr(dc
, rd
, hi
);
2734 gen_store_gpr(dc
, rd
+ 1, lo
);
static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                         int insn, int rd)
{
    DisasASI da = get_asi(dc, insn, MO_TEQ);
    TCGv lo = gen_load_gpr(dc, rd + 1);

    switch (da.type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DTWINX:
        gen_address_mask(dc, addr);
        tcg_gen_qemu_st_i64(hi, addr, da.mem_idx, da.memop | MO_ALIGN_16);
        tcg_gen_addi_tl(addr, addr, 8);
        tcg_gen_qemu_st_i64(lo, addr, da.mem_idx, da.memop);
        break;

    case GET_ASI_DIRECT:
        {
            TCGv_i64 t64 = tcg_temp_new_i64();

            /* Note that LE stda acts as if each 32-bit register result is
               byte swapped.  We will perform one 64-bit LE store, so now
               we must swap the order of the construction.  */
            if ((da.memop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat32_i64(t64, lo, hi);
            } else {
                tcg_gen_concat32_i64(t64, hi, lo);
            }
            gen_address_mask(dc, addr);
            tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop);
            tcg_temp_free_i64(t64);
        }
        break;

    default:
        /* ??? In theory we've handled all of the ASIs that are valid
           for stda, and this should raise DAE_invalid_asi.  */
        {
            TCGv_i32 r_asi = tcg_const_i32(da.asi);
            TCGv_i32 r_mop = tcg_const_i32(da.memop);
            TCGv_i64 t64 = tcg_temp_new_i64();

            /* See above.  */
            if ((da.memop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat32_i64(t64, lo, hi);
            } else {
                tcg_gen_concat32_i64(t64, hi, lo);
            }

            save_state(dc);
            gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
            tcg_temp_free_i32(r_mop);
            tcg_temp_free_i32(r_asi);
            tcg_temp_free_i64(t64);
        }
        break;
    }
}

static void gen_casx_asi(DisasContext *dc, TCGv addr, TCGv cmpv,
                         int insn, int rd)
{
    DisasASI da = get_asi(dc, insn, MO_TEQ);
    TCGv oldv;

    switch (da.type) {
    case GET_ASI_EXCP:
        return;
    case GET_ASI_DIRECT:
        oldv = tcg_temp_new();
        tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd),
                                  da.mem_idx, da.memop);
        gen_store_gpr(dc, rd, oldv);
        tcg_temp_free(oldv);
        break;
    default:
        /* ??? Should be DAE_invalid_asi.  */
        gen_exception(dc, TT_DATA_ACCESS);
        break;
    }
}

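/* Illustrative note on gen_casx_asi above: the whole compare-and-swap is
   a single tcg_gen_atomic_cmpxchg_tl.  The comparison value arrives in
   cmpv, the current contents of register rd supply the value to be
   written on a match, and rd always receives the old memory contents
   (oldv), which matches the architectural definition of the CAS/CASX
   instructions.  */
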
#elif !defined(CONFIG_USER_ONLY)
static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
{
    /* ??? Work around an apparent bug in Ubuntu gcc 4.8.2-10ubuntu2+12,
       whereby "rd + 1" elicits "error: array subscript is above array".
       Since we have already asserted that rd is even, the semantics
       are unchanged.  */
    TCGv lo = gen_dest_gpr(dc, rd | 1);
    TCGv hi = gen_dest_gpr(dc, rd);
    TCGv_i64 t64 = tcg_temp_new_i64();
    DisasASI da = get_asi(dc, insn, MO_TEQ);

    switch (da.type) {
    case GET_ASI_EXCP:
        tcg_temp_free_i64(t64);
        return;
    case GET_ASI_DIRECT:
        gen_address_mask(dc, addr);
        tcg_gen_qemu_ld_i64(t64, addr, da.mem_idx, da.memop);
        break;
    default:
        {
            TCGv_i32 r_asi = tcg_const_i32(da.asi);
            TCGv_i32 r_mop = tcg_const_i32(MO_Q);

            save_state(dc);
            gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop);
            tcg_temp_free_i32(r_mop);
            tcg_temp_free_i32(r_asi);
        }
        break;
    }

    tcg_gen_extr_i64_i32(lo, hi, t64);
    tcg_temp_free_i64(t64);
    gen_store_gpr(dc, rd | 1, lo);
    gen_store_gpr(dc, rd, hi);
}

static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                         int insn, int rd)
{
    DisasASI da = get_asi(dc, insn, MO_TEQ);
    TCGv lo = gen_load_gpr(dc, rd + 1);
    TCGv_i64 t64 = tcg_temp_new_i64();

    tcg_gen_concat_tl_i64(t64, lo, hi);

    switch (da.type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DIRECT:
        gen_address_mask(dc, addr);
        tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop);
        break;
    case GET_ASI_BFILL:
        /* Store 32 bytes of T64 to ADDR.  */
        /* ??? The original qemu code suggests 8-byte alignment, dropping
           the low bits, but the only place I can see this used is in the
           Linux kernel with 32 byte alignment, which would make more sense
           as a cacheline-style operation.  */
        {
            TCGv d_addr = tcg_temp_new();
            TCGv eight = tcg_const_tl(8);
            int i;

            tcg_gen_andi_tl(d_addr, addr, -8);
            for (i = 0; i < 32; i += 8) {
                tcg_gen_qemu_st_i64(t64, d_addr, da.mem_idx, da.memop);
                tcg_gen_add_tl(d_addr, d_addr, eight);
            }
            tcg_temp_free(d_addr);
            tcg_temp_free(eight);
        }
        break;
    default:
        {
            TCGv_i32 r_asi = tcg_const_i32(da.asi);
            TCGv_i32 r_mop = tcg_const_i32(MO_Q);

            save_state(dc);
            gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
            tcg_temp_free_i32(r_mop);
            tcg_temp_free_i32(r_asi);
        }
        break;
    }

    tcg_temp_free_i64(t64);
}

static TCGv get_src1(DisasContext *dc, unsigned int insn)
{
    unsigned int rs1 = GET_FIELD(insn, 13, 17);
    return gen_load_gpr(dc, rs1);
}

static TCGv get_src2(DisasContext *dc, unsigned int insn)
{
    if (IS_IMM) { /* immediate */
        target_long simm = GET_FIELDs(insn, 19, 31);
        TCGv t = get_temp_tl(dc);
        tcg_gen_movi_tl(t, simm);
        return t;
    } else {      /* register */
        unsigned int rs2 = GET_FIELD(insn, 27, 31);
        return gen_load_gpr(dc, rs2);
    }
}

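/* Worked example for the immediate path in get_src2 above:
   GET_FIELDs(insn, 19, 31) reduces to (insn & 0x1fff) sign-extended over
   13 bits, so an encoded simm13 of 0x1fff yields simm = -1 while 0x0fff
   yields simm = 4095.  */
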
#ifdef TARGET_SPARC64
static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    TCGv_i32 c32, zero, dst, s1, s2;

    /* We have two choices here: extend the 32 bit data and use movcond_i64,
       or fold the comparison down to 32 bits and use movcond_i32.  Choose
       the latter.  */
    c32 = tcg_temp_new_i32();
    if (cmp->is_bool) {
        tcg_gen_extrl_i64_i32(c32, cmp->c1);
    } else {
        TCGv_i64 c64 = tcg_temp_new_i64();
        tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2);
        tcg_gen_extrl_i64_i32(c32, c64);
        tcg_temp_free_i64(c64);
    }

    s1 = gen_load_fpr_F(dc, rs);
    s2 = gen_load_fpr_F(dc, rd);
    dst = gen_dest_fpr_F(dc);
    zero = tcg_const_i32(0);

    tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);

    tcg_temp_free_i32(c32);
    tcg_temp_free_i32(zero);
    gen_store_fpr_F(dc, rd, dst);
}

static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
    tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, cmp->c2,
                        gen_load_fpr_D(dc, rs),
                        gen_load_fpr_D(dc, rd));
    gen_store_fpr_D(dc, rd, dst);
}

static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    int qd = QFPREG(rd);
    int qs = QFPREG(rs);

    tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, cmp->c2,
                        cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
    tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, cmp->c2,
                        cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);

    gen_update_fprs_dirty(dc, qd);
}

#ifndef CONFIG_USER_ONLY
static inline void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr, TCGv_env cpu_env)
{
    TCGv_i32 r_tl = tcg_temp_new_i32();

    /* load env->tl into r_tl */
    tcg_gen_ld_i32(r_tl, cpu_env, offsetof(CPUSPARCState, tl));

    /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
    tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);

    /* calculate offset to current trap state from env->ts, reuse r_tl */
    tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
    tcg_gen_addi_ptr(r_tsptr, cpu_env, offsetof(CPUSPARCState, ts));

    /* tsptr = env->ts[env->tl & MAXTL_MASK] */
    {
        TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
        tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
        tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
        tcg_temp_free_ptr(r_tl_tmp);
    }

    tcg_temp_free_i32(r_tl);
}
#endif

static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2,
                     int width, bool cc, bool left)
{
    TCGv lo1, lo2, t1, t2;
    uint64_t amask, tabl, tabr;
    int shift, imask, omask;

    if (cc) {
        tcg_gen_mov_tl(cpu_cc_src, s1);
        tcg_gen_mov_tl(cpu_cc_src2, s2);
        tcg_gen_sub_tl(cpu_cc_dst, s1, s2);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
        dc->cc_op = CC_OP_SUB;
    }

    /* Theory of operation: there are two tables, left and right (not to
       be confused with the left and right versions of the opcode).  These
       are indexed by the low 3 bits of the inputs.  To make things "easy",
       these tables are loaded into two constants, TABL and TABR below.
       The operation index = (input & imask) << shift calculates the index
       into the constant, while val = (table >> index) & omask calculates
       the value we're looking for.  */
    switch (width) {
    case 8:
        imask = 0x7;
        shift = 3;
        omask = 0xff;
        if (left) {
            tabl = 0x80c0e0f0f8fcfeffULL;
            tabr = 0xff7f3f1f0f070301ULL;
        } else {
            tabl = 0x0103070f1f3f7fffULL;
            tabr = 0xfffefcf8f0e0c080ULL;
        }
        break;
    case 16:
        imask = 0x6;
        shift = 1;
        omask = 0xf;
        if (left) {
            tabl = 0x8cef;
            tabr = 0xf731;
        } else {
            tabl = 0x137f;
            tabr = 0xfec8;
        }
        break;
    case 32:
        imask = 0x4;
        shift = 0;
        omask = 0x3;
        if (left) {
            tabl = (2 << 2) | 3;
            tabr = (3 << 2) | 1;
        } else {
            tabl = (1 << 2) | 3;
            tabr = (3 << 2) | 2;
        }
        break;
    default:
        abort();
    }

    lo1 = tcg_temp_new();
    lo2 = tcg_temp_new();
    tcg_gen_andi_tl(lo1, s1, imask);
    tcg_gen_andi_tl(lo2, s2, imask);
    tcg_gen_shli_tl(lo1, lo1, shift);
    tcg_gen_shli_tl(lo2, lo2, shift);

    t1 = tcg_const_tl(tabl);
    t2 = tcg_const_tl(tabr);
    tcg_gen_shr_tl(lo1, t1, lo1);
    tcg_gen_shr_tl(lo2, t2, lo2);
    tcg_gen_andi_tl(dst, lo1, omask);
    tcg_gen_andi_tl(lo2, lo2, omask);

    amask = -8;
    if (AM_CHECK(dc)) {
        amask &= 0xffffffffULL;
    }
    tcg_gen_andi_tl(s1, s1, amask);
    tcg_gen_andi_tl(s2, s2, amask);

    /* We want to compute
        dst = (s1 == s2 ? lo1 : lo1 & lo2).
       We've already done dst = lo1, so this reduces to
        dst &= (s1 == s2 ? -1 : lo2)
       Which we perform by
        lo2 |= -(s1 == s2)
        dst &= lo2
    */
    tcg_gen_setcond_tl(TCG_COND_EQ, t1, s1, s2);
    tcg_gen_neg_tl(t1, t1);
    tcg_gen_or_tl(lo2, lo2, t1);
    tcg_gen_and_tl(dst, dst, lo2);

    tcg_temp_free(lo1);
    tcg_temp_free(lo2);
    tcg_temp_free(t1);
    tcg_temp_free(t2);
}

static void gen_alignaddr(TCGv dst, TCGv s1, TCGv s2, bool left)
{
    TCGv tmp = tcg_temp_new();

    tcg_gen_add_tl(tmp, s1, s2);
    tcg_gen_andi_tl(dst, tmp, -8);
    if (left) {
        tcg_gen_neg_tl(tmp, tmp);
    }
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);

    tcg_temp_free(tmp);
}

static void gen_faligndata(TCGv dst, TCGv gsr, TCGv s1, TCGv s2)
{
    TCGv t1, t2, shift;

    t1 = tcg_temp_new();
    t2 = tcg_temp_new();
    shift = tcg_temp_new();

    tcg_gen_andi_tl(shift, gsr, 7);
    tcg_gen_shli_tl(shift, shift, 3);
    tcg_gen_shl_tl(t1, s1, shift);

    /* A shift of 64 does not produce 0 in TCG.  Divide this into a
       shift of (up to 63) followed by a constant shift of 1.  */
    tcg_gen_xori_tl(shift, shift, 63);
    tcg_gen_shr_tl(t2, s2, shift);
    tcg_gen_shri_tl(t2, t2, 1);

    tcg_gen_or_tl(dst, t1, t2);

    tcg_temp_free(t1);
    tcg_temp_free(t2);
    tcg_temp_free(shift);
}
#endif

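/* Worked example of the two-step right shift in gen_faligndata above: with
   GSR.align = 3 the left shift count is 24, so t1 = s1 << 24; xoring 24
   with 63 gives 39, and (s2 >> 39) >> 1 equals s2 >> 40 = s2 >> (64 - 24).
   With GSR.align = 0 the count is 0, 0 ^ 63 = 63, (s2 >> 63) >> 1 is 0,
   and dst is simply s1, so the problematic 64-bit shift never has to be
   issued directly.  */
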
#define CHECK_IU_FEATURE(dc, FEATURE)                      \
    if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE))  \
        goto illegal_insn;
#define CHECK_FPU_FEATURE(dc, FEATURE)                     \
    if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE))  \
        goto nfpu_insn;

/* before an instruction, dc->pc must be static */
static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
{
    unsigned int opc, rs1, rs2, rd;
    TCGv cpu_src1, cpu_src2;
    TCGv_i32 cpu_src1_32, cpu_src2_32, cpu_dst_32;
    TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64;
    target_long simm;

    opc = GET_FIELD(insn, 0, 1);
    rd = GET_FIELD(insn, 2, 6);

    switch (opc) {
    case 0:                     /* branches/sethi */
        {
            unsigned int xop = GET_FIELD(insn, 7, 9);
            int32_t target;
            switch (xop) {
#ifdef TARGET_SPARC64
            case 0x1:           /* V9 BPcc */
                {
                    int cc;

                    target = GET_FIELD_SP(insn, 0, 18);
                    target = sign_extend(target, 19);
                    target <<= 2;
                    cc = GET_FIELD_SP(insn, 20, 21);
                    if (cc == 0)
                        do_branch(dc, target, insn, 0);
                    else if (cc == 2)
                        do_branch(dc, target, insn, 1);
                    else
                        goto illegal_insn;
                    goto jmp_insn;
                }
            case 0x3:           /* V9 BPr */
                {
                    target = GET_FIELD_SP(insn, 0, 13) |
                        (GET_FIELD_SP(insn, 20, 21) << 14);
                    target = sign_extend(target, 16);
                    target <<= 2;
                    cpu_src1 = get_src1(dc, insn);
                    do_branch_reg(dc, target, insn, cpu_src1);
                    goto jmp_insn;
                }
            case 0x5:           /* V9 FBPcc */
                {
                    int cc = GET_FIELD_SP(insn, 20, 21);
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    target = GET_FIELD_SP(insn, 0, 18);
                    target = sign_extend(target, 19);
                    target <<= 2;
                    do_fbranch(dc, target, insn, cc);
                    goto jmp_insn;
                }
#else
            case 0x7:           /* CBN+x */
                {
                    goto ncp_insn;
                }
#endif
            case 0x2:           /* BN+x */
                {
                    target = GET_FIELD(insn, 10, 31);
                    target = sign_extend(target, 22);
                    target <<= 2;
                    do_branch(dc, target, insn, 0);
                    goto jmp_insn;
                }
            case 0x6:           /* FBN+x */
                {
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    target = GET_FIELD(insn, 10, 31);
                    target = sign_extend(target, 22);
                    target <<= 2;
                    do_fbranch(dc, target, insn, 0);
                    goto jmp_insn;
                }
            case 0x4:           /* SETHI */
                /* Special-case %g0 because that's the canonical nop.  */
                if (rd) {
                    uint32_t value = GET_FIELD(insn, 10, 31);
                    TCGv t = gen_dest_gpr(dc, rd);
                    tcg_gen_movi_tl(t, value << 10);
                    gen_store_gpr(dc, rd, t);
                }
                break;
            case 0x0:           /* UNIMPL */
            default:
                goto illegal_insn;
            }
            break;
        }
        break;
    case 1:                     /*CALL*/
        {
            target_long target = GET_FIELDs(insn, 2, 31) << 2;
            TCGv o7 = gen_dest_gpr(dc, 15);

            tcg_gen_movi_tl(o7, dc->pc);
            gen_store_gpr(dc, 15, o7);

            target += dc->pc;
            gen_mov_pc_npc(dc);
#ifdef TARGET_SPARC64
            if (unlikely(AM_CHECK(dc))) {
                target &= 0xffffffffULL;
            }
#endif
            dc->npc = target;
        }
        goto jmp_insn;
    case 2:                     /* FPU & Logical Operations */
        {
            unsigned int xop = GET_FIELD(insn, 7, 12);
            TCGv cpu_dst = get_temp_tl(dc);
            TCGv cpu_tmp0;

            if (xop == 0x3a) {  /* generate trap */
                int cond = GET_FIELD(insn, 3, 6);
                TCGv_i32 trap;
                TCGLabel *l1 = NULL;
                int mask;

                if (cond == 0) {
                    /* Trap never.  */
                    break;
                }

                save_state(dc);

                if (cond != 8) {
                    /* Conditional trap.  */
                    DisasCompare cmp;
#ifdef TARGET_SPARC64
                    /* V9 icc/xcc */
                    int cc = GET_FIELD_SP(insn, 11, 12);
                    if (cc == 0) {
                        gen_compare(&cmp, 0, cond, dc);
                    } else if (cc == 2) {
                        gen_compare(&cmp, 1, cond, dc);
                    } else {
                        goto illegal_insn;
                    }
#else
                    gen_compare(&cmp, 0, cond, dc);
#endif
                    l1 = gen_new_label();
                    tcg_gen_brcond_tl(tcg_invert_cond(cmp.cond),
                                      cmp.c1, cmp.c2, l1);
                    free_compare(&cmp);
                }

                mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
                        ? UA2005_HTRAP_MASK : V8_TRAP_MASK);

                /* Don't use the normal temporaries, as they may well have
                   gone out of scope with the branch above.  While we're
                   doing that we might as well pre-truncate to 32-bit.  */
                trap = tcg_temp_new_i32();

                rs1 = GET_FIELD_SP(insn, 14, 18);
                if (IS_IMM) {
                    rs2 = GET_FIELD_SP(insn, 0, 7);
                    if (rs1 == 0) {
                        tcg_gen_movi_i32(trap, (rs2 & mask) + TT_TRAP);
                        /* Signal that the trap value is fully constant.  */
                        mask = 0;
                    } else {
                        TCGv t1 = gen_load_gpr(dc, rs1);
                        tcg_gen_trunc_tl_i32(trap, t1);
                        tcg_gen_addi_i32(trap, trap, rs2);
                    }
                } else {
                    TCGv t1, t2;
                    rs2 = GET_FIELD_SP(insn, 0, 4);
                    t1 = gen_load_gpr(dc, rs1);
                    t2 = gen_load_gpr(dc, rs2);
                    tcg_gen_add_tl(t1, t1, t2);
                    tcg_gen_trunc_tl_i32(trap, t1);
                }
                if (mask != 0) {
                    tcg_gen_andi_i32(trap, trap, mask);
                    tcg_gen_addi_i32(trap, trap, TT_TRAP);
                }

                gen_helper_raise_exception(cpu_env, trap);
                tcg_temp_free_i32(trap);

                if (cond == 8) {
                    /* An unconditional trap ends the TB.  */
                    dc->base.is_jmp = DISAS_NORETURN;
                    goto jmp_insn;
                } else {
                    /* A conditional trap falls through to the next insn.  */
                    gen_set_label(l1);
                    break;
                }
            } else if (xop == 0x28) {
                rs1 = GET_FIELD(insn, 13, 17);
                switch(rs1) {
                case 0: /* rdy */
#ifndef TARGET_SPARC64
                case 0x01 ... 0x0e: /* undefined in the SPARCv8
                                       manual, rdy on the microSPARC
                                       II */
                case 0x0f:          /* stbar in the SPARCv8 manual,
                                       rdy on the microSPARC II */
                case 0x10 ... 0x1f: /* implementation-dependent in the
                                       SPARCv8 manual, rdy on the
                                       microSPARC II */
                    /* Read Asr17 */
                    if (rs1 == 0x11 && dc->def->features & CPU_FEATURE_ASR17) {
                        TCGv t = gen_dest_gpr(dc, rd);
                        /* Read Asr17 for a Leon3 monoprocessor */
                        tcg_gen_movi_tl(t, (1 << 8) | (dc->def->nwindows - 1));
                        gen_store_gpr(dc, rd, t);
                        break;
                    }
#endif
                    gen_store_gpr(dc, rd, cpu_y);
                    break;
#ifdef TARGET_SPARC64
                case 0x2: /* V9 rdccr */
                    update_psr(dc);
                    gen_helper_rdccr(cpu_dst, cpu_env);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x3: /* V9 rdasi */
                    tcg_gen_movi_tl(cpu_dst, dc->asi);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x4: /* V9 rdtick */
                    {
                        TCGv_ptr r_tickptr;
                        TCGv_i32 r_const;

                        r_tickptr = tcg_temp_new_ptr();
                        r_const = tcg_const_i32(dc->mem_idx);
                        tcg_gen_ld_ptr(r_tickptr, cpu_env,
                                       offsetof(CPUSPARCState, tick));
                        if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
                            gen_io_start();
                        }
                        gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
                                                  r_const);
                        tcg_temp_free_ptr(r_tickptr);
                        tcg_temp_free_i32(r_const);
                        gen_store_gpr(dc, rd, cpu_dst);
                        if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
                            gen_io_end();
                        }
                    }
                    break;
                case 0x5: /* V9 rdpc */
                    {
                        TCGv t = gen_dest_gpr(dc, rd);
                        if (unlikely(AM_CHECK(dc))) {
                            tcg_gen_movi_tl(t, dc->pc & 0xffffffffULL);
                        } else {
                            tcg_gen_movi_tl(t, dc->pc);
                        }
                        gen_store_gpr(dc, rd, t);
                    }
                    break;
                case 0x6: /* V9 rdfprs */
                    tcg_gen_ext_i32_tl(cpu_dst, cpu_fprs);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0xf: /* V9 membar */
                    break; /* no effect */
                case 0x13: /* Graphics Status */
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    gen_store_gpr(dc, rd, cpu_gsr);
                    break;
                case 0x16: /* Softint */
                    tcg_gen_ld32s_tl(cpu_dst, cpu_env,
                                     offsetof(CPUSPARCState, softint));
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x17: /* Tick compare */
                    gen_store_gpr(dc, rd, cpu_tick_cmpr);
                    break;
                case 0x18: /* System tick */
                    {
                        TCGv_ptr r_tickptr;
                        TCGv_i32 r_const;

                        r_tickptr = tcg_temp_new_ptr();
                        r_const = tcg_const_i32(dc->mem_idx);
                        tcg_gen_ld_ptr(r_tickptr, cpu_env,
                                       offsetof(CPUSPARCState, stick));
                        if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
                            gen_io_start();
                        }
                        gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
                                                  r_const);
                        tcg_temp_free_ptr(r_tickptr);
                        tcg_temp_free_i32(r_const);
                        gen_store_gpr(dc, rd, cpu_dst);
                        if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
                            gen_io_end();
                        }
                    }
                    break;
                case 0x19: /* System tick compare */
                    gen_store_gpr(dc, rd, cpu_stick_cmpr);
                    break;
                case 0x1a: /* UltraSPARC-T1 Strand status */
                    /* XXX HYPV check maybe not enough, UA2005 & UA2007 describe
                     * this ASR as impl. dep
                     */
                    CHECK_IU_FEATURE(dc, HYPV);
                    {
                        TCGv t = gen_dest_gpr(dc, rd);
                        tcg_gen_movi_tl(t, 1UL);
                        gen_store_gpr(dc, rd, t);
                    }
                    break;
                case 0x10: /* Performance Control */
                case 0x11: /* Performance Instrumentation Counter */
                case 0x12: /* Dispatch Control */
                case 0x14: /* Softint set, WO */
                case 0x15: /* Softint clear, WO */
#endif
                default:
                    goto illegal_insn;
                }
#if !defined(CONFIG_USER_ONLY)
            } else if (xop == 0x29) { /* rdpsr / UA2005 rdhpr */
#ifndef TARGET_SPARC64
                if (!supervisor(dc)) {
                    goto priv_insn;
                }
                update_psr(dc);
                gen_helper_rdpsr(cpu_dst, cpu_env);
#else
                CHECK_IU_FEATURE(dc, HYPV);
                if (!hypervisor(dc))
                    goto priv_insn;
                rs1 = GET_FIELD(insn, 13, 17);
                switch (rs1) {
                case 0: // hpstate
                    tcg_gen_ld_i64(cpu_dst, cpu_env,
                                   offsetof(CPUSPARCState, hpstate));
                    break;
                case 1: // htstate
                    // gen_op_rdhtstate();
                    break;
                case 3: // hintp
                    tcg_gen_mov_tl(cpu_dst, cpu_hintp);
                    break;
                case 5: // htba
                    tcg_gen_mov_tl(cpu_dst, cpu_htba);
                    break;
                case 6: // hver
                    tcg_gen_mov_tl(cpu_dst, cpu_hver);
                    break;
                case 31: // hstick_cmpr
                    tcg_gen_mov_tl(cpu_dst, cpu_hstick_cmpr);
                    break;
                default:
                    goto illegal_insn;
                }
#endif
                gen_store_gpr(dc, rd, cpu_dst);
                break;
3533 } else if (xop
== 0x2a) { /* rdwim / V9 rdpr */
3534 if (!supervisor(dc
)) {
3537 cpu_tmp0
= get_temp_tl(dc
);
3538 #ifdef TARGET_SPARC64
3539 rs1
= GET_FIELD(insn
, 13, 17);
3545 r_tsptr
= tcg_temp_new_ptr();
3546 gen_load_trap_state_at_tl(r_tsptr
, cpu_env
);
3547 tcg_gen_ld_tl(cpu_tmp0
, r_tsptr
,
3548 offsetof(trap_state
, tpc
));
3549 tcg_temp_free_ptr(r_tsptr
);
3556 r_tsptr
= tcg_temp_new_ptr();
3557 gen_load_trap_state_at_tl(r_tsptr
, cpu_env
);
3558 tcg_gen_ld_tl(cpu_tmp0
, r_tsptr
,
3559 offsetof(trap_state
, tnpc
));
3560 tcg_temp_free_ptr(r_tsptr
);
3567 r_tsptr
= tcg_temp_new_ptr();
3568 gen_load_trap_state_at_tl(r_tsptr
, cpu_env
);
3569 tcg_gen_ld_tl(cpu_tmp0
, r_tsptr
,
3570 offsetof(trap_state
, tstate
));
3571 tcg_temp_free_ptr(r_tsptr
);
3576 TCGv_ptr r_tsptr
= tcg_temp_new_ptr();
3578 gen_load_trap_state_at_tl(r_tsptr
, cpu_env
);
3579 tcg_gen_ld32s_tl(cpu_tmp0
, r_tsptr
,
3580 offsetof(trap_state
, tt
));
3581 tcg_temp_free_ptr(r_tsptr
);
3589 r_tickptr
= tcg_temp_new_ptr();
3590 r_const
= tcg_const_i32(dc
->mem_idx
);
3591 tcg_gen_ld_ptr(r_tickptr
, cpu_env
,
3592 offsetof(CPUSPARCState
, tick
));
3593 if (tb_cflags(dc
->base
.tb
) & CF_USE_ICOUNT
) {
3596 gen_helper_tick_get_count(cpu_tmp0
, cpu_env
,
3597 r_tickptr
, r_const
);
3598 tcg_temp_free_ptr(r_tickptr
);
3599 tcg_temp_free_i32(r_const
);
3600 if (tb_cflags(dc
->base
.tb
) & CF_USE_ICOUNT
) {
3606 tcg_gen_mov_tl(cpu_tmp0
, cpu_tbr
);
3609 tcg_gen_ld32s_tl(cpu_tmp0
, cpu_env
,
3610 offsetof(CPUSPARCState
, pstate
));
3613 tcg_gen_ld32s_tl(cpu_tmp0
, cpu_env
,
3614 offsetof(CPUSPARCState
, tl
));
3617 tcg_gen_ld32s_tl(cpu_tmp0
, cpu_env
,
3618 offsetof(CPUSPARCState
, psrpil
));
3621 gen_helper_rdcwp(cpu_tmp0
, cpu_env
);
3624 tcg_gen_ld32s_tl(cpu_tmp0
, cpu_env
,
3625 offsetof(CPUSPARCState
, cansave
));
3627 case 11: // canrestore
3628 tcg_gen_ld32s_tl(cpu_tmp0
, cpu_env
,
3629 offsetof(CPUSPARCState
, canrestore
));
3631 case 12: // cleanwin
3632 tcg_gen_ld32s_tl(cpu_tmp0
, cpu_env
,
3633 offsetof(CPUSPARCState
, cleanwin
));
3635 case 13: // otherwin
3636 tcg_gen_ld32s_tl(cpu_tmp0
, cpu_env
,
3637 offsetof(CPUSPARCState
, otherwin
));
3640 tcg_gen_ld32s_tl(cpu_tmp0
, cpu_env
,
3641 offsetof(CPUSPARCState
, wstate
));
3643 case 16: // UA2005 gl
3644 CHECK_IU_FEATURE(dc
, GL
);
3645 tcg_gen_ld32s_tl(cpu_tmp0
, cpu_env
,
3646 offsetof(CPUSPARCState
, gl
));
3648 case 26: // UA2005 strand status
3649 CHECK_IU_FEATURE(dc
, HYPV
);
3650 if (!hypervisor(dc
))
3652 tcg_gen_mov_tl(cpu_tmp0
, cpu_ssr
);
3655 tcg_gen_mov_tl(cpu_tmp0
, cpu_ver
);
3662 tcg_gen_ext_i32_tl(cpu_tmp0
, cpu_wim
);
3664 gen_store_gpr(dc
, rd
, cpu_tmp0
);
3667 #if defined(TARGET_SPARC64) || !defined(CONFIG_USER_ONLY)
3668 } else if (xop
== 0x2b) { /* rdtbr / V9 flushw */
3669 #ifdef TARGET_SPARC64
3670 gen_helper_flushw(cpu_env
);
3672 if (!supervisor(dc
))
3674 gen_store_gpr(dc
, rd
, cpu_tbr
);
3678 } else if (xop
== 0x34) { /* FPU Operations */
3679 if (gen_trap_ifnofpu(dc
)) {
3682 gen_op_clear_ieee_excp_and_FTT();
3683 rs1
= GET_FIELD(insn
, 13, 17);
3684 rs2
= GET_FIELD(insn
, 27, 31);
3685 xop
= GET_FIELD(insn
, 18, 26);
3688 case 0x1: /* fmovs */
3689 cpu_src1_32
= gen_load_fpr_F(dc
, rs2
);
3690 gen_store_fpr_F(dc
, rd
, cpu_src1_32
);
3692 case 0x5: /* fnegs */
3693 gen_ne_fop_FF(dc
, rd
, rs2
, gen_helper_fnegs
);
3695 case 0x9: /* fabss */
3696 gen_ne_fop_FF(dc
, rd
, rs2
, gen_helper_fabss
);
3698 case 0x29: /* fsqrts */
3699 CHECK_FPU_FEATURE(dc
, FSQRT
);
3700 gen_fop_FF(dc
, rd
, rs2
, gen_helper_fsqrts
);
3702 case 0x2a: /* fsqrtd */
3703 CHECK_FPU_FEATURE(dc
, FSQRT
);
3704 gen_fop_DD(dc
, rd
, rs2
, gen_helper_fsqrtd
);
3706 case 0x2b: /* fsqrtq */
3707 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3708 gen_fop_QQ(dc
, rd
, rs2
, gen_helper_fsqrtq
);
3710 case 0x41: /* fadds */
3711 gen_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fadds
);
3713 case 0x42: /* faddd */
3714 gen_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_faddd
);
3716 case 0x43: /* faddq */
3717 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3718 gen_fop_QQQ(dc
, rd
, rs1
, rs2
, gen_helper_faddq
);
3720 case 0x45: /* fsubs */
3721 gen_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fsubs
);
3723 case 0x46: /* fsubd */
3724 gen_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fsubd
);
3726 case 0x47: /* fsubq */
3727 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3728 gen_fop_QQQ(dc
, rd
, rs1
, rs2
, gen_helper_fsubq
);
3730 case 0x49: /* fmuls */
3731 CHECK_FPU_FEATURE(dc
, FMUL
);
3732 gen_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fmuls
);
3734 case 0x4a: /* fmuld */
3735 CHECK_FPU_FEATURE(dc
, FMUL
);
3736 gen_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmuld
);
3738 case 0x4b: /* fmulq */
3739 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3740 CHECK_FPU_FEATURE(dc
, FMUL
);
3741 gen_fop_QQQ(dc
, rd
, rs1
, rs2
, gen_helper_fmulq
);
3743 case 0x4d: /* fdivs */
3744 gen_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fdivs
);
3746 case 0x4e: /* fdivd */
3747 gen_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fdivd
);
3749 case 0x4f: /* fdivq */
3750 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3751 gen_fop_QQQ(dc
, rd
, rs1
, rs2
, gen_helper_fdivq
);
3753 case 0x69: /* fsmuld */
3754 CHECK_FPU_FEATURE(dc
, FSMULD
);
3755 gen_fop_DFF(dc
, rd
, rs1
, rs2
, gen_helper_fsmuld
);
3757 case 0x6e: /* fdmulq */
3758 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3759 gen_fop_QDD(dc
, rd
, rs1
, rs2
, gen_helper_fdmulq
);
3761 case 0xc4: /* fitos */
3762 gen_fop_FF(dc
, rd
, rs2
, gen_helper_fitos
);
3764 case 0xc6: /* fdtos */
3765 gen_fop_FD(dc
, rd
, rs2
, gen_helper_fdtos
);
3767 case 0xc7: /* fqtos */
3768 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3769 gen_fop_FQ(dc
, rd
, rs2
, gen_helper_fqtos
);
3771 case 0xc8: /* fitod */
3772 gen_ne_fop_DF(dc
, rd
, rs2
, gen_helper_fitod
);
3774 case 0xc9: /* fstod */
3775 gen_ne_fop_DF(dc
, rd
, rs2
, gen_helper_fstod
);
3777 case 0xcb: /* fqtod */
3778 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3779 gen_fop_DQ(dc
, rd
, rs2
, gen_helper_fqtod
);
3781 case 0xcc: /* fitoq */
3782 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3783 gen_ne_fop_QF(dc
, rd
, rs2
, gen_helper_fitoq
);
3785 case 0xcd: /* fstoq */
3786 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3787 gen_ne_fop_QF(dc
, rd
, rs2
, gen_helper_fstoq
);
3789 case 0xce: /* fdtoq */
3790 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3791 gen_ne_fop_QD(dc
, rd
, rs2
, gen_helper_fdtoq
);
3793 case 0xd1: /* fstoi */
3794 gen_fop_FF(dc
, rd
, rs2
, gen_helper_fstoi
);
3796 case 0xd2: /* fdtoi */
3797 gen_fop_FD(dc
, rd
, rs2
, gen_helper_fdtoi
);
3799 case 0xd3: /* fqtoi */
3800 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3801 gen_fop_FQ(dc
, rd
, rs2
, gen_helper_fqtoi
);
3803 #ifdef TARGET_SPARC64
3804 case 0x2: /* V9 fmovd */
3805 cpu_src1_64
= gen_load_fpr_D(dc
, rs2
);
3806 gen_store_fpr_D(dc
, rd
, cpu_src1_64
);
3808 case 0x3: /* V9 fmovq */
3809 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3810 gen_move_Q(dc
, rd
, rs2
);
3812 case 0x6: /* V9 fnegd */
3813 gen_ne_fop_DD(dc
, rd
, rs2
, gen_helper_fnegd
);
3815 case 0x7: /* V9 fnegq */
3816 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3817 gen_ne_fop_QQ(dc
, rd
, rs2
, gen_helper_fnegq
);
3819 case 0xa: /* V9 fabsd */
3820 gen_ne_fop_DD(dc
, rd
, rs2
, gen_helper_fabsd
);
3822 case 0xb: /* V9 fabsq */
3823 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3824 gen_ne_fop_QQ(dc
, rd
, rs2
, gen_helper_fabsq
);
3826 case 0x81: /* V9 fstox */
3827 gen_fop_DF(dc
, rd
, rs2
, gen_helper_fstox
);
3829 case 0x82: /* V9 fdtox */
3830 gen_fop_DD(dc
, rd
, rs2
, gen_helper_fdtox
);
3832 case 0x83: /* V9 fqtox */
3833 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3834 gen_fop_DQ(dc
, rd
, rs2
, gen_helper_fqtox
);
3836 case 0x84: /* V9 fxtos */
3837 gen_fop_FD(dc
, rd
, rs2
, gen_helper_fxtos
);
3839 case 0x88: /* V9 fxtod */
3840 gen_fop_DD(dc
, rd
, rs2
, gen_helper_fxtod
);
3842 case 0x8c: /* V9 fxtoq */
3843 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3844 gen_ne_fop_QD(dc
, rd
, rs2
, gen_helper_fxtoq
);
3850 } else if (xop
== 0x35) { /* FPU Operations */
3851 #ifdef TARGET_SPARC64
3854 if (gen_trap_ifnofpu(dc
)) {
3857 gen_op_clear_ieee_excp_and_FTT();
3858 rs1
= GET_FIELD(insn
, 13, 17);
3859 rs2
= GET_FIELD(insn
, 27, 31);
3860 xop
= GET_FIELD(insn
, 18, 26);
3862 #ifdef TARGET_SPARC64
3866 cond = GET_FIELD_SP(insn, 10, 12); \
3867 cpu_src1 = get_src1(dc, insn); \
3868 gen_compare_reg(&cmp, cond, cpu_src1); \
3869 gen_fmov##sz(dc, &cmp, rd, rs2); \
3870 free_compare(&cmp); \
3873 if ((xop
& 0x11f) == 0x005) { /* V9 fmovsr */
3876 } else if ((xop
& 0x11f) == 0x006) { // V9 fmovdr
3879 } else if ((xop
& 0x11f) == 0x007) { // V9 fmovqr
3880 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3887 #ifdef TARGET_SPARC64
3888 #define FMOVCC(fcc, sz) \
3891 cond = GET_FIELD_SP(insn, 14, 17); \
3892 gen_fcompare(&cmp, fcc, cond); \
3893 gen_fmov##sz(dc, &cmp, rd, rs2); \
3894 free_compare(&cmp); \
3897 case 0x001: /* V9 fmovscc %fcc0 */
3900 case 0x002: /* V9 fmovdcc %fcc0 */
3903 case 0x003: /* V9 fmovqcc %fcc0 */
3904 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3907 case 0x041: /* V9 fmovscc %fcc1 */
3910 case 0x042: /* V9 fmovdcc %fcc1 */
3913 case 0x043: /* V9 fmovqcc %fcc1 */
3914 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3917 case 0x081: /* V9 fmovscc %fcc2 */
3920 case 0x082: /* V9 fmovdcc %fcc2 */
3923 case 0x083: /* V9 fmovqcc %fcc2 */
3924 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3927 case 0x0c1: /* V9 fmovscc %fcc3 */
3930 case 0x0c2: /* V9 fmovdcc %fcc3 */
3933 case 0x0c3: /* V9 fmovqcc %fcc3 */
3934 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3938 #define FMOVCC(xcc, sz) \
3941 cond = GET_FIELD_SP(insn, 14, 17); \
3942 gen_compare(&cmp, xcc, cond, dc); \
3943 gen_fmov##sz(dc, &cmp, rd, rs2); \
3944 free_compare(&cmp); \
3947 case 0x101: /* V9 fmovscc %icc */
3950 case 0x102: /* V9 fmovdcc %icc */
3953 case 0x103: /* V9 fmovqcc %icc */
3954 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3957 case 0x181: /* V9 fmovscc %xcc */
3960 case 0x182: /* V9 fmovdcc %xcc */
3963 case 0x183: /* V9 fmovqcc %xcc */
3964 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3969 case 0x51: /* fcmps, V9 %fcc */
3970 cpu_src1_32
= gen_load_fpr_F(dc
, rs1
);
3971 cpu_src2_32
= gen_load_fpr_F(dc
, rs2
);
3972 gen_op_fcmps(rd
& 3, cpu_src1_32
, cpu_src2_32
);
3974 case 0x52: /* fcmpd, V9 %fcc */
3975 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
3976 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
3977 gen_op_fcmpd(rd
& 3, cpu_src1_64
, cpu_src2_64
);
3979 case 0x53: /* fcmpq, V9 %fcc */
3980 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3981 gen_op_load_fpr_QT0(QFPREG(rs1
));
3982 gen_op_load_fpr_QT1(QFPREG(rs2
));
3983 gen_op_fcmpq(rd
& 3);
3985 case 0x55: /* fcmpes, V9 %fcc */
3986 cpu_src1_32
= gen_load_fpr_F(dc
, rs1
);
3987 cpu_src2_32
= gen_load_fpr_F(dc
, rs2
);
3988 gen_op_fcmpes(rd
& 3, cpu_src1_32
, cpu_src2_32
);
3990 case 0x56: /* fcmped, V9 %fcc */
3991 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
3992 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
3993 gen_op_fcmped(rd
& 3, cpu_src1_64
, cpu_src2_64
);
3995 case 0x57: /* fcmpeq, V9 %fcc */
3996 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3997 gen_op_load_fpr_QT0(QFPREG(rs1
));
3998 gen_op_load_fpr_QT1(QFPREG(rs2
));
3999 gen_op_fcmpeq(rd
& 3);
4004 } else if (xop
== 0x2) {
4005 TCGv dst
= gen_dest_gpr(dc
, rd
);
4006 rs1
= GET_FIELD(insn
, 13, 17);
4008 /* clr/mov shortcut : or %g0, x, y -> mov x, y */
4009 if (IS_IMM
) { /* immediate */
4010 simm
= GET_FIELDs(insn
, 19, 31);
4011 tcg_gen_movi_tl(dst
, simm
);
4012 gen_store_gpr(dc
, rd
, dst
);
4013 } else { /* register */
4014 rs2
= GET_FIELD(insn
, 27, 31);
4016 tcg_gen_movi_tl(dst
, 0);
4017 gen_store_gpr(dc
, rd
, dst
);
4019 cpu_src2
= gen_load_gpr(dc
, rs2
);
4020 gen_store_gpr(dc
, rd
, cpu_src2
);
4024 cpu_src1
= get_src1(dc
, insn
);
4025 if (IS_IMM
) { /* immediate */
4026 simm
= GET_FIELDs(insn
, 19, 31);
4027 tcg_gen_ori_tl(dst
, cpu_src1
, simm
);
4028 gen_store_gpr(dc
, rd
, dst
);
4029 } else { /* register */
4030 rs2
= GET_FIELD(insn
, 27, 31);
4032 /* mov shortcut: or x, %g0, y -> mov x, y */
4033 gen_store_gpr(dc
, rd
, cpu_src1
);
4035 cpu_src2
= gen_load_gpr(dc
, rs2
);
4036 tcg_gen_or_tl(dst
, cpu_src1
, cpu_src2
);
4037 gen_store_gpr(dc
, rd
, dst
);
4041 #ifdef TARGET_SPARC64
4042 } else if (xop
== 0x25) { /* sll, V9 sllx */
4043 cpu_src1
= get_src1(dc
, insn
);
4044 if (IS_IMM
) { /* immediate */
4045 simm
= GET_FIELDs(insn
, 20, 31);
4046 if (insn
& (1 << 12)) {
4047 tcg_gen_shli_i64(cpu_dst
, cpu_src1
, simm
& 0x3f);
4049 tcg_gen_shli_i64(cpu_dst
, cpu_src1
, simm
& 0x1f);
4051 } else { /* register */
4052 rs2
= GET_FIELD(insn
, 27, 31);
4053 cpu_src2
= gen_load_gpr(dc
, rs2
);
4054 cpu_tmp0
= get_temp_tl(dc
);
4055 if (insn
& (1 << 12)) {
4056 tcg_gen_andi_i64(cpu_tmp0
, cpu_src2
, 0x3f);
4058 tcg_gen_andi_i64(cpu_tmp0
, cpu_src2
, 0x1f);
4060 tcg_gen_shl_i64(cpu_dst
, cpu_src1
, cpu_tmp0
);
4062 gen_store_gpr(dc
, rd
, cpu_dst
);
4063 } else if (xop
== 0x26) { /* srl, V9 srlx */
4064 cpu_src1
= get_src1(dc
, insn
);
4065 if (IS_IMM
) { /* immediate */
4066 simm
= GET_FIELDs(insn
, 20, 31);
4067 if (insn
& (1 << 12)) {
4068 tcg_gen_shri_i64(cpu_dst
, cpu_src1
, simm
& 0x3f);
4070 tcg_gen_andi_i64(cpu_dst
, cpu_src1
, 0xffffffffULL
);
4071 tcg_gen_shri_i64(cpu_dst
, cpu_dst
, simm
& 0x1f);
4073 } else { /* register */
4074 rs2
= GET_FIELD(insn
, 27, 31);
4075 cpu_src2
= gen_load_gpr(dc
, rs2
);
4076 cpu_tmp0
= get_temp_tl(dc
);
4077 if (insn
& (1 << 12)) {
4078 tcg_gen_andi_i64(cpu_tmp0
, cpu_src2
, 0x3f);
4079 tcg_gen_shr_i64(cpu_dst
, cpu_src1
, cpu_tmp0
);
4081 tcg_gen_andi_i64(cpu_tmp0
, cpu_src2
, 0x1f);
4082 tcg_gen_andi_i64(cpu_dst
, cpu_src1
, 0xffffffffULL
);
4083 tcg_gen_shr_i64(cpu_dst
, cpu_dst
, cpu_tmp0
);
4086 gen_store_gpr(dc
, rd
, cpu_dst
);
4087 } else if (xop
== 0x27) { /* sra, V9 srax */
4088 cpu_src1
= get_src1(dc
, insn
);
4089 if (IS_IMM
) { /* immediate */
4090 simm
= GET_FIELDs(insn
, 20, 31);
4091 if (insn
& (1 << 12)) {
4092 tcg_gen_sari_i64(cpu_dst
, cpu_src1
, simm
& 0x3f);
4094 tcg_gen_ext32s_i64(cpu_dst
, cpu_src1
);
4095 tcg_gen_sari_i64(cpu_dst
, cpu_dst
, simm
& 0x1f);
4097 } else { /* register */
4098 rs2
= GET_FIELD(insn
, 27, 31);
4099 cpu_src2
= gen_load_gpr(dc
, rs2
);
4100 cpu_tmp0
= get_temp_tl(dc
);
4101 if (insn
& (1 << 12)) {
4102 tcg_gen_andi_i64(cpu_tmp0
, cpu_src2
, 0x3f);
4103 tcg_gen_sar_i64(cpu_dst
, cpu_src1
, cpu_tmp0
);
4105 tcg_gen_andi_i64(cpu_tmp0
, cpu_src2
, 0x1f);
4106 tcg_gen_ext32s_i64(cpu_dst
, cpu_src1
);
4107 tcg_gen_sar_i64(cpu_dst
, cpu_dst
, cpu_tmp0
);
4110 gen_store_gpr(dc
, rd
, cpu_dst
);
4112 } else if (xop
< 0x36) {
4114 cpu_src1
= get_src1(dc
, insn
);
4115 cpu_src2
= get_src2(dc
, insn
);
4116 switch (xop
& ~0x10) {
4119 gen_op_add_cc(cpu_dst
, cpu_src1
, cpu_src2
);
4120 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_ADD
);
4121 dc
->cc_op
= CC_OP_ADD
;
4123 tcg_gen_add_tl(cpu_dst
, cpu_src1
, cpu_src2
);
4127 tcg_gen_and_tl(cpu_dst
, cpu_src1
, cpu_src2
);
4129 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
4130 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
4131 dc
->cc_op
= CC_OP_LOGIC
;
4135 tcg_gen_or_tl(cpu_dst
, cpu_src1
, cpu_src2
);
4137 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
4138 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
4139 dc
->cc_op
= CC_OP_LOGIC
;
4143 tcg_gen_xor_tl(cpu_dst
, cpu_src1
, cpu_src2
);
4145 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
4146 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
4147 dc
->cc_op
= CC_OP_LOGIC
;
4152 gen_op_sub_cc(cpu_dst
, cpu_src1
, cpu_src2
);
4153 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_SUB
);
4154 dc
->cc_op
= CC_OP_SUB
;
4156 tcg_gen_sub_tl(cpu_dst
, cpu_src1
, cpu_src2
);
4159 case 0x5: /* andn */
4160 tcg_gen_andc_tl(cpu_dst
, cpu_src1
, cpu_src2
);
4162 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
4163 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
4164 dc
->cc_op
= CC_OP_LOGIC
;
4168 tcg_gen_orc_tl(cpu_dst
, cpu_src1
, cpu_src2
);
4170 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
4171 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
4172 dc
->cc_op
= CC_OP_LOGIC
;
4175 case 0x7: /* xorn */
4176 tcg_gen_eqv_tl(cpu_dst
, cpu_src1
, cpu_src2
);
4178 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
4179 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
4180 dc
->cc_op
= CC_OP_LOGIC
;
4183 case 0x8: /* addx, V9 addc */
4184 gen_op_addx_int(dc
, cpu_dst
, cpu_src1
, cpu_src2
,
4187 #ifdef TARGET_SPARC64
4188 case 0x9: /* V9 mulx */
4189 tcg_gen_mul_i64(cpu_dst
, cpu_src1
, cpu_src2
);
4192 case 0xa: /* umul */
4193 CHECK_IU_FEATURE(dc
, MUL
);
4194 gen_op_umul(cpu_dst
, cpu_src1
, cpu_src2
);
4196 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
4197 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
4198 dc
->cc_op
= CC_OP_LOGIC
;
4201 case 0xb: /* smul */
4202 CHECK_IU_FEATURE(dc
, MUL
);
4203 gen_op_smul(cpu_dst
, cpu_src1
, cpu_src2
);
4205 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
4206 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
4207 dc
->cc_op
= CC_OP_LOGIC
;
4210 case 0xc: /* subx, V9 subc */
4211 gen_op_subx_int(dc
, cpu_dst
, cpu_src1
, cpu_src2
,
4214 #ifdef TARGET_SPARC64
4215 case 0xd: /* V9 udivx */
4216 gen_helper_udivx(cpu_dst
, cpu_env
, cpu_src1
, cpu_src2
);
4219 case 0xe: /* udiv */
4220 CHECK_IU_FEATURE(dc
, DIV
);
4222 gen_helper_udiv_cc(cpu_dst
, cpu_env
, cpu_src1
,
4224 dc
->cc_op
= CC_OP_DIV
;
4226 gen_helper_udiv(cpu_dst
, cpu_env
, cpu_src1
,
4230 case 0xf: /* sdiv */
4231 CHECK_IU_FEATURE(dc
, DIV
);
4233 gen_helper_sdiv_cc(cpu_dst
, cpu_env
, cpu_src1
,
4235 dc
->cc_op
= CC_OP_DIV
;
4237 gen_helper_sdiv(cpu_dst
, cpu_env
, cpu_src1
,
4244 gen_store_gpr(dc
, rd
, cpu_dst
);
4246 cpu_src1
= get_src1(dc
, insn
);
4247 cpu_src2
= get_src2(dc
, insn
);
4249 case 0x20: /* taddcc */
4250 gen_op_add_cc(cpu_dst
, cpu_src1
, cpu_src2
);
4251 gen_store_gpr(dc
, rd
, cpu_dst
);
4252 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_TADD
);
4253 dc
->cc_op
= CC_OP_TADD
;
4255 case 0x21: /* tsubcc */
4256 gen_op_sub_cc(cpu_dst
, cpu_src1
, cpu_src2
);
4257 gen_store_gpr(dc
, rd
, cpu_dst
);
4258 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_TSUB
);
4259 dc
->cc_op
= CC_OP_TSUB
;
4261 case 0x22: /* taddcctv */
4262 gen_helper_taddcctv(cpu_dst
, cpu_env
,
4263 cpu_src1
, cpu_src2
);
4264 gen_store_gpr(dc
, rd
, cpu_dst
);
4265 dc
->cc_op
= CC_OP_TADDTV
;
4267 case 0x23: /* tsubcctv */
4268 gen_helper_tsubcctv(cpu_dst
, cpu_env
,
4269 cpu_src1
, cpu_src2
);
4270 gen_store_gpr(dc
, rd
, cpu_dst
);
4271 dc
->cc_op
= CC_OP_TSUBTV
;
4273 case 0x24: /* mulscc */
4275 gen_op_mulscc(cpu_dst
, cpu_src1
, cpu_src2
);
4276 gen_store_gpr(dc
, rd
, cpu_dst
);
4277 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_ADD
);
4278 dc
->cc_op
= CC_OP_ADD
;
4280 #ifndef TARGET_SPARC64
4281 case 0x25: /* sll */
4282 if (IS_IMM
) { /* immediate */
4283 simm
= GET_FIELDs(insn
, 20, 31);
4284 tcg_gen_shli_tl(cpu_dst
, cpu_src1
, simm
& 0x1f);
4285 } else { /* register */
4286 cpu_tmp0
= get_temp_tl(dc
);
4287 tcg_gen_andi_tl(cpu_tmp0
, cpu_src2
, 0x1f);
4288 tcg_gen_shl_tl(cpu_dst
, cpu_src1
, cpu_tmp0
);
4290 gen_store_gpr(dc
, rd
, cpu_dst
);
4292 case 0x26: /* srl */
4293 if (IS_IMM
) { /* immediate */
4294 simm
= GET_FIELDs(insn
, 20, 31);
4295 tcg_gen_shri_tl(cpu_dst
, cpu_src1
, simm
& 0x1f);
4296 } else { /* register */
4297 cpu_tmp0
= get_temp_tl(dc
);
4298 tcg_gen_andi_tl(cpu_tmp0
, cpu_src2
, 0x1f);
4299 tcg_gen_shr_tl(cpu_dst
, cpu_src1
, cpu_tmp0
);
4301 gen_store_gpr(dc
, rd
, cpu_dst
);
4303 case 0x27: /* sra */
4304 if (IS_IMM
) { /* immediate */
4305 simm
= GET_FIELDs(insn
, 20, 31);
4306 tcg_gen_sari_tl(cpu_dst
, cpu_src1
, simm
& 0x1f);
4307 } else { /* register */
4308 cpu_tmp0
= get_temp_tl(dc
);
4309 tcg_gen_andi_tl(cpu_tmp0
, cpu_src2
, 0x1f);
4310 tcg_gen_sar_tl(cpu_dst
, cpu_src1
, cpu_tmp0
);
4312 gen_store_gpr(dc
, rd
, cpu_dst
);
4317 cpu_tmp0
= get_temp_tl(dc
);
4320 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
4321 tcg_gen_andi_tl(cpu_y
, cpu_tmp0
, 0xffffffff);
4323 #ifndef TARGET_SPARC64
4324 case 0x01 ... 0x0f: /* undefined in the
4328 case 0x10 ... 0x1f: /* implementation-dependent
4332 if ((rd
== 0x13) && (dc
->def
->features
&
4333 CPU_FEATURE_POWERDOWN
)) {
4334 /* LEON3 power-down */
4336 gen_helper_power_down(cpu_env
);
4340 case 0x2: /* V9 wrccr */
4341 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
4342 gen_helper_wrccr(cpu_env
, cpu_tmp0
);
4343 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_FLAGS
);
4344 dc
->cc_op
= CC_OP_FLAGS
;
4346 case 0x3: /* V9 wrasi */
4347 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
4348 tcg_gen_andi_tl(cpu_tmp0
, cpu_tmp0
, 0xff);
4349 tcg_gen_st32_tl(cpu_tmp0
, cpu_env
,
4350 offsetof(CPUSPARCState
, asi
));
4351 /* End TB to notice changed ASI. */
4354 tcg_gen_exit_tb(NULL
, 0);
4355 dc
->base
.is_jmp
= DISAS_NORETURN
;
4357 case 0x6: /* V9 wrfprs */
4358 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
4359 tcg_gen_trunc_tl_i32(cpu_fprs
, cpu_tmp0
);
4363 tcg_gen_exit_tb(NULL
, 0);
4364 dc
->base
.is_jmp
= DISAS_NORETURN
;
4366 case 0xf: /* V9 sir, nop if user */
4367 #if !defined(CONFIG_USER_ONLY)
4368 if (supervisor(dc
)) {
4373 case 0x13: /* Graphics Status */
4374 if (gen_trap_ifnofpu(dc
)) {
4377 tcg_gen_xor_tl(cpu_gsr
, cpu_src1
, cpu_src2
);
4379 case 0x14: /* Softint set */
4380 if (!supervisor(dc
))
4382 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
4383 gen_helper_set_softint(cpu_env
, cpu_tmp0
);
4385 case 0x15: /* Softint clear */
4386 if (!supervisor(dc
))
4388 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
4389 gen_helper_clear_softint(cpu_env
, cpu_tmp0
);
4391 case 0x16: /* Softint write */
4392 if (!supervisor(dc
))
4394 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
4395 gen_helper_write_softint(cpu_env
, cpu_tmp0
);
4397 case 0x17: /* Tick compare */
4398 #if !defined(CONFIG_USER_ONLY)
4399 if (!supervisor(dc
))
4405 tcg_gen_xor_tl(cpu_tick_cmpr
, cpu_src1
,
4407 r_tickptr
= tcg_temp_new_ptr();
4408 tcg_gen_ld_ptr(r_tickptr
, cpu_env
,
4409 offsetof(CPUSPARCState
, tick
));
4410 if (tb_cflags(dc
->base
.tb
) &
4414 gen_helper_tick_set_limit(r_tickptr
,
4416 tcg_temp_free_ptr(r_tickptr
);
4417 /* End TB to handle timer interrupt */
4418 dc
->base
.is_jmp
= DISAS_EXIT
;
4421 case 0x18: /* System tick */
4422 #if !defined(CONFIG_USER_ONLY)
4423 if (!supervisor(dc
))
4429 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
,
4431 r_tickptr
= tcg_temp_new_ptr();
4432 tcg_gen_ld_ptr(r_tickptr
, cpu_env
,
4433 offsetof(CPUSPARCState
, stick
));
4434 if (tb_cflags(dc
->base
.tb
) &
4438 gen_helper_tick_set_count(r_tickptr
,
4440 tcg_temp_free_ptr(r_tickptr
);
4441 /* End TB to handle timer interrupt */
4442 dc
->base
.is_jmp
= DISAS_EXIT
;
4445 case 0x19: /* System tick compare */
4446 #if !defined(CONFIG_USER_ONLY)
4447 if (!supervisor(dc
))
4453 tcg_gen_xor_tl(cpu_stick_cmpr
, cpu_src1
,
4455 r_tickptr
= tcg_temp_new_ptr();
4456 tcg_gen_ld_ptr(r_tickptr
, cpu_env
,
4457 offsetof(CPUSPARCState
, stick
));
4458 if (tb_cflags(dc
->base
.tb
) &
4462 gen_helper_tick_set_limit(r_tickptr
,
4464 tcg_temp_free_ptr(r_tickptr
);
4465 /* End TB to handle timer interrupt */
4466 dc
->base
.is_jmp
= DISAS_EXIT
;
4470 case 0x10: /* Performance Control */
4471 case 0x11: /* Performance Instrumentation
4473 case 0x12: /* Dispatch Control */
4480 #if !defined(CONFIG_USER_ONLY)
4481 case 0x31: /* wrpsr, V9 saved, restored */
4483 if (!supervisor(dc
))
4485 #ifdef TARGET_SPARC64
4488 gen_helper_saved(cpu_env
);
4491 gen_helper_restored(cpu_env
);
4493 case 2: /* UA2005 allclean */
4494 case 3: /* UA2005 otherw */
4495 case 4: /* UA2005 normalw */
4496 case 5: /* UA2005 invalw */
4502 cpu_tmp0
= get_temp_tl(dc
);
4503 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
4504 gen_helper_wrpsr(cpu_env
, cpu_tmp0
);
4505 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_FLAGS
);
4506 dc
->cc_op
= CC_OP_FLAGS
;
4509 tcg_gen_exit_tb(NULL
, 0);
4510 dc
->base
.is_jmp
= DISAS_NORETURN
;
4514 case 0x32: /* wrwim, V9 wrpr */
4516 if (!supervisor(dc
))
4518 cpu_tmp0
= get_temp_tl(dc
);
4519 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
4520 #ifdef TARGET_SPARC64
4526 r_tsptr
= tcg_temp_new_ptr();
4527 gen_load_trap_state_at_tl(r_tsptr
, cpu_env
);
4528 tcg_gen_st_tl(cpu_tmp0
, r_tsptr
,
4529 offsetof(trap_state
, tpc
));
4530 tcg_temp_free_ptr(r_tsptr
);
4537 r_tsptr
= tcg_temp_new_ptr();
4538 gen_load_trap_state_at_tl(r_tsptr
, cpu_env
);
4539 tcg_gen_st_tl(cpu_tmp0
, r_tsptr
,
4540 offsetof(trap_state
, tnpc
));
4541 tcg_temp_free_ptr(r_tsptr
);
4548 r_tsptr
= tcg_temp_new_ptr();
4549 gen_load_trap_state_at_tl(r_tsptr
, cpu_env
);
4550 tcg_gen_st_tl(cpu_tmp0
, r_tsptr
,
4551 offsetof(trap_state
,
4553 tcg_temp_free_ptr(r_tsptr
);
4560 r_tsptr
= tcg_temp_new_ptr();
4561 gen_load_trap_state_at_tl(r_tsptr
, cpu_env
);
4562 tcg_gen_st32_tl(cpu_tmp0
, r_tsptr
,
4563 offsetof(trap_state
, tt
));
4564 tcg_temp_free_ptr(r_tsptr
);
4571 r_tickptr
= tcg_temp_new_ptr();
4572 tcg_gen_ld_ptr(r_tickptr
, cpu_env
,
4573 offsetof(CPUSPARCState
, tick
));
4574 if (tb_cflags(dc
->base
.tb
) &
4578 gen_helper_tick_set_count(r_tickptr
,
4580 tcg_temp_free_ptr(r_tickptr
);
4581 /* End TB to handle timer interrupt */
4582 dc
->base
.is_jmp
= DISAS_EXIT
;
4586 tcg_gen_mov_tl(cpu_tbr
, cpu_tmp0
);
4590 if (tb_cflags(dc
->base
.tb
) & CF_USE_ICOUNT
) {
4593 gen_helper_wrpstate(cpu_env
, cpu_tmp0
);
4594 if (tb_cflags(dc
->base
.tb
) & CF_USE_ICOUNT
) {
4597 dc
->npc
= DYNAMIC_PC
;
4601 tcg_gen_st32_tl(cpu_tmp0
, cpu_env
,
4602 offsetof(CPUSPARCState
, tl
));
4603 dc
->npc
= DYNAMIC_PC
;
4606 if (tb_cflags(dc
->base
.tb
) & CF_USE_ICOUNT
) {
4609 gen_helper_wrpil(cpu_env
, cpu_tmp0
);
4610 if (tb_cflags(dc
->base
.tb
) & CF_USE_ICOUNT
) {
4615 gen_helper_wrcwp(cpu_env
, cpu_tmp0
);
4618 tcg_gen_st32_tl(cpu_tmp0
, cpu_env
,
4619 offsetof(CPUSPARCState
,
4622 case 11: // canrestore
4623 tcg_gen_st32_tl(cpu_tmp0
, cpu_env
,
4624 offsetof(CPUSPARCState
,
4627 case 12: // cleanwin
4628 tcg_gen_st32_tl(cpu_tmp0
, cpu_env
,
4629 offsetof(CPUSPARCState
,
4632 case 13: // otherwin
4633 tcg_gen_st32_tl(cpu_tmp0
, cpu_env
,
4634 offsetof(CPUSPARCState
,
4638 tcg_gen_st32_tl(cpu_tmp0
, cpu_env
,
4639 offsetof(CPUSPARCState
,
4642 case 16: // UA2005 gl
4643 CHECK_IU_FEATURE(dc
, GL
);
4644 gen_helper_wrgl(cpu_env
, cpu_tmp0
);
4646 case 26: // UA2005 strand status
4647 CHECK_IU_FEATURE(dc
, HYPV
);
4648 if (!hypervisor(dc
))
4650 tcg_gen_mov_tl(cpu_ssr
, cpu_tmp0
);
4656 tcg_gen_trunc_tl_i32(cpu_wim
, cpu_tmp0
);
4657 if (dc
->def
->nwindows
!= 32) {
4658 tcg_gen_andi_tl(cpu_wim
, cpu_wim
,
4659 (1 << dc
->def
->nwindows
) - 1);
4664 case 0x33: /* wrtbr, UA2005 wrhpr */
4666 #ifndef TARGET_SPARC64
4667 if (!supervisor(dc
))
4669 tcg_gen_xor_tl(cpu_tbr
, cpu_src1
, cpu_src2
);
4671 CHECK_IU_FEATURE(dc
, HYPV
);
4672 if (!hypervisor(dc
))
4674 cpu_tmp0
= get_temp_tl(dc
);
4675 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
4678 tcg_gen_st_i64(cpu_tmp0
, cpu_env
,
4679 offsetof(CPUSPARCState
,
4683 tcg_gen_exit_tb(NULL
, 0);
4684 dc
->base
.is_jmp
= DISAS_NORETURN
;
4687 // XXX gen_op_wrhtstate();
4690 tcg_gen_mov_tl(cpu_hintp
, cpu_tmp0
);
4693 tcg_gen_mov_tl(cpu_htba
, cpu_tmp0
);
4695 case 31: // hstick_cmpr
4699 tcg_gen_mov_tl(cpu_hstick_cmpr
, cpu_tmp0
);
4700 r_tickptr
= tcg_temp_new_ptr();
4701 tcg_gen_ld_ptr(r_tickptr
, cpu_env
,
4702 offsetof(CPUSPARCState
, hstick
));
4703 if (tb_cflags(dc
->base
.tb
) &
4707 gen_helper_tick_set_limit(r_tickptr
,
4709 tcg_temp_free_ptr(r_tickptr
);
4710 if (tb_cflags(dc
->base
.tb
) &
4714 /* End TB to handle timer interrupt */
4715 dc
->base
.is_jmp
= DISAS_EXIT
;
4718 case 6: // hver readonly
4726 #ifdef TARGET_SPARC64
4727 case 0x2c: /* V9 movcc */
4729 int cc
= GET_FIELD_SP(insn
, 11, 12);
4730 int cond
= GET_FIELD_SP(insn
, 14, 17);
4734 if (insn
& (1 << 18)) {
4736 gen_compare(&cmp
, 0, cond
, dc
);
4737 } else if (cc
== 2) {
4738 gen_compare(&cmp
, 1, cond
, dc
);
4743 gen_fcompare(&cmp
, cc
, cond
);
4746 /* The get_src2 above loaded the normal 13-bit
4747 immediate field, not the 11-bit field we have
4748 in movcc. But it did handle the reg case. */
4750 simm
= GET_FIELD_SPs(insn
, 0, 10);
4751 tcg_gen_movi_tl(cpu_src2
, simm
);
4754 dst
= gen_load_gpr(dc
, rd
);
4755 tcg_gen_movcond_tl(cmp
.cond
, dst
,
4759 gen_store_gpr(dc
, rd
, dst
);
4762 case 0x2d: /* V9 sdivx */
4763 gen_helper_sdivx(cpu_dst
, cpu_env
, cpu_src1
, cpu_src2
);
4764 gen_store_gpr(dc
, rd
, cpu_dst
);
4766 case 0x2e: /* V9 popc */
4767 tcg_gen_ctpop_tl(cpu_dst
, cpu_src2
);
4768 gen_store_gpr(dc
, rd
, cpu_dst
);
4770 case 0x2f: /* V9 movr */
4772 int cond
= GET_FIELD_SP(insn
, 10, 12);
4776 gen_compare_reg(&cmp
, cond
, cpu_src1
);
4778 /* The get_src2 above loaded the normal 13-bit
4779 immediate field, not the 10-bit field we have
4780 in movr. But it did handle the reg case. */
4782 simm
= GET_FIELD_SPs(insn
, 0, 9);
4783 tcg_gen_movi_tl(cpu_src2
, simm
);
4786 dst
= gen_load_gpr(dc
, rd
);
4787 tcg_gen_movcond_tl(cmp
.cond
, dst
,
4791 gen_store_gpr(dc
, rd
, dst
);
4799 } else if (xop
== 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */
4800 #ifdef TARGET_SPARC64
4801 int opf
= GET_FIELD_SP(insn
, 5, 13);
4802 rs1
= GET_FIELD(insn
, 13, 17);
4803 rs2
= GET_FIELD(insn
, 27, 31);
4804 if (gen_trap_ifnofpu(dc
)) {
4809 case 0x000: /* VIS I edge8cc */
4810 CHECK_FPU_FEATURE(dc
, VIS1
);
4811 cpu_src1
= gen_load_gpr(dc
, rs1
);
4812 cpu_src2
= gen_load_gpr(dc
, rs2
);
4813 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 8, 1, 0);
4814 gen_store_gpr(dc
, rd
, cpu_dst
);
4816 case 0x001: /* VIS II edge8n */
4817 CHECK_FPU_FEATURE(dc
, VIS2
);
4818 cpu_src1
= gen_load_gpr(dc
, rs1
);
4819 cpu_src2
= gen_load_gpr(dc
, rs2
);
4820 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 8, 0, 0);
4821 gen_store_gpr(dc
, rd
, cpu_dst
);
4823 case 0x002: /* VIS I edge8lcc */
4824 CHECK_FPU_FEATURE(dc
, VIS1
);
4825 cpu_src1
= gen_load_gpr(dc
, rs1
);
4826 cpu_src2
= gen_load_gpr(dc
, rs2
);
4827 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 8, 1, 1);
4828 gen_store_gpr(dc
, rd
, cpu_dst
);
4830 case 0x003: /* VIS II edge8ln */
4831 CHECK_FPU_FEATURE(dc
, VIS2
);
4832 cpu_src1
= gen_load_gpr(dc
, rs1
);
4833 cpu_src2
= gen_load_gpr(dc
, rs2
);
4834 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 8, 0, 1);
4835 gen_store_gpr(dc
, rd
, cpu_dst
);
4837 case 0x004: /* VIS I edge16cc */
4838 CHECK_FPU_FEATURE(dc
, VIS1
);
4839 cpu_src1
= gen_load_gpr(dc
, rs1
);
4840 cpu_src2
= gen_load_gpr(dc
, rs2
);
4841 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 16, 1, 0);
4842 gen_store_gpr(dc
, rd
, cpu_dst
);
4844 case 0x005: /* VIS II edge16n */
4845 CHECK_FPU_FEATURE(dc
, VIS2
);
4846 cpu_src1
= gen_load_gpr(dc
, rs1
);
4847 cpu_src2
= gen_load_gpr(dc
, rs2
);
4848 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 16, 0, 0);
4849 gen_store_gpr(dc
, rd
, cpu_dst
);
4851 case 0x006: /* VIS I edge16lcc */
4852 CHECK_FPU_FEATURE(dc
, VIS1
);
4853 cpu_src1
= gen_load_gpr(dc
, rs1
);
4854 cpu_src2
= gen_load_gpr(dc
, rs2
);
4855 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 16, 1, 1);
4856 gen_store_gpr(dc
, rd
, cpu_dst
);
4858 case 0x007: /* VIS II edge16ln */
4859 CHECK_FPU_FEATURE(dc
, VIS2
);
4860 cpu_src1
= gen_load_gpr(dc
, rs1
);
4861 cpu_src2
= gen_load_gpr(dc
, rs2
);
4862 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 16, 0, 1);
4863 gen_store_gpr(dc
, rd
, cpu_dst
);
4865 case 0x008: /* VIS I edge32cc */
4866 CHECK_FPU_FEATURE(dc
, VIS1
);
4867 cpu_src1
= gen_load_gpr(dc
, rs1
);
4868 cpu_src2
= gen_load_gpr(dc
, rs2
);
4869 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 32, 1, 0);
4870 gen_store_gpr(dc
, rd
, cpu_dst
);
4872 case 0x009: /* VIS II edge32n */
4873 CHECK_FPU_FEATURE(dc
, VIS2
);
4874 cpu_src1
= gen_load_gpr(dc
, rs1
);
4875 cpu_src2
= gen_load_gpr(dc
, rs2
);
4876 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 32, 0, 0);
4877 gen_store_gpr(dc
, rd
, cpu_dst
);
4879 case 0x00a: /* VIS I edge32lcc */
4880 CHECK_FPU_FEATURE(dc
, VIS1
);
4881 cpu_src1
= gen_load_gpr(dc
, rs1
);
4882 cpu_src2
= gen_load_gpr(dc
, rs2
);
4883 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 32, 1, 1);
4884 gen_store_gpr(dc
, rd
, cpu_dst
);
4886 case 0x00b: /* VIS II edge32ln */
4887 CHECK_FPU_FEATURE(dc
, VIS2
);
4888 cpu_src1
= gen_load_gpr(dc
, rs1
);
4889 cpu_src2
= gen_load_gpr(dc
, rs2
);
4890 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 32, 0, 1);
4891 gen_store_gpr(dc
, rd
, cpu_dst
);
4893 case 0x010: /* VIS I array8 */
4894 CHECK_FPU_FEATURE(dc
, VIS1
);
4895 cpu_src1
= gen_load_gpr(dc
, rs1
);
4896 cpu_src2
= gen_load_gpr(dc
, rs2
);
4897 gen_helper_array8(cpu_dst
, cpu_src1
, cpu_src2
);
4898 gen_store_gpr(dc
, rd
, cpu_dst
);
4900 case 0x012: /* VIS I array16 */
4901 CHECK_FPU_FEATURE(dc
, VIS1
);
4902 cpu_src1
= gen_load_gpr(dc
, rs1
);
4903 cpu_src2
= gen_load_gpr(dc
, rs2
);
4904 gen_helper_array8(cpu_dst
, cpu_src1
, cpu_src2
);
4905 tcg_gen_shli_i64(cpu_dst
, cpu_dst
, 1);
4906 gen_store_gpr(dc
, rd
, cpu_dst
);
4908 case 0x014: /* VIS I array32 */
4909 CHECK_FPU_FEATURE(dc
, VIS1
);
4910 cpu_src1
= gen_load_gpr(dc
, rs1
);
4911 cpu_src2
= gen_load_gpr(dc
, rs2
);
4912 gen_helper_array8(cpu_dst
, cpu_src1
, cpu_src2
);
4913 tcg_gen_shli_i64(cpu_dst
, cpu_dst
, 2);
4914 gen_store_gpr(dc
, rd
, cpu_dst
);
4916 case 0x018: /* VIS I alignaddr */
4917 CHECK_FPU_FEATURE(dc
, VIS1
);
4918 cpu_src1
= gen_load_gpr(dc
, rs1
);
4919 cpu_src2
= gen_load_gpr(dc
, rs2
);
4920 gen_alignaddr(cpu_dst
, cpu_src1
, cpu_src2
, 0);
4921 gen_store_gpr(dc
, rd
, cpu_dst
);
4923 case 0x01a: /* VIS I alignaddrl */
4924 CHECK_FPU_FEATURE(dc
, VIS1
);
4925 cpu_src1
= gen_load_gpr(dc
, rs1
);
4926 cpu_src2
= gen_load_gpr(dc
, rs2
);
4927 gen_alignaddr(cpu_dst
, cpu_src1
, cpu_src2
, 1);
4928 gen_store_gpr(dc
, rd
, cpu_dst
);
4930 case 0x019: /* VIS II bmask */
4931 CHECK_FPU_FEATURE(dc
, VIS2
);
4932 cpu_src1
= gen_load_gpr(dc
, rs1
);
4933 cpu_src2
= gen_load_gpr(dc
, rs2
);
4934 tcg_gen_add_tl(cpu_dst
, cpu_src1
, cpu_src2
);
4935 tcg_gen_deposit_tl(cpu_gsr
, cpu_gsr
, cpu_dst
, 32, 32);
4936 gen_store_gpr(dc
, rd
, cpu_dst
);
4938 case 0x020: /* VIS I fcmple16 */
4939 CHECK_FPU_FEATURE(dc
, VIS1
);
4940 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4941 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4942 gen_helper_fcmple16(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4943 gen_store_gpr(dc
, rd
, cpu_dst
);
4945 case 0x022: /* VIS I fcmpne16 */
4946 CHECK_FPU_FEATURE(dc
, VIS1
);
4947 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4948 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4949 gen_helper_fcmpne16(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4950 gen_store_gpr(dc
, rd
, cpu_dst
);
4952 case 0x024: /* VIS I fcmple32 */
4953 CHECK_FPU_FEATURE(dc
, VIS1
);
4954 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4955 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4956 gen_helper_fcmple32(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4957 gen_store_gpr(dc
, rd
, cpu_dst
);
4959 case 0x026: /* VIS I fcmpne32 */
4960 CHECK_FPU_FEATURE(dc
, VIS1
);
4961 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4962 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4963 gen_helper_fcmpne32(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4964 gen_store_gpr(dc
, rd
, cpu_dst
);
4966 case 0x028: /* VIS I fcmpgt16 */
4967 CHECK_FPU_FEATURE(dc
, VIS1
);
4968 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4969 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4970 gen_helper_fcmpgt16(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4971 gen_store_gpr(dc
, rd
, cpu_dst
);
4973 case 0x02a: /* VIS I fcmpeq16 */
4974 CHECK_FPU_FEATURE(dc
, VIS1
);
4975 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4976 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4977 gen_helper_fcmpeq16(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4978 gen_store_gpr(dc
, rd
, cpu_dst
);
4980 case 0x02c: /* VIS I fcmpgt32 */
4981 CHECK_FPU_FEATURE(dc
, VIS1
);
4982 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4983 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4984 gen_helper_fcmpgt32(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4985 gen_store_gpr(dc
, rd
, cpu_dst
);
4987 case 0x02e: /* VIS I fcmpeq32 */
4988 CHECK_FPU_FEATURE(dc
, VIS1
);
4989 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4990 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4991 gen_helper_fcmpeq32(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4992 gen_store_gpr(dc
, rd
, cpu_dst
);
4994 case 0x031: /* VIS I fmul8x16 */
4995 CHECK_FPU_FEATURE(dc
, VIS1
);
4996 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmul8x16
);
4998 case 0x033: /* VIS I fmul8x16au */
4999 CHECK_FPU_FEATURE(dc
, VIS1
);
5000 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmul8x16au
);
5002 case 0x035: /* VIS I fmul8x16al */
5003 CHECK_FPU_FEATURE(dc
, VIS1
);
5004 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmul8x16al
);
5006 case 0x036: /* VIS I fmul8sux16 */
5007 CHECK_FPU_FEATURE(dc
, VIS1
);
5008 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmul8sux16
);
5010 case 0x037: /* VIS I fmul8ulx16 */
5011 CHECK_FPU_FEATURE(dc
, VIS1
);
5012 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmul8ulx16
);
5014 case 0x038: /* VIS I fmuld8sux16 */
5015 CHECK_FPU_FEATURE(dc
, VIS1
);
5016 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmuld8sux16
);
5018 case 0x039: /* VIS I fmuld8ulx16 */
5019 CHECK_FPU_FEATURE(dc
, VIS1
);
5020 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmuld8ulx16
);
5022 case 0x03a: /* VIS I fpack32 */
5023 CHECK_FPU_FEATURE(dc
, VIS1
);
5024 gen_gsr_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fpack32
);
5026 case 0x03b: /* VIS I fpack16 */
5027 CHECK_FPU_FEATURE(dc
, VIS1
);
5028 cpu_src1_64
= gen_load_fpr_D(dc
, rs2
);
5029 cpu_dst_32
= gen_dest_fpr_F(dc
);
5030 gen_helper_fpack16(cpu_dst_32
, cpu_gsr
, cpu_src1_64
);
5031 gen_store_fpr_F(dc
, rd
, cpu_dst_32
);
5033 case 0x03d: /* VIS I fpackfix */
5034 CHECK_FPU_FEATURE(dc
, VIS1
);
5035 cpu_src1_64
= gen_load_fpr_D(dc
, rs2
);
5036 cpu_dst_32
= gen_dest_fpr_F(dc
);
5037 gen_helper_fpackfix(cpu_dst_32
, cpu_gsr
, cpu_src1_64
);
5038 gen_store_fpr_F(dc
, rd
, cpu_dst_32
);
            case 0x03e: /* VIS I pdist */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDDD(dc, rd, rs1, rs2, gen_helper_pdist);
                break;
            case 0x048: /* VIS I faligndata */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_faligndata);
                break;
            case 0x04b: /* VIS I fpmerge */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpmerge);
                break;
            case 0x04c: /* VIS II bshuffle */
                CHECK_FPU_FEATURE(dc, VIS2);
                gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_bshuffle);
                break;
            case 0x04d: /* VIS I fexpand */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fexpand);
                break;
            case 0x050: /* VIS I fpadd16 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd16);
                break;
            case 0x051: /* VIS I fpadd16s */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpadd16s);
                break;
            case 0x052: /* VIS I fpadd32 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd32);
                break;
            case 0x053: /* VIS I fpadd32s */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_add_i32);
                break;
            case 0x054: /* VIS I fpsub16 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub16);
                break;
            case 0x055: /* VIS I fpsub16s */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpsub16s);
                break;
            case 0x056: /* VIS I fpsub32 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub32);
                break;
            case 0x057: /* VIS I fpsub32s */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_sub_i32);
                break;
            case 0x060: /* VIS I fzero */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_dst_64 = gen_dest_fpr_D(dc, rd);
                tcg_gen_movi_i64(cpu_dst_64, 0);
                gen_store_fpr_D(dc, rd, cpu_dst_64);
                break;
            case 0x061: /* VIS I fzeros */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_dst_32 = gen_dest_fpr_F(dc);
                tcg_gen_movi_i32(cpu_dst_32, 0);
                gen_store_fpr_F(dc, rd, cpu_dst_32);
                break;
            case 0x062: /* VIS I fnor */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nor_i64);
                break;
            case 0x063: /* VIS I fnors */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nor_i32);
                break;
            case 0x064: /* VIS I fandnot2 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_andc_i64);
                break;
            case 0x065: /* VIS I fandnot2s */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_andc_i32);
                break;
            case 0x066: /* VIS I fnot2 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DD(dc, rd, rs2, tcg_gen_not_i64);
                break;
            case 0x067: /* VIS I fnot2s */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FF(dc, rd, rs2, tcg_gen_not_i32);
                break;
            case 0x068: /* VIS I fandnot1 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_andc_i64);
                break;
            case 0x069: /* VIS I fandnot1s */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_andc_i32);
                break;
            case 0x06a: /* VIS I fnot1 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DD(dc, rd, rs1, tcg_gen_not_i64);
                break;
            case 0x06b: /* VIS I fnot1s */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FF(dc, rd, rs1, tcg_gen_not_i32);
                break;
            case 0x06c: /* VIS I fxor */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_xor_i64);
                break;
            case 0x06d: /* VIS I fxors */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_xor_i32);
                break;
            case 0x06e: /* VIS I fnand */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nand_i64);
                break;
            case 0x06f: /* VIS I fnands */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nand_i32);
                break;
            case 0x070: /* VIS I fand */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_and_i64);
                break;
            case 0x071: /* VIS I fands */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_and_i32);
                break;
            case 0x072: /* VIS I fxnor */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_eqv_i64);
                break;
            case 0x073: /* VIS I fxnors */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_eqv_i32);
                break;
            case 0x074: /* VIS I fsrc1 */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                gen_store_fpr_D(dc, rd, cpu_src1_64);
                break;
            case 0x075: /* VIS I fsrc1s */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1_32 = gen_load_fpr_F(dc, rs1);
                gen_store_fpr_F(dc, rd, cpu_src1_32);
                break;
            case 0x076: /* VIS I fornot2 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_orc_i64);
                break;
            case 0x077: /* VIS I fornot2s */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_orc_i32);
                break;
            case 0x078: /* VIS I fsrc2 */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1_64 = gen_load_fpr_D(dc, rs2);
                gen_store_fpr_D(dc, rd, cpu_src1_64);
                break;
            case 0x079: /* VIS I fsrc2s */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1_32 = gen_load_fpr_F(dc, rs2);
                gen_store_fpr_F(dc, rd, cpu_src1_32);
                break;
            case 0x07a: /* VIS I fornot1 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_orc_i64);
                break;
            case 0x07b: /* VIS I fornot1s */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_orc_i32);
                break;
            case 0x07c: /* VIS I for */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_or_i64);
                break;
            case 0x07d: /* VIS I fors */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_or_i32);
                break;
            case 0x07e: /* VIS I fone */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_dst_64 = gen_dest_fpr_D(dc, rd);
                tcg_gen_movi_i64(cpu_dst_64, -1);
                gen_store_fpr_D(dc, rd, cpu_dst_64);
                break;
            case 0x07f: /* VIS I fones */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_dst_32 = gen_dest_fpr_F(dc);
                tcg_gen_movi_i32(cpu_dst_32, -1);
                gen_store_fpr_F(dc, rd, cpu_dst_32);
                break;
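            /*
             * Note: each VIS logical op above comes in a 64-bit form (the DDD
             * and DD helpers, operating on a double FP register) and a 32-bit
             * "s" form (the FFF and FF helpers, operating on a single FP
             * register).  fandnot1/fornot1 reuse the same andc/orc TCG ops as
             * fandnot2/fornot2 by swapping rs1 and rs2.
             */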
            case 0x080: /* VIS I shutdown */
            case 0x081: /* VIS II siam */
        } else if (xop == 0x37) { /* V8 CPop2, V9 impdep2 */
#ifdef TARGET_SPARC64
            goto illegal_insn;
#else
            goto ncp_insn;
#endif
#ifdef TARGET_SPARC64
        } else if (xop == 0x39) { /* V9 return */
            save_state(dc);
            cpu_src1 = get_src1(dc, insn);
            cpu_tmp0 = get_temp_tl(dc);
            if (IS_IMM) {   /* immediate */
                simm = GET_FIELDs(insn, 19, 31);
                tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
            } else {        /* register */
                rs2 = GET_FIELD(insn, 27, 31);
                if (rs2) {
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
                } else {
                    tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
                }
            }
            gen_helper_restore(cpu_env);
            gen_mov_pc_npc(dc);
            gen_check_align(cpu_tmp0, 3);
            tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
            dc->npc = DYNAMIC_PC;
            goto jmp_insn;
#endif
        } else {
            cpu_src1 = get_src1(dc, insn);
            cpu_tmp0 = get_temp_tl(dc);
            if (IS_IMM) {   /* immediate */
                simm = GET_FIELDs(insn, 19, 31);
                tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
            } else {        /* register */
                rs2 = GET_FIELD(insn, 27, 31);
                if (rs2) {
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
                } else {
                    tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
                }
            }
            switch (xop) {
            case 0x38:  /* jmpl */
                {
                    TCGv t = gen_dest_gpr(dc, rd);
                    tcg_gen_movi_tl(t, dc->pc);
                    gen_store_gpr(dc, rd, t);

                    gen_mov_pc_npc(dc);
                    gen_check_align(cpu_tmp0, 3);
                    gen_address_mask(dc, cpu_tmp0);
                    tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
                    dc->npc = DYNAMIC_PC;
                }
                goto jmp_insn;
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
            case 0x39:  /* rett, V9 return */
                {
                    if (!supervisor(dc))
                        goto priv_insn;
                    gen_mov_pc_npc(dc);
                    gen_check_align(cpu_tmp0, 3);
                    tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
                    dc->npc = DYNAMIC_PC;
                    gen_helper_rett(cpu_env);
                }
                goto jmp_insn;
#endif
            case 0x3b: /* flush */
                if (!((dc)->def->features & CPU_FEATURE_FLUSH))
                    goto unimp_flush;
                /* nop */
                break;
            case 0x3c:  /* save */
                gen_helper_save(cpu_env);
                gen_store_gpr(dc, rd, cpu_tmp0);
                break;
            case 0x3d:  /* restore */
                gen_helper_restore(cpu_env);
                gen_store_gpr(dc, rd, cpu_tmp0);
                break;
#if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
            case 0x3e:  /* V9 done/retry */
                {
                    switch (rd) {
                    case 0:
                        if (!supervisor(dc))
                            goto priv_insn;
                        dc->npc = DYNAMIC_PC;
                        dc->pc = DYNAMIC_PC;
                        if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
                            gen_io_start();
                        }
                        gen_helper_done(cpu_env);
                        if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
                            gen_io_end();
                        }
                        goto jmp_insn;
                    case 1:
                        if (!supervisor(dc))
                            goto priv_insn;
                        dc->npc = DYNAMIC_PC;
                        dc->pc = DYNAMIC_PC;
                        if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
                            gen_io_start();
                        }
                        gen_helper_retry(cpu_env);
                        if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
                            gen_io_end();
                        }
                        goto jmp_insn;
                    default:
                        goto illegal_insn;
                    }
                }
                break;
#endif
            default:
                goto illegal_insn;
            }
        }
        break;
    case 3:                     /* load/store instructions */
        {
            unsigned int xop = GET_FIELD(insn, 7, 12);
            /* ??? gen_address_mask prevents us from using a source
               register directly.  Always generate a temporary.  */
            TCGv cpu_addr = get_temp_tl(dc);

            tcg_gen_mov_tl(cpu_addr, get_src1(dc, insn));
            if (xop == 0x3c || xop == 0x3e) {
                /* V9 casa/casxa : no offset */
            } else if (IS_IMM) {    /* immediate */
                simm = GET_FIELDs(insn, 19, 31);
                if (simm != 0) {
                    tcg_gen_addi_tl(cpu_addr, cpu_addr, simm);
                }
            } else {                /* register */
                rs2 = GET_FIELD(insn, 27, 31);
                if (rs2 != 0) {
                    tcg_gen_add_tl(cpu_addr, cpu_addr, gen_load_gpr(dc, rs2));
                }
            }
            if (xop < 4 || (xop > 7 && xop < 0x14 && xop != 0x0e) ||
                (xop > 0x17 && xop <= 0x1d) ||
                (xop > 0x2c && xop <= 0x33) || xop == 0x1f || xop == 0x3d) {
                TCGv cpu_val = gen_dest_gpr(dc, rd);

                switch (xop) {
                case 0x0:   /* ld, V9 lduw, load unsigned word */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld32u(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x1:   /* ldub, load unsigned byte */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld8u(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x2:   /* lduh, load unsigned halfword */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld16u(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x3:   /* ldd, load double word */
                    if (rd & 1)
                        goto illegal_insn;
                    else {
                        TCGv_i64 t64;

                        gen_address_mask(dc, cpu_addr);
                        t64 = tcg_temp_new_i64();
                        tcg_gen_qemu_ld64(t64, cpu_addr, dc->mem_idx);
                        tcg_gen_trunc_i64_tl(cpu_val, t64);
                        tcg_gen_ext32u_tl(cpu_val, cpu_val);
                        gen_store_gpr(dc, rd + 1, cpu_val);
                        tcg_gen_shri_i64(t64, t64, 32);
                        tcg_gen_trunc_i64_tl(cpu_val, t64);
                        tcg_temp_free_i64(t64);
                        tcg_gen_ext32u_tl(cpu_val, cpu_val);
                    }
                    break;
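                /*
                 * Note on ldd above: one 64-bit big-endian load fetches the
                 * whole register pair; the low half (the word at addr + 4)
                 * is stored to the odd register rd + 1 immediately, while
                 * the high half is left in cpu_val for the common
                 * gen_store_gpr(dc, rd, cpu_val) after this switch.
                 */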
                case 0x9:   /* ldsb, load signed byte */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0xa:   /* ldsh, load signed halfword */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld16s(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0xd:   /* ldstub */
                    gen_ldstub(dc, cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x0f:
                    /* swap, swap register with memory. Also atomically */
                    CHECK_IU_FEATURE(dc, SWAP);
                    cpu_src1 = gen_load_gpr(dc, rd);
                    gen_swap(dc, cpu_val, cpu_src1, cpu_addr,
                             dc->mem_idx, MO_TEUL);
                    break;
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
                case 0x10:  /* lda, V9 lduwa, load word alternate */
                    gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUL);
                    break;
                case 0x11:  /* lduba, load unsigned byte alternate */
                    gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_UB);
                    break;
                case 0x12:  /* lduha, load unsigned halfword alternate */
                    gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUW);
                    break;
                case 0x13:  /* ldda, load double word alternate */
                    if (rd & 1) {
                        goto illegal_insn;
                    }
                    gen_ldda_asi(dc, cpu_addr, insn, rd);
                    goto skip_move;
                case 0x19:  /* ldsba, load signed byte alternate */
                    gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_SB);
                    break;
                case 0x1a:  /* ldsha, load signed halfword alternate */
                    gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESW);
                    break;
                case 0x1d:  /* ldstuba -- XXX: should be atomically */
                    gen_ldstub_asi(dc, cpu_val, cpu_addr, insn);
                    break;
                case 0x1f:  /* swapa, swap reg with alt. memory. Also
                               atomically */
                    CHECK_IU_FEATURE(dc, SWAP);
                    cpu_src1 = gen_load_gpr(dc, rd);
                    gen_swap_asi(dc, cpu_val, cpu_src1, cpu_addr, insn);
                    break;
#ifndef TARGET_SPARC64
                case 0x30: /* ldc */
                case 0x31: /* ldcsr */
                case 0x33: /* lddc */
                    goto ncp_insn;
#endif
#endif
#ifdef TARGET_SPARC64
                case 0x08: /* V9 ldsw */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld32s(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x0b: /* V9 ldx */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld64(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x18: /* V9 ldswa */
                    gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESL);
                    break;
                case 0x1b: /* V9 ldxa */
                    gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEQ);
                    break;
                case 0x2d: /* V9 prefetch, no effect */
                    goto skip_move;
                case 0x30: /* V9 ldfa */
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    gen_ldf_asi(dc, cpu_addr, insn, 4, rd);
                    gen_update_fprs_dirty(dc, rd);
                    goto skip_move;
                case 0x33: /* V9 lddfa */
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    gen_ldf_asi(dc, cpu_addr, insn, 8, DFPREG(rd));
                    gen_update_fprs_dirty(dc, DFPREG(rd));
                    goto skip_move;
                case 0x3d: /* V9 prefetcha, no effect */
                    goto skip_move;
                case 0x32: /* V9 ldqfa */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    gen_ldf_asi(dc, cpu_addr, insn, 16, QFPREG(rd));
                    gen_update_fprs_dirty(dc, QFPREG(rd));
                    goto skip_move;
#endif
                default:
                    goto illegal_insn;
                }
                gen_store_gpr(dc, rd, cpu_val);
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
            skip_move: ;
#endif
            } else if (xop >= 0x20 && xop < 0x24) {
                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }
                switch (xop) {
                case 0x20:  /* ldf, load fpreg */
                    gen_address_mask(dc, cpu_addr);
                    cpu_dst_32 = gen_dest_fpr_F(dc);
                    tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr,
                                        dc->mem_idx, MO_TEUL);
                    gen_store_fpr_F(dc, rd, cpu_dst_32);
                    break;
                case 0x21:  /* ldfsr, V9 ldxfsr */
#ifdef TARGET_SPARC64
                    gen_address_mask(dc, cpu_addr);
                    if (rd == 1) {
                        TCGv_i64 t64 = tcg_temp_new_i64();
                        tcg_gen_qemu_ld_i64(t64, cpu_addr,
                                            dc->mem_idx, MO_TEQ);
                        gen_helper_ldxfsr(cpu_fsr, cpu_env, cpu_fsr, t64);
                        tcg_temp_free_i64(t64);
                        break;
                    }
#endif
                    cpu_dst_32 = get_temp_i32(dc);
                    tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr,
                                        dc->mem_idx, MO_TEUL);
                    gen_helper_ldfsr(cpu_fsr, cpu_env, cpu_fsr, cpu_dst_32);
                    break;
                case 0x22:  /* ldqf, load quad fpreg */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_address_mask(dc, cpu_addr);
                    cpu_src1_64 = tcg_temp_new_i64();
                    tcg_gen_qemu_ld_i64(cpu_src1_64, cpu_addr, dc->mem_idx,
                                        MO_TEQ | MO_ALIGN_4);
                    tcg_gen_addi_tl(cpu_addr, cpu_addr, 8);
                    cpu_src2_64 = tcg_temp_new_i64();
                    tcg_gen_qemu_ld_i64(cpu_src2_64, cpu_addr, dc->mem_idx,
                                        MO_TEQ | MO_ALIGN_4);
                    gen_store_fpr_Q(dc, rd, cpu_src1_64, cpu_src2_64);
                    tcg_temp_free_i64(cpu_src1_64);
                    tcg_temp_free_i64(cpu_src2_64);
                    break;
                case 0x23:  /* lddf, load double fpreg */
                    gen_address_mask(dc, cpu_addr);
                    cpu_dst_64 = gen_dest_fpr_D(dc, rd);
                    tcg_gen_qemu_ld_i64(cpu_dst_64, cpu_addr, dc->mem_idx,
                                        MO_TEQ | MO_ALIGN_4);
                    gen_store_fpr_D(dc, rd, cpu_dst_64);
                    break;
                default:
                    goto illegal_insn;
                }
            } else if (xop < 8 || (xop >= 0x14 && xop < 0x18) ||
                       xop == 0xe || xop == 0x1e) {
                TCGv cpu_val = gen_load_gpr(dc, rd);

                switch (xop) {
                case 0x4: /* st, store word */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_st32(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x5: /* stb, store byte */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_st8(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x6: /* sth, store halfword */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_st16(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x7: /* std, store double word */
                    if (rd & 1)
                        goto illegal_insn;
                    else {
                        TCGv_i64 t64;
                        TCGv lo;

                        gen_address_mask(dc, cpu_addr);
                        lo = gen_load_gpr(dc, rd + 1);
                        t64 = tcg_temp_new_i64();
                        tcg_gen_concat_tl_i64(t64, lo, cpu_val);
                        tcg_gen_qemu_st64(t64, cpu_addr, dc->mem_idx);
                        tcg_temp_free_i64(t64);
                    }
                    break;
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
                case 0x14: /* sta, V9 stwa, store word alternate */
                    gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUL);
                    break;
                case 0x15: /* stba, store byte alternate */
                    gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_UB);
                    break;
                case 0x16: /* stha, store halfword alternate */
                    gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUW);
                    break;
                case 0x17: /* stda, store double word alternate */
                    if (rd & 1) {
                        goto illegal_insn;
                    }
                    gen_stda_asi(dc, cpu_val, cpu_addr, insn, rd);
                    break;
#endif
#ifdef TARGET_SPARC64
                case 0x0e: /* V9 stx */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_st64(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x1e: /* V9 stxa */
                    gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEQ);
                    break;
#endif
                default:
                    goto illegal_insn;
                }
            } else if (xop > 0x23 && xop < 0x28) {
                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }
                switch (xop) {
                case 0x24: /* stf, store fpreg */
                    gen_address_mask(dc, cpu_addr);
                    cpu_src1_32 = gen_load_fpr_F(dc, rd);
                    tcg_gen_qemu_st_i32(cpu_src1_32, cpu_addr,
                                        dc->mem_idx, MO_TEUL);
                    break;
                case 0x25: /* stfsr, V9 stxfsr */
                    {
#ifdef TARGET_SPARC64
                        gen_address_mask(dc, cpu_addr);
                        if (rd == 1) {
                            tcg_gen_qemu_st64(cpu_fsr, cpu_addr, dc->mem_idx);
                            break;
                        }
#endif
                        tcg_gen_qemu_st32(cpu_fsr, cpu_addr, dc->mem_idx);
                    }
                    break;
                case 0x26:
#ifdef TARGET_SPARC64
                    /* V9 stqf, store quad fpreg */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_address_mask(dc, cpu_addr);
                    /* ??? While stqf only requires 4-byte alignment, it is
                       legal for the cpu to signal the unaligned exception.
                       The OS trap handler is then required to fix it up.
                       For qemu, this avoids having to probe the second page
                       before performing the first write.  */
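                    /* Using MO_ALIGN_16 on the first half enforces that check
                       up front: a 16-byte-aligned access cannot cross a page
                       boundary, so once the first store succeeds the second
                       one cannot fault.  */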
                    cpu_src1_64 = gen_load_fpr_Q0(dc, rd);
                    tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr,
                                        dc->mem_idx, MO_TEQ | MO_ALIGN_16);
                    tcg_gen_addi_tl(cpu_addr, cpu_addr, 8);
                    cpu_src2_64 = gen_load_fpr_Q1(dc, rd);
                    tcg_gen_qemu_st_i64(cpu_src2_64, cpu_addr,
                                        dc->mem_idx, MO_TEQ);
                    break;
#else /* !TARGET_SPARC64 */
                    /* stdfq, store floating point queue */
#if defined(CONFIG_USER_ONLY)
                    goto illegal_insn;
#else
                    if (!supervisor(dc))
                        goto priv_insn;
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    goto nfq_insn;
#endif
#endif
                case 0x27: /* stdf, store double fpreg */
                    gen_address_mask(dc, cpu_addr);
                    cpu_src1_64 = gen_load_fpr_D(dc, rd);
                    tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr, dc->mem_idx,
                                        MO_TEQ | MO_ALIGN_4);
                    break;
                default:
                    goto illegal_insn;
                }
            } else if (xop > 0x33 && xop < 0x3f) {
                switch (xop) {
#ifdef TARGET_SPARC64
                case 0x34: /* V9 stfa */
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    gen_stf_asi(dc, cpu_addr, insn, 4, rd);
                    break;
                case 0x36: /* V9 stqfa */
                    {
                        CHECK_FPU_FEATURE(dc, FLOAT128);
                        if (gen_trap_ifnofpu(dc)) {
                            goto jmp_insn;
                        }
                        gen_stf_asi(dc, cpu_addr, insn, 16, QFPREG(rd));
                    }
                    break;
                case 0x37: /* V9 stdfa */
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    gen_stf_asi(dc, cpu_addr, insn, 8, DFPREG(rd));
                    break;
                case 0x3e: /* V9 casxa */
                    rs2 = GET_FIELD(insn, 27, 31);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_casx_asi(dc, cpu_addr, cpu_src2, insn, rd);
                    break;
#else
                case 0x34: /* stc */
                case 0x35: /* stcsr */
                case 0x36: /* stdcq */
                case 0x37: /* stdc */
                    goto ncp_insn;
#endif
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
                case 0x3c: /* V9 or LEON3 casa */
#ifndef TARGET_SPARC64
                    CHECK_IU_FEATURE(dc, CASA);
#endif
                    rs2 = GET_FIELD(insn, 27, 31);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_cas_asi(dc, cpu_addr, cpu_src2, insn, rd);
                    break;
#endif
                default:
                    goto illegal_insn;
                }
            } else {
                goto illegal_insn;
            }
        }
        break;
    }
    /* default case for non jump instructions */
    if (dc->npc == DYNAMIC_PC) {
        dc->pc = DYNAMIC_PC;
        gen_op_next_insn();
    } else if (dc->npc == JUMP_PC) {
        /* we can do a static jump */
        gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
        dc->base.is_jmp = DISAS_NORETURN;
    } else {
        dc->pc = dc->npc;
        dc->npc = dc->npc + 4;
    }
 jmp_insn:
    goto egress;
 illegal_insn:
    gen_exception(dc, TT_ILL_INSN);
    goto egress;
 unimp_flush:
    gen_exception(dc, TT_UNIMP_FLUSH);
    goto egress;
#if !defined(CONFIG_USER_ONLY)
 priv_insn:
    gen_exception(dc, TT_PRIV_INSN);
    goto egress;
#endif
 nfpu_insn:
    gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
    goto egress;
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
 nfq_insn:
    gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
    goto egress;
#endif
#ifndef TARGET_SPARC64
 ncp_insn:
    gen_exception(dc, TT_NCP_INSN);
    goto egress;
#endif
 egress:
    if (dc->n_t32 != 0) {
        int i;
        for (i = dc->n_t32 - 1; i >= 0; --i) {
            tcg_temp_free_i32(dc->t32[i]);
        }
        dc->n_t32 = 0;
    }
    if (dc->n_ttl != 0) {
        int i;
        for (i = dc->n_ttl - 1; i >= 0; --i) {
            tcg_temp_free(dc->ttl[i]);
        }
        dc->n_ttl = 0;
    }
}
static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUSPARCState *env = cs->env_ptr;
    int bound;

    dc->pc = dc->base.pc_first;
    dc->npc = (target_ulong)dc->base.tb->cs_base;
    dc->cc_op = CC_OP_DYNAMIC;
    dc->mem_idx = dc->base.tb->flags & TB_FLAG_MMU_MASK;
    dc->def = &env->def;
    dc->fpu_enabled = tb_fpu_enabled(dc->base.tb->flags);
    dc->address_mask_32bit = tb_am_enabled(dc->base.tb->flags);
#ifndef CONFIG_USER_ONLY
    dc->supervisor = (dc->base.tb->flags & TB_FLAG_SUPER) != 0;
#endif
#ifdef TARGET_SPARC64
    dc->fprs_dirty = 0;
    dc->asi = (dc->base.tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
#ifndef CONFIG_USER_ONLY
    dc->hypervisor = (dc->base.tb->flags & TB_FLAG_HYPER) != 0;
#endif
#endif
    /*
     * if we reach a page boundary, we stop generation so that the
     * PC of a TT_TFAULT exception is always in the right page
     */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
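    /*
     * Example of the bound computation: with 4 KiB pages
     * (TARGET_PAGE_MASK == ~0xfff), a pc_first whose low bits are 0xff8
     * leaves 0x1000 - 0xff8 = 8 bytes in the page, so bound is 2
     * instructions.
     */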
}

static void sparc_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}

static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (dc->npc & JUMP_PC) {
        assert(dc->jump_pc[1] == dc->pc + 4);
        tcg_gen_insn_start(dc->pc, dc->jump_pc[0] | JUMP_PC);
    } else {
        tcg_gen_insn_start(dc->pc, dc->npc);
    }
}
static bool sparc_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
                                      const CPUBreakpoint *bp)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (dc->pc != dc->base.pc_first) {
        save_state(dc);
    }
    gen_helper_debug(cpu_env);
    tcg_gen_exit_tb(NULL, 0);
    dc->base.is_jmp = DISAS_NORETURN;
    /* update pc_next so that the current instruction is included in tb->size */
    dc->base.pc_next += 4;
    return true;
}

static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUSPARCState *env = cs->env_ptr;
    unsigned int insn;

    insn = translator_ldl(env, dc->pc);
    dc->base.pc_next += 4;
    disas_sparc_insn(dc, insn);

    if (dc->base.is_jmp == DISAS_NORETURN) {
        return;
    }
    if (dc->pc != dc->base.pc_next) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }
}

static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    switch (dc->base.is_jmp) {
    case DISAS_NEXT:
    case DISAS_TOO_MANY:
        if (dc->pc != DYNAMIC_PC &&
            (dc->npc != DYNAMIC_PC && dc->npc != JUMP_PC)) {
            /* static PC and NPC: we can use direct chaining */
            gen_goto_tb(dc, 0, dc->pc, dc->npc);
        } else {
            if (dc->pc != DYNAMIC_PC) {
                tcg_gen_movi_tl(cpu_pc, dc->pc);
            }
            save_npc(dc);
            tcg_gen_exit_tb(NULL, 0);
        }
        break;

    case DISAS_NORETURN:
        break;

    case DISAS_EXIT:
        /* exit the TB */
        save_state(dc);
        tcg_gen_exit_tb(NULL, 0);
        break;

    default:
        g_assert_not_reached();
    }
}
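/*
 * Direct chaining via gen_goto_tb is only possible when both pc and npc are
 * known at translation time; a DYNAMIC_PC or JUMP_PC value means the
 * successor depends on run-time state, so the TB ends with a plain
 * tcg_gen_exit_tb and the next TB is looked up at run time.
 */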
static void sparc_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
{
    qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
    log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size);
}

static const TranslatorOps sparc_tr_ops = {
    .init_disas_context = sparc_tr_init_disas_context,
    .tb_start           = sparc_tr_tb_start,
    .insn_start         = sparc_tr_insn_start,
    .breakpoint_check   = sparc_tr_breakpoint_check,
    .translate_insn     = sparc_tr_translate_insn,
    .tb_stop            = sparc_tr_tb_stop,
    .disas_log          = sparc_tr_disas_log,
};

void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
{
    DisasContext dc = {};

    translator_loop(&sparc_tr_ops, &dc.base, cs, tb, max_insns);
}
void sparc_tcg_init(void)
{
    static const char gregnames[32][4] = {
        "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
        "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
        "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
        "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
    };
    static const char fregnames[32][4] = {
        "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
        "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
        "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
        "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
    };

    static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
#ifdef TARGET_SPARC64
        { &cpu_xcc, offsetof(CPUSPARCState, xcc), "xcc" },
        { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
#else
        { &cpu_wim, offsetof(CPUSPARCState, wim), "wim" },
#endif
        { &cpu_cc_op, offsetof(CPUSPARCState, cc_op), "cc_op" },
        { &cpu_psr, offsetof(CPUSPARCState, psr), "psr" },
    };

    static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
#ifdef TARGET_SPARC64
        { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
        { &cpu_tick_cmpr, offsetof(CPUSPARCState, tick_cmpr), "tick_cmpr" },
        { &cpu_stick_cmpr, offsetof(CPUSPARCState, stick_cmpr), "stick_cmpr" },
        { &cpu_hstick_cmpr, offsetof(CPUSPARCState, hstick_cmpr),
          "hstick_cmpr" },
        { &cpu_hintp, offsetof(CPUSPARCState, hintp), "hintp" },
        { &cpu_htba, offsetof(CPUSPARCState, htba), "htba" },
        { &cpu_hver, offsetof(CPUSPARCState, hver), "hver" },
        { &cpu_ssr, offsetof(CPUSPARCState, ssr), "ssr" },
        { &cpu_ver, offsetof(CPUSPARCState, version), "ver" },
#endif
        { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
        { &cpu_cc_src, offsetof(CPUSPARCState, cc_src), "cc_src" },
        { &cpu_cc_src2, offsetof(CPUSPARCState, cc_src2), "cc_src2" },
        { &cpu_cc_dst, offsetof(CPUSPARCState, cc_dst), "cc_dst" },
        { &cpu_fsr, offsetof(CPUSPARCState, fsr), "fsr" },
        { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
        { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
        { &cpu_y, offsetof(CPUSPARCState, y), "y" },
#ifndef CONFIG_USER_ONLY
        { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
#endif
    };

    unsigned int i;

    cpu_regwptr = tcg_global_mem_new_ptr(cpu_env,
                                         offsetof(CPUSPARCState, regwptr),
                                         "regwptr");

    for (i = 0; i < ARRAY_SIZE(r32); ++i) {
        *r32[i].ptr = tcg_global_mem_new_i32(cpu_env, r32[i].off, r32[i].name);
    }

    for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
        *rtl[i].ptr = tcg_global_mem_new(cpu_env, rtl[i].off, rtl[i].name);
    }

    cpu_regs[0] = NULL;
    for (i = 1; i < 8; ++i) {
        cpu_regs[i] = tcg_global_mem_new(cpu_env,
                                         offsetof(CPUSPARCState, gregs[i]),
                                         gregnames[i]);
    }

    for (i = 8; i < 32; ++i) {
        cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
                                         (i - 8) * sizeof(target_ulong),
                                         gregnames[i]);
    }

    for (i = 0; i < TARGET_DPREGS; i++) {
        cpu_fpr[i] = tcg_global_mem_new_i64(cpu_env,
                                            offsetof(CPUSPARCState, fpr[i]),
                                            fregnames[i]);
    }
}
void restore_state_to_opc(CPUSPARCState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    target_ulong pc = data[0];
    target_ulong npc = data[1];

    env->pc = pc;
    if (npc == DYNAMIC_PC) {
        /* dynamic NPC: already stored */
    } else if (npc & JUMP_PC) {
        /* jump PC: use 'cond' and the jump targets of the translation */
        if (env->cond) {
            env->npc = npc & ~3;