4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
28 # define LOG_DISAS(...) do { } while (0)
31 #include "qemu/osdep.h"
34 #include "disas/disas.h"
35 #include "exec/exec-all.h"
38 #include "qemu/host-utils.h"
39 #include "exec/cpu_ldst.h"
41 /* global register indexes */
42 static TCGv_env cpu_env
;
44 #include "exec/gen-icount.h"
45 #include "exec/helper-proto.h"
46 #include "exec/helper-gen.h"
48 #include "trace-tcg.h"
52 /* Information that (most) every instruction needs to manipulate. */
53 typedef struct DisasContext DisasContext
;
54 typedef struct DisasInsn DisasInsn
;
55 typedef struct DisasFields DisasFields
;
58 struct TranslationBlock
*tb
;
59 const DisasInsn
*insn
;
65 bool singlestep_enabled
;
68 /* Information carried about a condition to be evaluated. */
75 struct { TCGv_i64 a
, b
; } s64
;
76 struct { TCGv_i32 a
, b
; } s32
;
82 #ifdef DEBUG_INLINE_BRANCHES
83 static uint64_t inline_branch_hit
[CC_OP_MAX
];
84 static uint64_t inline_branch_miss
[CC_OP_MAX
];
/* Compute the link value a branch-and-save instruction stores for PC.
   Outside 64-bit addressing mode, 31-bit mode tags the address with the
   high (addressing-mode) bit; the 24-bit and 64-bit paths are outside
   this extract.  */
87 static uint64_t pc_to_link_info(DisasContext
*s
, uint64_t pc
)
/* Not in 64-bit addressing mode?  */
89 if (!(s
->tb
->flags
& FLAG_MASK_64
)) {
/* 31-bit mode: record the mode in the top bit of the link value.  */
90 if (s
->tb
->flags
& FLAG_MASK_32
) {
91 return pc
| 0x80000000;
97 static TCGv_i64 psw_addr
;
98 static TCGv_i64 psw_mask
;
101 static TCGv_i32 cc_op
;
102 static TCGv_i64 cc_src
;
103 static TCGv_i64 cc_dst
;
104 static TCGv_i64 cc_vr
;
106 static char cpu_reg_names
[32][4];
107 static TCGv_i64 regs
[16];
108 static TCGv_i64 fregs
[16];
110 void s390x_translate_init(void)
114 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
115 tcg_ctx
.tcg_env
= cpu_env
;
116 psw_addr
= tcg_global_mem_new_i64(cpu_env
,
117 offsetof(CPUS390XState
, psw
.addr
),
119 psw_mask
= tcg_global_mem_new_i64(cpu_env
,
120 offsetof(CPUS390XState
, psw
.mask
),
122 gbea
= tcg_global_mem_new_i64(cpu_env
,
123 offsetof(CPUS390XState
, gbea
),
126 cc_op
= tcg_global_mem_new_i32(cpu_env
, offsetof(CPUS390XState
, cc_op
),
128 cc_src
= tcg_global_mem_new_i64(cpu_env
, offsetof(CPUS390XState
, cc_src
),
130 cc_dst
= tcg_global_mem_new_i64(cpu_env
, offsetof(CPUS390XState
, cc_dst
),
132 cc_vr
= tcg_global_mem_new_i64(cpu_env
, offsetof(CPUS390XState
, cc_vr
),
135 for (i
= 0; i
< 16; i
++) {
136 snprintf(cpu_reg_names
[i
], sizeof(cpu_reg_names
[0]), "r%d", i
);
137 regs
[i
] = tcg_global_mem_new(cpu_env
,
138 offsetof(CPUS390XState
, regs
[i
]),
142 for (i
= 0; i
< 16; i
++) {
143 snprintf(cpu_reg_names
[i
+ 16], sizeof(cpu_reg_names
[0]), "f%d", i
);
144 fregs
[i
] = tcg_global_mem_new(cpu_env
,
145 offsetof(CPUS390XState
, vregs
[i
][0].d
),
146 cpu_reg_names
[i
+ 16]);
/* Return a new temporary holding a copy of general register REG.  */
150 static TCGv_i64
load_reg(int reg
)
152 TCGv_i64 r
= tcg_temp_new_i64();
153 tcg_gen_mov_i64(r
, regs
[reg
]);
/* Return a new temporary holding the short (32-bit) float in register
   REG, i.e. the high half of the 64-bit freg shifted down.  */
157 static TCGv_i64
load_freg32_i64(int reg
)
159 TCGv_i64 r
= tcg_temp_new_i64();
160 tcg_gen_shri_i64(r
, fregs
[reg
], 32);
/* Store V into the full 64-bit general register REG.  */
164 static void store_reg(int reg
, TCGv_i64 v
)
166 tcg_gen_mov_i64(regs
[reg
], v
);
/* Store V into the full 64-bit float register REG.  */
169 static void store_freg(int reg
, TCGv_i64 v
)
171 tcg_gen_mov_i64(fregs
[reg
], v
);
/* Store the low 32 bits of V into the low half of register REG,
   leaving the upper half intact (32-bit write semantics).  */
174 static void store_reg32_i64(int reg
, TCGv_i64 v
)
176 /* 32 bit register writes keep the upper half */
177 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 0, 32);
/* Store the low 32 bits of V into the HIGH half of register REG,
   leaving the low half intact.  */
180 static void store_reg32h_i64(int reg
, TCGv_i64 v
)
182 tcg_gen_deposit_i64(regs
[reg
], regs
[reg
], v
, 32, 32);
/* Store the low 32 bits of V into the short-float part (high half)
   of float register REG.  */
185 static void store_freg32_i64(int reg
, TCGv_i64 v
)
187 tcg_gen_deposit_i64(fregs
[reg
], fregs
[reg
], v
, 32, 32);
/* Copy the low 64 bits of a 128-bit helper result (env->retxl)
   into DEST.  */
190 static void return_low128(TCGv_i64 dest
)
192 tcg_gen_ld_i64(dest
, cpu_env
, offsetof(CPUS390XState
, retxl
));
/* Flush the current translation-time PC into the PSW address.  */
195 static void update_psw_addr(DisasContext
*s
)
198 tcg_gen_movi_i64(psw_addr
, s
->pc
);
/* Record a branch for the PER (Program Event Recording) facility.
   TO_NEXT selects the fall-through address rather than psw_addr as the
   branch target.  System emulation only; the user-only counterpart is
   outside this extract.  */
201 static void per_branch(DisasContext
*s
, bool to_next
)
203 #ifndef CONFIG_USER_ONLY
/* Keep the breaking-event address current regardless of PER.  */
204 tcg_gen_movi_i64(gbea
, s
->pc
);
206 if (s
->tb
->flags
& FLAG_MASK_PER
) {
/* Use a constant temp for the fall-through case, psw_addr otherwise.  */
207 TCGv_i64 next_pc
= to_next
? tcg_const_i64(s
->next_pc
) : psw_addr
;
208 gen_helper_per_branch(cpu_env
, gbea
, next_pc
);
/* Free only the temp allocated in the to_next case (the guarding
   condition line is lost in this extract -- confirm against full
   source).  */
210 tcg_temp_free_i64(next_pc
);
/* As per_branch, but for a conditional branch decided by COND applied
   to ARG1/ARG2.  System emulation only.  */
216 static void per_branch_cond(DisasContext
*s
, TCGCond cond
,
217 TCGv_i64 arg1
, TCGv_i64 arg2
)
219 #ifndef CONFIG_USER_ONLY
220 if (s
->tb
->flags
& FLAG_MASK_PER
) {
/* With PER enabled, invoke the helper only when the branch is taken:
   branch around it on the inverted condition.  */
221 TCGLabel
*lab
= gen_new_label();
222 tcg_gen_brcond_i64(tcg_invert_cond(cond
), arg1
, arg2
, lab
);
224 tcg_gen_movi_i64(gbea
, s
->pc
);
225 gen_helper_per_branch(cpu_env
, gbea
, psw_addr
);
/* Without PER, just conditionally update the breaking-event address.  */
229 TCGv_i64 pc
= tcg_const_i64(s
->pc
);
230 tcg_gen_movcond_i64(cond
, gbea
, arg1
, arg2
, gbea
, pc
);
231 tcg_temp_free_i64(pc
);
/* Record the current PC as the PER breaking-event address.  */
236 static void per_breaking_event(DisasContext
*s
)
238 tcg_gen_movi_i64(gbea
, s
->pc
);
/* Write the tracked cc operation out to the cc_op global, unless it
   is dynamic or static (i.e. already up to date in memory).  */
241 static void update_cc_op(DisasContext
*s
)
243 if (s
->cc_op
!= CC_OP_DYNAMIC
&& s
->cc_op
!= CC_OP_STATIC
) {
244 tcg_gen_movi_i32(cc_op
, s
->cc_op
);
/* Prepare state before an operation that may fault.  NOTE(review):
   the body is lost in this extract -- presumably it flushes psw/cc
   state so a faulting helper sees a consistent env; confirm against
   the full source.  */
248 static void potential_page_fault(DisasContext
*s
)
/* Fetch a 2-byte instruction halfword at PC, zero-extended.  */
254 static inline uint64_t ld_code2(CPUS390XState
*env
, uint64_t pc
)
256 return (uint64_t)cpu_lduw_code(env
, pc
);
/* Fetch a 4-byte instruction word at PC, zero-extended (the i32->u32
   cast avoids sign extension from cpu_ldl_code).  */
259 static inline uint64_t ld_code4(CPUS390XState
*env
, uint64_t pc
)
261 return (uint64_t)(uint32_t)cpu_ldl_code(env
, pc
);
/* Map the PSW address-space-control bits of the TB flags to a softmmu
   memory index; the per-case return values are lost in this extract.  */
264 static int get_mem_index(DisasContext
*s
)
266 switch (s
->tb
->flags
& FLAG_MASK_ASC
) {
267 case PSW_ASC_PRIMARY
>> FLAG_MASK_PSW_SHIFT
:
269 case PSW_ASC_SECONDARY
>> FLAG_MASK_PSW_SHIFT
:
271 case PSW_ASC_HOME
>> FLAG_MASK_PSW_SHIFT
:
/* Raise exception number EXCP via the exception helper.  */
279 static void gen_exception(int excp
)
281 TCGv_i32 tmp
= tcg_const_i32(excp
);
282 gen_helper_exception(cpu_env
, tmp
);
283 tcg_temp_free_i32(tmp
);
/* Raise a program exception with code CODE: store the code and the
   instruction length into env, then trigger EXCP_PGM.  */
286 static void gen_program_exception(DisasContext
*s
, int code
)
290 /* Remember what pgm exception this was. */
291 tmp
= tcg_const_i32(code
);
292 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_code
));
293 tcg_temp_free_i32(tmp
);
/* Also record the instruction length for the ILC.  */
295 tmp
= tcg_const_i32(s
->ilen
);
296 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUS390XState
, int_pgm_ilen
));
297 tcg_temp_free_i32(tmp
);
305 /* Trigger exception. */
306 gen_exception(EXCP_PGM
);
/* Raise an operation (illegal opcode) program exception.  */
309 static inline void gen_illegal_opcode(DisasContext
*s
)
311 gen_program_exception(s
, PGM_OPERATION
);
/* Raise a data program exception, first setting the DXC field of the
   FPC to 0xff.  */
314 static inline void gen_trap(DisasContext
*s
)
318 /* Set DXC to 0xff. */
319 t
= tcg_temp_new_i32();
320 tcg_gen_ld_i32(t
, cpu_env
, offsetof(CPUS390XState
, fpc
));
/* DXC occupies bits 8..15 of the FPC, hence the 0xff00 mask.  */
321 tcg_gen_ori_i32(t
, t
, 0xff00);
322 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, fpc
));
323 tcg_temp_free_i32(t
);
325 gen_program_exception(s
, PGM_DATA
);
328 #ifndef CONFIG_USER_ONLY
/* Raise a privileged-operation exception if the CPU is in problem
   state.  System emulation only.  */
329 static void check_privileged(DisasContext
*s
)
331 if (s
->tb
->flags
& FLAG_MASK_PSTATE
) {
332 gen_program_exception(s
, PGM_PRIVILEGED
);
/* Compute an effective address into a new temporary from base register
   B2, index register X2 and signed 20-bit displacement D2; registers
   are skipped when 0 (the guarding conditionals are lost in this
   extract).  Outside 64-bit mode the result is cropped to 31 bits.  */
337 static TCGv_i64
get_address(DisasContext
*s
, int x2
, int b2
, int d2
)
339 TCGv_i64 tmp
= tcg_temp_new_i64();
340 bool need_31
= !(s
->tb
->flags
& FLAG_MASK_64
);
342 /* Note that d2 is limited to 20 bits, signed. If we crop negative
343 displacements early we create larger immediate addends. */
345 /* Note that addi optimizes the imm==0 case. */
/* base + index + displacement */
347 tcg_gen_add_i64(tmp
, regs
[b2
], regs
[x2
]);
348 tcg_gen_addi_i64(tmp
, tmp
, d2
);
/* base + displacement */
350 tcg_gen_addi_i64(tmp
, regs
[b2
], d2
);
/* index + displacement */
352 tcg_gen_addi_i64(tmp
, regs
[x2
], d2
);
/* displacement only */
358 tcg_gen_movi_i64(tmp
, d2
);
/* 24/31-bit mode: mask the address down to 31 bits.  */
361 tcg_gen_andi_i64(tmp
, tmp
, 0x7fffffff);
/* True if cc_src/cc_dst/cc_vr hold live data that must be discarded
   before being clobbered (i.e. the cc op is neither dynamic nor
   static).  */
367 static inline bool live_cc_data(DisasContext
*s
)
369 return (s
->cc_op
!= CC_OP_DYNAMIC
370 && s
->cc_op
!= CC_OP_STATIC
/* Set the condition code to the compile-time constant VAL, encoded as
   one of the CC_OP_CONST0..3 pseudo-ops.  */
374 static inline void gen_op_movi_cc(DisasContext
*s
, uint32_t val
)
/* Dead cc inputs need not survive past this point.  */
376 if (live_cc_data(s
)) {
377 tcg_gen_discard_i64(cc_src
);
378 tcg_gen_discard_i64(cc_dst
);
379 tcg_gen_discard_i64(cc_vr
);
381 s
->cc_op
= CC_OP_CONST0
+ val
;
/* Record a one-operand cc computation: operation OP on operand DST.  */
384 static void gen_op_update1_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 dst
)
/* Only cc_dst is used; the other cc inputs can be discarded.  */
386 if (live_cc_data(s
)) {
387 tcg_gen_discard_i64(cc_src
);
388 tcg_gen_discard_i64(cc_vr
);
390 tcg_gen_mov_i64(cc_dst
, dst
);
/* Record a two-operand cc computation: operation OP on SRC and DST.  */
394 static void gen_op_update2_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
/* Only cc_vr is unused and can be discarded.  */
397 if (live_cc_data(s
)) {
398 tcg_gen_discard_i64(cc_vr
);
400 tcg_gen_mov_i64(cc_src
, src
);
401 tcg_gen_mov_i64(cc_dst
, dst
);
/* Record a three-operand cc computation: operation OP on SRC, DST
   and VR; all three cc inputs are written.  */
405 static void gen_op_update3_cc_i64(DisasContext
*s
, enum cc_op op
, TCGv_i64 src
,
406 TCGv_i64 dst
, TCGv_i64 vr
)
408 tcg_gen_mov_i64(cc_src
, src
);
409 tcg_gen_mov_i64(cc_dst
, dst
);
410 tcg_gen_mov_i64(cc_vr
, vr
);
/* Set cc from a 64-bit zero/nonzero test on VAL.  */
414 static void set_cc_nz_u64(DisasContext
*s
, TCGv_i64 val
)
416 gen_op_update1_cc_i64(s
, CC_OP_NZ
, val
);
/* Set cc from a 32-bit float zero/nonzero classification of VAL.  */
419 static void gen_set_cc_nz_f32(DisasContext
*s
, TCGv_i64 val
)
421 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, val
);
/* Set cc from a 64-bit float zero/nonzero classification of VAL.  */
424 static void gen_set_cc_nz_f64(DisasContext
*s
, TCGv_i64 val
)
426 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, val
);
/* Set cc from a 128-bit float classification of the pair VH:VL.  */
429 static void gen_set_cc_nz_f128(DisasContext
*s
, TCGv_i64 vh
, TCGv_i64 vl
)
431 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, vh
, vl
);
434 /* CC value is in env->cc_op */
/* Mark the cc as already computed into env->cc_op, discarding any
   live cc operand data.  */
435 static void set_cc_static(DisasContext
*s
)
437 if (live_cc_data(s
)) {
438 tcg_gen_discard_i64(cc_src
);
439 tcg_gen_discard_i64(cc_dst
);
440 tcg_gen_discard_i64(cc_vr
);
442 s
->cc_op
= CC_OP_STATIC
;
445 /* calculates cc into cc_op */
/* Materialize the condition code into the cc_op global, dispatching
   on the tracked cc operation.  The switch skeleton (case labels and
   braces) is partially lost in this extract; the calls below show the
   0-, 1-, 2- and 3-argument helper forms.  */
446 static void gen_op_calc_cc(DisasContext
*s
)
448 TCGv_i32 local_cc_op
;
/* Both temps start unused; allocated only for the cases needing them.  */
451 TCGV_UNUSED_I32(local_cc_op
);
452 TCGV_UNUSED_I64(dummy
);
/* Zero placeholder for unused helper arguments.  */
455 dummy
= tcg_const_i64(0);
469 local_cc_op
= tcg_const_i32(s
->cc_op
);
485 /* s->cc_op is the cc value */
486 tcg_gen_movi_i32(cc_op
, s
->cc_op
- CC_OP_CONST0
);
489 /* env->cc_op already is the cc value */
/* 1-argument form: only cc_dst is meaningful.  */
504 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, dummy
, cc_dst
, dummy
);
509 case CC_OP_LTUGTU_32
:
510 case CC_OP_LTUGTU_64
:
/* 2-argument form: cc_src and cc_dst.  */
517 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, dummy
);
/* 3-argument form: cc_src, cc_dst and cc_vr.  */
532 gen_helper_calc_cc(cc_op
, cpu_env
, local_cc_op
, cc_src
, cc_dst
, cc_vr
);
535 /* unknown operation - assume 3 arguments and cc_op in env */
536 gen_helper_calc_cc(cc_op
, cpu_env
, cc_op
, cc_src
, cc_dst
, cc_vr
);
/* Free whichever temps the taken case actually allocated.  */
542 if (!TCGV_IS_UNUSED_I32(local_cc_op
)) {
543 tcg_temp_free_i32(local_cc_op
);
545 if (!TCGV_IS_UNUSED_I64(dummy
)) {
546 tcg_temp_free_i64(dummy
);
549 /* We now have cc in cc_op as constant */
/* True if the TB must end with a full exit rather than chaining:
   single-stepping, a last-I/O TB, or PER tracing is active.  */
553 static bool use_exit_tb(DisasContext
*s
)
555 return (s
->singlestep_enabled
||
556 (s
->tb
->cflags
& CF_LAST_IO
) ||
557 (s
->tb
->flags
& FLAG_MASK_PER
));
/* True if a direct goto_tb to DEST is permitted: not forced to exit,
   and (under softmmu) DEST lies on the same page as either the TB
   start or the current PC.  */
560 static bool use_goto_tb(DisasContext
*s
, uint64_t dest
)
562 if (unlikely(use_exit_tb(s
))) {
565 #ifndef CONFIG_USER_ONLY
566 return (dest
& TARGET_PAGE_MASK
) == (s
->tb
->pc
& TARGET_PAGE_MASK
) ||
567 (dest
& TARGET_PAGE_MASK
) == (s
->pc
& TARGET_PAGE_MASK
);
/* Debug statistics: count a branch that could NOT be inlined for the
   given cc operation.  No-op unless DEBUG_INLINE_BRANCHES.  */
573 static void account_noninline_branch(DisasContext
*s
, int cc_op
)
575 #ifdef DEBUG_INLINE_BRANCHES
576 inline_branch_miss
[cc_op
]++;
/* Debug statistics: count a branch that WAS inlined for the given cc
   operation.  No-op unless DEBUG_INLINE_BRANCHES.  */
580 static void account_inline_branch(DisasContext
*s
, int cc_op
)
582 #ifdef DEBUG_INLINE_BRANCHES
583 inline_branch_hit
[cc_op
]++;
587 /* Table of mask values to comparison codes, given a comparison as input.
588 For such, CC=3 should not be possible. */
589 static const TCGCond ltgt_cond
[16] = {
590 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | | x */
591 TCG_COND_GT
, TCG_COND_GT
, /* | | GT | x */
592 TCG_COND_LT
, TCG_COND_LT
, /* | LT | | x */
593 TCG_COND_NE
, TCG_COND_NE
, /* | LT | GT | x */
594 TCG_COND_EQ
, TCG_COND_EQ
, /* EQ | | | x */
595 TCG_COND_GE
, TCG_COND_GE
, /* EQ | | GT | x */
596 TCG_COND_LE
, TCG_COND_LE
, /* EQ | LT | | x */
597 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | LT | GT | x */
600 /* Table of mask values to comparison codes, given a logic op as input.
601 For such, only CC=0 and CC=1 should be possible. */
602 static const TCGCond nz_cond
[16] = {
603 TCG_COND_NEVER
, TCG_COND_NEVER
, /* | | x | x */
604 TCG_COND_NEVER
, TCG_COND_NEVER
,
605 TCG_COND_NE
, TCG_COND_NE
, /* | NE | x | x */
606 TCG_COND_NE
, TCG_COND_NE
,
607 TCG_COND_EQ
, TCG_COND_EQ
, /* EQ | | x | x */
608 TCG_COND_EQ
, TCG_COND_EQ
,
609 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
, /* EQ | NE | x | x */
610 TCG_COND_ALWAYS
, TCG_COND_ALWAYS
,
613 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
614 details required to generate a TCG comparison. */
615 static void disas_jcc(DisasContext
*s
, DisasCompare
*c
, uint32_t mask
)
618 enum cc_op old_cc_op
= s
->cc_op
;
620 if (mask
== 15 || mask
== 0) {
621 c
->cond
= (mask
? TCG_COND_ALWAYS
: TCG_COND_NEVER
);
624 c
->g1
= c
->g2
= true;
629 /* Find the TCG condition for the mask + cc op. */
635 cond
= ltgt_cond
[mask
];
636 if (cond
== TCG_COND_NEVER
) {
639 account_inline_branch(s
, old_cc_op
);
642 case CC_OP_LTUGTU_32
:
643 case CC_OP_LTUGTU_64
:
644 cond
= tcg_unsigned_cond(ltgt_cond
[mask
]);
645 if (cond
== TCG_COND_NEVER
) {
648 account_inline_branch(s
, old_cc_op
);
652 cond
= nz_cond
[mask
];
653 if (cond
== TCG_COND_NEVER
) {
656 account_inline_branch(s
, old_cc_op
);
671 account_inline_branch(s
, old_cc_op
);
686 account_inline_branch(s
, old_cc_op
);
690 switch (mask
& 0xa) {
691 case 8: /* src == 0 -> no one bit found */
694 case 2: /* src != 0 -> one bit found */
700 account_inline_branch(s
, old_cc_op
);
706 case 8 | 2: /* vr == 0 */
709 case 4 | 1: /* vr != 0 */
712 case 8 | 4: /* no carry -> vr >= src */
715 case 2 | 1: /* carry -> vr < src */
721 account_inline_branch(s
, old_cc_op
);
726 /* Note that CC=0 is impossible; treat it as dont-care. */
728 case 2: /* zero -> op1 == op2 */
731 case 4 | 1: /* !zero -> op1 != op2 */
734 case 4: /* borrow (!carry) -> op1 < op2 */
737 case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
743 account_inline_branch(s
, old_cc_op
);
748 /* Calculate cc value. */
753 /* Jump based on CC. We'll load up the real cond below;
754 the assignment here merely avoids a compiler warning. */
755 account_noninline_branch(s
, old_cc_op
);
756 old_cc_op
= CC_OP_STATIC
;
757 cond
= TCG_COND_NEVER
;
761 /* Load up the arguments of the comparison. */
763 c
->g1
= c
->g2
= false;
767 c
->u
.s32
.a
= tcg_temp_new_i32();
768 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_dst
);
769 c
->u
.s32
.b
= tcg_const_i32(0);
772 case CC_OP_LTUGTU_32
:
775 c
->u
.s32
.a
= tcg_temp_new_i32();
776 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_src
);
777 c
->u
.s32
.b
= tcg_temp_new_i32();
778 tcg_gen_extrl_i64_i32(c
->u
.s32
.b
, cc_dst
);
785 c
->u
.s64
.b
= tcg_const_i64(0);
789 case CC_OP_LTUGTU_64
:
793 c
->g1
= c
->g2
= true;
799 c
->u
.s64
.a
= tcg_temp_new_i64();
800 c
->u
.s64
.b
= tcg_const_i64(0);
801 tcg_gen_and_i64(c
->u
.s64
.a
, cc_src
, cc_dst
);
806 c
->u
.s32
.a
= tcg_temp_new_i32();
807 c
->u
.s32
.b
= tcg_temp_new_i32();
808 tcg_gen_extrl_i64_i32(c
->u
.s32
.a
, cc_vr
);
809 if (cond
== TCG_COND_EQ
|| cond
== TCG_COND_NE
) {
810 tcg_gen_movi_i32(c
->u
.s32
.b
, 0);
812 tcg_gen_extrl_i64_i32(c
->u
.s32
.b
, cc_src
);
819 if (cond
== TCG_COND_EQ
|| cond
== TCG_COND_NE
) {
820 c
->u
.s64
.b
= tcg_const_i64(0);
832 case 0x8 | 0x4 | 0x2: /* cc != 3 */
834 c
->u
.s32
.b
= tcg_const_i32(3);
836 case 0x8 | 0x4 | 0x1: /* cc != 2 */
838 c
->u
.s32
.b
= tcg_const_i32(2);
840 case 0x8 | 0x2 | 0x1: /* cc != 1 */
842 c
->u
.s32
.b
= tcg_const_i32(1);
844 case 0x8 | 0x2: /* cc == 0 ||Â cc == 2 => (cc & 1) == 0 */
847 c
->u
.s32
.a
= tcg_temp_new_i32();
848 c
->u
.s32
.b
= tcg_const_i32(0);
849 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
851 case 0x8 | 0x4: /* cc < 2 */
853 c
->u
.s32
.b
= tcg_const_i32(2);
855 case 0x8: /* cc == 0 */
857 c
->u
.s32
.b
= tcg_const_i32(0);
859 case 0x4 | 0x2 | 0x1: /* cc != 0 */
861 c
->u
.s32
.b
= tcg_const_i32(0);
863 case 0x4 | 0x1: /* cc == 1 ||Â cc == 3 => (cc & 1) != 0 */
866 c
->u
.s32
.a
= tcg_temp_new_i32();
867 c
->u
.s32
.b
= tcg_const_i32(0);
868 tcg_gen_andi_i32(c
->u
.s32
.a
, cc_op
, 1);
870 case 0x4: /* cc == 1 */
872 c
->u
.s32
.b
= tcg_const_i32(1);
874 case 0x2 | 0x1: /* cc > 1 */
876 c
->u
.s32
.b
= tcg_const_i32(1);
878 case 0x2: /* cc == 2 */
880 c
->u
.s32
.b
= tcg_const_i32(2);
882 case 0x1: /* cc == 3 */
884 c
->u
.s32
.b
= tcg_const_i32(3);
887 /* CC is masked by something else: (8 >> cc) & mask. */
890 c
->u
.s32
.a
= tcg_const_i32(8);
891 c
->u
.s32
.b
= tcg_const_i32(0);
892 tcg_gen_shr_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, cc_op
);
893 tcg_gen_andi_i32(c
->u
.s32
.a
, c
->u
.s32
.a
, mask
);
904 static void free_compare(DisasCompare
*c
)
908 tcg_temp_free_i64(c
->u
.s64
.a
);
910 tcg_temp_free_i32(c
->u
.s32
.a
);
915 tcg_temp_free_i64(c
->u
.s64
.b
);
917 tcg_temp_free_i32(c
->u
.s32
.b
);
922 /* ====================================================================== */
923 /* Define the insn format enumeration. */
924 #define F0(N) FMT_##N,
925 #define F1(N, X1) F0(N)
926 #define F2(N, X1, X2) F0(N)
927 #define F3(N, X1, X2, X3) F0(N)
928 #define F4(N, X1, X2, X3, X4) F0(N)
929 #define F5(N, X1, X2, X3, X4, X5) F0(N)
932 #include "insn-format.def"
942 /* Define a structure to hold the decoded fields. We'll store each inside
943 an array indexed by an enum. In order to conserve memory, we'll arrange
944 for fields that do not exist at the same time to overlap, thus the "C"
945 for compact. For checking purposes there is an "O" for original index
946 as well that will be applied to availability bitmaps. */
948 enum DisasFieldIndexO
{
971 enum DisasFieldIndexC
{
1002 struct DisasFields
{
1006 unsigned presentC
:16;
1007 unsigned int presentO
;
1011 /* This is the way fields are to be accessed out of DisasFields. */
1012 #define have_field(S, F) have_field1((S), FLD_O_##F)
1013 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
1015 static bool have_field1(const DisasFields
*f
, enum DisasFieldIndexO c
)
1017 return (f
->presentO
>> c
) & 1;
1020 static int get_field1(const DisasFields
*f
, enum DisasFieldIndexO o
,
1021 enum DisasFieldIndexC c
)
1023 assert(have_field1(f
, o
));
1027 /* Describe the layout of each field in each format. */
1028 typedef struct DisasField
{
1030 unsigned int size
:8;
1031 unsigned int type
:2;
1032 unsigned int indexC
:6;
1033 enum DisasFieldIndexO indexO
:8;
1036 typedef struct DisasFormatInfo
{
1037 DisasField op
[NUM_C_FIELD
];
1040 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1041 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1042 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1043 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1044 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1045 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1046 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1047 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1048 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1049 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1050 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1051 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1052 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1053 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1055 #define F0(N) { { } },
1056 #define F1(N, X1) { { X1 } },
1057 #define F2(N, X1, X2) { { X1, X2 } },
1058 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1059 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1060 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1062 static const DisasFormatInfo format_info
[] = {
1063 #include "insn-format.def"
1081 /* Generally, we'll extract operands into this structures, operate upon
1082 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1083 of routines below for more details. */
1085 bool g_out
, g_out2
, g_in1
, g_in2
;
1086 TCGv_i64 out
, out2
, in1
, in2
;
1090 /* Instructions can place constraints on their operands, raising specification
1091 exceptions if they are violated. To make this easy to automate, each "in1",
1092 "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1093 of the following, or 0. To make this easy to document, we'll put the
1094 SPEC_<name> defines next to <name>. */
1096 #define SPEC_r1_even 1
1097 #define SPEC_r2_even 2
1098 #define SPEC_r3_even 4
1099 #define SPEC_r1_f128 8
1100 #define SPEC_r2_f128 16
1102 /* Return values from translate_one, indicating the state of the TB. */
1104 /* Continue the TB. */
1106 /* We have emitted one or more goto_tb. No fixup required. */
1108 /* We are not using a goto_tb (for whatever reason), but have updated
1109 the PC (for whatever reason), so there's no need to do it again on
1112 /* We have updated the PC and CC values. */
1114 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1115 updated the PC for the next instruction to be executed. */
1117 /* We are exiting the TB to the main loop. */
1118 EXIT_PC_STALE_NOCHAIN
,
1119 /* We are ending the TB with a noreturn function call, e.g. longjmp.
1120 No following code will be executed. */
1132 void (*help_in1
)(DisasContext
*, DisasFields
*, DisasOps
*);
1133 void (*help_in2
)(DisasContext
*, DisasFields
*, DisasOps
*);
1134 void (*help_prep
)(DisasContext
*, DisasFields
*, DisasOps
*);
1135 void (*help_wout
)(DisasContext
*, DisasFields
*, DisasOps
*);
1136 void (*help_cout
)(DisasContext
*, DisasOps
*);
1137 ExitStatus (*help_op
)(DisasContext
*, DisasOps
*);
1142 /* ====================================================================== */
1143 /* Miscellaneous helpers, used by several operations. */
1145 static void help_l2_shift(DisasContext
*s
, DisasFields
*f
,
1146 DisasOps
*o
, int mask
)
1148 int b2
= get_field(f
, b2
);
1149 int d2
= get_field(f
, d2
);
1152 o
->in2
= tcg_const_i64(d2
& mask
);
1154 o
->in2
= get_address(s
, 0, b2
, d2
);
1155 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
1159 static ExitStatus
help_goto_direct(DisasContext
*s
, uint64_t dest
)
1161 if (dest
== s
->next_pc
) {
1162 per_branch(s
, true);
1165 if (use_goto_tb(s
, dest
)) {
1167 per_breaking_event(s
);
1169 tcg_gen_movi_i64(psw_addr
, dest
);
1170 tcg_gen_exit_tb((uintptr_t)s
->tb
);
1171 return EXIT_GOTO_TB
;
1173 tcg_gen_movi_i64(psw_addr
, dest
);
1174 per_branch(s
, false);
1175 return EXIT_PC_UPDATED
;
1179 static ExitStatus
help_branch(DisasContext
*s
, DisasCompare
*c
,
1180 bool is_imm
, int imm
, TCGv_i64 cdest
)
1183 uint64_t dest
= s
->pc
+ 2 * imm
;
1186 /* Take care of the special cases first. */
1187 if (c
->cond
== TCG_COND_NEVER
) {
1192 if (dest
== s
->next_pc
) {
1193 /* Branch to next. */
1194 per_branch(s
, true);
1198 if (c
->cond
== TCG_COND_ALWAYS
) {
1199 ret
= help_goto_direct(s
, dest
);
1203 if (TCGV_IS_UNUSED_I64(cdest
)) {
1204 /* E.g. bcr %r0 -> no branch. */
1208 if (c
->cond
== TCG_COND_ALWAYS
) {
1209 tcg_gen_mov_i64(psw_addr
, cdest
);
1210 per_branch(s
, false);
1211 ret
= EXIT_PC_UPDATED
;
1216 if (use_goto_tb(s
, s
->next_pc
)) {
1217 if (is_imm
&& use_goto_tb(s
, dest
)) {
1218 /* Both exits can use goto_tb. */
1221 lab
= gen_new_label();
1223 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1225 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1228 /* Branch not taken. */
1230 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1231 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 0);
1235 per_breaking_event(s
);
1237 tcg_gen_movi_i64(psw_addr
, dest
);
1238 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 1);
1242 /* Fallthru can use goto_tb, but taken branch cannot. */
1243 /* Store taken branch destination before the brcond. This
1244 avoids having to allocate a new local temp to hold it.
1245 We'll overwrite this in the not taken case anyway. */
1247 tcg_gen_mov_i64(psw_addr
, cdest
);
1250 lab
= gen_new_label();
1252 tcg_gen_brcond_i64(c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
, lab
);
1254 tcg_gen_brcond_i32(c
->cond
, c
->u
.s32
.a
, c
->u
.s32
.b
, lab
);
1257 /* Branch not taken. */
1260 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
1261 tcg_gen_exit_tb((uintptr_t)s
->tb
+ 0);
1265 tcg_gen_movi_i64(psw_addr
, dest
);
1267 per_breaking_event(s
);
1268 ret
= EXIT_PC_UPDATED
;
1271 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1272 Most commonly we're single-stepping or some other condition that
1273 disables all use of goto_tb. Just update the PC and exit. */
1275 TCGv_i64 next
= tcg_const_i64(s
->next_pc
);
1277 cdest
= tcg_const_i64(dest
);
1281 tcg_gen_movcond_i64(c
->cond
, psw_addr
, c
->u
.s64
.a
, c
->u
.s64
.b
,
1283 per_branch_cond(s
, c
->cond
, c
->u
.s64
.a
, c
->u
.s64
.b
);
1285 TCGv_i32 t0
= tcg_temp_new_i32();
1286 TCGv_i64 t1
= tcg_temp_new_i64();
1287 TCGv_i64 z
= tcg_const_i64(0);
1288 tcg_gen_setcond_i32(c
->cond
, t0
, c
->u
.s32
.a
, c
->u
.s32
.b
);
1289 tcg_gen_extu_i32_i64(t1
, t0
);
1290 tcg_temp_free_i32(t0
);
1291 tcg_gen_movcond_i64(TCG_COND_NE
, psw_addr
, t1
, z
, cdest
, next
);
1292 per_branch_cond(s
, TCG_COND_NE
, t1
, z
);
1293 tcg_temp_free_i64(t1
);
1294 tcg_temp_free_i64(z
);
1298 tcg_temp_free_i64(cdest
);
1300 tcg_temp_free_i64(next
);
1302 ret
= EXIT_PC_UPDATED
;
1310 /* ====================================================================== */
1311 /* The operations. These perform the bulk of the work for any insn,
1312 usually after the operands have been loaded and output initialized. */
/* LOAD POSITIVE: out = |in2|, computed branch-free via movcond
   (out = in2 < 0 ? -in2 : in2).  */
1314 static ExitStatus
op_abs(DisasContext
*s
, DisasOps
*o
)
1317 z
= tcg_const_i64(0);
1318 n
= tcg_temp_new_i64();
1319 tcg_gen_neg_i64(n
, o
->in2
);
1320 tcg_gen_movcond_i64(TCG_COND_LT
, o
->out
, o
->in2
, z
, n
, o
->in2
);
1321 tcg_temp_free_i64(n
);
1322 tcg_temp_free_i64(z
);
/* LOAD POSITIVE (short BFP): clear the sign bit of the 32-bit float
   held in the low half of in2.  */
1326 static ExitStatus
op_absf32(DisasContext
*s
, DisasOps
*o
)
1328 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffull
);
/* LOAD POSITIVE (long BFP): clear the sign bit of the 64-bit float.  */
1332 static ExitStatus
op_absf64(DisasContext
*s
, DisasOps
*o
)
1334 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
/* LOAD POSITIVE (extended BFP): clear the sign bit in the high
   doubleword (in1) and pass the low doubleword (in2) through.  */
1338 static ExitStatus
op_absf128(DisasContext
*s
, DisasOps
*o
)
1340 tcg_gen_andi_i64(o
->out
, o
->in1
, 0x7fffffffffffffffull
);
1341 tcg_gen_mov_i64(o
->out2
, o
->in2
);
/* ADD: out = in1 + in2.  */
1345 static ExitStatus
op_add(DisasContext
*s
, DisasOps
*o
)
1347 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1351 static ExitStatus
op_addc(DisasContext
*s
, DisasOps
*o
)
1356 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
1358 /* The carry flag is the msb of CC, therefore the branch mask that would
1359 create that comparison is 3. Feeding the generated comparison to
1360 setcond produces the carry flag that we desire. */
1361 disas_jcc(s
, &cmp
, 3);
1362 carry
= tcg_temp_new_i64();
1364 tcg_gen_setcond_i64(cmp
.cond
, carry
, cmp
.u
.s64
.a
, cmp
.u
.s64
.b
);
1366 TCGv_i32 t
= tcg_temp_new_i32();
1367 tcg_gen_setcond_i32(cmp
.cond
, t
, cmp
.u
.s32
.a
, cmp
.u
.s32
.b
);
1368 tcg_gen_extu_i32_i64(carry
, t
);
1369 tcg_temp_free_i32(t
);
1373 tcg_gen_add_i64(o
->out
, o
->out
, carry
);
1374 tcg_temp_free_i64(carry
);
/* ADD (short BFP) via helper: out = in1 + in2.  */
1378 static ExitStatus
op_aeb(DisasContext
*s
, DisasOps
*o
)
1380 gen_helper_aeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
/* ADD (long BFP) via helper: out = in1 + in2.  */
1384 static ExitStatus
op_adb(DisasContext
*s
, DisasOps
*o
)
1386 gen_helper_adb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
/* ADD (extended BFP) via helper; the 128-bit result comes back as
   out (high) plus env->retxl (low), fetched by return_low128.  */
1390 static ExitStatus
op_axb(DisasContext
*s
, DisasOps
*o
)
1392 gen_helper_axb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1393 return_low128(o
->out2
);
/* AND: out = in1 & in2.  */
1397 static ExitStatus
op_and(DisasContext
*s
, DisasOps
*o
)
1399 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1403 static ExitStatus
op_andi(DisasContext
*s
, DisasOps
*o
)
1405 int shift
= s
->insn
->data
& 0xff;
1406 int size
= s
->insn
->data
>> 8;
1407 uint64_t mask
= ((1ull << size
) - 1) << shift
;
1410 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
1411 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
1412 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
1414 /* Produce the CC from only the bits manipulated. */
1415 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
1416 set_cc_nz_u64(s
, cc_dst
);
1420 static ExitStatus
op_bas(DisasContext
*s
, DisasOps
*o
)
1422 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1423 if (!TCGV_IS_UNUSED_I64(o
->in2
)) {
1424 tcg_gen_mov_i64(psw_addr
, o
->in2
);
1425 per_branch(s
, false);
1426 return EXIT_PC_UPDATED
;
1432 static ExitStatus
op_basi(DisasContext
*s
, DisasOps
*o
)
1434 tcg_gen_movi_i64(o
->out
, pc_to_link_info(s
, s
->next_pc
));
1435 return help_goto_direct(s
, s
->pc
+ 2 * get_field(s
->fields
, i2
));
1438 static ExitStatus
op_bc(DisasContext
*s
, DisasOps
*o
)
1440 int m1
= get_field(s
->fields
, m1
);
1441 bool is_imm
= have_field(s
->fields
, i2
);
1442 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1445 /* BCR with R2 = 0 causes no branching */
1446 if (have_field(s
->fields
, r2
) && get_field(s
->fields
, r2
) == 0) {
1448 /* Perform serialization */
1449 /* FIXME: check for fast-BCR-serialization facility */
1450 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
1453 /* Perform serialization */
1454 /* FIXME: perform checkpoint-synchronisation */
1455 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
1460 disas_jcc(s
, &c
, m1
);
1461 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1464 static ExitStatus
op_bct32(DisasContext
*s
, DisasOps
*o
)
1466 int r1
= get_field(s
->fields
, r1
);
1467 bool is_imm
= have_field(s
->fields
, i2
);
1468 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1472 c
.cond
= TCG_COND_NE
;
1477 t
= tcg_temp_new_i64();
1478 tcg_gen_subi_i64(t
, regs
[r1
], 1);
1479 store_reg32_i64(r1
, t
);
1480 c
.u
.s32
.a
= tcg_temp_new_i32();
1481 c
.u
.s32
.b
= tcg_const_i32(0);
1482 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1483 tcg_temp_free_i64(t
);
1485 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1488 static ExitStatus
op_bcth(DisasContext
*s
, DisasOps
*o
)
1490 int r1
= get_field(s
->fields
, r1
);
1491 int imm
= get_field(s
->fields
, i2
);
1495 c
.cond
= TCG_COND_NE
;
1500 t
= tcg_temp_new_i64();
1501 tcg_gen_shri_i64(t
, regs
[r1
], 32);
1502 tcg_gen_subi_i64(t
, t
, 1);
1503 store_reg32h_i64(r1
, t
);
1504 c
.u
.s32
.a
= tcg_temp_new_i32();
1505 c
.u
.s32
.b
= tcg_const_i32(0);
1506 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1507 tcg_temp_free_i64(t
);
1509 return help_branch(s
, &c
, 1, imm
, o
->in2
);
1512 static ExitStatus
op_bct64(DisasContext
*s
, DisasOps
*o
)
1514 int r1
= get_field(s
->fields
, r1
);
1515 bool is_imm
= have_field(s
->fields
, i2
);
1516 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1519 c
.cond
= TCG_COND_NE
;
1524 tcg_gen_subi_i64(regs
[r1
], regs
[r1
], 1);
1525 c
.u
.s64
.a
= regs
[r1
];
1526 c
.u
.s64
.b
= tcg_const_i64(0);
1528 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1531 static ExitStatus
op_bx32(DisasContext
*s
, DisasOps
*o
)
1533 int r1
= get_field(s
->fields
, r1
);
1534 int r3
= get_field(s
->fields
, r3
);
1535 bool is_imm
= have_field(s
->fields
, i2
);
1536 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1540 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1545 t
= tcg_temp_new_i64();
1546 tcg_gen_add_i64(t
, regs
[r1
], regs
[r3
]);
1547 c
.u
.s32
.a
= tcg_temp_new_i32();
1548 c
.u
.s32
.b
= tcg_temp_new_i32();
1549 tcg_gen_extrl_i64_i32(c
.u
.s32
.a
, t
);
1550 tcg_gen_extrl_i64_i32(c
.u
.s32
.b
, regs
[r3
| 1]);
1551 store_reg32_i64(r1
, t
);
1552 tcg_temp_free_i64(t
);
1554 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1557 static ExitStatus
op_bx64(DisasContext
*s
, DisasOps
*o
)
1559 int r1
= get_field(s
->fields
, r1
);
1560 int r3
= get_field(s
->fields
, r3
);
1561 bool is_imm
= have_field(s
->fields
, i2
);
1562 int imm
= is_imm
? get_field(s
->fields
, i2
) : 0;
1565 c
.cond
= (s
->insn
->data
? TCG_COND_LE
: TCG_COND_GT
);
1568 if (r1
== (r3
| 1)) {
1569 c
.u
.s64
.b
= load_reg(r3
| 1);
1572 c
.u
.s64
.b
= regs
[r3
| 1];
1576 tcg_gen_add_i64(regs
[r1
], regs
[r1
], regs
[r3
]);
1577 c
.u
.s64
.a
= regs
[r1
];
1580 return help_branch(s
, &c
, is_imm
, imm
, o
->in2
);
1583 static ExitStatus
op_cj(DisasContext
*s
, DisasOps
*o
)
1585 int imm
, m3
= get_field(s
->fields
, m3
);
1589 c
.cond
= ltgt_cond
[m3
];
1590 if (s
->insn
->data
) {
1591 c
.cond
= tcg_unsigned_cond(c
.cond
);
1593 c
.is_64
= c
.g1
= c
.g2
= true;
1597 is_imm
= have_field(s
->fields
, i4
);
1599 imm
= get_field(s
->fields
, i4
);
1602 o
->out
= get_address(s
, 0, get_field(s
->fields
, b4
),
1603 get_field(s
->fields
, d4
));
1606 return help_branch(s
, &c
, is_imm
, imm
, o
->out
);
1609 static ExitStatus
op_ceb(DisasContext
*s
, DisasOps
*o
)
1611 gen_helper_ceb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1616 static ExitStatus
op_cdb(DisasContext
*s
, DisasOps
*o
)
1618 gen_helper_cdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
1623 static ExitStatus
op_cxb(DisasContext
*s
, DisasOps
*o
)
1625 gen_helper_cxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
1630 static ExitStatus
op_cfeb(DisasContext
*s
, DisasOps
*o
)
1632 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1633 gen_helper_cfeb(o
->out
, cpu_env
, o
->in2
, m3
);
1634 tcg_temp_free_i32(m3
);
1635 gen_set_cc_nz_f32(s
, o
->in2
);
1639 static ExitStatus
op_cfdb(DisasContext
*s
, DisasOps
*o
)
1641 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1642 gen_helper_cfdb(o
->out
, cpu_env
, o
->in2
, m3
);
1643 tcg_temp_free_i32(m3
);
1644 gen_set_cc_nz_f64(s
, o
->in2
);
1648 static ExitStatus
op_cfxb(DisasContext
*s
, DisasOps
*o
)
1650 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1651 gen_helper_cfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1652 tcg_temp_free_i32(m3
);
1653 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1657 static ExitStatus
op_cgeb(DisasContext
*s
, DisasOps
*o
)
1659 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1660 gen_helper_cgeb(o
->out
, cpu_env
, o
->in2
, m3
);
1661 tcg_temp_free_i32(m3
);
1662 gen_set_cc_nz_f32(s
, o
->in2
);
1666 static ExitStatus
op_cgdb(DisasContext
*s
, DisasOps
*o
)
1668 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1669 gen_helper_cgdb(o
->out
, cpu_env
, o
->in2
, m3
);
1670 tcg_temp_free_i32(m3
);
1671 gen_set_cc_nz_f64(s
, o
->in2
);
1675 static ExitStatus
op_cgxb(DisasContext
*s
, DisasOps
*o
)
1677 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1678 gen_helper_cgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1679 tcg_temp_free_i32(m3
);
1680 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1684 static ExitStatus
op_clfeb(DisasContext
*s
, DisasOps
*o
)
1686 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1687 gen_helper_clfeb(o
->out
, cpu_env
, o
->in2
, m3
);
1688 tcg_temp_free_i32(m3
);
1689 gen_set_cc_nz_f32(s
, o
->in2
);
1693 static ExitStatus
op_clfdb(DisasContext
*s
, DisasOps
*o
)
1695 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1696 gen_helper_clfdb(o
->out
, cpu_env
, o
->in2
, m3
);
1697 tcg_temp_free_i32(m3
);
1698 gen_set_cc_nz_f64(s
, o
->in2
);
1702 static ExitStatus
op_clfxb(DisasContext
*s
, DisasOps
*o
)
1704 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1705 gen_helper_clfxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1706 tcg_temp_free_i32(m3
);
1707 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1711 static ExitStatus
op_clgeb(DisasContext
*s
, DisasOps
*o
)
1713 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1714 gen_helper_clgeb(o
->out
, cpu_env
, o
->in2
, m3
);
1715 tcg_temp_free_i32(m3
);
1716 gen_set_cc_nz_f32(s
, o
->in2
);
1720 static ExitStatus
op_clgdb(DisasContext
*s
, DisasOps
*o
)
1722 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1723 gen_helper_clgdb(o
->out
, cpu_env
, o
->in2
, m3
);
1724 tcg_temp_free_i32(m3
);
1725 gen_set_cc_nz_f64(s
, o
->in2
);
1729 static ExitStatus
op_clgxb(DisasContext
*s
, DisasOps
*o
)
1731 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1732 gen_helper_clgxb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
1733 tcg_temp_free_i32(m3
);
1734 gen_set_cc_nz_f128(s
, o
->in1
, o
->in2
);
1738 static ExitStatus
op_cegb(DisasContext
*s
, DisasOps
*o
)
1740 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1741 gen_helper_cegb(o
->out
, cpu_env
, o
->in2
, m3
);
1742 tcg_temp_free_i32(m3
);
1746 static ExitStatus
op_cdgb(DisasContext
*s
, DisasOps
*o
)
1748 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1749 gen_helper_cdgb(o
->out
, cpu_env
, o
->in2
, m3
);
1750 tcg_temp_free_i32(m3
);
1754 static ExitStatus
op_cxgb(DisasContext
*s
, DisasOps
*o
)
1756 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1757 gen_helper_cxgb(o
->out
, cpu_env
, o
->in2
, m3
);
1758 tcg_temp_free_i32(m3
);
1759 return_low128(o
->out2
);
1763 static ExitStatus
op_celgb(DisasContext
*s
, DisasOps
*o
)
1765 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1766 gen_helper_celgb(o
->out
, cpu_env
, o
->in2
, m3
);
1767 tcg_temp_free_i32(m3
);
1771 static ExitStatus
op_cdlgb(DisasContext
*s
, DisasOps
*o
)
1773 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1774 gen_helper_cdlgb(o
->out
, cpu_env
, o
->in2
, m3
);
1775 tcg_temp_free_i32(m3
);
1779 static ExitStatus
op_cxlgb(DisasContext
*s
, DisasOps
*o
)
1781 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1782 gen_helper_cxlgb(o
->out
, cpu_env
, o
->in2
, m3
);
1783 tcg_temp_free_i32(m3
);
1784 return_low128(o
->out2
);
1788 static ExitStatus
op_cksm(DisasContext
*s
, DisasOps
*o
)
1790 int r2
= get_field(s
->fields
, r2
);
1791 TCGv_i64 len
= tcg_temp_new_i64();
1793 gen_helper_cksm(len
, cpu_env
, o
->in1
, o
->in2
, regs
[r2
+ 1]);
1795 return_low128(o
->out
);
1797 tcg_gen_add_i64(regs
[r2
], regs
[r2
], len
);
1798 tcg_gen_sub_i64(regs
[r2
+ 1], regs
[r2
+ 1], len
);
1799 tcg_temp_free_i64(len
);
1804 static ExitStatus
op_clc(DisasContext
*s
, DisasOps
*o
)
1806 int l
= get_field(s
->fields
, l1
);
1811 tcg_gen_qemu_ld8u(cc_src
, o
->addr1
, get_mem_index(s
));
1812 tcg_gen_qemu_ld8u(cc_dst
, o
->in2
, get_mem_index(s
));
1815 tcg_gen_qemu_ld16u(cc_src
, o
->addr1
, get_mem_index(s
));
1816 tcg_gen_qemu_ld16u(cc_dst
, o
->in2
, get_mem_index(s
));
1819 tcg_gen_qemu_ld32u(cc_src
, o
->addr1
, get_mem_index(s
));
1820 tcg_gen_qemu_ld32u(cc_dst
, o
->in2
, get_mem_index(s
));
1823 tcg_gen_qemu_ld64(cc_src
, o
->addr1
, get_mem_index(s
));
1824 tcg_gen_qemu_ld64(cc_dst
, o
->in2
, get_mem_index(s
));
1827 vl
= tcg_const_i32(l
);
1828 gen_helper_clc(cc_op
, cpu_env
, vl
, o
->addr1
, o
->in2
);
1829 tcg_temp_free_i32(vl
);
1833 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, cc_src
, cc_dst
);
1837 static ExitStatus
op_clcl(DisasContext
*s
, DisasOps
*o
)
1839 int r1
= get_field(s
->fields
, r1
);
1840 int r2
= get_field(s
->fields
, r2
);
1843 /* r1 and r2 must be even. */
1844 if (r1
& 1 || r2
& 1) {
1845 gen_program_exception(s
, PGM_SPECIFICATION
);
1846 return EXIT_NORETURN
;
1849 t1
= tcg_const_i32(r1
);
1850 t2
= tcg_const_i32(r2
);
1851 gen_helper_clcl(cc_op
, cpu_env
, t1
, t2
);
1852 tcg_temp_free_i32(t1
);
1853 tcg_temp_free_i32(t2
);
1858 static ExitStatus
op_clcle(DisasContext
*s
, DisasOps
*o
)
1860 int r1
= get_field(s
->fields
, r1
);
1861 int r3
= get_field(s
->fields
, r3
);
1864 /* r1 and r3 must be even. */
1865 if (r1
& 1 || r3
& 1) {
1866 gen_program_exception(s
, PGM_SPECIFICATION
);
1867 return EXIT_NORETURN
;
1870 t1
= tcg_const_i32(r1
);
1871 t3
= tcg_const_i32(r3
);
1872 gen_helper_clcle(cc_op
, cpu_env
, t1
, o
->in2
, t3
);
1873 tcg_temp_free_i32(t1
);
1874 tcg_temp_free_i32(t3
);
1879 static ExitStatus
op_clclu(DisasContext
*s
, DisasOps
*o
)
1881 int r1
= get_field(s
->fields
, r1
);
1882 int r3
= get_field(s
->fields
, r3
);
1885 /* r1 and r3 must be even. */
1886 if (r1
& 1 || r3
& 1) {
1887 gen_program_exception(s
, PGM_SPECIFICATION
);
1888 return EXIT_NORETURN
;
1891 t1
= tcg_const_i32(r1
);
1892 t3
= tcg_const_i32(r3
);
1893 gen_helper_clclu(cc_op
, cpu_env
, t1
, o
->in2
, t3
);
1894 tcg_temp_free_i32(t1
);
1895 tcg_temp_free_i32(t3
);
1900 static ExitStatus
op_clm(DisasContext
*s
, DisasOps
*o
)
1902 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
1903 TCGv_i32 t1
= tcg_temp_new_i32();
1904 tcg_gen_extrl_i64_i32(t1
, o
->in1
);
1905 gen_helper_clm(cc_op
, cpu_env
, t1
, m3
, o
->in2
);
1907 tcg_temp_free_i32(t1
);
1908 tcg_temp_free_i32(m3
);
1912 static ExitStatus
op_clst(DisasContext
*s
, DisasOps
*o
)
1914 gen_helper_clst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
1916 return_low128(o
->in2
);
1920 static ExitStatus
op_cps(DisasContext
*s
, DisasOps
*o
)
1922 TCGv_i64 t
= tcg_temp_new_i64();
1923 tcg_gen_andi_i64(t
, o
->in1
, 0x8000000000000000ull
);
1924 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffffffffffffull
);
1925 tcg_gen_or_i64(o
->out
, o
->out
, t
);
1926 tcg_temp_free_i64(t
);
1930 static ExitStatus
op_cs(DisasContext
*s
, DisasOps
*o
)
1932 int d2
= get_field(s
->fields
, d2
);
1933 int b2
= get_field(s
->fields
, b2
);
1936 /* Note that in1 = R3 (new value) and
1937 in2 = (zero-extended) R1 (expected value). */
1939 addr
= get_address(s
, 0, b2
, d2
);
1940 tcg_gen_atomic_cmpxchg_i64(o
->out
, addr
, o
->in2
, o
->in1
,
1941 get_mem_index(s
), s
->insn
->data
| MO_ALIGN
);
1942 tcg_temp_free_i64(addr
);
1944 /* Are the memory and expected values (un)equal? Note that this setcond
1945 produces the output CC value, thus the NE sense of the test. */
1946 cc
= tcg_temp_new_i64();
1947 tcg_gen_setcond_i64(TCG_COND_NE
, cc
, o
->in2
, o
->out
);
1948 tcg_gen_extrl_i64_i32(cc_op
, cc
);
1949 tcg_temp_free_i64(cc
);
1955 static ExitStatus
op_cdsg(DisasContext
*s
, DisasOps
*o
)
1957 int r1
= get_field(s
->fields
, r1
);
1958 int r3
= get_field(s
->fields
, r3
);
1959 int d2
= get_field(s
->fields
, d2
);
1960 int b2
= get_field(s
->fields
, b2
);
1962 TCGv_i32 t_r1
, t_r3
;
1964 /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */
1965 addr
= get_address(s
, 0, b2
, d2
);
1966 t_r1
= tcg_const_i32(r1
);
1967 t_r3
= tcg_const_i32(r3
);
1968 gen_helper_cdsg(cpu_env
, addr
, t_r1
, t_r3
);
1969 tcg_temp_free_i64(addr
);
1970 tcg_temp_free_i32(t_r1
);
1971 tcg_temp_free_i32(t_r3
);
1977 static ExitStatus
op_csst(DisasContext
*s
, DisasOps
*o
)
1979 int r3
= get_field(s
->fields
, r3
);
1980 TCGv_i32 t_r3
= tcg_const_i32(r3
);
1982 gen_helper_csst(cc_op
, cpu_env
, t_r3
, o
->in1
, o
->in2
);
1983 tcg_temp_free_i32(t_r3
);
1989 #ifndef CONFIG_USER_ONLY
1990 static ExitStatus
op_csp(DisasContext
*s
, DisasOps
*o
)
1992 TCGMemOp mop
= s
->insn
->data
;
1993 TCGv_i64 addr
, old
, cc
;
1994 TCGLabel
*lab
= gen_new_label();
1996 /* Note that in1 = R1 (zero-extended expected value),
1997 out = R1 (original reg), out2 = R1+1 (new value). */
1999 check_privileged(s
);
2000 addr
= tcg_temp_new_i64();
2001 old
= tcg_temp_new_i64();
2002 tcg_gen_andi_i64(addr
, o
->in2
, -1ULL << (mop
& MO_SIZE
));
2003 tcg_gen_atomic_cmpxchg_i64(old
, addr
, o
->in1
, o
->out2
,
2004 get_mem_index(s
), mop
| MO_ALIGN
);
2005 tcg_temp_free_i64(addr
);
2007 /* Are the memory and expected values (un)equal? */
2008 cc
= tcg_temp_new_i64();
2009 tcg_gen_setcond_i64(TCG_COND_NE
, cc
, o
->in1
, old
);
2010 tcg_gen_extrl_i64_i32(cc_op
, cc
);
2012 /* Write back the output now, so that it happens before the
2013 following branch, so that we don't need local temps. */
2014 if ((mop
& MO_SIZE
) == MO_32
) {
2015 tcg_gen_deposit_i64(o
->out
, o
->out
, old
, 0, 32);
2017 tcg_gen_mov_i64(o
->out
, old
);
2019 tcg_temp_free_i64(old
);
2021 /* If the comparison was equal, and the LSB of R2 was set,
2022 then we need to flush the TLB (for all cpus). */
2023 tcg_gen_xori_i64(cc
, cc
, 1);
2024 tcg_gen_and_i64(cc
, cc
, o
->in2
);
2025 tcg_gen_brcondi_i64(TCG_COND_EQ
, cc
, 0, lab
);
2026 tcg_temp_free_i64(cc
);
2028 gen_helper_purge(cpu_env
);
2035 static ExitStatus
op_cvd(DisasContext
*s
, DisasOps
*o
)
2037 TCGv_i64 t1
= tcg_temp_new_i64();
2038 TCGv_i32 t2
= tcg_temp_new_i32();
2039 tcg_gen_extrl_i64_i32(t2
, o
->in1
);
2040 gen_helper_cvd(t1
, t2
);
2041 tcg_temp_free_i32(t2
);
2042 tcg_gen_qemu_st64(t1
, o
->in2
, get_mem_index(s
));
2043 tcg_temp_free_i64(t1
);
2047 static ExitStatus
op_ct(DisasContext
*s
, DisasOps
*o
)
2049 int m3
= get_field(s
->fields
, m3
);
2050 TCGLabel
*lab
= gen_new_label();
2053 c
= tcg_invert_cond(ltgt_cond
[m3
]);
2054 if (s
->insn
->data
) {
2055 c
= tcg_unsigned_cond(c
);
2057 tcg_gen_brcond_i64(c
, o
->in1
, o
->in2
, lab
);
2066 static ExitStatus
op_cuXX(DisasContext
*s
, DisasOps
*o
)
2068 int m3
= get_field(s
->fields
, m3
);
2069 int r1
= get_field(s
->fields
, r1
);
2070 int r2
= get_field(s
->fields
, r2
);
2071 TCGv_i32 tr1
, tr2
, chk
;
2073 /* R1 and R2 must both be even. */
2074 if ((r1
| r2
) & 1) {
2075 gen_program_exception(s
, PGM_SPECIFICATION
);
2076 return EXIT_NORETURN
;
2078 if (!s390_has_feat(S390_FEAT_ETF3_ENH
)) {
2082 tr1
= tcg_const_i32(r1
);
2083 tr2
= tcg_const_i32(r2
);
2084 chk
= tcg_const_i32(m3
);
2086 switch (s
->insn
->data
) {
2088 gen_helper_cu12(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2091 gen_helper_cu14(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2094 gen_helper_cu21(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2097 gen_helper_cu24(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2100 gen_helper_cu41(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2103 gen_helper_cu42(cc_op
, cpu_env
, tr1
, tr2
, chk
);
2106 g_assert_not_reached();
2109 tcg_temp_free_i32(tr1
);
2110 tcg_temp_free_i32(tr2
);
2111 tcg_temp_free_i32(chk
);
2116 #ifndef CONFIG_USER_ONLY
2117 static ExitStatus
op_diag(DisasContext
*s
, DisasOps
*o
)
2119 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2120 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2121 TCGv_i32 func_code
= tcg_const_i32(get_field(s
->fields
, i2
));
2123 check_privileged(s
);
2127 gen_helper_diag(cpu_env
, r1
, r3
, func_code
);
2129 tcg_temp_free_i32(func_code
);
2130 tcg_temp_free_i32(r3
);
2131 tcg_temp_free_i32(r1
);
2136 static ExitStatus
op_divs32(DisasContext
*s
, DisasOps
*o
)
2138 gen_helper_divs32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2139 return_low128(o
->out
);
2143 static ExitStatus
op_divu32(DisasContext
*s
, DisasOps
*o
)
2145 gen_helper_divu32(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2146 return_low128(o
->out
);
2150 static ExitStatus
op_divs64(DisasContext
*s
, DisasOps
*o
)
2152 gen_helper_divs64(o
->out2
, cpu_env
, o
->in1
, o
->in2
);
2153 return_low128(o
->out
);
2157 static ExitStatus
op_divu64(DisasContext
*s
, DisasOps
*o
)
2159 gen_helper_divu64(o
->out2
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
2160 return_low128(o
->out
);
2164 static ExitStatus
op_deb(DisasContext
*s
, DisasOps
*o
)
2166 gen_helper_deb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2170 static ExitStatus
op_ddb(DisasContext
*s
, DisasOps
*o
)
2172 gen_helper_ddb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2176 static ExitStatus
op_dxb(DisasContext
*s
, DisasOps
*o
)
2178 gen_helper_dxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2179 return_low128(o
->out2
);
2183 static ExitStatus
op_ear(DisasContext
*s
, DisasOps
*o
)
2185 int r2
= get_field(s
->fields
, r2
);
2186 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, aregs
[r2
]));
2190 static ExitStatus
op_ecag(DisasContext
*s
, DisasOps
*o
)
2192 /* No cache information provided. */
2193 tcg_gen_movi_i64(o
->out
, -1);
2197 static ExitStatus
op_efpc(DisasContext
*s
, DisasOps
*o
)
2199 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, fpc
));
2203 static ExitStatus
op_epsw(DisasContext
*s
, DisasOps
*o
)
2205 int r1
= get_field(s
->fields
, r1
);
2206 int r2
= get_field(s
->fields
, r2
);
2207 TCGv_i64 t
= tcg_temp_new_i64();
2209 /* Note the "subsequently" in the PoO, which implies a defined result
2210 if r1 == r2. Thus we cannot defer these writes to an output hook. */
2211 tcg_gen_shri_i64(t
, psw_mask
, 32);
2212 store_reg32_i64(r1
, t
);
2214 store_reg32_i64(r2
, psw_mask
);
2217 tcg_temp_free_i64(t
);
2221 static ExitStatus
op_ex(DisasContext
*s
, DisasOps
*o
)
2223 int r1
= get_field(s
->fields
, r1
);
2227 /* Nested EXECUTE is not allowed. */
2228 if (unlikely(s
->ex_value
)) {
2229 gen_program_exception(s
, PGM_EXECUTE
);
2230 return EXIT_NORETURN
;
2237 v1
= tcg_const_i64(0);
2242 ilen
= tcg_const_i32(s
->ilen
);
2243 gen_helper_ex(cpu_env
, ilen
, v1
, o
->in2
);
2244 tcg_temp_free_i32(ilen
);
2247 tcg_temp_free_i64(v1
);
2250 return EXIT_PC_CC_UPDATED
;
2253 static ExitStatus
op_fieb(DisasContext
*s
, DisasOps
*o
)
2255 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2256 gen_helper_fieb(o
->out
, cpu_env
, o
->in2
, m3
);
2257 tcg_temp_free_i32(m3
);
2261 static ExitStatus
op_fidb(DisasContext
*s
, DisasOps
*o
)
2263 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2264 gen_helper_fidb(o
->out
, cpu_env
, o
->in2
, m3
);
2265 tcg_temp_free_i32(m3
);
2269 static ExitStatus
op_fixb(DisasContext
*s
, DisasOps
*o
)
2271 TCGv_i32 m3
= tcg_const_i32(get_field(s
->fields
, m3
));
2272 gen_helper_fixb(o
->out
, cpu_env
, o
->in1
, o
->in2
, m3
);
2273 return_low128(o
->out2
);
2274 tcg_temp_free_i32(m3
);
2278 static ExitStatus
op_flogr(DisasContext
*s
, DisasOps
*o
)
2280 /* We'll use the original input for cc computation, since we get to
2281 compare that against 0, which ought to be better than comparing
2282 the real output against 64. It also lets cc_dst be a convenient
2283 temporary during our computation. */
2284 gen_op_update1_cc_i64(s
, CC_OP_FLOGR
, o
->in2
);
2286 /* R1 = IN ? CLZ(IN) : 64. */
2287 tcg_gen_clzi_i64(o
->out
, o
->in2
, 64);
2289 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2290 value by 64, which is undefined. But since the shift is 64 iff the
2291 input is zero, we still get the correct result after and'ing. */
2292 tcg_gen_movi_i64(o
->out2
, 0x8000000000000000ull
);
2293 tcg_gen_shr_i64(o
->out2
, o
->out2
, o
->out
);
2294 tcg_gen_andc_i64(o
->out2
, cc_dst
, o
->out2
);
2298 static ExitStatus
op_icm(DisasContext
*s
, DisasOps
*o
)
2300 int m3
= get_field(s
->fields
, m3
);
2301 int pos
, len
, base
= s
->insn
->data
;
2302 TCGv_i64 tmp
= tcg_temp_new_i64();
2307 /* Effectively a 32-bit load. */
2308 tcg_gen_qemu_ld32u(tmp
, o
->in2
, get_mem_index(s
));
2315 /* Effectively a 16-bit load. */
2316 tcg_gen_qemu_ld16u(tmp
, o
->in2
, get_mem_index(s
));
2324 /* Effectively an 8-bit load. */
2325 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2330 pos
= base
+ ctz32(m3
) * 8;
2331 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, len
);
2332 ccm
= ((1ull << len
) - 1) << pos
;
2336 /* This is going to be a sequence of loads and inserts. */
2337 pos
= base
+ 32 - 8;
2341 tcg_gen_qemu_ld8u(tmp
, o
->in2
, get_mem_index(s
));
2342 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
2343 tcg_gen_deposit_i64(o
->out
, o
->out
, tmp
, pos
, 8);
2346 m3
= (m3
<< 1) & 0xf;
2352 tcg_gen_movi_i64(tmp
, ccm
);
2353 gen_op_update2_cc_i64(s
, CC_OP_ICM
, tmp
, o
->out
);
2354 tcg_temp_free_i64(tmp
);
2358 static ExitStatus
op_insi(DisasContext
*s
, DisasOps
*o
)
2360 int shift
= s
->insn
->data
& 0xff;
2361 int size
= s
->insn
->data
>> 8;
2362 tcg_gen_deposit_i64(o
->out
, o
->in1
, o
->in2
, shift
, size
);
2366 static ExitStatus
op_ipm(DisasContext
*s
, DisasOps
*o
)
2371 tcg_gen_andi_i64(o
->out
, o
->out
, ~0xff000000ull
);
2373 t1
= tcg_temp_new_i64();
2374 tcg_gen_shli_i64(t1
, psw_mask
, 20);
2375 tcg_gen_shri_i64(t1
, t1
, 36);
2376 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
2378 tcg_gen_extu_i32_i64(t1
, cc_op
);
2379 tcg_gen_shli_i64(t1
, t1
, 28);
2380 tcg_gen_or_i64(o
->out
, o
->out
, t1
);
2381 tcg_temp_free_i64(t1
);
2385 #ifndef CONFIG_USER_ONLY
2386 static ExitStatus
op_idte(DisasContext
*s
, DisasOps
*o
)
2390 check_privileged(s
);
2391 if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING
)) {
2392 m4
= tcg_const_i32(get_field(s
->fields
, m4
));
2394 m4
= tcg_const_i32(0);
2396 gen_helper_idte(cpu_env
, o
->in1
, o
->in2
, m4
);
2397 tcg_temp_free_i32(m4
);
2401 static ExitStatus
op_ipte(DisasContext
*s
, DisasOps
*o
)
2405 check_privileged(s
);
2406 if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING
)) {
2407 m4
= tcg_const_i32(get_field(s
->fields
, m4
));
2409 m4
= tcg_const_i32(0);
2411 gen_helper_ipte(cpu_env
, o
->in1
, o
->in2
, m4
);
2412 tcg_temp_free_i32(m4
);
2416 static ExitStatus
op_iske(DisasContext
*s
, DisasOps
*o
)
2418 check_privileged(s
);
2419 gen_helper_iske(o
->out
, cpu_env
, o
->in2
);
2424 static ExitStatus
op_keb(DisasContext
*s
, DisasOps
*o
)
2426 gen_helper_keb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
2431 static ExitStatus
op_kdb(DisasContext
*s
, DisasOps
*o
)
2433 gen_helper_kdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
2438 static ExitStatus
op_kxb(DisasContext
*s
, DisasOps
*o
)
2440 gen_helper_kxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
2445 static ExitStatus
op_laa(DisasContext
*s
, DisasOps
*o
)
2447 /* The real output is indeed the original value in memory;
2448 recompute the addition for the computation of CC. */
2449 tcg_gen_atomic_fetch_add_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2450 s
->insn
->data
| MO_ALIGN
);
2451 /* However, we need to recompute the addition for setting CC. */
2452 tcg_gen_add_i64(o
->out
, o
->in1
, o
->in2
);
2456 static ExitStatus
op_lan(DisasContext
*s
, DisasOps
*o
)
2458 /* The real output is indeed the original value in memory;
2459 recompute the addition for the computation of CC. */
2460 tcg_gen_atomic_fetch_and_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2461 s
->insn
->data
| MO_ALIGN
);
2462 /* However, we need to recompute the operation for setting CC. */
2463 tcg_gen_and_i64(o
->out
, o
->in1
, o
->in2
);
2467 static ExitStatus
op_lao(DisasContext
*s
, DisasOps
*o
)
2469 /* The real output is indeed the original value in memory;
2470 recompute the addition for the computation of CC. */
2471 tcg_gen_atomic_fetch_or_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2472 s
->insn
->data
| MO_ALIGN
);
2473 /* However, we need to recompute the operation for setting CC. */
2474 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
2478 static ExitStatus
op_lax(DisasContext
*s
, DisasOps
*o
)
2480 /* The real output is indeed the original value in memory;
2481 recompute the addition for the computation of CC. */
2482 tcg_gen_atomic_fetch_xor_i64(o
->in2
, o
->in2
, o
->in1
, get_mem_index(s
),
2483 s
->insn
->data
| MO_ALIGN
);
2484 /* However, we need to recompute the operation for setting CC. */
2485 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
2489 static ExitStatus
op_ldeb(DisasContext
*s
, DisasOps
*o
)
2491 gen_helper_ldeb(o
->out
, cpu_env
, o
->in2
);
2495 static ExitStatus
op_ledb(DisasContext
*s
, DisasOps
*o
)
2497 gen_helper_ledb(o
->out
, cpu_env
, o
->in2
);
2501 static ExitStatus
op_ldxb(DisasContext
*s
, DisasOps
*o
)
2503 gen_helper_ldxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2507 static ExitStatus
op_lexb(DisasContext
*s
, DisasOps
*o
)
2509 gen_helper_lexb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
2513 static ExitStatus
op_lxdb(DisasContext
*s
, DisasOps
*o
)
2515 gen_helper_lxdb(o
->out
, cpu_env
, o
->in2
);
2516 return_low128(o
->out2
);
2520 static ExitStatus
op_lxeb(DisasContext
*s
, DisasOps
*o
)
2522 gen_helper_lxeb(o
->out
, cpu_env
, o
->in2
);
2523 return_low128(o
->out2
);
2527 static ExitStatus
op_llgt(DisasContext
*s
, DisasOps
*o
)
2529 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
2533 static ExitStatus
op_ld8s(DisasContext
*s
, DisasOps
*o
)
2535 tcg_gen_qemu_ld8s(o
->out
, o
->in2
, get_mem_index(s
));
2539 static ExitStatus
op_ld8u(DisasContext
*s
, DisasOps
*o
)
2541 tcg_gen_qemu_ld8u(o
->out
, o
->in2
, get_mem_index(s
));
2545 static ExitStatus
op_ld16s(DisasContext
*s
, DisasOps
*o
)
2547 tcg_gen_qemu_ld16s(o
->out
, o
->in2
, get_mem_index(s
));
2551 static ExitStatus
op_ld16u(DisasContext
*s
, DisasOps
*o
)
2553 tcg_gen_qemu_ld16u(o
->out
, o
->in2
, get_mem_index(s
));
2557 static ExitStatus
op_ld32s(DisasContext
*s
, DisasOps
*o
)
2559 tcg_gen_qemu_ld32s(o
->out
, o
->in2
, get_mem_index(s
));
2563 static ExitStatus
op_ld32u(DisasContext
*s
, DisasOps
*o
)
2565 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
2569 static ExitStatus
op_ld64(DisasContext
*s
, DisasOps
*o
)
2571 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2575 static ExitStatus
op_lat(DisasContext
*s
, DisasOps
*o
)
2577 TCGLabel
*lab
= gen_new_label();
2578 store_reg32_i64(get_field(s
->fields
, r1
), o
->in2
);
2579 /* The value is stored even in case of trap. */
2580 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->in2
, 0, lab
);
2586 static ExitStatus
op_lgat(DisasContext
*s
, DisasOps
*o
)
2588 TCGLabel
*lab
= gen_new_label();
2589 tcg_gen_qemu_ld64(o
->out
, o
->in2
, get_mem_index(s
));
2590 /* The value is stored even in case of trap. */
2591 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2597 static ExitStatus
op_lfhat(DisasContext
*s
, DisasOps
*o
)
2599 TCGLabel
*lab
= gen_new_label();
2600 store_reg32h_i64(get_field(s
->fields
, r1
), o
->in2
);
2601 /* The value is stored even in case of trap. */
2602 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->in2
, 0, lab
);
2608 static ExitStatus
op_llgfat(DisasContext
*s
, DisasOps
*o
)
2610 TCGLabel
*lab
= gen_new_label();
2611 tcg_gen_qemu_ld32u(o
->out
, o
->in2
, get_mem_index(s
));
2612 /* The value is stored even in case of trap. */
2613 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2619 static ExitStatus
op_llgtat(DisasContext
*s
, DisasOps
*o
)
2621 TCGLabel
*lab
= gen_new_label();
2622 tcg_gen_andi_i64(o
->out
, o
->in2
, 0x7fffffff);
2623 /* The value is stored even in case of trap. */
2624 tcg_gen_brcondi_i64(TCG_COND_NE
, o
->out
, 0, lab
);
2630 static ExitStatus
op_loc(DisasContext
*s
, DisasOps
*o
)
2634 disas_jcc(s
, &c
, get_field(s
->fields
, m3
));
2637 tcg_gen_movcond_i64(c
.cond
, o
->out
, c
.u
.s64
.a
, c
.u
.s64
.b
,
2641 TCGv_i32 t32
= tcg_temp_new_i32();
2644 tcg_gen_setcond_i32(c
.cond
, t32
, c
.u
.s32
.a
, c
.u
.s32
.b
);
2647 t
= tcg_temp_new_i64();
2648 tcg_gen_extu_i32_i64(t
, t32
);
2649 tcg_temp_free_i32(t32
);
2651 z
= tcg_const_i64(0);
2652 tcg_gen_movcond_i64(TCG_COND_NE
, o
->out
, t
, z
, o
->in2
, o
->in1
);
2653 tcg_temp_free_i64(t
);
2654 tcg_temp_free_i64(z
);
2660 #ifndef CONFIG_USER_ONLY
2661 static ExitStatus
op_lctl(DisasContext
*s
, DisasOps
*o
)
2663 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2664 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2665 check_privileged(s
);
2666 gen_helper_lctl(cpu_env
, r1
, o
->in2
, r3
);
2667 tcg_temp_free_i32(r1
);
2668 tcg_temp_free_i32(r3
);
2672 static ExitStatus
op_lctlg(DisasContext
*s
, DisasOps
*o
)
2674 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2675 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2676 check_privileged(s
);
2677 gen_helper_lctlg(cpu_env
, r1
, o
->in2
, r3
);
2678 tcg_temp_free_i32(r1
);
2679 tcg_temp_free_i32(r3
);
2683 static ExitStatus
op_lra(DisasContext
*s
, DisasOps
*o
)
2685 check_privileged(s
);
2686 gen_helper_lra(o
->out
, cpu_env
, o
->in2
);
2691 static ExitStatus
op_lpp(DisasContext
*s
, DisasOps
*o
)
2693 check_privileged(s
);
2695 tcg_gen_st_i64(o
->in2
, cpu_env
, offsetof(CPUS390XState
, pp
));
2699 static ExitStatus
op_lpsw(DisasContext
*s
, DisasOps
*o
)
2703 check_privileged(s
);
2704 per_breaking_event(s
);
2706 t1
= tcg_temp_new_i64();
2707 t2
= tcg_temp_new_i64();
2708 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2709 tcg_gen_addi_i64(o
->in2
, o
->in2
, 4);
2710 tcg_gen_qemu_ld32u(t2
, o
->in2
, get_mem_index(s
));
2711 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2712 tcg_gen_shli_i64(t1
, t1
, 32);
2713 gen_helper_load_psw(cpu_env
, t1
, t2
);
2714 tcg_temp_free_i64(t1
);
2715 tcg_temp_free_i64(t2
);
2716 return EXIT_NORETURN
;
2719 static ExitStatus
op_lpswe(DisasContext
*s
, DisasOps
*o
)
2723 check_privileged(s
);
2724 per_breaking_event(s
);
2726 t1
= tcg_temp_new_i64();
2727 t2
= tcg_temp_new_i64();
2728 tcg_gen_qemu_ld64(t1
, o
->in2
, get_mem_index(s
));
2729 tcg_gen_addi_i64(o
->in2
, o
->in2
, 8);
2730 tcg_gen_qemu_ld64(t2
, o
->in2
, get_mem_index(s
));
2731 gen_helper_load_psw(cpu_env
, t1
, t2
);
2732 tcg_temp_free_i64(t1
);
2733 tcg_temp_free_i64(t2
);
2734 return EXIT_NORETURN
;
2738 static ExitStatus
op_lam(DisasContext
*s
, DisasOps
*o
)
2740 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
2741 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
2742 gen_helper_lam(cpu_env
, r1
, o
->in2
, r3
);
2743 tcg_temp_free_i32(r1
);
2744 tcg_temp_free_i32(r3
);
2748 static ExitStatus
op_lm32(DisasContext
*s
, DisasOps
*o
)
2750 int r1
= get_field(s
->fields
, r1
);
2751 int r3
= get_field(s
->fields
, r3
);
2754 /* Only one register to read. */
2755 t1
= tcg_temp_new_i64();
2756 if (unlikely(r1
== r3
)) {
2757 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2758 store_reg32_i64(r1
, t1
);
2763 /* First load the values of the first and last registers to trigger
2764 possible page faults. */
2765 t2
= tcg_temp_new_i64();
2766 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2767 tcg_gen_addi_i64(t2
, o
->in2
, 4 * ((r3
- r1
) & 15));
2768 tcg_gen_qemu_ld32u(t2
, t2
, get_mem_index(s
));
2769 store_reg32_i64(r1
, t1
);
2770 store_reg32_i64(r3
, t2
);
2772 /* Only two registers to read. */
2773 if (((r1
+ 1) & 15) == r3
) {
2779 /* Then load the remaining registers. Page fault can't occur. */
2781 tcg_gen_movi_i64(t2
, 4);
2784 tcg_gen_add_i64(o
->in2
, o
->in2
, t2
);
2785 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2786 store_reg32_i64(r1
, t1
);
2794 static ExitStatus
op_lmh(DisasContext
*s
, DisasOps
*o
)
2796 int r1
= get_field(s
->fields
, r1
);
2797 int r3
= get_field(s
->fields
, r3
);
2800 /* Only one register to read. */
2801 t1
= tcg_temp_new_i64();
2802 if (unlikely(r1
== r3
)) {
2803 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2804 store_reg32h_i64(r1
, t1
);
2809 /* First load the values of the first and last registers to trigger
2810 possible page faults. */
2811 t2
= tcg_temp_new_i64();
2812 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2813 tcg_gen_addi_i64(t2
, o
->in2
, 4 * ((r3
- r1
) & 15));
2814 tcg_gen_qemu_ld32u(t2
, t2
, get_mem_index(s
));
2815 store_reg32h_i64(r1
, t1
);
2816 store_reg32h_i64(r3
, t2
);
2818 /* Only two registers to read. */
2819 if (((r1
+ 1) & 15) == r3
) {
2825 /* Then load the remaining registers. Page fault can't occur. */
2827 tcg_gen_movi_i64(t2
, 4);
2830 tcg_gen_add_i64(o
->in2
, o
->in2
, t2
);
2831 tcg_gen_qemu_ld32u(t1
, o
->in2
, get_mem_index(s
));
2832 store_reg32h_i64(r1
, t1
);
2840 static ExitStatus
op_lm64(DisasContext
*s
, DisasOps
*o
)
2842 int r1
= get_field(s
->fields
, r1
);
2843 int r3
= get_field(s
->fields
, r3
);
2846 /* Only one register to read. */
2847 if (unlikely(r1
== r3
)) {
2848 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
2852 /* First load the values of the first and last registers to trigger
2853 possible page faults. */
2854 t1
= tcg_temp_new_i64();
2855 t2
= tcg_temp_new_i64();
2856 tcg_gen_qemu_ld64(t1
, o
->in2
, get_mem_index(s
));
2857 tcg_gen_addi_i64(t2
, o
->in2
, 8 * ((r3
- r1
) & 15));
2858 tcg_gen_qemu_ld64(regs
[r3
], t2
, get_mem_index(s
));
2859 tcg_gen_mov_i64(regs
[r1
], t1
);
2862 /* Only two registers to read. */
2863 if (((r1
+ 1) & 15) == r3
) {
2868 /* Then load the remaining registers. Page fault can't occur. */
2870 tcg_gen_movi_i64(t1
, 8);
2873 tcg_gen_add_i64(o
->in2
, o
->in2
, t1
);
2874 tcg_gen_qemu_ld64(regs
[r1
], o
->in2
, get_mem_index(s
));
2881 static ExitStatus
op_lpd(DisasContext
*s
, DisasOps
*o
)
2884 TCGMemOp mop
= s
->insn
->data
;
2886 /* In a parallel context, stop the world and single step. */
2887 if (parallel_cpus
) {
2888 potential_page_fault(s
);
2889 gen_exception(EXCP_ATOMIC
);
2890 return EXIT_NORETURN
;
2893 /* In a serial context, perform the two loads ... */
2894 a1
= get_address(s
, 0, get_field(s
->fields
, b1
), get_field(s
->fields
, d1
));
2895 a2
= get_address(s
, 0, get_field(s
->fields
, b2
), get_field(s
->fields
, d2
));
2896 tcg_gen_qemu_ld_i64(o
->out
, a1
, get_mem_index(s
), mop
| MO_ALIGN
);
2897 tcg_gen_qemu_ld_i64(o
->out2
, a2
, get_mem_index(s
), mop
| MO_ALIGN
);
2898 tcg_temp_free_i64(a1
);
2899 tcg_temp_free_i64(a2
);
2901 /* ... and indicate that we performed them while interlocked. */
2902 gen_op_movi_cc(s
, 0);
2906 static ExitStatus
op_lpq(DisasContext
*s
, DisasOps
*o
)
2908 gen_helper_lpq(o
->out
, cpu_env
, o
->in2
);
2909 return_low128(o
->out2
);
2913 #ifndef CONFIG_USER_ONLY
2914 static ExitStatus
op_lura(DisasContext
*s
, DisasOps
*o
)
2916 check_privileged(s
);
2917 potential_page_fault(s
);
2918 gen_helper_lura(o
->out
, cpu_env
, o
->in2
);
2922 static ExitStatus
op_lurag(DisasContext
*s
, DisasOps
*o
)
2924 check_privileged(s
);
2925 potential_page_fault(s
);
2926 gen_helper_lurag(o
->out
, cpu_env
, o
->in2
);
2931 static ExitStatus
op_lzrb(DisasContext
*s
, DisasOps
*o
)
2933 tcg_gen_andi_i64(o
->out
, o
->in2
, -256);
2937 static ExitStatus
op_mov2(DisasContext
*s
, DisasOps
*o
)
2940 o
->g_out
= o
->g_in2
;
2941 TCGV_UNUSED_I64(o
->in2
);
2946 static ExitStatus
op_mov2e(DisasContext
*s
, DisasOps
*o
)
2948 int b2
= get_field(s
->fields
, b2
);
2949 TCGv ar1
= tcg_temp_new_i64();
2952 o
->g_out
= o
->g_in2
;
2953 TCGV_UNUSED_I64(o
->in2
);
2956 switch (s
->tb
->flags
& FLAG_MASK_ASC
) {
2957 case PSW_ASC_PRIMARY
>> FLAG_MASK_PSW_SHIFT
:
2958 tcg_gen_movi_i64(ar1
, 0);
2960 case PSW_ASC_ACCREG
>> FLAG_MASK_PSW_SHIFT
:
2961 tcg_gen_movi_i64(ar1
, 1);
2963 case PSW_ASC_SECONDARY
>> FLAG_MASK_PSW_SHIFT
:
2965 tcg_gen_ld32u_i64(ar1
, cpu_env
, offsetof(CPUS390XState
, aregs
[b2
]));
2967 tcg_gen_movi_i64(ar1
, 0);
2970 case PSW_ASC_HOME
>> FLAG_MASK_PSW_SHIFT
:
2971 tcg_gen_movi_i64(ar1
, 2);
2975 tcg_gen_st32_i64(ar1
, cpu_env
, offsetof(CPUS390XState
, aregs
[1]));
2976 tcg_temp_free_i64(ar1
);
2981 static ExitStatus
op_movx(DisasContext
*s
, DisasOps
*o
)
2985 o
->g_out
= o
->g_in1
;
2986 o
->g_out2
= o
->g_in2
;
2987 TCGV_UNUSED_I64(o
->in1
);
2988 TCGV_UNUSED_I64(o
->in2
);
2989 o
->g_in1
= o
->g_in2
= false;
2993 static ExitStatus
op_mvc(DisasContext
*s
, DisasOps
*o
)
2995 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
2996 gen_helper_mvc(cpu_env
, l
, o
->addr1
, o
->in2
);
2997 tcg_temp_free_i32(l
);
3001 static ExitStatus
op_mvcin(DisasContext
*s
, DisasOps
*o
)
3003 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3004 gen_helper_mvcin(cpu_env
, l
, o
->addr1
, o
->in2
);
3005 tcg_temp_free_i32(l
);
3009 static ExitStatus
op_mvcl(DisasContext
*s
, DisasOps
*o
)
3011 int r1
= get_field(s
->fields
, r1
);
3012 int r2
= get_field(s
->fields
, r2
);
3015 /* r1 and r2 must be even. */
3016 if (r1
& 1 || r2
& 1) {
3017 gen_program_exception(s
, PGM_SPECIFICATION
);
3018 return EXIT_NORETURN
;
3021 t1
= tcg_const_i32(r1
);
3022 t2
= tcg_const_i32(r2
);
3023 gen_helper_mvcl(cc_op
, cpu_env
, t1
, t2
);
3024 tcg_temp_free_i32(t1
);
3025 tcg_temp_free_i32(t2
);
3030 static ExitStatus
op_mvcle(DisasContext
*s
, DisasOps
*o
)
3032 int r1
= get_field(s
->fields
, r1
);
3033 int r3
= get_field(s
->fields
, r3
);
3036 /* r1 and r3 must be even. */
3037 if (r1
& 1 || r3
& 1) {
3038 gen_program_exception(s
, PGM_SPECIFICATION
);
3039 return EXIT_NORETURN
;
3042 t1
= tcg_const_i32(r1
);
3043 t3
= tcg_const_i32(r3
);
3044 gen_helper_mvcle(cc_op
, cpu_env
, t1
, o
->in2
, t3
);
3045 tcg_temp_free_i32(t1
);
3046 tcg_temp_free_i32(t3
);
3051 static ExitStatus
op_mvclu(DisasContext
*s
, DisasOps
*o
)
3053 int r1
= get_field(s
->fields
, r1
);
3054 int r3
= get_field(s
->fields
, r3
);
3057 /* r1 and r3 must be even. */
3058 if (r1
& 1 || r3
& 1) {
3059 gen_program_exception(s
, PGM_SPECIFICATION
);
3060 return EXIT_NORETURN
;
3063 t1
= tcg_const_i32(r1
);
3064 t3
= tcg_const_i32(r3
);
3065 gen_helper_mvclu(cc_op
, cpu_env
, t1
, o
->in2
, t3
);
3066 tcg_temp_free_i32(t1
);
3067 tcg_temp_free_i32(t3
);
3072 static ExitStatus
op_mvcos(DisasContext
*s
, DisasOps
*o
)
3074 int r3
= get_field(s
->fields
, r3
);
3075 gen_helper_mvcos(cc_op
, cpu_env
, o
->addr1
, o
->in2
, regs
[r3
]);
3080 #ifndef CONFIG_USER_ONLY
3081 static ExitStatus
op_mvcp(DisasContext
*s
, DisasOps
*o
)
3083 int r1
= get_field(s
->fields
, l1
);
3084 check_privileged(s
);
3085 gen_helper_mvcp(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
3090 static ExitStatus
op_mvcs(DisasContext
*s
, DisasOps
*o
)
3092 int r1
= get_field(s
->fields
, l1
);
3093 check_privileged(s
);
3094 gen_helper_mvcs(cc_op
, cpu_env
, regs
[r1
], o
->addr1
, o
->in2
);
3100 static ExitStatus
op_mvn(DisasContext
*s
, DisasOps
*o
)
3102 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3103 gen_helper_mvn(cpu_env
, l
, o
->addr1
, o
->in2
);
3104 tcg_temp_free_i32(l
);
3108 static ExitStatus
op_mvo(DisasContext
*s
, DisasOps
*o
)
3110 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3111 gen_helper_mvo(cpu_env
, l
, o
->addr1
, o
->in2
);
3112 tcg_temp_free_i32(l
);
3116 static ExitStatus
op_mvpg(DisasContext
*s
, DisasOps
*o
)
3118 gen_helper_mvpg(cc_op
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
3123 static ExitStatus
op_mvst(DisasContext
*s
, DisasOps
*o
)
3125 gen_helper_mvst(o
->in1
, cpu_env
, regs
[0], o
->in1
, o
->in2
);
3127 return_low128(o
->in2
);
3131 static ExitStatus
op_mvz(DisasContext
*s
, DisasOps
*o
)
3133 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3134 gen_helper_mvz(cpu_env
, l
, o
->addr1
, o
->in2
);
3135 tcg_temp_free_i32(l
);
3139 static ExitStatus
op_mul(DisasContext
*s
, DisasOps
*o
)
3141 tcg_gen_mul_i64(o
->out
, o
->in1
, o
->in2
);
3145 static ExitStatus
op_mul128(DisasContext
*s
, DisasOps
*o
)
3147 tcg_gen_mulu2_i64(o
->out2
, o
->out
, o
->in1
, o
->in2
);
3151 static ExitStatus
op_meeb(DisasContext
*s
, DisasOps
*o
)
3153 gen_helper_meeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3157 static ExitStatus
op_mdeb(DisasContext
*s
, DisasOps
*o
)
3159 gen_helper_mdeb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3163 static ExitStatus
op_mdb(DisasContext
*s
, DisasOps
*o
)
3165 gen_helper_mdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3169 static ExitStatus
op_mxb(DisasContext
*s
, DisasOps
*o
)
3171 gen_helper_mxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
3172 return_low128(o
->out2
);
3176 static ExitStatus
op_mxdb(DisasContext
*s
, DisasOps
*o
)
3178 gen_helper_mxdb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
3179 return_low128(o
->out2
);
3183 static ExitStatus
op_maeb(DisasContext
*s
, DisasOps
*o
)
3185 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
3186 gen_helper_maeb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
3187 tcg_temp_free_i64(r3
);
3191 static ExitStatus
op_madb(DisasContext
*s
, DisasOps
*o
)
3193 int r3
= get_field(s
->fields
, r3
);
3194 gen_helper_madb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
3198 static ExitStatus
op_mseb(DisasContext
*s
, DisasOps
*o
)
3200 TCGv_i64 r3
= load_freg32_i64(get_field(s
->fields
, r3
));
3201 gen_helper_mseb(o
->out
, cpu_env
, o
->in1
, o
->in2
, r3
);
3202 tcg_temp_free_i64(r3
);
3206 static ExitStatus
op_msdb(DisasContext
*s
, DisasOps
*o
)
3208 int r3
= get_field(s
->fields
, r3
);
3209 gen_helper_msdb(o
->out
, cpu_env
, o
->in1
, o
->in2
, fregs
[r3
]);
3213 static ExitStatus
op_nabs(DisasContext
*s
, DisasOps
*o
)
3216 z
= tcg_const_i64(0);
3217 n
= tcg_temp_new_i64();
3218 tcg_gen_neg_i64(n
, o
->in2
);
3219 tcg_gen_movcond_i64(TCG_COND_GE
, o
->out
, o
->in2
, z
, n
, o
->in2
);
3220 tcg_temp_free_i64(n
);
3221 tcg_temp_free_i64(z
);
3225 static ExitStatus
op_nabsf32(DisasContext
*s
, DisasOps
*o
)
3227 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x80000000ull
);
3231 static ExitStatus
op_nabsf64(DisasContext
*s
, DisasOps
*o
)
3233 tcg_gen_ori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
3237 static ExitStatus
op_nabsf128(DisasContext
*s
, DisasOps
*o
)
3239 tcg_gen_ori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
3240 tcg_gen_mov_i64(o
->out2
, o
->in2
);
3244 static ExitStatus
op_nc(DisasContext
*s
, DisasOps
*o
)
3246 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3247 gen_helper_nc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
3248 tcg_temp_free_i32(l
);
3253 static ExitStatus
op_neg(DisasContext
*s
, DisasOps
*o
)
3255 tcg_gen_neg_i64(o
->out
, o
->in2
);
3259 static ExitStatus
op_negf32(DisasContext
*s
, DisasOps
*o
)
3261 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x80000000ull
);
3265 static ExitStatus
op_negf64(DisasContext
*s
, DisasOps
*o
)
3267 tcg_gen_xori_i64(o
->out
, o
->in2
, 0x8000000000000000ull
);
3271 static ExitStatus
op_negf128(DisasContext
*s
, DisasOps
*o
)
3273 tcg_gen_xori_i64(o
->out
, o
->in1
, 0x8000000000000000ull
);
3274 tcg_gen_mov_i64(o
->out2
, o
->in2
);
3278 static ExitStatus
op_oc(DisasContext
*s
, DisasOps
*o
)
3280 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3281 gen_helper_oc(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
3282 tcg_temp_free_i32(l
);
3287 static ExitStatus
op_or(DisasContext
*s
, DisasOps
*o
)
3289 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
3293 static ExitStatus
op_ori(DisasContext
*s
, DisasOps
*o
)
3295 int shift
= s
->insn
->data
& 0xff;
3296 int size
= s
->insn
->data
>> 8;
3297 uint64_t mask
= ((1ull << size
) - 1) << shift
;
3300 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
3301 tcg_gen_or_i64(o
->out
, o
->in1
, o
->in2
);
3303 /* Produce the CC from only the bits manipulated. */
3304 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3305 set_cc_nz_u64(s
, cc_dst
);
3309 static ExitStatus
op_pack(DisasContext
*s
, DisasOps
*o
)
3311 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
3312 gen_helper_pack(cpu_env
, l
, o
->addr1
, o
->in2
);
3313 tcg_temp_free_i32(l
);
3317 static ExitStatus
op_pka(DisasContext
*s
, DisasOps
*o
)
3319 int l2
= get_field(s
->fields
, l2
) + 1;
3322 /* The length must not exceed 32 bytes. */
3324 gen_program_exception(s
, PGM_SPECIFICATION
);
3325 return EXIT_NORETURN
;
3327 l
= tcg_const_i32(l2
);
3328 gen_helper_pka(cpu_env
, o
->addr1
, o
->in2
, l
);
3329 tcg_temp_free_i32(l
);
3333 static ExitStatus
op_pku(DisasContext
*s
, DisasOps
*o
)
3335 int l2
= get_field(s
->fields
, l2
) + 1;
3338 /* The length must be even and should not exceed 64 bytes. */
3339 if ((l2
& 1) || (l2
> 64)) {
3340 gen_program_exception(s
, PGM_SPECIFICATION
);
3341 return EXIT_NORETURN
;
3343 l
= tcg_const_i32(l2
);
3344 gen_helper_pku(cpu_env
, o
->addr1
, o
->in2
, l
);
3345 tcg_temp_free_i32(l
);
3349 static ExitStatus
op_popcnt(DisasContext
*s
, DisasOps
*o
)
3351 gen_helper_popcnt(o
->out
, o
->in2
);
3355 #ifndef CONFIG_USER_ONLY
3356 static ExitStatus
op_ptlb(DisasContext
*s
, DisasOps
*o
)
3358 check_privileged(s
);
3359 gen_helper_ptlb(cpu_env
);
3364 static ExitStatus
op_risbg(DisasContext
*s
, DisasOps
*o
)
3366 int i3
= get_field(s
->fields
, i3
);
3367 int i4
= get_field(s
->fields
, i4
);
3368 int i5
= get_field(s
->fields
, i5
);
3369 int do_zero
= i4
& 0x80;
3370 uint64_t mask
, imask
, pmask
;
3373 /* Adjust the arguments for the specific insn. */
3374 switch (s
->fields
->op2
) {
3375 case 0x55: /* risbg */
3380 case 0x5d: /* risbhg */
3383 pmask
= 0xffffffff00000000ull
;
3385 case 0x51: /* risblg */
3388 pmask
= 0x00000000ffffffffull
;
3394 /* MASK is the set of bits to be inserted from R2.
3395 Take care for I3/I4 wraparound. */
3398 mask
^= pmask
>> i4
>> 1;
3400 mask
|= ~(pmask
>> i4
>> 1);
3404 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
3405 insns, we need to keep the other half of the register. */
3406 imask
= ~mask
| ~pmask
;
3408 if (s
->fields
->op2
== 0x55) {
3418 if (s
->fields
->op2
== 0x5d) {
3422 /* In some cases we can implement this with extract. */
3423 if (imask
== 0 && pos
== 0 && len
> 0 && len
<= rot
) {
3424 tcg_gen_extract_i64(o
->out
, o
->in2
, 64 - rot
, len
);
3428 /* In some cases we can implement this with deposit. */
3429 if (len
> 0 && (imask
== 0 || ~mask
== imask
)) {
3430 /* Note that we rotate the bits to be inserted to the lsb, not to
3431 the position as described in the PoO. */
3432 rot
= (rot
- pos
) & 63;
3437 /* Rotate the input as necessary. */
3438 tcg_gen_rotli_i64(o
->in2
, o
->in2
, rot
);
3440 /* Insert the selected bits into the output. */
3443 tcg_gen_deposit_z_i64(o
->out
, o
->in2
, pos
, len
);
3445 tcg_gen_deposit_i64(o
->out
, o
->out
, o
->in2
, pos
, len
);
3447 } else if (imask
== 0) {
3448 tcg_gen_andi_i64(o
->out
, o
->in2
, mask
);
3450 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3451 tcg_gen_andi_i64(o
->out
, o
->out
, imask
);
3452 tcg_gen_or_i64(o
->out
, o
->out
, o
->in2
);
3457 static ExitStatus
op_rosbg(DisasContext
*s
, DisasOps
*o
)
3459 int i3
= get_field(s
->fields
, i3
);
3460 int i4
= get_field(s
->fields
, i4
);
3461 int i5
= get_field(s
->fields
, i5
);
3464 /* If this is a test-only form, arrange to discard the result. */
3466 o
->out
= tcg_temp_new_i64();
3474 /* MASK is the set of bits to be operated on from R2.
3475 Take care for I3/I4 wraparound. */
3478 mask
^= ~0ull >> i4
>> 1;
3480 mask
|= ~(~0ull >> i4
>> 1);
3483 /* Rotate the input as necessary. */
3484 tcg_gen_rotli_i64(o
->in2
, o
->in2
, i5
);
3487 switch (s
->fields
->op2
) {
3488 case 0x55: /* AND */
3489 tcg_gen_ori_i64(o
->in2
, o
->in2
, ~mask
);
3490 tcg_gen_and_i64(o
->out
, o
->out
, o
->in2
);
3493 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3494 tcg_gen_or_i64(o
->out
, o
->out
, o
->in2
);
3496 case 0x57: /* XOR */
3497 tcg_gen_andi_i64(o
->in2
, o
->in2
, mask
);
3498 tcg_gen_xor_i64(o
->out
, o
->out
, o
->in2
);
3505 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
3506 set_cc_nz_u64(s
, cc_dst
);
3510 static ExitStatus
op_rev16(DisasContext
*s
, DisasOps
*o
)
3512 tcg_gen_bswap16_i64(o
->out
, o
->in2
);
3516 static ExitStatus
op_rev32(DisasContext
*s
, DisasOps
*o
)
3518 tcg_gen_bswap32_i64(o
->out
, o
->in2
);
3522 static ExitStatus
op_rev64(DisasContext
*s
, DisasOps
*o
)
3524 tcg_gen_bswap64_i64(o
->out
, o
->in2
);
3528 static ExitStatus
op_rll32(DisasContext
*s
, DisasOps
*o
)
3530 TCGv_i32 t1
= tcg_temp_new_i32();
3531 TCGv_i32 t2
= tcg_temp_new_i32();
3532 TCGv_i32 to
= tcg_temp_new_i32();
3533 tcg_gen_extrl_i64_i32(t1
, o
->in1
);
3534 tcg_gen_extrl_i64_i32(t2
, o
->in2
);
3535 tcg_gen_rotl_i32(to
, t1
, t2
);
3536 tcg_gen_extu_i32_i64(o
->out
, to
);
3537 tcg_temp_free_i32(t1
);
3538 tcg_temp_free_i32(t2
);
3539 tcg_temp_free_i32(to
);
3543 static ExitStatus
op_rll64(DisasContext
*s
, DisasOps
*o
)
3545 tcg_gen_rotl_i64(o
->out
, o
->in1
, o
->in2
);
3549 #ifndef CONFIG_USER_ONLY
3550 static ExitStatus
op_rrbe(DisasContext
*s
, DisasOps
*o
)
3552 check_privileged(s
);
3553 gen_helper_rrbe(cc_op
, cpu_env
, o
->in2
);
3558 static ExitStatus
op_sacf(DisasContext
*s
, DisasOps
*o
)
3560 check_privileged(s
);
3561 gen_helper_sacf(cpu_env
, o
->in2
);
3562 /* Addressing mode has changed, so end the block. */
3563 return EXIT_PC_STALE
;
3567 static ExitStatus
op_sam(DisasContext
*s
, DisasOps
*o
)
3569 int sam
= s
->insn
->data
;
3585 /* Bizarre but true, we check the address of the current insn for the
3586 specification exception, not the next to be executed. Thus the PoO
3587 documents that Bad Things Happen two bytes before the end. */
3588 if (s
->pc
& ~mask
) {
3589 gen_program_exception(s
, PGM_SPECIFICATION
);
3590 return EXIT_NORETURN
;
3594 tsam
= tcg_const_i64(sam
);
3595 tcg_gen_deposit_i64(psw_mask
, psw_mask
, tsam
, 31, 2);
3596 tcg_temp_free_i64(tsam
);
3598 /* Always exit the TB, since we (may have) changed execution mode. */
3599 return EXIT_PC_STALE
;
3602 static ExitStatus
op_sar(DisasContext
*s
, DisasOps
*o
)
3604 int r1
= get_field(s
->fields
, r1
);
3605 tcg_gen_st32_i64(o
->in2
, cpu_env
, offsetof(CPUS390XState
, aregs
[r1
]));
3609 static ExitStatus
op_seb(DisasContext
*s
, DisasOps
*o
)
3611 gen_helper_seb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3615 static ExitStatus
op_sdb(DisasContext
*s
, DisasOps
*o
)
3617 gen_helper_sdb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3621 static ExitStatus
op_sxb(DisasContext
*s
, DisasOps
*o
)
3623 gen_helper_sxb(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in1
, o
->in2
);
3624 return_low128(o
->out2
);
3628 static ExitStatus
op_sqeb(DisasContext
*s
, DisasOps
*o
)
3630 gen_helper_sqeb(o
->out
, cpu_env
, o
->in2
);
3634 static ExitStatus
op_sqdb(DisasContext
*s
, DisasOps
*o
)
3636 gen_helper_sqdb(o
->out
, cpu_env
, o
->in2
);
3640 static ExitStatus
op_sqxb(DisasContext
*s
, DisasOps
*o
)
3642 gen_helper_sqxb(o
->out
, cpu_env
, o
->in1
, o
->in2
);
3643 return_low128(o
->out2
);
3647 #ifndef CONFIG_USER_ONLY
3648 static ExitStatus
op_servc(DisasContext
*s
, DisasOps
*o
)
3650 check_privileged(s
);
3651 potential_page_fault(s
);
3652 gen_helper_servc(cc_op
, cpu_env
, o
->in2
, o
->in1
);
3657 static ExitStatus
op_sigp(DisasContext
*s
, DisasOps
*o
)
3659 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3660 check_privileged(s
);
3661 potential_page_fault(s
);
3662 gen_helper_sigp(cc_op
, cpu_env
, o
->in2
, r1
, o
->in1
);
3664 tcg_temp_free_i32(r1
);
3669 static ExitStatus
op_soc(DisasContext
*s
, DisasOps
*o
)
3676 disas_jcc(s
, &c
, get_field(s
->fields
, m3
));
3678 /* We want to store when the condition is fulfilled, so branch
3679 out when it's not */
3680 c
.cond
= tcg_invert_cond(c
.cond
);
3682 lab
= gen_new_label();
3684 tcg_gen_brcond_i64(c
.cond
, c
.u
.s64
.a
, c
.u
.s64
.b
, lab
);
3686 tcg_gen_brcond_i32(c
.cond
, c
.u
.s32
.a
, c
.u
.s32
.b
, lab
);
3690 r1
= get_field(s
->fields
, r1
);
3691 a
= get_address(s
, 0, get_field(s
->fields
, b2
), get_field(s
->fields
, d2
));
3692 switch (s
->insn
->data
) {
3694 tcg_gen_qemu_st64(regs
[r1
], a
, get_mem_index(s
));
3697 tcg_gen_qemu_st32(regs
[r1
], a
, get_mem_index(s
));
3699 case 2: /* STOCFH */
3700 h
= tcg_temp_new_i64();
3701 tcg_gen_shri_i64(h
, regs
[r1
], 32);
3702 tcg_gen_qemu_st32(h
, a
, get_mem_index(s
));
3703 tcg_temp_free_i64(h
);
3706 g_assert_not_reached();
3708 tcg_temp_free_i64(a
);
3714 static ExitStatus
op_sla(DisasContext
*s
, DisasOps
*o
)
3716 uint64_t sign
= 1ull << s
->insn
->data
;
3717 enum cc_op cco
= s
->insn
->data
== 31 ? CC_OP_SLA_32
: CC_OP_SLA_64
;
3718 gen_op_update2_cc_i64(s
, cco
, o
->in1
, o
->in2
);
3719 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
3720 /* The arithmetic left shift is curious in that it does not affect
3721 the sign bit. Copy that over from the source unchanged. */
3722 tcg_gen_andi_i64(o
->out
, o
->out
, ~sign
);
3723 tcg_gen_andi_i64(o
->in1
, o
->in1
, sign
);
3724 tcg_gen_or_i64(o
->out
, o
->out
, o
->in1
);
3728 static ExitStatus
op_sll(DisasContext
*s
, DisasOps
*o
)
3730 tcg_gen_shl_i64(o
->out
, o
->in1
, o
->in2
);
3734 static ExitStatus
op_sra(DisasContext
*s
, DisasOps
*o
)
3736 tcg_gen_sar_i64(o
->out
, o
->in1
, o
->in2
);
3740 static ExitStatus
op_srl(DisasContext
*s
, DisasOps
*o
)
3742 tcg_gen_shr_i64(o
->out
, o
->in1
, o
->in2
);
3746 static ExitStatus
op_sfpc(DisasContext
*s
, DisasOps
*o
)
3748 gen_helper_sfpc(cpu_env
, o
->in2
);
3752 static ExitStatus
op_sfas(DisasContext
*s
, DisasOps
*o
)
3754 gen_helper_sfas(cpu_env
, o
->in2
);
3758 static ExitStatus
op_srnm(DisasContext
*s
, DisasOps
*o
)
3760 int b2
= get_field(s
->fields
, b2
);
3761 int d2
= get_field(s
->fields
, d2
);
3762 TCGv_i64 t1
= tcg_temp_new_i64();
3763 TCGv_i64 t2
= tcg_temp_new_i64();
3766 switch (s
->fields
->op2
) {
3767 case 0x99: /* SRNM */
3770 case 0xb8: /* SRNMB */
3773 case 0xb9: /* SRNMT */
3779 mask
= (1 << len
) - 1;
3781 /* Insert the value into the appropriate field of the FPC. */
3783 tcg_gen_movi_i64(t1
, d2
& mask
);
3785 tcg_gen_addi_i64(t1
, regs
[b2
], d2
);
3786 tcg_gen_andi_i64(t1
, t1
, mask
);
3788 tcg_gen_ld32u_i64(t2
, cpu_env
, offsetof(CPUS390XState
, fpc
));
3789 tcg_gen_deposit_i64(t2
, t2
, t1
, pos
, len
);
3790 tcg_temp_free_i64(t1
);
3792 /* Then install the new FPC to set the rounding mode in fpu_status. */
3793 gen_helper_sfpc(cpu_env
, t2
);
3794 tcg_temp_free_i64(t2
);
3798 #ifndef CONFIG_USER_ONLY
3799 static ExitStatus
op_spka(DisasContext
*s
, DisasOps
*o
)
3801 check_privileged(s
);
3802 tcg_gen_shri_i64(o
->in2
, o
->in2
, 4);
3803 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, PSW_SHIFT_KEY
, 4);
3807 static ExitStatus
op_sske(DisasContext
*s
, DisasOps
*o
)
3809 check_privileged(s
);
3810 gen_helper_sske(cpu_env
, o
->in1
, o
->in2
);
3814 static ExitStatus
op_ssm(DisasContext
*s
, DisasOps
*o
)
3816 check_privileged(s
);
3817 tcg_gen_deposit_i64(psw_mask
, psw_mask
, o
->in2
, 56, 8);
3818 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
3819 return EXIT_PC_STALE_NOCHAIN
;
3822 static ExitStatus
op_stap(DisasContext
*s
, DisasOps
*o
)
3824 check_privileged(s
);
3825 /* ??? Surely cpu address != cpu number. In any case the previous
3826 version of this stored more than the required half-word, so it
3827 is unlikely this has ever been tested. */
3828 tcg_gen_ld32u_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, cpu_num
));
3832 static ExitStatus
op_stck(DisasContext
*s
, DisasOps
*o
)
3834 gen_helper_stck(o
->out
, cpu_env
);
3835 /* ??? We don't implement clock states. */
3836 gen_op_movi_cc(s
, 0);
3840 static ExitStatus
op_stcke(DisasContext
*s
, DisasOps
*o
)
3842 TCGv_i64 c1
= tcg_temp_new_i64();
3843 TCGv_i64 c2
= tcg_temp_new_i64();
3844 gen_helper_stck(c1
, cpu_env
);
3845 /* Shift the 64-bit value into its place as a zero-extended
3846 104-bit value. Note that "bit positions 64-103 are always
3847 non-zero so that they compare differently to STCK"; we set
3848 the least significant bit to 1. */
3849 tcg_gen_shli_i64(c2
, c1
, 56);
3850 tcg_gen_shri_i64(c1
, c1
, 8);
3851 tcg_gen_ori_i64(c2
, c2
, 0x10000);
3852 tcg_gen_qemu_st64(c1
, o
->in2
, get_mem_index(s
));
3853 tcg_gen_addi_i64(o
->in2
, o
->in2
, 8);
3854 tcg_gen_qemu_st64(c2
, o
->in2
, get_mem_index(s
));
3855 tcg_temp_free_i64(c1
);
3856 tcg_temp_free_i64(c2
);
3857 /* ??? We don't implement clock states. */
3858 gen_op_movi_cc(s
, 0);
3862 static ExitStatus
op_sckc(DisasContext
*s
, DisasOps
*o
)
3864 check_privileged(s
);
3865 gen_helper_sckc(cpu_env
, o
->in2
);
3869 static ExitStatus
op_stckc(DisasContext
*s
, DisasOps
*o
)
3871 check_privileged(s
);
3872 gen_helper_stckc(o
->out
, cpu_env
);
3876 static ExitStatus
op_stctg(DisasContext
*s
, DisasOps
*o
)
3878 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3879 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3880 check_privileged(s
);
3881 gen_helper_stctg(cpu_env
, r1
, o
->in2
, r3
);
3882 tcg_temp_free_i32(r1
);
3883 tcg_temp_free_i32(r3
);
3887 static ExitStatus
op_stctl(DisasContext
*s
, DisasOps
*o
)
3889 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
3890 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
3891 check_privileged(s
);
3892 gen_helper_stctl(cpu_env
, r1
, o
->in2
, r3
);
3893 tcg_temp_free_i32(r1
);
3894 tcg_temp_free_i32(r3
);
3898 static ExitStatus
op_stidp(DisasContext
*s
, DisasOps
*o
)
3900 check_privileged(s
);
3901 tcg_gen_ld_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, cpuid
));
3902 tcg_gen_qemu_st_i64(o
->out
, o
->addr1
, get_mem_index(s
), MO_TEQ
| MO_ALIGN
);
3906 static ExitStatus
op_spt(DisasContext
*s
, DisasOps
*o
)
3908 check_privileged(s
);
3909 gen_helper_spt(cpu_env
, o
->in2
);
3913 static ExitStatus
op_stfl(DisasContext
*s
, DisasOps
*o
)
3915 check_privileged(s
);
3916 gen_helper_stfl(cpu_env
);
3920 static ExitStatus
op_stpt(DisasContext
*s
, DisasOps
*o
)
3922 check_privileged(s
);
3923 gen_helper_stpt(o
->out
, cpu_env
);
3927 static ExitStatus
op_stsi(DisasContext
*s
, DisasOps
*o
)
3929 check_privileged(s
);
3930 potential_page_fault(s
);
3931 gen_helper_stsi(cc_op
, cpu_env
, o
->in2
, regs
[0], regs
[1]);
3936 static ExitStatus
op_spx(DisasContext
*s
, DisasOps
*o
)
3938 check_privileged(s
);
3939 gen_helper_spx(cpu_env
, o
->in2
);
3943 static ExitStatus
op_xsch(DisasContext
*s
, DisasOps
*o
)
3945 check_privileged(s
);
3946 potential_page_fault(s
);
3947 gen_helper_xsch(cpu_env
, regs
[1]);
3952 static ExitStatus
op_csch(DisasContext
*s
, DisasOps
*o
)
3954 check_privileged(s
);
3955 potential_page_fault(s
);
3956 gen_helper_csch(cpu_env
, regs
[1]);
3961 static ExitStatus
op_hsch(DisasContext
*s
, DisasOps
*o
)
3963 check_privileged(s
);
3964 potential_page_fault(s
);
3965 gen_helper_hsch(cpu_env
, regs
[1]);
3970 static ExitStatus
op_msch(DisasContext
*s
, DisasOps
*o
)
3972 check_privileged(s
);
3973 potential_page_fault(s
);
3974 gen_helper_msch(cpu_env
, regs
[1], o
->in2
);
3979 static ExitStatus
op_rchp(DisasContext
*s
, DisasOps
*o
)
3981 check_privileged(s
);
3982 potential_page_fault(s
);
3983 gen_helper_rchp(cpu_env
, regs
[1]);
3988 static ExitStatus
op_rsch(DisasContext
*s
, DisasOps
*o
)
3990 check_privileged(s
);
3991 potential_page_fault(s
);
3992 gen_helper_rsch(cpu_env
, regs
[1]);
3997 static ExitStatus
op_ssch(DisasContext
*s
, DisasOps
*o
)
3999 check_privileged(s
);
4000 potential_page_fault(s
);
4001 gen_helper_ssch(cpu_env
, regs
[1], o
->in2
);
4006 static ExitStatus
op_stsch(DisasContext
*s
, DisasOps
*o
)
4008 check_privileged(s
);
4009 potential_page_fault(s
);
4010 gen_helper_stsch(cpu_env
, regs
[1], o
->in2
);
4015 static ExitStatus
op_tsch(DisasContext
*s
, DisasOps
*o
)
4017 check_privileged(s
);
4018 potential_page_fault(s
);
4019 gen_helper_tsch(cpu_env
, regs
[1], o
->in2
);
4024 static ExitStatus
op_chsc(DisasContext
*s
, DisasOps
*o
)
4026 check_privileged(s
);
4027 potential_page_fault(s
);
4028 gen_helper_chsc(cpu_env
, o
->in2
);
4033 static ExitStatus
op_stpx(DisasContext
*s
, DisasOps
*o
)
4035 check_privileged(s
);
4036 tcg_gen_ld_i64(o
->out
, cpu_env
, offsetof(CPUS390XState
, psa
));
4037 tcg_gen_andi_i64(o
->out
, o
->out
, 0x7fffe000);
4041 static ExitStatus
op_stnosm(DisasContext
*s
, DisasOps
*o
)
4043 uint64_t i2
= get_field(s
->fields
, i2
);
4046 check_privileged(s
);
4048 /* It is important to do what the instruction name says: STORE THEN.
4049 If we let the output hook perform the store then if we fault and
4050 restart, we'll have the wrong SYSTEM MASK in place. */
4051 t
= tcg_temp_new_i64();
4052 tcg_gen_shri_i64(t
, psw_mask
, 56);
4053 tcg_gen_qemu_st8(t
, o
->addr1
, get_mem_index(s
));
4054 tcg_temp_free_i64(t
);
4056 if (s
->fields
->op
== 0xac) {
4057 tcg_gen_andi_i64(psw_mask
, psw_mask
,
4058 (i2
<< 56) | 0x00ffffffffffffffull
);
4060 tcg_gen_ori_i64(psw_mask
, psw_mask
, i2
<< 56);
4063 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
4064 return EXIT_PC_STALE_NOCHAIN
;
4067 static ExitStatus
op_stura(DisasContext
*s
, DisasOps
*o
)
4069 check_privileged(s
);
4070 potential_page_fault(s
);
4071 gen_helper_stura(cpu_env
, o
->in2
, o
->in1
);
4075 static ExitStatus
op_sturg(DisasContext
*s
, DisasOps
*o
)
4077 check_privileged(s
);
4078 potential_page_fault(s
);
4079 gen_helper_sturg(cpu_env
, o
->in2
, o
->in1
);
4084 static ExitStatus
op_stfle(DisasContext
*s
, DisasOps
*o
)
4086 potential_page_fault(s
);
4087 gen_helper_stfle(cc_op
, cpu_env
, o
->in2
);
4092 static ExitStatus
op_st8(DisasContext
*s
, DisasOps
*o
)
4094 tcg_gen_qemu_st8(o
->in1
, o
->in2
, get_mem_index(s
));
4098 static ExitStatus
op_st16(DisasContext
*s
, DisasOps
*o
)
4100 tcg_gen_qemu_st16(o
->in1
, o
->in2
, get_mem_index(s
));
4104 static ExitStatus
op_st32(DisasContext
*s
, DisasOps
*o
)
4106 tcg_gen_qemu_st32(o
->in1
, o
->in2
, get_mem_index(s
));
4110 static ExitStatus
op_st64(DisasContext
*s
, DisasOps
*o
)
4112 tcg_gen_qemu_st64(o
->in1
, o
->in2
, get_mem_index(s
));
4116 static ExitStatus
op_stam(DisasContext
*s
, DisasOps
*o
)
4118 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4119 TCGv_i32 r3
= tcg_const_i32(get_field(s
->fields
, r3
));
4120 gen_helper_stam(cpu_env
, r1
, o
->in2
, r3
);
4121 tcg_temp_free_i32(r1
);
4122 tcg_temp_free_i32(r3
);
4126 static ExitStatus
op_stcm(DisasContext
*s
, DisasOps
*o
)
4128 int m3
= get_field(s
->fields
, m3
);
4129 int pos
, base
= s
->insn
->data
;
4130 TCGv_i64 tmp
= tcg_temp_new_i64();
4132 pos
= base
+ ctz32(m3
) * 8;
4135 /* Effectively a 32-bit store. */
4136 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
4137 tcg_gen_qemu_st32(tmp
, o
->in2
, get_mem_index(s
));
4143 /* Effectively a 16-bit store. */
4144 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
4145 tcg_gen_qemu_st16(tmp
, o
->in2
, get_mem_index(s
));
4152 /* Effectively an 8-bit store. */
4153 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
4154 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
4158 /* This is going to be a sequence of shifts and stores. */
4159 pos
= base
+ 32 - 8;
4162 tcg_gen_shri_i64(tmp
, o
->in1
, pos
);
4163 tcg_gen_qemu_st8(tmp
, o
->in2
, get_mem_index(s
));
4164 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
4166 m3
= (m3
<< 1) & 0xf;
4171 tcg_temp_free_i64(tmp
);
4175 static ExitStatus
op_stm(DisasContext
*s
, DisasOps
*o
)
4177 int r1
= get_field(s
->fields
, r1
);
4178 int r3
= get_field(s
->fields
, r3
);
4179 int size
= s
->insn
->data
;
4180 TCGv_i64 tsize
= tcg_const_i64(size
);
4184 tcg_gen_qemu_st64(regs
[r1
], o
->in2
, get_mem_index(s
));
4186 tcg_gen_qemu_st32(regs
[r1
], o
->in2
, get_mem_index(s
));
4191 tcg_gen_add_i64(o
->in2
, o
->in2
, tsize
);
4195 tcg_temp_free_i64(tsize
);
4199 static ExitStatus
op_stmh(DisasContext
*s
, DisasOps
*o
)
4201 int r1
= get_field(s
->fields
, r1
);
4202 int r3
= get_field(s
->fields
, r3
);
4203 TCGv_i64 t
= tcg_temp_new_i64();
4204 TCGv_i64 t4
= tcg_const_i64(4);
4205 TCGv_i64 t32
= tcg_const_i64(32);
4208 tcg_gen_shl_i64(t
, regs
[r1
], t32
);
4209 tcg_gen_qemu_st32(t
, o
->in2
, get_mem_index(s
));
4213 tcg_gen_add_i64(o
->in2
, o
->in2
, t4
);
4217 tcg_temp_free_i64(t
);
4218 tcg_temp_free_i64(t4
);
4219 tcg_temp_free_i64(t32
);
4223 static ExitStatus
op_stpq(DisasContext
*s
, DisasOps
*o
)
4225 gen_helper_stpq(cpu_env
, o
->in2
, o
->out2
, o
->out
);
4229 static ExitStatus
op_srst(DisasContext
*s
, DisasOps
*o
)
4231 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4232 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
4234 gen_helper_srst(cpu_env
, r1
, r2
);
4236 tcg_temp_free_i32(r1
);
4237 tcg_temp_free_i32(r2
);
4242 static ExitStatus
op_srstu(DisasContext
*s
, DisasOps
*o
)
4244 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4245 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
4247 gen_helper_srstu(cpu_env
, r1
, r2
);
4249 tcg_temp_free_i32(r1
);
4250 tcg_temp_free_i32(r2
);
4255 static ExitStatus
op_sub(DisasContext
*s
, DisasOps
*o
)
4257 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
4261 static ExitStatus
op_subb(DisasContext
*s
, DisasOps
*o
)
4266 tcg_gen_sub_i64(o
->out
, o
->in1
, o
->in2
);
4268 /* The !borrow flag is the msb of CC. Since we want the inverse of
4269 that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
4270 disas_jcc(s
, &cmp
, 8 | 4);
4271 borrow
= tcg_temp_new_i64();
4273 tcg_gen_setcond_i64(cmp
.cond
, borrow
, cmp
.u
.s64
.a
, cmp
.u
.s64
.b
);
4275 TCGv_i32 t
= tcg_temp_new_i32();
4276 tcg_gen_setcond_i32(cmp
.cond
, t
, cmp
.u
.s32
.a
, cmp
.u
.s32
.b
);
4277 tcg_gen_extu_i32_i64(borrow
, t
);
4278 tcg_temp_free_i32(t
);
4282 tcg_gen_sub_i64(o
->out
, o
->out
, borrow
);
4283 tcg_temp_free_i64(borrow
);
4287 static ExitStatus
op_svc(DisasContext
*s
, DisasOps
*o
)
4294 t
= tcg_const_i32(get_field(s
->fields
, i1
) & 0xff);
4295 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_code
));
4296 tcg_temp_free_i32(t
);
4298 t
= tcg_const_i32(s
->ilen
);
4299 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUS390XState
, int_svc_ilen
));
4300 tcg_temp_free_i32(t
);
4302 gen_exception(EXCP_SVC
);
4303 return EXIT_NORETURN
;
4306 static ExitStatus
op_tam(DisasContext
*s
, DisasOps
*o
)
4310 cc
|= (s
->tb
->flags
& FLAG_MASK_64
) ? 2 : 0;
4311 cc
|= (s
->tb
->flags
& FLAG_MASK_32
) ? 1 : 0;
4312 gen_op_movi_cc(s
, cc
);
4316 static ExitStatus
op_tceb(DisasContext
*s
, DisasOps
*o
)
4318 gen_helper_tceb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
4323 static ExitStatus
op_tcdb(DisasContext
*s
, DisasOps
*o
)
4325 gen_helper_tcdb(cc_op
, cpu_env
, o
->in1
, o
->in2
);
4330 static ExitStatus
op_tcxb(DisasContext
*s
, DisasOps
*o
)
4332 gen_helper_tcxb(cc_op
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
4337 #ifndef CONFIG_USER_ONLY
4339 static ExitStatus
op_testblock(DisasContext
*s
, DisasOps
*o
)
4341 check_privileged(s
);
4342 gen_helper_testblock(cc_op
, cpu_env
, o
->in2
);
4347 static ExitStatus
op_tprot(DisasContext
*s
, DisasOps
*o
)
4349 gen_helper_tprot(cc_op
, o
->addr1
, o
->in2
);
4356 static ExitStatus
op_tp(DisasContext
*s
, DisasOps
*o
)
4358 TCGv_i32 l1
= tcg_const_i32(get_field(s
->fields
, l1
) + 1);
4359 gen_helper_tp(cc_op
, cpu_env
, o
->addr1
, l1
);
4360 tcg_temp_free_i32(l1
);
4365 static ExitStatus
op_tr(DisasContext
*s
, DisasOps
*o
)
4367 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4368 gen_helper_tr(cpu_env
, l
, o
->addr1
, o
->in2
);
4369 tcg_temp_free_i32(l
);
4374 static ExitStatus
op_tre(DisasContext
*s
, DisasOps
*o
)
4376 gen_helper_tre(o
->out
, cpu_env
, o
->out
, o
->out2
, o
->in2
);
4377 return_low128(o
->out2
);
4382 static ExitStatus
op_trt(DisasContext
*s
, DisasOps
*o
)
4384 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4385 gen_helper_trt(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
4386 tcg_temp_free_i32(l
);
4391 static ExitStatus
op_trtr(DisasContext
*s
, DisasOps
*o
)
4393 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4394 gen_helper_trtr(cc_op
, cpu_env
, l
, o
->addr1
, o
->in2
);
4395 tcg_temp_free_i32(l
);
4400 static ExitStatus
op_trXX(DisasContext
*s
, DisasOps
*o
)
4402 TCGv_i32 r1
= tcg_const_i32(get_field(s
->fields
, r1
));
4403 TCGv_i32 r2
= tcg_const_i32(get_field(s
->fields
, r2
));
4404 TCGv_i32 sizes
= tcg_const_i32(s
->insn
->opc
& 3);
4405 TCGv_i32 tst
= tcg_temp_new_i32();
4406 int m3
= get_field(s
->fields
, m3
);
4408 if (!s390_has_feat(S390_FEAT_ETF2_ENH
)) {
4412 tcg_gen_movi_i32(tst
, -1);
4414 tcg_gen_extrl_i64_i32(tst
, regs
[0]);
4415 if (s
->insn
->opc
& 3) {
4416 tcg_gen_ext8u_i32(tst
, tst
);
4418 tcg_gen_ext16u_i32(tst
, tst
);
4421 gen_helper_trXX(cc_op
, cpu_env
, r1
, r2
, tst
, sizes
);
4423 tcg_temp_free_i32(r1
);
4424 tcg_temp_free_i32(r2
);
4425 tcg_temp_free_i32(sizes
);
4426 tcg_temp_free_i32(tst
);
4431 static ExitStatus
op_ts(DisasContext
*s
, DisasOps
*o
)
4433 TCGv_i32 t1
= tcg_const_i32(0xff);
4434 tcg_gen_atomic_xchg_i32(t1
, o
->in2
, t1
, get_mem_index(s
), MO_UB
);
4435 tcg_gen_extract_i32(cc_op
, t1
, 7, 1);
4436 tcg_temp_free_i32(t1
);
4441 static ExitStatus
op_unpk(DisasContext
*s
, DisasOps
*o
)
4443 TCGv_i32 l
= tcg_const_i32(get_field(s
->fields
, l1
));
4444 gen_helper_unpk(cpu_env
, l
, o
->addr1
, o
->in2
);
4445 tcg_temp_free_i32(l
);
4449 static ExitStatus
op_unpka(DisasContext
*s
, DisasOps
*o
)
4451 int l1
= get_field(s
->fields
, l1
) + 1;
4454 /* The length must not exceed 32 bytes. */
4456 gen_program_exception(s
, PGM_SPECIFICATION
);
4457 return EXIT_NORETURN
;
4459 l
= tcg_const_i32(l1
);
4460 gen_helper_unpka(cc_op
, cpu_env
, o
->addr1
, l
, o
->in2
);
4461 tcg_temp_free_i32(l
);
4466 static ExitStatus
op_unpku(DisasContext
*s
, DisasOps
*o
)
4468 int l1
= get_field(s
->fields
, l1
) + 1;
4471 /* The length must be even and should not exceed 64 bytes. */
4472 if ((l1
& 1) || (l1
> 64)) {
4473 gen_program_exception(s
, PGM_SPECIFICATION
);
4474 return EXIT_NORETURN
;
4476 l
= tcg_const_i32(l1
);
4477 gen_helper_unpku(cc_op
, cpu_env
, o
->addr1
, l
, o
->in2
);
4478 tcg_temp_free_i32(l
);
4484 static ExitStatus
op_xc(DisasContext
*s
, DisasOps
*o
)
4486 int d1
= get_field(s
->fields
, d1
);
4487 int d2
= get_field(s
->fields
, d2
);
4488 int b1
= get_field(s
->fields
, b1
);
4489 int b2
= get_field(s
->fields
, b2
);
4490 int l
= get_field(s
->fields
, l1
);
4493 o
->addr1
= get_address(s
, 0, b1
, d1
);
4495 /* If the addresses are identical, this is a store/memset of zero. */
4496 if (b1
== b2
&& d1
== d2
&& (l
+ 1) <= 32) {
4497 o
->in2
= tcg_const_i64(0);
4501 tcg_gen_qemu_st64(o
->in2
, o
->addr1
, get_mem_index(s
));
4504 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 8);
4508 tcg_gen_qemu_st32(o
->in2
, o
->addr1
, get_mem_index(s
));
4511 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 4);
4515 tcg_gen_qemu_st16(o
->in2
, o
->addr1
, get_mem_index(s
));
4518 tcg_gen_addi_i64(o
->addr1
, o
->addr1
, 2);
4522 tcg_gen_qemu_st8(o
->in2
, o
->addr1
, get_mem_index(s
));
4524 gen_op_movi_cc(s
, 0);
4528 /* But in general we'll defer to a helper. */
4529 o
->in2
= get_address(s
, 0, b2
, d2
);
4530 t32
= tcg_const_i32(l
);
4531 gen_helper_xc(cc_op
, cpu_env
, t32
, o
->addr1
, o
->in2
);
4532 tcg_temp_free_i32(t32
);
4537 static ExitStatus
op_xor(DisasContext
*s
, DisasOps
*o
)
4539 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
4543 static ExitStatus
op_xori(DisasContext
*s
, DisasOps
*o
)
4545 int shift
= s
->insn
->data
& 0xff;
4546 int size
= s
->insn
->data
>> 8;
4547 uint64_t mask
= ((1ull << size
) - 1) << shift
;
4550 tcg_gen_shli_i64(o
->in2
, o
->in2
, shift
);
4551 tcg_gen_xor_i64(o
->out
, o
->in1
, o
->in2
);
4553 /* Produce the CC from only the bits manipulated. */
4554 tcg_gen_andi_i64(cc_dst
, o
->out
, mask
);
4555 set_cc_nz_u64(s
, cc_dst
);
4559 static ExitStatus
op_zero(DisasContext
*s
, DisasOps
*o
)
4561 o
->out
= tcg_const_i64(0);
4565 static ExitStatus
op_zero2(DisasContext
*s
, DisasOps
*o
)
4567 o
->out
= tcg_const_i64(0);
4573 /* ====================================================================== */
4574 /* The "Cc OUTput" generators. Given the generated output (and in some cases
4575 the original inputs), update the various cc data structures in order to
4576 be able to compute the new condition code. */
4578 static void cout_abs32(DisasContext
*s
, DisasOps
*o
)
4580 gen_op_update1_cc_i64(s
, CC_OP_ABS_32
, o
->out
);
4583 static void cout_abs64(DisasContext
*s
, DisasOps
*o
)
4585 gen_op_update1_cc_i64(s
, CC_OP_ABS_64
, o
->out
);
4588 static void cout_adds32(DisasContext
*s
, DisasOps
*o
)
4590 gen_op_update3_cc_i64(s
, CC_OP_ADD_32
, o
->in1
, o
->in2
, o
->out
);
4593 static void cout_adds64(DisasContext
*s
, DisasOps
*o
)
4595 gen_op_update3_cc_i64(s
, CC_OP_ADD_64
, o
->in1
, o
->in2
, o
->out
);
4598 static void cout_addu32(DisasContext
*s
, DisasOps
*o
)
4600 gen_op_update3_cc_i64(s
, CC_OP_ADDU_32
, o
->in1
, o
->in2
, o
->out
);
4603 static void cout_addu64(DisasContext
*s
, DisasOps
*o
)
4605 gen_op_update3_cc_i64(s
, CC_OP_ADDU_64
, o
->in1
, o
->in2
, o
->out
);
4608 static void cout_addc32(DisasContext
*s
, DisasOps
*o
)
4610 gen_op_update3_cc_i64(s
, CC_OP_ADDC_32
, o
->in1
, o
->in2
, o
->out
);
4613 static void cout_addc64(DisasContext
*s
, DisasOps
*o
)
4615 gen_op_update3_cc_i64(s
, CC_OP_ADDC_64
, o
->in1
, o
->in2
, o
->out
);
4618 static void cout_cmps32(DisasContext
*s
, DisasOps
*o
)
4620 gen_op_update2_cc_i64(s
, CC_OP_LTGT_32
, o
->in1
, o
->in2
);
4623 static void cout_cmps64(DisasContext
*s
, DisasOps
*o
)
4625 gen_op_update2_cc_i64(s
, CC_OP_LTGT_64
, o
->in1
, o
->in2
);
4628 static void cout_cmpu32(DisasContext
*s
, DisasOps
*o
)
4630 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_32
, o
->in1
, o
->in2
);
4633 static void cout_cmpu64(DisasContext
*s
, DisasOps
*o
)
4635 gen_op_update2_cc_i64(s
, CC_OP_LTUGTU_64
, o
->in1
, o
->in2
);
4638 static void cout_f32(DisasContext
*s
, DisasOps
*o
)
4640 gen_op_update1_cc_i64(s
, CC_OP_NZ_F32
, o
->out
);
4643 static void cout_f64(DisasContext
*s
, DisasOps
*o
)
4645 gen_op_update1_cc_i64(s
, CC_OP_NZ_F64
, o
->out
);
4648 static void cout_f128(DisasContext
*s
, DisasOps
*o
)
4650 gen_op_update2_cc_i64(s
, CC_OP_NZ_F128
, o
->out
, o
->out2
);
4653 static void cout_nabs32(DisasContext
*s
, DisasOps
*o
)
4655 gen_op_update1_cc_i64(s
, CC_OP_NABS_32
, o
->out
);
4658 static void cout_nabs64(DisasContext
*s
, DisasOps
*o
)
4660 gen_op_update1_cc_i64(s
, CC_OP_NABS_64
, o
->out
);
4663 static void cout_neg32(DisasContext
*s
, DisasOps
*o
)
4665 gen_op_update1_cc_i64(s
, CC_OP_COMP_32
, o
->out
);
4668 static void cout_neg64(DisasContext
*s
, DisasOps
*o
)
4670 gen_op_update1_cc_i64(s
, CC_OP_COMP_64
, o
->out
);
4673 static void cout_nz32(DisasContext
*s
, DisasOps
*o
)
4675 tcg_gen_ext32u_i64(cc_dst
, o
->out
);
4676 gen_op_update1_cc_i64(s
, CC_OP_NZ
, cc_dst
);
4679 static void cout_nz64(DisasContext
*s
, DisasOps
*o
)
4681 gen_op_update1_cc_i64(s
, CC_OP_NZ
, o
->out
);
4684 static void cout_s32(DisasContext
*s
, DisasOps
*o
)
4686 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_32
, o
->out
);
4689 static void cout_s64(DisasContext
*s
, DisasOps
*o
)
4691 gen_op_update1_cc_i64(s
, CC_OP_LTGT0_64
, o
->out
);
4694 static void cout_subs32(DisasContext
*s
, DisasOps
*o
)
4696 gen_op_update3_cc_i64(s
, CC_OP_SUB_32
, o
->in1
, o
->in2
, o
->out
);
4699 static void cout_subs64(DisasContext
*s
, DisasOps
*o
)
4701 gen_op_update3_cc_i64(s
, CC_OP_SUB_64
, o
->in1
, o
->in2
, o
->out
);
4704 static void cout_subu32(DisasContext
*s
, DisasOps
*o
)
4706 gen_op_update3_cc_i64(s
, CC_OP_SUBU_32
, o
->in1
, o
->in2
, o
->out
);
4709 static void cout_subu64(DisasContext
*s
, DisasOps
*o
)
4711 gen_op_update3_cc_i64(s
, CC_OP_SUBU_64
, o
->in1
, o
->in2
, o
->out
);
4714 static void cout_subb32(DisasContext
*s
, DisasOps
*o
)
4716 gen_op_update3_cc_i64(s
, CC_OP_SUBB_32
, o
->in1
, o
->in2
, o
->out
);
4719 static void cout_subb64(DisasContext
*s
, DisasOps
*o
)
4721 gen_op_update3_cc_i64(s
, CC_OP_SUBB_64
, o
->in1
, o
->in2
, o
->out
);
4724 static void cout_tm32(DisasContext
*s
, DisasOps
*o
)
4726 gen_op_update2_cc_i64(s
, CC_OP_TM_32
, o
->in1
, o
->in2
);
4729 static void cout_tm64(DisasContext
*s
, DisasOps
*o
)
4731 gen_op_update2_cc_i64(s
, CC_OP_TM_64
, o
->in1
, o
->in2
);
4734 /* ====================================================================== */
4735 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
4736 with the TCG register to which we will write. Used in combination with
4737 the "wout" generators, in some cases we need a new temporary, and in
4738 some cases we can write to a TCG global. */
4740 static void prep_new(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4742 o
->out
= tcg_temp_new_i64();
4744 #define SPEC_prep_new 0
4746 static void prep_new_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4748 o
->out
= tcg_temp_new_i64();
4749 o
->out2
= tcg_temp_new_i64();
4751 #define SPEC_prep_new_P 0
4753 static void prep_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4755 o
->out
= regs
[get_field(f
, r1
)];
4758 #define SPEC_prep_r1 0
4760 static void prep_r1_P(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4762 int r1
= get_field(f
, r1
);
4764 o
->out2
= regs
[r1
+ 1];
4765 o
->g_out
= o
->g_out2
= true;
4767 #define SPEC_prep_r1_P SPEC_r1_even
4769 static void prep_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4771 o
->out
= fregs
[get_field(f
, r1
)];
4774 #define SPEC_prep_f1 0
4776 static void prep_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4778 int r1
= get_field(f
, r1
);
4780 o
->out2
= fregs
[r1
+ 2];
4781 o
->g_out
= o
->g_out2
= true;
4783 #define SPEC_prep_x1 SPEC_r1_f128
4785 /* ====================================================================== */
4786 /* The "Write OUTput" generators. These generally perform some non-trivial
4787 copy of data to TCG globals, or to main memory. The trivial cases are
4788 generally handled by having a "prep" generator install the TCG global
4789 as the destination of the operation. */
4791 static void wout_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4793 store_reg(get_field(f
, r1
), o
->out
);
4795 #define SPEC_wout_r1 0
4797 static void wout_r1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4799 int r1
= get_field(f
, r1
);
4800 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 8);
4802 #define SPEC_wout_r1_8 0
4804 static void wout_r1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4806 int r1
= get_field(f
, r1
);
4807 tcg_gen_deposit_i64(regs
[r1
], regs
[r1
], o
->out
, 0, 16);
4809 #define SPEC_wout_r1_16 0
4811 static void wout_r1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4813 store_reg32_i64(get_field(f
, r1
), o
->out
);
4815 #define SPEC_wout_r1_32 0
4817 static void wout_r1_32h(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4819 store_reg32h_i64(get_field(f
, r1
), o
->out
);
4821 #define SPEC_wout_r1_32h 0
4823 static void wout_r1_P32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4825 int r1
= get_field(f
, r1
);
4826 store_reg32_i64(r1
, o
->out
);
4827 store_reg32_i64(r1
+ 1, o
->out2
);
4829 #define SPEC_wout_r1_P32 SPEC_r1_even
4831 static void wout_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4833 int r1
= get_field(f
, r1
);
4834 store_reg32_i64(r1
+ 1, o
->out
);
4835 tcg_gen_shri_i64(o
->out
, o
->out
, 32);
4836 store_reg32_i64(r1
, o
->out
);
4838 #define SPEC_wout_r1_D32 SPEC_r1_even
4840 static void wout_r3_P32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4842 int r3
= get_field(f
, r3
);
4843 store_reg32_i64(r3
, o
->out
);
4844 store_reg32_i64(r3
+ 1, o
->out2
);
4846 #define SPEC_wout_r3_P32 SPEC_r3_even
4848 static void wout_r3_P64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4850 int r3
= get_field(f
, r3
);
4851 store_reg(r3
, o
->out
);
4852 store_reg(r3
+ 1, o
->out2
);
4854 #define SPEC_wout_r3_P64 SPEC_r3_even
4856 static void wout_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4858 store_freg32_i64(get_field(f
, r1
), o
->out
);
4860 #define SPEC_wout_e1 0
4862 static void wout_f1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4864 store_freg(get_field(f
, r1
), o
->out
);
4866 #define SPEC_wout_f1 0
4868 static void wout_x1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4870 int f1
= get_field(s
->fields
, r1
);
4871 store_freg(f1
, o
->out
);
4872 store_freg(f1
+ 2, o
->out2
);
4874 #define SPEC_wout_x1 SPEC_r1_f128
4876 static void wout_cond_r1r2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4878 if (get_field(f
, r1
) != get_field(f
, r2
)) {
4879 store_reg32_i64(get_field(f
, r1
), o
->out
);
4882 #define SPEC_wout_cond_r1r2_32 0
4884 static void wout_cond_e1e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4886 if (get_field(f
, r1
) != get_field(f
, r2
)) {
4887 store_freg32_i64(get_field(f
, r1
), o
->out
);
4890 #define SPEC_wout_cond_e1e2 0
4892 static void wout_m1_8(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4894 tcg_gen_qemu_st8(o
->out
, o
->addr1
, get_mem_index(s
));
4896 #define SPEC_wout_m1_8 0
4898 static void wout_m1_16(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4900 tcg_gen_qemu_st16(o
->out
, o
->addr1
, get_mem_index(s
));
4902 #define SPEC_wout_m1_16 0
4904 static void wout_m1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4906 tcg_gen_qemu_st32(o
->out
, o
->addr1
, get_mem_index(s
));
4908 #define SPEC_wout_m1_32 0
4910 static void wout_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4912 tcg_gen_qemu_st64(o
->out
, o
->addr1
, get_mem_index(s
));
4914 #define SPEC_wout_m1_64 0
4916 static void wout_m2_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4918 tcg_gen_qemu_st32(o
->out
, o
->in2
, get_mem_index(s
));
4920 #define SPEC_wout_m2_32 0
4922 static void wout_in2_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4924 store_reg(get_field(f
, r1
), o
->in2
);
4926 #define SPEC_wout_in2_r1 0
4928 static void wout_in2_r1_32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4930 store_reg32_i64(get_field(f
, r1
), o
->in2
);
4932 #define SPEC_wout_in2_r1_32 0
4934 /* ====================================================================== */
4935 /* The "INput 1" generators. These load the first operand to an insn. */
4937 static void in1_r1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4939 o
->in1
= load_reg(get_field(f
, r1
));
4941 #define SPEC_in1_r1 0
4943 static void in1_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4945 o
->in1
= regs
[get_field(f
, r1
)];
4948 #define SPEC_in1_r1_o 0
4950 static void in1_r1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4952 o
->in1
= tcg_temp_new_i64();
4953 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r1
)]);
4955 #define SPEC_in1_r1_32s 0
4957 static void in1_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4959 o
->in1
= tcg_temp_new_i64();
4960 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
)]);
4962 #define SPEC_in1_r1_32u 0
4964 static void in1_r1_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4966 o
->in1
= tcg_temp_new_i64();
4967 tcg_gen_shri_i64(o
->in1
, regs
[get_field(f
, r1
)], 32);
4969 #define SPEC_in1_r1_sr32 0
4971 static void in1_r1p1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4973 o
->in1
= load_reg(get_field(f
, r1
) + 1);
4975 #define SPEC_in1_r1p1 SPEC_r1_even
4977 static void in1_r1p1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4979 o
->in1
= tcg_temp_new_i64();
4980 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r1
) + 1]);
4982 #define SPEC_in1_r1p1_32s SPEC_r1_even
4984 static void in1_r1p1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4986 o
->in1
= tcg_temp_new_i64();
4987 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r1
) + 1]);
4989 #define SPEC_in1_r1p1_32u SPEC_r1_even
4991 static void in1_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
4993 int r1
= get_field(f
, r1
);
4994 o
->in1
= tcg_temp_new_i64();
4995 tcg_gen_concat32_i64(o
->in1
, regs
[r1
+ 1], regs
[r1
]);
4997 #define SPEC_in1_r1_D32 SPEC_r1_even
4999 static void in1_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5001 o
->in1
= load_reg(get_field(f
, r2
));
5003 #define SPEC_in1_r2 0
5005 static void in1_r2_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5007 o
->in1
= tcg_temp_new_i64();
5008 tcg_gen_shri_i64(o
->in1
, regs
[get_field(f
, r2
)], 32);
5010 #define SPEC_in1_r2_sr32 0
5012 static void in1_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5014 o
->in1
= load_reg(get_field(f
, r3
));
5016 #define SPEC_in1_r3 0
5018 static void in1_r3_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5020 o
->in1
= regs
[get_field(f
, r3
)];
5023 #define SPEC_in1_r3_o 0
5025 static void in1_r3_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5027 o
->in1
= tcg_temp_new_i64();
5028 tcg_gen_ext32s_i64(o
->in1
, regs
[get_field(f
, r3
)]);
5030 #define SPEC_in1_r3_32s 0
5032 static void in1_r3_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5034 o
->in1
= tcg_temp_new_i64();
5035 tcg_gen_ext32u_i64(o
->in1
, regs
[get_field(f
, r3
)]);
5037 #define SPEC_in1_r3_32u 0
5039 static void in1_r3_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5041 int r3
= get_field(f
, r3
);
5042 o
->in1
= tcg_temp_new_i64();
5043 tcg_gen_concat32_i64(o
->in1
, regs
[r3
+ 1], regs
[r3
]);
5045 #define SPEC_in1_r3_D32 SPEC_r3_even
5047 static void in1_e1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5049 o
->in1
= load_freg32_i64(get_field(f
, r1
));
5051 #define SPEC_in1_e1 0
5053 static void in1_f1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5055 o
->in1
= fregs
[get_field(f
, r1
)];
5058 #define SPEC_in1_f1_o 0
5060 static void in1_x1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5062 int r1
= get_field(f
, r1
);
5064 o
->out2
= fregs
[r1
+ 2];
5065 o
->g_out
= o
->g_out2
= true;
5067 #define SPEC_in1_x1_o SPEC_r1_f128
5069 static void in1_f3_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5071 o
->in1
= fregs
[get_field(f
, r3
)];
5074 #define SPEC_in1_f3_o 0
5076 static void in1_la1(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5078 o
->addr1
= get_address(s
, 0, get_field(f
, b1
), get_field(f
, d1
));
5080 #define SPEC_in1_la1 0
5082 static void in1_la2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5084 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
5085 o
->addr1
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
5087 #define SPEC_in1_la2 0
5089 static void in1_m1_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5092 o
->in1
= tcg_temp_new_i64();
5093 tcg_gen_qemu_ld8u(o
->in1
, o
->addr1
, get_mem_index(s
));
5095 #define SPEC_in1_m1_8u 0
5097 static void in1_m1_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5100 o
->in1
= tcg_temp_new_i64();
5101 tcg_gen_qemu_ld16s(o
->in1
, o
->addr1
, get_mem_index(s
));
5103 #define SPEC_in1_m1_16s 0
5105 static void in1_m1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5108 o
->in1
= tcg_temp_new_i64();
5109 tcg_gen_qemu_ld16u(o
->in1
, o
->addr1
, get_mem_index(s
));
5111 #define SPEC_in1_m1_16u 0
5113 static void in1_m1_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5116 o
->in1
= tcg_temp_new_i64();
5117 tcg_gen_qemu_ld32s(o
->in1
, o
->addr1
, get_mem_index(s
));
5119 #define SPEC_in1_m1_32s 0
5121 static void in1_m1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5124 o
->in1
= tcg_temp_new_i64();
5125 tcg_gen_qemu_ld32u(o
->in1
, o
->addr1
, get_mem_index(s
));
5127 #define SPEC_in1_m1_32u 0
5129 static void in1_m1_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5132 o
->in1
= tcg_temp_new_i64();
5133 tcg_gen_qemu_ld64(o
->in1
, o
->addr1
, get_mem_index(s
));
5135 #define SPEC_in1_m1_64 0
5137 /* ====================================================================== */
5138 /* The "INput 2" generators. These load the second operand to an insn. */
5140 static void in2_r1_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5142 o
->in2
= regs
[get_field(f
, r1
)];
5145 #define SPEC_in2_r1_o 0
5147 static void in2_r1_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5149 o
->in2
= tcg_temp_new_i64();
5150 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
5152 #define SPEC_in2_r1_16u 0
5154 static void in2_r1_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5156 o
->in2
= tcg_temp_new_i64();
5157 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r1
)]);
5159 #define SPEC_in2_r1_32u 0
5161 static void in2_r1_D32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5163 int r1
= get_field(f
, r1
);
5164 o
->in2
= tcg_temp_new_i64();
5165 tcg_gen_concat32_i64(o
->in2
, regs
[r1
+ 1], regs
[r1
]);
5167 #define SPEC_in2_r1_D32 SPEC_r1_even
5169 static void in2_r2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5171 o
->in2
= load_reg(get_field(f
, r2
));
5173 #define SPEC_in2_r2 0
5175 static void in2_r2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5177 o
->in2
= regs
[get_field(f
, r2
)];
5180 #define SPEC_in2_r2_o 0
5182 static void in2_r2_nz(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5184 int r2
= get_field(f
, r2
);
5186 o
->in2
= load_reg(r2
);
5189 #define SPEC_in2_r2_nz 0
5191 static void in2_r2_8s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5193 o
->in2
= tcg_temp_new_i64();
5194 tcg_gen_ext8s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5196 #define SPEC_in2_r2_8s 0
5198 static void in2_r2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5200 o
->in2
= tcg_temp_new_i64();
5201 tcg_gen_ext8u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5203 #define SPEC_in2_r2_8u 0
5205 static void in2_r2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5207 o
->in2
= tcg_temp_new_i64();
5208 tcg_gen_ext16s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5210 #define SPEC_in2_r2_16s 0
5212 static void in2_r2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5214 o
->in2
= tcg_temp_new_i64();
5215 tcg_gen_ext16u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5217 #define SPEC_in2_r2_16u 0
5219 static void in2_r3(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5221 o
->in2
= load_reg(get_field(f
, r3
));
5223 #define SPEC_in2_r3 0
5225 static void in2_r3_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5227 o
->in2
= tcg_temp_new_i64();
5228 tcg_gen_shri_i64(o
->in2
, regs
[get_field(f
, r3
)], 32);
5230 #define SPEC_in2_r3_sr32 0
5232 static void in2_r2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5234 o
->in2
= tcg_temp_new_i64();
5235 tcg_gen_ext32s_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5237 #define SPEC_in2_r2_32s 0
5239 static void in2_r2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5241 o
->in2
= tcg_temp_new_i64();
5242 tcg_gen_ext32u_i64(o
->in2
, regs
[get_field(f
, r2
)]);
5244 #define SPEC_in2_r2_32u 0
5246 static void in2_r2_sr32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5248 o
->in2
= tcg_temp_new_i64();
5249 tcg_gen_shri_i64(o
->in2
, regs
[get_field(f
, r2
)], 32);
5251 #define SPEC_in2_r2_sr32 0
5253 static void in2_e2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5255 o
->in2
= load_freg32_i64(get_field(f
, r2
));
5257 #define SPEC_in2_e2 0
5259 static void in2_f2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5261 o
->in2
= fregs
[get_field(f
, r2
)];
5264 #define SPEC_in2_f2_o 0
5266 static void in2_x2_o(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5268 int r2
= get_field(f
, r2
);
5270 o
->in2
= fregs
[r2
+ 2];
5271 o
->g_in1
= o
->g_in2
= true;
5273 #define SPEC_in2_x2_o SPEC_r2_f128
5275 static void in2_ra2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5277 o
->in2
= get_address(s
, 0, get_field(f
, r2
), 0);
5279 #define SPEC_in2_ra2 0
5281 static void in2_a2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5283 int x2
= have_field(f
, x2
) ? get_field(f
, x2
) : 0;
5284 o
->in2
= get_address(s
, x2
, get_field(f
, b2
), get_field(f
, d2
));
5286 #define SPEC_in2_a2 0
5288 static void in2_ri2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5290 o
->in2
= tcg_const_i64(s
->pc
+ (int64_t)get_field(f
, i2
) * 2);
5292 #define SPEC_in2_ri2 0
5294 static void in2_sh32(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5296 help_l2_shift(s
, f
, o
, 31);
5298 #define SPEC_in2_sh32 0
5300 static void in2_sh64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5302 help_l2_shift(s
, f
, o
, 63);
5304 #define SPEC_in2_sh64 0
5306 static void in2_m2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5309 tcg_gen_qemu_ld8u(o
->in2
, o
->in2
, get_mem_index(s
));
5311 #define SPEC_in2_m2_8u 0
5313 static void in2_m2_16s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5316 tcg_gen_qemu_ld16s(o
->in2
, o
->in2
, get_mem_index(s
));
5318 #define SPEC_in2_m2_16s 0
5320 static void in2_m2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5323 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
5325 #define SPEC_in2_m2_16u 0
5327 static void in2_m2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5330 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
5332 #define SPEC_in2_m2_32s 0
5334 static void in2_m2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5337 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
5339 #define SPEC_in2_m2_32u 0
5341 static void in2_m2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5344 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
5346 #define SPEC_in2_m2_64 0
5348 static void in2_mri2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5351 tcg_gen_qemu_ld16u(o
->in2
, o
->in2
, get_mem_index(s
));
5353 #define SPEC_in2_mri2_16u 0
5355 static void in2_mri2_32s(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5358 tcg_gen_qemu_ld32s(o
->in2
, o
->in2
, get_mem_index(s
));
5360 #define SPEC_in2_mri2_32s 0
5362 static void in2_mri2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5365 tcg_gen_qemu_ld32u(o
->in2
, o
->in2
, get_mem_index(s
));
5367 #define SPEC_in2_mri2_32u 0
5369 static void in2_mri2_64(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5372 tcg_gen_qemu_ld64(o
->in2
, o
->in2
, get_mem_index(s
));
5374 #define SPEC_in2_mri2_64 0
5376 static void in2_i2(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5378 o
->in2
= tcg_const_i64(get_field(f
, i2
));
5380 #define SPEC_in2_i2 0
5382 static void in2_i2_8u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5384 o
->in2
= tcg_const_i64((uint8_t)get_field(f
, i2
));
5386 #define SPEC_in2_i2_8u 0
5388 static void in2_i2_16u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5390 o
->in2
= tcg_const_i64((uint16_t)get_field(f
, i2
));
5392 #define SPEC_in2_i2_16u 0
5394 static void in2_i2_32u(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5396 o
->in2
= tcg_const_i64((uint32_t)get_field(f
, i2
));
5398 #define SPEC_in2_i2_32u 0
5400 static void in2_i2_16u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5402 uint64_t i2
= (uint16_t)get_field(f
, i2
);
5403 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
5405 #define SPEC_in2_i2_16u_shl 0
5407 static void in2_i2_32u_shl(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5409 uint64_t i2
= (uint32_t)get_field(f
, i2
);
5410 o
->in2
= tcg_const_i64(i2
<< s
->insn
->data
);
5412 #define SPEC_in2_i2_32u_shl 0
5414 #ifndef CONFIG_USER_ONLY
5415 static void in2_insn(DisasContext
*s
, DisasFields
*f
, DisasOps
*o
)
5417 o
->in2
= tcg_const_i64(s
->fields
->raw_insn
);
5419 #define SPEC_in2_insn 0
/* NOTE(review): this region is the insn-table scaffolding (C/D macros,
   DisasInsnEnum, the DisasInsn initializer macro, SPEC_*_0 fallbacks,
   facility aliases, insn_info[] and lookup_opc).  The extraction that
   produced this file dropped lines (visible as gaps in the embedded
   original line numbers, e.g. 5436-5438, 5440-5442, 5444, 5451-5453,
   5502-5504, 5507, 5509-5510) — among them the #undef directives, the
   remaining DisasInsn field initializers (.opc/.fmt/.fac/.name/.data),
   the enum/array/switch closers and the lookup_opc default case.  The
   text below is preserved byte-for-byte; do NOT compile as-is — restore
   the missing lines from upstream before use. */
5422 /* ====================================================================== */
5424 /* Find opc within the table of insns. This is formulated as a switch
5425 statement so that (1) we get compile-time notice of cut-paste errors
5426 for duplicated opcodes, and (2) the compiler generates the binary
5427 search tree, rather than us having to post-process the table. */
5429 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
5430 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
5432 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
5434 enum DisasInsnEnum
{
5435 #include "insn-data.def"
5439 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
5443 .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
5445 .help_in1 = in1_##I1, \
5446 .help_in2 = in2_##I2, \
5447 .help_prep = prep_##P, \
5448 .help_wout = wout_##W, \
5449 .help_cout = cout_##CC, \
5450 .help_op = op_##OP, \
5454 /* Allow 0 to be used for NULL in the table below. */
5462 #define SPEC_in1_0 0
5463 #define SPEC_in2_0 0
5464 #define SPEC_prep_0 0
5465 #define SPEC_wout_0 0
5467 /* Give smaller names to the various facilities. */
5468 #define FAC_Z S390_FEAT_ZARCH
5469 #define FAC_CASS S390_FEAT_COMPARE_AND_SWAP_AND_STORE
5470 #define FAC_DFP S390_FEAT_DFP
5471 #define FAC_DFPR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* DFP-rounding */
5472 #define FAC_DO S390_FEAT_STFLE_45 /* distinct-operands */
5473 #define FAC_EE S390_FEAT_EXECUTE_EXT
5474 #define FAC_EI S390_FEAT_EXTENDED_IMMEDIATE
5475 #define FAC_FPE S390_FEAT_FLOATING_POINT_EXT
5476 #define FAC_FPSSH S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPS-sign-handling */
5477 #define FAC_FPRGR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPR-GR-transfer */
5478 #define FAC_GIE S390_FEAT_GENERAL_INSTRUCTIONS_EXT
5479 #define FAC_HFP_MA S390_FEAT_HFP_MADDSUB
5480 #define FAC_HW S390_FEAT_STFLE_45 /* high-word */
5481 #define FAC_IEEEE_SIM S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* IEEE-exception-simulation */
5482 #define FAC_MIE S390_FEAT_STFLE_49 /* misc-instruction-extensions */
5483 #define FAC_LAT S390_FEAT_STFLE_49 /* load-and-trap */
5484 #define FAC_LOC S390_FEAT_STFLE_45 /* load/store on condition 1 */
5485 #define FAC_LOC2 S390_FEAT_STFLE_53 /* load/store on condition 2 */
5486 #define FAC_LD S390_FEAT_LONG_DISPLACEMENT
5487 #define FAC_PC S390_FEAT_STFLE_45 /* population count */
5488 #define FAC_SCF S390_FEAT_STORE_CLOCK_FAST
5489 #define FAC_SFLE S390_FEAT_STFLE
5490 #define FAC_ILA S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
5491 #define FAC_MVCOS S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
5492 #define FAC_LPP S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
5493 #define FAC_DAT_ENH S390_FEAT_DAT_ENH
5494 #define FAC_E2 S390_FEAT_EXTENDED_TRANSLATION_2
5495 #define FAC_EH S390_FEAT_STFLE_49 /* execution-hint */
5496 #define FAC_PPA S390_FEAT_STFLE_49 /* processor-assist */
5497 #define FAC_LZRB S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
5498 #define FAC_ETF3 S390_FEAT_EXTENDED_TRANSLATION_3
5500 static const DisasInsn insn_info
[] = {
5501 #include "insn-data.def"
5505 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
5506 case OPC: return &insn_info[insn_ ## NM];
5508 static const DisasInsn
*lookup_opc(uint16_t opc
)
5511 #include "insn-data.def"
5520 /* Extract a field from the insn. The INSN should be left-aligned in
5521 the uint64_t so that we can more easily utilize the big-bit-endian
5522 definitions we extract from the Principals of Operation. */
5524 static void extract_field(DisasFields
*o
, const DisasField
*f
, uint64_t insn
)
5532 /* Zero extract the field from the insn. */
5533 r
= (insn
<< f
->beg
) >> (64 - f
->size
);
5535 /* Sign-extend, or un-swap the field as necessary. */
5537 case 0: /* unsigned */
5539 case 1: /* signed */
5540 assert(f
->size
<= 32);
5541 m
= 1u << (f
->size
- 1);
5544 case 2: /* dl+dh split, signed 20 bit. */
5545 r
= ((int8_t)r
<< 12) | (r
>> 8);
5551 /* Validate that the "compressed" encoding we selected above is valid.
5552 I.e. we haven't made two different original fields overlap. */
5553 assert(((o
->presentC
>> f
->indexC
) & 1) == 0);
5554 o
->presentC
|= 1 << f
->indexC
;
5555 o
->presentO
|= 1 << f
->indexO
;
5557 o
->c
[f
->indexC
] = r
;
5560 /* Lookup the insn at the current PC, extracting the operands into O and
5561 returning the info struct for the insn. Returns NULL for invalid insn. */
5563 static const DisasInsn
*extract_insn(CPUS390XState
*env
, DisasContext
*s
,
5566 uint64_t insn
, pc
= s
->pc
;
5568 const DisasInsn
*info
;
5570 if (unlikely(s
->ex_value
)) {
5571 /* Drop the EX data now, so that it's clear on exception paths. */
5572 TCGv_i64 zero
= tcg_const_i64(0);
5573 tcg_gen_st_i64(zero
, cpu_env
, offsetof(CPUS390XState
, ex_value
));
5574 tcg_temp_free_i64(zero
);
5576 /* Extract the values saved by EXECUTE. */
5577 insn
= s
->ex_value
& 0xffffffffffff0000ull
;
5578 ilen
= s
->ex_value
& 0xf;
5581 insn
= ld_code2(env
, pc
);
5582 op
= (insn
>> 8) & 0xff;
5583 ilen
= get_ilen(op
);
5589 insn
= ld_code4(env
, pc
) << 32;
5592 insn
= (insn
<< 48) | (ld_code4(env
, pc
+ 2) << 16);
5595 g_assert_not_reached();
5598 s
->next_pc
= s
->pc
+ ilen
;
5601 /* We can't actually determine the insn format until we've looked up
5602 the full insn opcode. Which we can't do without locating the
5603 secondary opcode. Assume by default that OP2 is at bit 40; for
5604 those smaller insns that don't actually have a secondary opcode
5605 this will correctly result in OP2 = 0. */
5611 case 0xb2: /* S, RRF, RRE, IE */
5612 case 0xb3: /* RRE, RRD, RRF */
5613 case 0xb9: /* RRE, RRF */
5614 case 0xe5: /* SSE, SIL */
5615 op2
= (insn
<< 8) >> 56;
5619 case 0xc0: /* RIL */
5620 case 0xc2: /* RIL */
5621 case 0xc4: /* RIL */
5622 case 0xc6: /* RIL */
5623 case 0xc8: /* SSF */
5624 case 0xcc: /* RIL */
5625 op2
= (insn
<< 12) >> 60;
5627 case 0xc5: /* MII */
5628 case 0xc7: /* SMI */
5629 case 0xd0 ... 0xdf: /* SS */
5635 case 0xee ... 0xf3: /* SS */
5636 case 0xf8 ... 0xfd: /* SS */
5640 op2
= (insn
<< 40) >> 56;
5644 memset(f
, 0, sizeof(*f
));
5649 /* Lookup the instruction. */
5650 info
= lookup_opc(op
<< 8 | op2
);
5652 /* If we found it, extract the operands. */
5654 DisasFormat fmt
= info
->fmt
;
5657 for (i
= 0; i
< NUM_C_FIELD
; ++i
) {
5658 extract_field(f
, &format_info
[fmt
].op
[i
], insn
);
5664 static ExitStatus
translate_one(CPUS390XState
*env
, DisasContext
*s
)
5666 const DisasInsn
*insn
;
5667 ExitStatus ret
= NO_EXIT
;
5671 /* Search for the insn in the table. */
5672 insn
= extract_insn(env
, s
, &f
);
5674 /* Not found means unimplemented/illegal opcode. */
5676 qemu_log_mask(LOG_UNIMP
, "unimplemented opcode 0x%02x%02x\n",
5678 gen_illegal_opcode(s
);
5679 return EXIT_NORETURN
;
5682 #ifndef CONFIG_USER_ONLY
5683 if (s
->tb
->flags
& FLAG_MASK_PER
) {
5684 TCGv_i64 addr
= tcg_const_i64(s
->pc
);
5685 gen_helper_per_ifetch(cpu_env
, addr
);
5686 tcg_temp_free_i64(addr
);
5690 /* Check for insn specification exceptions. */
5692 int spec
= insn
->spec
, excp
= 0, r
;
5694 if (spec
& SPEC_r1_even
) {
5695 r
= get_field(&f
, r1
);
5697 excp
= PGM_SPECIFICATION
;
5700 if (spec
& SPEC_r2_even
) {
5701 r
= get_field(&f
, r2
);
5703 excp
= PGM_SPECIFICATION
;
5706 if (spec
& SPEC_r3_even
) {
5707 r
= get_field(&f
, r3
);
5709 excp
= PGM_SPECIFICATION
;
5712 if (spec
& SPEC_r1_f128
) {
5713 r
= get_field(&f
, r1
);
5715 excp
= PGM_SPECIFICATION
;
5718 if (spec
& SPEC_r2_f128
) {
5719 r
= get_field(&f
, r2
);
5721 excp
= PGM_SPECIFICATION
;
5725 gen_program_exception(s
, excp
);
5726 return EXIT_NORETURN
;
5730 /* Set up the structures we use to communicate with the helpers. */
5733 o
.g_out
= o
.g_out2
= o
.g_in1
= o
.g_in2
= false;
5734 TCGV_UNUSED_I64(o
.out
);
5735 TCGV_UNUSED_I64(o
.out2
);
5736 TCGV_UNUSED_I64(o
.in1
);
5737 TCGV_UNUSED_I64(o
.in2
);
5738 TCGV_UNUSED_I64(o
.addr1
);
5740 /* Implement the instruction. */
5741 if (insn
->help_in1
) {
5742 insn
->help_in1(s
, &f
, &o
);
5744 if (insn
->help_in2
) {
5745 insn
->help_in2(s
, &f
, &o
);
5747 if (insn
->help_prep
) {
5748 insn
->help_prep(s
, &f
, &o
);
5750 if (insn
->help_op
) {
5751 ret
= insn
->help_op(s
, &o
);
5753 if (insn
->help_wout
) {
5754 insn
->help_wout(s
, &f
, &o
);
5756 if (insn
->help_cout
) {
5757 insn
->help_cout(s
, &o
);
5760 /* Free any temporaries created by the helpers. */
5761 if (!TCGV_IS_UNUSED_I64(o
.out
) && !o
.g_out
) {
5762 tcg_temp_free_i64(o
.out
);
5764 if (!TCGV_IS_UNUSED_I64(o
.out2
) && !o
.g_out2
) {
5765 tcg_temp_free_i64(o
.out2
);
5767 if (!TCGV_IS_UNUSED_I64(o
.in1
) && !o
.g_in1
) {
5768 tcg_temp_free_i64(o
.in1
);
5770 if (!TCGV_IS_UNUSED_I64(o
.in2
) && !o
.g_in2
) {
5771 tcg_temp_free_i64(o
.in2
);
5773 if (!TCGV_IS_UNUSED_I64(o
.addr1
)) {
5774 tcg_temp_free_i64(o
.addr1
);
5777 #ifndef CONFIG_USER_ONLY
5778 if (s
->tb
->flags
& FLAG_MASK_PER
) {
5779 /* An exception might be triggered, save PSW if not already done. */
5780 if (ret
== NO_EXIT
|| ret
== EXIT_PC_STALE
) {
5781 tcg_gen_movi_i64(psw_addr
, s
->next_pc
);
5787 /* Call the helper to check for a possible PER exception. */
5788 gen_helper_per_check_exception(cpu_env
);
5792 /* Advance to the next instruction. */
5797 void gen_intermediate_code(CPUState
*cs
, struct TranslationBlock
*tb
)
5799 CPUS390XState
*env
= cs
->env_ptr
;
5801 target_ulong pc_start
;
5802 uint64_t next_page_start
;
5803 int num_insns
, max_insns
;
5810 if (!(tb
->flags
& FLAG_MASK_64
)) {
5811 pc_start
&= 0x7fffffff;
5816 dc
.cc_op
= CC_OP_DYNAMIC
;
5817 dc
.ex_value
= tb
->cs_base
;
5818 do_debug
= dc
.singlestep_enabled
= cs
->singlestep_enabled
;
5820 next_page_start
= (pc_start
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
;
5823 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
5824 if (max_insns
== 0) {
5825 max_insns
= CF_COUNT_MASK
;
5827 if (max_insns
> TCG_MAX_INSNS
) {
5828 max_insns
= TCG_MAX_INSNS
;
5834 tcg_gen_insn_start(dc
.pc
, dc
.cc_op
);
5837 if (unlikely(cpu_breakpoint_test(cs
, dc
.pc
, BP_ANY
))) {
5838 status
= EXIT_PC_STALE
;
5840 /* The address covered by the breakpoint must be included in
5841 [tb->pc, tb->pc + tb->size) in order for it to be
5842 properly cleared -- thus we increment the PC here so that
5843 the logic setting tb->size below does the right thing. */
5848 if (num_insns
== max_insns
&& (tb
->cflags
& CF_LAST_IO
)) {
5852 status
= translate_one(env
, &dc
);
5854 /* If we reach a page boundary, are single stepping,
5855 or exhaust instruction count, stop generation. */
5856 if (status
== NO_EXIT
5857 && (dc
.pc
>= next_page_start
5858 || tcg_op_buf_full()
5859 || num_insns
>= max_insns
5861 || cs
->singlestep_enabled
5863 status
= EXIT_PC_STALE
;
5865 } while (status
== NO_EXIT
);
5867 if (tb
->cflags
& CF_LAST_IO
) {
5876 case EXIT_PC_STALE_NOCHAIN
:
5877 update_psw_addr(&dc
);
5879 case EXIT_PC_UPDATED
:
5880 /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
5881 cc op type is in env */
5884 case EXIT_PC_CC_UPDATED
:
5885 /* Exit the TB, either by raising a debug exception or by return. */
5887 gen_exception(EXCP_DEBUG
);
5888 } else if (use_exit_tb(&dc
) || status
== EXIT_PC_STALE_NOCHAIN
) {
5891 tcg_gen_lookup_and_goto_ptr(psw_addr
);
5895 g_assert_not_reached();
5898 gen_tb_end(tb
, num_insns
);
5900 tb
->size
= dc
.pc
- pc_start
;
5901 tb
->icount
= num_insns
;
5903 #if defined(S390X_DEBUG_DISAS)
5904 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)
5905 && qemu_log_in_addr_range(pc_start
)) {
5907 if (unlikely(dc
.ex_value
)) {
5908 /* ??? Unfortunately log_target_disas can't use host memory. */
5909 qemu_log("IN: EXECUTE %016" PRIx64
"\n", dc
.ex_value
);
5911 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
5912 log_target_disas(cs
, pc_start
, dc
.pc
- pc_start
, 1);
5920 void restore_state_to_opc(CPUS390XState
*env
, TranslationBlock
*tb
,
5923 int cc_op
= data
[1];
5924 env
->psw
.addr
= data
[0];
5925 if ((cc_op
!= CC_OP_DYNAMIC
) && (cc_op
!= CC_OP_STATIC
)) {