/*
 *  S/390 translation
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2010 Alexander Graf
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
/* #define DEBUG_INLINE_BRANCHES */
#define S390X_DEBUG_DISAS
/* #define S390X_DEBUG_DISAS_VERBOSE */

#ifdef S390X_DEBUG_DISAS_VERBOSE
#  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "qemu/log.h"
#include "qemu/host-utils.h"
#include "exec/cpu_ldst.h"
#include "exec/gen-icount.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/translator.h"
/* Information that (most) every instruction needs to manipulate.  */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;

struct DisasContext {
    DisasContextBase base;
    const DisasInsn *insn;
    DisasFields *fields;
    uint64_t ex_value;
    /*
     * During translate_one(), pc_tmp is used to determine the instruction
     * to be executed after base.pc_next - e.g. next sequential instruction
     * or a branch target.
     */
    uint64_t pc_tmp;
    uint32_t ilen;
    enum cc_op cc_op;
};
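/*
 * Illustrative sketch (not a comment from the original source): for a
 * 4-byte insn at base.pc_next == 0x1000, pc_tmp is 0x1004, so a branch
 * whose destination equals pc_tmp can be treated as a fall-through.
 */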
/* Information carried about a condition to be evaluated.  */
typedef struct {
    TCGCond cond:8;
    bool is_64;
    bool g1;
    bool g2;
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;
#ifdef DEBUG_INLINE_BRANCHES
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif
static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
{
    TCGv_i64 tmp;

    if (s->base.tb->flags & FLAG_MASK_32) {
        if (s->base.tb->flags & FLAG_MASK_64) {
            tcg_gen_movi_i64(out, pc);
            return;
        }
        pc |= 0x80000000;
    }
    assert(!(s->base.tb->flags & FLAG_MASK_64));
    tmp = tcg_const_i64(pc);
    tcg_gen_deposit_i64(out, out, tmp, 0, 32);
    tcg_temp_free_i64(tmp);
}
static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;
static TCGv_i64 gbea;

static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

static char cpu_reg_names[32][4];
static TCGv_i64 regs[16];
static TCGv_i64 fregs[16];
void s390x_translate_init(void)
{
    int i;

    psw_addr = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");
    gbea = tcg_global_mem_new_i64(cpu_env,
                                  offsetof(CPUS390XState, gbea),
                                  "gbea");

    cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
        fregs[i] = tcg_global_mem_new(cpu_env,
                                      offsetof(CPUS390XState, vregs[i][0].d),
                                      cpu_reg_names[i + 16]);
    }
}
static TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}

static TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_shri_i64(r, fregs[reg], 32);
    return r;
}

static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(fregs[reg], v);
}

static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
}

static void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}
static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->base.pc_next);
}

static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i64(gbea, s->base.pc_next);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_const_i64(s->pc_tmp) : psw_addr;
        gen_helper_per_branch(cpu_env, gbea, next_pc);
        if (to_next) {
            tcg_temp_free_i64(next_pc);
        }
    }
#endif
}
static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->base.pc_next);
        gen_helper_per_branch(cpu_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        TCGv_i64 pc = tcg_const_i64(s->base.pc_next);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
        tcg_temp_free_i64(pc);
    }
#endif
}

static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->base.pc_next);
}
static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}

static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)cpu_lduw_code(env, pc);
}

static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
}
static int get_mem_index(DisasContext *s)
{
    if (!(s->base.tb->flags & FLAG_MASK_DAT)) {
        return MMU_REAL_IDX;
    }

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_PRIMARY_IDX;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_SECONDARY_IDX;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        return MMU_HOME_IDX;
    default:
        tcg_abort();
        break;
    }
}
static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was.  */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    tmp = tcg_const_i32(s->ilen);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* update the psw */
    update_psw_addr(s);

    /* Save off cc.  */
    update_cc_op(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);
}
static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}

static inline void gen_data_exception(uint8_t dxc)
{
    TCGv_i32 tmp = tcg_const_i32(dxc);
    gen_helper_data_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static inline void gen_trap(DisasContext *s)
{
    /* Set DXC to 0xff */
    gen_data_exception(0xff);
}
static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    bool need_31 = !(s->base.tb->flags & FLAG_MASK_64);

    /* Note that d2 is limited to 20 bits, signed.  If we crop negative
       displacements early we create larger immediate addends.  */

    /* Note that addi optimizes the imm==0 case.  */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        tcg_gen_addi_i64(tmp, tmp, d2);
    } else if (b2) {
        tcg_gen_addi_i64(tmp, regs[b2], d2);
    } else if (x2) {
        tcg_gen_addi_i64(tmp, regs[x2], d2);
    } else {
        if (need_31) {
            d2 &= 0x7fffffff;
            need_31 = false;
        }
        tcg_gen_movi_i64(tmp, d2);
    }
    if (need_31) {
        tcg_gen_andi_i64(tmp, tmp, 0x7fffffff);
    }

    return tmp;
}
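/*
 * Worked example (illustrative): for D2(X2,B2) = 0x20(%r3,%r5) this emits
 * tmp = regs[5] + regs[3] + 0x20; when not in 64-bit mode the result is
 * then masked with 0x7fffffff to honour the 31-bit addressing mode.
 */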
static inline bool live_cc_data(DisasContext *s)
{
    return (s->cc_op != CC_OP_DYNAMIC
            && s->cc_op != CC_OP_STATIC
            && s->cc_op > 3);
}

static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}
static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}
static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
}

static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
}

static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
}

/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}
/* calculates cc into cc_op */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = NULL;
    TCGv_i64 dummy = NULL;

    switch (s->cc_op) {
    default:
        dummy = tcg_const_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        local_cc_op = tcg_const_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        break;
    }

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA_32:
    case CC_OP_SLA_64:
    case CC_OP_NZ_F128:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    if (local_cc_op) {
        tcg_temp_free_i32(local_cc_op);
    }
    if (dummy) {
        tcg_temp_free_i64(dummy);
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
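/*
 * Illustrative note (not from the original): after e.g. "AR r1,r2" the
 * translator only records CC_OP_ADD_32 plus the operands in cc_src/cc_dst/
 * cc_vr; the 0..3 condition code is materialized by gen_op_calc_cc() only
 * when a consumer (BC, IPM, ...) actually needs it.
 */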
static bool use_exit_tb(DisasContext *s)
{
    return s->base.singlestep_enabled ||
            (tb_cflags(s->base.tb) & CF_LAST_IO) ||
            (s->base.tb->flags & FLAG_MASK_PER);
}

static bool use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (unlikely(use_exit_tb(s))) {
        return false;
    }
#ifndef CONFIG_USER_ONLY
    return (dest & TARGET_PAGE_MASK) == (s->base.tb->pc & TARGET_PAGE_MASK) ||
           (dest & TARGET_PAGE_MASK) == (s->base.pc_next & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
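/*
 * Illustrative: a TB may chain directly (goto_tb) only to a destination on
 * the same guest page as the TB start or the current insn - e.g. a short
 * loop within one page; anything else exits to the main loop for lookup.
 */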
static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}
/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};
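/*
 * Indexing sketch (illustrative): branch mask bit 8 selects CC0 (EQ),
 * 4 selects CC1 (LT), 2 selects CC2 (GT); bit 1 (CC3) is a don't-care
 * here.  E.g. mask 10 = 8|2 means "equal or greater", hence TCG_COND_GE.
 */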
/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible.  */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER,
    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE,
    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op.  */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU_32:
    case CC_OP_ADDU_64:
        switch (mask) {
        case 8 | 2: /* vr == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* vr != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* no carry -> vr >= src */
            cond = TCG_COND_GEU;
            break;
        case 2 | 1: /* carry -> vr < src */
            cond = TCG_COND_LTU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_SUBU_32:
    case CC_OP_SUBU_64:
        /* Note that CC=0 is impossible; treat it as don't-care.  */
        switch (mask & 7) {
        case 2: /* zero -> op1 == op2 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* !zero -> op1 != op2 */
            cond = TCG_COND_NE;
            break;
        case 4: /* borrow (!carry) -> op1 < op2 */
            cond = TCG_COND_LTU;
            break;
        case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
            cond = TCG_COND_GEU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
    case CC_OP_SUBU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
    case CC_OP_SUBU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_vr);
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            tcg_gen_movi_i32(c->u.s32.b, 0);
        } else {
            tcg_gen_extrl_i64_i32(c->u.s32.b, cc_src);
        }
        break;

    case CC_OP_ADDU_64:
        c->u.s64.a = cc_vr;
        c->g1 = true;
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            c->u.s64.b = tcg_const_i64(0);
        } else {
            c->u.s64.b = cc_src;
            c->g2 = true;
        }
        break;

    case CC_OP_STATIC:
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask.  */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
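/*
 * Worked example for the generic case above (illustrative): with cc == 1
 * and mask == 0x4, (8 >> cc) & mask == (8 >> 1) & 4 == 4, non-zero, so
 * the branch is taken exactly when the mask bit for the current cc is set.
 */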
static void free_compare(DisasCompare *c)
{
    if (!c->g1) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.a);
        } else {
            tcg_temp_free_i32(c->u.s32.a);
        }
    }
    if (!c->g2) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.b);
        } else {
            tcg_temp_free_i32(c->u.s32.b);
        }
    }
}

/* ====================================================================== */
/* Define the insn format enumeration.  */
#define F0(N)                     FMT_##N,
#define F1(N, X1)                 F0(N)
#define F2(N, X1, X2)             F0(N)
#define F3(N, X1, X2, X3)         F0(N)
#define F4(N, X1, X2, X3, X4)     F0(N)
#define F5(N, X1, X2, X3, X4, X5) F0(N)

typedef enum {
#include "insn-format.def"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5

/* Define a structure to hold the decoded fields.  We'll store each inside
   an array indexed by an enum.  In order to conserve memory, we'll arrange
   for fields that do not exist at the same time to overlap, thus the "C"
   for compact.  For checking purposes there is an "O" for original index
   as well that will be applied to availability bitmaps.  */

enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5
};

enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,

    FLD_C_d2 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,

    NUM_C_FIELD = 7
};

struct DisasFields {
    uint64_t raw_insn;
    unsigned op:8;
    unsigned op2:8;
    unsigned presentC:16;
    unsigned int presentO;
    int c[NUM_C_FIELD];
};

/* This is the way fields are to be accessed out of DisasFields.  */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)

static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
{
    return (f->presentO >> c) & 1;
}

static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(f, o));
    return f->c[c];
}

/* Describe the layout of each field in each format.  */
typedef struct DisasField {
    unsigned int beg:8;
    unsigned int size:8;
    unsigned int type:2;
    unsigned int indexC:6;
    enum DisasFieldIndexO indexO:8;
} DisasField;

typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;

#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }

#define F0(N)                     { { } },
#define F1(N, X1)                 { { X1 } },
#define F2(N, X1, X2)             { { X1, X2 } },
#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.def"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef R
#undef M
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L

/* Generally, we'll extract operands into these structures, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */
typedef struct {
    bool g_out, g_out2, g_in1, g_in2;
    TCGv_i64 out, out2, in1, in2;
    TCGv_i64 addr1;
} DisasOps;

/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */

#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16
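/* Illustrative: an insn declaring SPEC_r1_even | SPEC_r2_f128 is expected
   to raise a specification exception when r1 is odd or r2 does not name a
   valid 128-bit register pair; the check itself is performed centrally by
   the decoder rather than in each helper.  */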
/* Return values from translate_one, indicating the state of the TB.  */

/* We are not using a goto_tb (for whatever reason), but have updated
   the PC (for whatever reason), so there's no need to do it again on
   exiting the TB.  */
#define DISAS_PC_UPDATED        DISAS_TARGET_0

/* We have emitted one or more goto_tb.  No fixup required.  */
#define DISAS_GOTO_TB           DISAS_TARGET_1

/* We have updated the PC and CC values.  */
#define DISAS_PC_CC_UPDATED     DISAS_TARGET_2

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the PC for the next instruction to be executed.  */
#define DISAS_PC_STALE          DISAS_TARGET_3

/* We are exiting the TB to the main loop.  */
#define DISAS_PC_STALE_NOCHAIN  DISAS_TARGET_4

/* Instruction flags */
#define IF_AFP1     0x0001      /* r1 is a fp reg for HFP/FPS instructions */
#define IF_AFP2     0x0002      /* r2 is a fp reg for HFP/FPS instructions */
#define IF_AFP3     0x0004      /* r3 is a fp reg for HFP/FPS instructions */
#define IF_BFP      0x0008      /* binary floating point instruction */
#define IF_DFP      0x0010      /* decimal floating point instruction */
#define IF_PRIV     0x0020      /* privileged instruction */

struct DisasInsn {
    unsigned opc:16;
    unsigned flags:16;
    DisasFormat fmt:8;
    unsigned fac:8;
    unsigned spec:8;

    const char *name;

    void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);
    DisasJumpType (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;
};

/* ====================================================================== */
/* Miscellaneous helpers, used by several operations.  */

static void help_l2_shift(DisasContext *s, DisasFields *f,
                          DisasOps *o, int mask)
{
    int b2 = get_field(f, b2);
    int d2 = get_field(f, d2);

    if (b2 == 0) {
        o->in2 = tcg_const_i64(d2 & mask);
    } else {
        o->in2 = get_address(s, 0, b2, d2);
        tcg_gen_andi_i64(o->in2, o->in2, mask);
    }
}

static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->pc_tmp) {
        per_branch(s, true);
        return DISAS_NEXT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        per_breaking_event(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb(s->base.tb, 0);
        return DISAS_GOTO_TB;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    }
}
static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
                                 bool is_imm, int imm, TCGv_i64 cdest)
{
    DisasJumpType ret;
    uint64_t dest = s->base.pc_next + 2 * imm;
    TCGLabel *lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = DISAS_NEXT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->pc_tmp) {
            /* Branch to next.  */
            per_branch(s, true);
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (!cdest) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = DISAS_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->pc_tmp)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            /* Branch taken.  */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb(s->base.tb, 1);

            ret = DISAS_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = DISAS_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_const_i64(s->pc_tmp);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = DISAS_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}
/* ====================================================================== */
/* The operations.  These perform the bulk of the work for any insn,
   usually after the operands have been loaded and output initialized.  */

static DisasJumpType op_abs(DisasContext *s, DisasOps *o)
{
    TCGv_i64 z, n;
    z = tcg_const_i64(0);
    n = tcg_temp_new_i64();
    tcg_gen_neg_i64(n, o->in2);
    tcg_gen_movcond_i64(TCG_COND_LT, o->out, o->in2, z, n, o->in2);
    tcg_temp_free_i64(n);
    tcg_temp_free_i64(z);
    return DISAS_NEXT;
}

static DisasJumpType op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
static DisasJumpType op_addc(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 carry;

    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* The carry flag is the msb of CC, therefore the branch mask that would
       create that comparison is 3.  Feeding the generated comparison to
       setcond produces the carry flag that we desire.  */
    disas_jcc(s, &cmp, 3);
    carry = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(carry, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_add_i64(o->out, o->out, carry);
    tcg_temp_free_i64(carry);
    return DISAS_NEXT;
}
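/*
 * Illustrative: CC values 2 and 3 are exactly those with the carry (msb)
 * bit set, and branch mask 3 = 0b0011 selects CC2 and CC3, so the setcond
 * above computes the incoming carry for ADD LOGICAL WITH CARRY.
 */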
static DisasJumpType op_asi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_STFLE_45)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_STFLE_45)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
static DisasJumpType op_ni(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_bas(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}
static void save_link_info(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t;

    if (s->base.tb->flags & (FLAG_MASK_32 | FLAG_MASK_64)) {
        pc_to_link_info(o->out, s, s->pc_tmp);
        return;
    }
    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, 0xffffffff00000000ull);
    tcg_gen_ori_i64(o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp);
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 16);
    tcg_gen_andi_i64(t, t, 0x0f000000);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_gen_extu_i32_i64(t, cc_op);
    tcg_gen_shli_i64(t, t, 28);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);
}
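/*
 * Layout sketch of the 24-bit-mode link word assembled above (illustrative):
 * within the low 32 bits of r1, bits 30-31 hold the ILC (ilen / 2), bits
 * 28-29 the CC, bits 24-27 the program mask, and bits 0-23 the address.
 */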
static DisasJumpType op_bal(DisasContext *s, DisasOps *o)
{
    save_link_info(s, o);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}

static DisasJumpType op_basi(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    return help_goto_direct(s, s->base.pc_next + 2 * get_field(s->fields, i2));
}

static DisasJumpType op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s->fields, m1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    /* BCR with R2 = 0 causes no branching */
    if (have_field(s->fields, r2) && get_field(s->fields, r2) == 0) {
        if (m1 == 14) {
            /* Perform serialization */
            /* FIXME: check for fast-BCR-serialization facility */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        if (m1 == 15) {
            /* Perform serialization */
            /* FIXME: perform checkpoint-synchronisation */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        return DISAS_NEXT;
    }

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int imm = get_field(s->fields, i2);
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, regs[r1], 32);
    tcg_gen_subi_i64(t, t, 1);
    store_reg32h_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, 1, imm, o->in2);
}

static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    if (r1 == (r3 | 1)) {
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}
static DisasJumpType op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s->fields, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s->fields, i4);
    if (is_imm) {
        imm = get_field(s->fields, i4);
    } else {
        imm = 0;
        o->out = get_address(s, 0, get_field(s->fields, b4),
                             get_field(s->fields, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}
static DisasJumpType op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cegb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return DISAS_NEXT;
}

static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return DISAS_NEXT;
}

static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_celgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return DISAS_NEXT;
}

static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return DISAS_NEXT;
}

static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return DISAS_NEXT;
}
static DisasJumpType op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    return_low128(o->out);

    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);

    return DISAS_NEXT;
}

static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s->fields, l1);
    TCGv_i32 vl;

    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return DISAS_NEXT;
    }
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return DISAS_NEXT;
}
static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i32 t1, t2;

    /* r1 and r2 must be even.  */
    if (r1 & 1 || r2 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t2 = tcg_const_i32(r2);
    gen_helper_clcl(cc_op, cpu_env, t1, t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clcle(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even.  */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clclu(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even.  */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, o->in1);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(m3);
    return DISAS_NEXT;
}
static DisasJumpType op_clst(DisasContext *s, DisasOps *o)
{
    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cps(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t = tcg_temp_new_i64();
    tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);
    return DISAS_NEXT;
}

static DisasJumpType op_cs(DisasContext *s, DisasOps *o)
{
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    TCGv_i64 addr, cc;

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value).  */

    addr = get_address(s, 0, b2, d2);
    tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
                               get_mem_index(s), s->insn->data | MO_ALIGN);
    tcg_temp_free_i64(addr);

    /* Are the memory and expected values (un)equal?  Note that this setcond
       produces the output CC value, thus the NE sense of the test.  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
    tcg_gen_extrl_i64_i32(cc_op, cc);
    tcg_temp_free_i64(cc);
    set_cc_static(s);

    return DISAS_NEXT;
}
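/*
 * Usage note (illustrative): because the setcond above is TCG_COND_NE,
 * CC ends up 0 when the compare-and-swap succeeded (memory == expected)
 * and 1 when it failed, matching the architected COMPARE AND SWAP CC.
 */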
static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    TCGv_i64 addr;
    TCGv_i32 t_r1, t_r3;

    /* Note that R1:R1+1 = expected value and R3:R3+1 = new value.  */
    addr = get_address(s, 0, b2, d2);
    t_r1 = tcg_const_i32(r1);
    t_r3 = tcg_const_i32(r3);
    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        gen_helper_cdsg_parallel(cpu_env, addr, t_r1, t_r3);
    } else {
        gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
    }
    tcg_temp_free_i64(addr);
    tcg_temp_free_i32(t_r1);
    tcg_temp_free_i32(t_r3);

    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t_r3 = tcg_const_i32(r3);

    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        gen_helper_csst_parallel(cc_op, cpu_env, t_r3, o->addr1, o->in2);
    } else {
        gen_helper_csst(cc_op, cpu_env, t_r3, o->addr1, o->in2);
    }
    tcg_temp_free_i32(t_r3);

    set_cc_static(s);
    return DISAS_NEXT;
}
#ifndef CONFIG_USER_ONLY
static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
{
    TCGMemOp mop = s->insn->data;
    TCGv_i64 addr, old, cc;
    TCGLabel *lab = gen_new_label();

    /* Note that in1 = R1 (zero-extended expected value),
       out = R1 (original reg), out2 = R1+1 (new value).  */

    addr = tcg_temp_new_i64();
    old = tcg_temp_new_i64();
    tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
    tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
                               get_mem_index(s), mop | MO_ALIGN);
    tcg_temp_free_i64(addr);

    /* Are the memory and expected values (un)equal?  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
    tcg_gen_extrl_i64_i32(cc_op, cc);

    /* Write back the output now, so that it happens before the
       following branch, so that we don't need local temps.  */
    if ((mop & MO_SIZE) == MO_32) {
        tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
    } else {
        tcg_gen_mov_i64(o->out, old);
    }
    tcg_temp_free_i64(old);

    /* If the comparison was equal, and the LSB of R2 was set,
       then we need to flush the TLB (for all cpus).  */
    tcg_gen_xori_i64(cc, cc, 1);
    tcg_gen_and_i64(cc, cc, o->in2);
    tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
    tcg_temp_free_i64(cc);

    gen_helper_purge(cpu_env);
    gen_set_label(lab);

    set_cc_static(s);
    return DISAS_NEXT;
}
#endif
static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i32 t2 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t2, o->in1);
    gen_helper_cvd(t1, t2);
    tcg_temp_free_i32(t2);
    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
    tcg_temp_free_i64(t1);
    return DISAS_NEXT;
}
static DisasJumpType op_ct(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    TCGLabel *lab = gen_new_label();
    TCGCond c;

    c = tcg_invert_cond(ltgt_cond[m3]);
    if (s->insn->data) {
        c = tcg_unsigned_cond(c);
    }
    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);

    /* Trap.  */
    gen_trap(s);

    gen_set_label(lab);
    return DISAS_NEXT;
}
static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i32 tr1, tr2, chk;

    /* R1 and R2 must both be even.  */
    if ((r1 | r2) & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
        m3 = 0;
    }

    tr1 = tcg_const_i32(r1);
    tr2 = tcg_const_i32(r2);
    chk = tcg_const_i32(m3);

    switch (s->insn->data) {
    case 12:
        gen_helper_cu12(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 14:
        gen_helper_cu14(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 21:
        gen_helper_cu21(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 24:
        gen_helper_cu24(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 41:
        gen_helper_cu41(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 42:
        gen_helper_cu42(cc_op, cpu_env, tr1, tr2, chk);
        break;
    default:
        g_assert_not_reached();
    }

    tcg_temp_free_i32(tr1);
    tcg_temp_free_i32(tr2);
    tcg_temp_free_i32(chk);
    set_cc_static(s);
    return DISAS_NEXT;
}
#ifndef CONFIG_USER_ONLY
static DisasJumpType op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    TCGv_i32 func_code = tcg_const_i32(get_field(s->fields, i2));

    gen_helper_diag(cpu_env, r1, r3, func_code);

    tcg_temp_free_i32(func_code);
    tcg_temp_free_i32(r3);
    tcg_temp_free_i32(r1);
    return DISAS_NEXT;
}
#endif
static DisasJumpType op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}

static DisasJumpType op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}

static DisasJumpType op_divs64(DisasContext *s, DisasOps *o)
{
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}

static DisasJumpType op_divu64(DisasContext *s, DisasOps *o)
{
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}
static DisasJumpType op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
static DisasJumpType op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
    return DISAS_NEXT;
}

static DisasJumpType op_ecag(DisasContext *s, DisasOps *o)
{
    /* No cache information provided.  */
    tcg_gen_movi_i64(o->out, -1);
    return DISAS_NEXT;
}

static DisasJumpType op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return DISAS_NEXT;
}
static DisasJumpType op_epsw(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i64 t = tcg_temp_new_i64();

    /* Note the "subsequently" in the PoO, which implies a defined result
       if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
    tcg_gen_shri_i64(t, psw_mask, 32);
    store_reg32_i64(r1, t);
    if (r2 != 0) {
        store_reg32_i64(r2, psw_mask);
    }

    tcg_temp_free_i64(t);
    return DISAS_NEXT;
}
static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    TCGv_i32 ilen;
    TCGv_i64 v1;

    /* Nested EXECUTE is not allowed.  */
    if (unlikely(s->ex_value)) {
        gen_program_exception(s, PGM_EXECUTE);
        return DISAS_NORETURN;
    }

    update_psw_addr(s);
    update_cc_op(s);

    if (r1 == 0) {
        v1 = tcg_const_i64(0);
    } else {
        v1 = regs[r1];
    }

    ilen = tcg_const_i32(s->ilen);
    gen_helper_ex(cpu_env, ilen, v1, o->in2);
    tcg_temp_free_i32(ilen);

    if (r1 == 0) {
        tcg_temp_free_i64(v1);
    }

    return DISAS_PC_CC_UPDATED;
}
static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fieb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return DISAS_NEXT;
}

static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fidb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return DISAS_NEXT;
}

static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m3);
    return_low128(o->out2);
    tcg_temp_free_i32(m3);
    return DISAS_NEXT;
}
static DisasJumpType op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation.  */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64.  */
    tcg_gen_clzi_i64(o->out, o->in2, 64);

    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing.  */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return DISAS_NEXT;
}
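/*
 * Worked example (illustrative): for IN = 0x0000080000000000 (bit 2^43),
 * R1 = clz = 20 and R1+1 = IN & ~(0x8000000000000000 >> 20) = 0, i.e. the
 * leftmost one bit found is cleared from the remainder.
 */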
static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int pos, len, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();
    uint64_t ccm;

    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit load.  */
        tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
        len = 32;
        goto one_insert;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit load.  */
        tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
        len = 16;
        goto one_insert;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit load.  */
        tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
        len = 8;
        goto one_insert;

    one_insert:
        pos = base + ctz32(m3) * 8;
        tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
        ccm = ((1ull << len) - 1) << pos;
        break;

    default:
        /* This is going to be a sequence of loads and inserts.  */
        pos = base + 32 - 8;
        ccm = 0;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
                tcg_gen_addi_i64(o->in2, o->in2, 1);
                tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
                ccm |= 0xffull << pos;
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }

    tcg_gen_movi_i64(tmp, ccm);
    gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
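/*
 * Mask sketch (illustrative): a contiguous mask like m3 = 0xc becomes one
 * 16-bit load at pos = ctz32(0xc) * 8 = 16; a sparse mask like m3 = 0xa
 * falls into the loop above, loading two bytes from successive addresses
 * into bit positions 24 and 8 of the output.
 */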
static DisasJumpType op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return DISAS_NEXT;
}
static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    gen_op_calc_cc(s);
    t1 = tcg_temp_new_i64();
    tcg_gen_extract_i64(t1, psw_mask, 40, 4);
    t2 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(t2, cc_op);
    tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
    tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return DISAS_NEXT;
}
#ifndef CONFIG_USER_ONLY
static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m4;

    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
        m4 = tcg_const_i32(get_field(s->fields, m4));
    } else {
        m4 = tcg_const_i32(0);
    }
    gen_helper_idte(cpu_env, o->in1, o->in2, m4);
    tcg_temp_free_i32(m4);
    return DISAS_NEXT;
}

static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m4;

    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
        m4 = tcg_const_i32(get_field(s->fields, m4));
    } else {
        m4 = tcg_const_i32(0);
    }
    gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
    tcg_temp_free_i32(m4);
    return DISAS_NEXT;
}

static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
{
    gen_helper_iske(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}
#endif
static DisasJumpType op_msa(DisasContext *s, DisasOps *o)
{
    int r1 = have_field(s->fields, r1) ? get_field(s->fields, r1) : 0;
    int r2 = have_field(s->fields, r2) ? get_field(s->fields, r2) : 0;
    int r3 = have_field(s->fields, r3) ? get_field(s->fields, r3) : 0;
    TCGv_i32 t_r1, t_r2, t_r3, type;

    switch (s->insn->data) {
    case S390_FEAT_TYPE_KMCTR:
        if (r3 & 1 || !r3) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_PPNO:
    case S390_FEAT_TYPE_KMF:
    case S390_FEAT_TYPE_KMC:
    case S390_FEAT_TYPE_KMO:
    case S390_FEAT_TYPE_KM:
        if (r1 & 1 || !r1) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_KMAC:
    case S390_FEAT_TYPE_KIMD:
    case S390_FEAT_TYPE_KLMD:
        if (r2 & 1 || !r2) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_PCKMO:
    case S390_FEAT_TYPE_PCC:
        break;
    default:
        g_assert_not_reached();
    }

    t_r1 = tcg_const_i32(r1);
    t_r2 = tcg_const_i32(r2);
    t_r3 = tcg_const_i32(r3);
    type = tcg_const_i32(s->insn->data);
    gen_helper_msa(cc_op, cpu_env, t_r1, t_r2, t_r3, type);
    set_cc_static(s);
    tcg_temp_free_i32(t_r1);
    tcg_temp_free_i32(t_r2);
    tcg_temp_free_i32(t_r3);
    tcg_temp_free_i32(type);
    return DISAS_NEXT;
}
static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
{
    gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
{
    gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
{
    gen_helper_kxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
static DisasJumpType op_laa(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory;
       recompute the addition for the computation of CC.  */
    tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the addition for setting CC.  */
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_lan(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory;
       recompute the addition for the computation of CC.  */
    tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_lao(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory;
       recompute the addition for the computation of CC.  */
    tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_lax(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory;
       recompute the addition for the computation of CC.  */
    tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
{
    gen_helper_ledb(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
{
    gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return DISAS_NEXT;
}

static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32_i64(get_field(s->fields, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}

static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}

static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32h_i64(get_field(s->fields, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}

static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}

static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
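/*
 * Note: LOAD ON CONDITION is implemented branch-free with movcond; when
 * disas_jcc hands back a 32-bit comparison it is first materialized as
 * a 0/1 value so that a single 64-bit movcond can pick between in2 and
 * in1.
 */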
static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;

    disas_jcc(s, &c, get_field(s->fields, m3));

    if (c.is_64) {
        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
                            o->in2, o->in1);
        free_compare(&c);
    } else {
        TCGv_i32 t32 = tcg_temp_new_i32();
        TCGv_i64 t, z;

        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
        free_compare(&c);

        t = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t, t32);
        tcg_temp_free_i32(t32);

        z = tcg_const_i64(0);
        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
        tcg_temp_free_i64(t);
        tcg_temp_free_i64(z);
    }

    return DISAS_NEXT;
}

#ifndef CONFIG_USER_ONLY
static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    gen_helper_lctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return DISAS_PC_STALE_NOCHAIN;
}

static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return DISAS_PC_STALE_NOCHAIN;
}

static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
{
    gen_helper_lra(o->out, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
{
    tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
    return DISAS_NEXT;
}

static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
                        MO_TEUL | MO_ALIGN_8);
    tcg_gen_addi_i64(o->in2, o->in2, 4);
    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK.  */
    tcg_gen_shli_i64(t1, t1, 32);
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return DISAS_NORETURN;
}

static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
                        MO_TEQ | MO_ALIGN_8);
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return DISAS_NORETURN;
}
#endif

static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    gen_helper_lam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}
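/*
 * Note: the load-multiple expansions below read the first and last
 * registers up front so that any page fault is raised before any
 * general register has been modified; that keeps the instruction
 * restartable, after which the remaining loads cannot fault.
 */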
static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t1);
        tcg_temp_free_i64(t1);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
    store_reg32_i64(r1, t1);
    store_reg32_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free_i64(t2);
        tcg_temp_free_i64(t1);
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t1);
    }
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t1);

    return DISAS_NEXT;
}

static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t1);
        tcg_temp_free_i64(t1);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
    store_reg32h_i64(r1, t1);
    store_reg32h_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free_i64(t2);
        tcg_temp_free_i64(t1);
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t1);
    }
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t1);

    return DISAS_NEXT;
}

static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
    tcg_gen_mov_i64(regs[r1], t1);
    tcg_temp_free_i64(t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free_i64(t1);
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t1, 8);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t1);
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
    }
    tcg_temp_free_i64(t1);

    return DISAS_NEXT;
}
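/*
 * Note: LOAD PAIR DISJOINT requires the two loads to appear
 * block-concurrent.  In a serial (single-threaded TCG) context plain
 * loads suffice; under CF_PARALLEL we punt to EXCP_ATOMIC, which
 * replays the instruction inside an exclusive stop-the-world section.
 */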
static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 a1, a2;
    TCGMemOp mop = s->insn->data;

    /* In a parallel context, stop the world and single step.  */
    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        update_psw_addr(s);
        update_cc_op(s);
        gen_exception(EXCP_ATOMIC);
        return DISAS_NORETURN;
    }

    /* In a serial context, perform the two loads ... */
    a1 = get_address(s, 0, get_field(s->fields, b1), get_field(s->fields, d1));
    a2 = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
    tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
    tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
    tcg_temp_free_i64(a1);
    tcg_temp_free_i64(a2);

    /* ... and indicate that we performed them while interlocked.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}

static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
{
    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        gen_helper_lpq_parallel(o->out, cpu_env, o->in2);
    } else {
        gen_helper_lpq(o->out, cpu_env, o->in2);
    }
    return_low128(o->out2);
    return DISAS_NEXT;
}

#ifndef CONFIG_USER_ONLY
static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
{
    gen_helper_lura(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_lurag(DisasContext *s, DisasOps *o)
{
    gen_helper_lurag(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}
#endif

static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, -256);
    return DISAS_NEXT;
}
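/*
 * Note: the "mov" pseudo-ops below do not copy data; they hand the
 * input temporaries over to the output slots and clear the inputs,
 * transferring ownership so the generic cleanup in translate_one frees
 * each temporary exactly once.  The g_* flags track whether a slot
 * aliases a TCG global rather than a temporary.
 */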
static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    o->g_out = o->g_in2;
    o->in2 = NULL;
    o->g_in2 = false;
    return DISAS_NEXT;
}

static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s->fields, b2);
    TCGv ar1 = tcg_temp_new_i64();

    o->out = o->in2;
    o->g_out = o->g_in2;
    o->in2 = NULL;
    o->g_in2 = false;

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 0);
        break;
    case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 1);
        break;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        if (b2) {
            tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
        } else {
            tcg_gen_movi_i64(ar1, 0);
        }
        break;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 2);
        break;
    }

    tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
    tcg_temp_free_i64(ar1);

    return DISAS_NEXT;
}

static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
{
    o->out = o->in1;
    o->out2 = o->in2;
    o->g_out = o->g_in1;
    o->g_out2 = o->g_in2;
    o->in1 = NULL;
    o->in2 = NULL;
    o->g_in1 = o->g_in2 = false;
    return DISAS_NEXT;
}

static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i32 t1, t2;

    /* r1 and r2 must be even.  */
    if (r1 & 1 || r2 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t2 = tcg_const_i32(r2);
    gen_helper_mvcl(cc_op, cpu_env, t1, t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even.  */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even.  */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);
    set_cc_static(s);
    return DISAS_NEXT;
}

#ifndef CONFIG_USER_ONLY
static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
#endif

static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
{
    gen_helper_mvpg(cc_op, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
{
    gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}
static DisasJumpType op_mul(DisasContext *s, DisasOps *o)
{
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_mul128(DisasContext *s, DisasOps *o)
{
    tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
{
    gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return DISAS_NEXT;
}

static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return DISAS_NEXT;
}

static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return DISAS_NEXT;
}

static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return DISAS_NEXT;
}
static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
{
    TCGv_i64 z, n;
    z = tcg_const_i64(0);
    n = tcg_temp_new_i64();
    tcg_gen_neg_i64(n, o->in2);
    tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
    tcg_temp_free_i64(n);
    tcg_temp_free_i64(z);
    return DISAS_NEXT;
}

static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
    return DISAS_NEXT;
}

static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
    return DISAS_NEXT;
}

static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_neg(DisasContext *s, DisasOps *o)
{
    tcg_gen_neg_i64(o->out, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_negf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
    return DISAS_NEXT;
}

static DisasJumpType op_negf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
    return DISAS_NEXT;
}

static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_or(DisasContext *s, DisasOps *o)
{
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}

static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory.  */
        tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                    s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC.  */
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_pack(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
{
    int l2 = get_field(s->fields, l2) + 1;
    TCGv_i32 l;

    /* The length must not exceed 32 bytes.  */
    if (l2 > 32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_const_i32(l2);
    gen_helper_pka(cpu_env, o->addr1, o->in2, l);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
{
    int l2 = get_field(s->fields, l2) + 1;
    TCGv_i32 l;

    /* The length must be even and should not exceed 64 bytes.  */
    if ((l2 & 1) || (l2 > 64)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_const_i32(l2);
    gen_helper_pku(cpu_env, o->addr1, o->in2, l);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o)
{
    gen_helper_popcnt(o->out, o->in2);
    return DISAS_NEXT;
}

#ifndef CONFIG_USER_ONLY
static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
{
    gen_helper_ptlb(cpu_env);
    return DISAS_NEXT;
}
#endif
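/*
 * Note: in op_risbg the masks are computed in big-endian bit numbering,
 * matching the PoO.  "pmask >> i3" sets bits i3..63 and xor-ing with
 * "pmask >> i4 >> 1" leaves exactly bits i3..i4; e.g. for RISBG with
 * i3 = 48, i4 = 55 this yields mask = 0x000000000000ff00ull, i.e. one
 * byte of the register.
 */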
static DisasJumpType op_risbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s->fields, i3);
    int i4 = get_field(s->fields, i4);
    int i5 = get_field(s->fields, i5);
    int do_zero = i4 & 0x80;
    uint64_t mask, imask, pmask;
    int pos, len, rot;

    /* Adjust the arguments for the specific insn.  */
    switch (s->fields->op2) {
    case 0x55: /* risbg */
    case 0x59: /* risbgn */
        i3 &= 63;
        i4 &= 63;
        pmask = ~0;
        break;
    case 0x5d: /* risbhg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0xffffffff00000000ull;
        break;
    case 0x51: /* risblg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0x00000000ffffffffull;
        break;
    default:
        g_assert_not_reached();
    }

    /* MASK is the set of bits to be inserted from R2.
       Take care for I3/I4 wraparound.  */
    mask = pmask >> i3;
    if (i3 <= i4) {
        mask ^= pmask >> i4 >> 1;
    } else {
        mask |= ~(pmask >> i4 >> 1);
    }
    mask &= pmask;

    /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
       insns, we need to keep the other half of the register.  */
    imask = ~mask | ~pmask;
    if (do_zero) {
        imask = ~pmask;
    }

    len = i4 - i3 + 1;
    pos = 63 - i4;
    rot = i5 & 63;
    if (s->fields->op2 == 0x5d) {
        pos += 32;
    }

    /* In some cases we can implement this with extract.  */
    if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
        tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
        return DISAS_NEXT;
    }

    /* In some cases we can implement this with deposit.  */
    if (len > 0 && (imask == 0 || ~mask == imask)) {
        /* Note that we rotate the bits to be inserted to the lsb, not to
           the position as described in the PoO.  */
        rot = (rot - pos) & 63;
    } else {
        pos = -1;
    }

    /* Rotate the input as necessary.  */
    tcg_gen_rotli_i64(o->in2, o->in2, rot);

    /* Insert the selected bits into the output.  */
    if (pos >= 0) {
        if (imask == 0) {
            tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
        } else {
            tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
        }
    } else if (imask == 0) {
        tcg_gen_andi_i64(o->out, o->in2, mask);
    } else {
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_andi_i64(o->out, o->out, imask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
    }
    return DISAS_NEXT;
}
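/*
 * Note: op_rosbg shares the same i3..i4 big-endian mask construction as
 * op_risbg above, but applies AND/OR/XOR to the rotated second operand.
 * When i3 bit 0x80 is set (the test-only forms), the result register is
 * redirected to a scratch temporary so that only the CC is affected.
 */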
static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s->fields, i3);
    int i4 = get_field(s->fields, i4);
    int i5 = get_field(s->fields, i5);
    uint64_t mask;

    /* If this is a test-only form, arrange to discard the result.  */
    if (i3 & 0x80) {
        o->out = tcg_temp_new_i64();
        o->g_out = false;
    }

    i3 &= 63;
    i4 &= 63;
    i5 &= 63;

    /* MASK is the set of bits to be operated on from R2.
       Take care for I3/I4 wraparound.  */
    mask = ~0ull >> i3;
    if (i3 <= i4) {
        mask ^= ~0ull >> i4 >> 1;
    } else {
        mask |= ~(~0ull >> i4 >> 1);
    }

    /* Rotate the input as necessary.  */
    tcg_gen_rotli_i64(o->in2, o->in2, i5);

    /* Operate.  */
    switch (s->fields->op2) {
    case 0x55: /* AND */
        tcg_gen_ori_i64(o->in2, o->in2, ~mask);
        tcg_gen_and_i64(o->out, o->out, o->in2);
        break;
    case 0x56: /* OR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
        break;
    case 0x57: /* XOR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_xor_i64(o->out, o->out, o->in2);
        break;
    default:
        g_assert_not_reached();
    }

    /* Set the CC.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}

static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap16_i64(o->out, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap32_i64(o->out, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_rev64(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap64_i64(o->out, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_rll32(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    TCGv_i32 to = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, o->in1);
    tcg_gen_extrl_i64_i32(t2, o->in2);
    tcg_gen_rotl_i32(to, t1, t2);
    tcg_gen_extu_i32_i64(o->out, to);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    tcg_temp_free_i32(to);
    return DISAS_NEXT;
}

static DisasJumpType op_rll64(DisasContext *s, DisasOps *o)
{
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

#ifndef CONFIG_USER_ONLY
static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
{
    gen_helper_rrbe(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
{
    gen_helper_sacf(cpu_env, o->in2);
    /* Addressing mode has changed, so end the block.  */
    return DISAS_PC_STALE;
}
#endif

static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
{
    int sam = s->insn->data;
    TCGv_i64 tsam;
    uint64_t mask;

    switch (sam) {
    case 0:
        mask = 0xffffff;
        break;
    case 1:
        mask = 0x7fffffff;
        break;
    default:
        mask = -1;
        break;
    }

    /* Bizarre but true, we check the address of the current insn for the
       specification exception, not the next to be executed.  Thus the PoO
       documents that Bad Things Happen two bytes before the end.  */
    if (s->base.pc_next & ~mask) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    s->pc_tmp &= mask;

    tsam = tcg_const_i64(sam);
    tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
    tcg_temp_free_i64(tsam);

    /* Always exit the TB, since we (may have) changed execution mode.  */
    return DISAS_PC_STALE;
}
static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
    return DISAS_NEXT;
}

static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
{
    gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqeb(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqdb(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}

#ifndef CONFIG_USER_ONLY
static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
{
    gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3);
    set_cc_static(s);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}
#endif

static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    TCGv_i64 a, h;
    TCGLabel *lab;
    int r1;

    disas_jcc(s, &c, get_field(s->fields, m3));

    /* We want to store when the condition is fulfilled, so branch
       out when it's not */
    c.cond = tcg_invert_cond(c.cond);

    lab = gen_new_label();
    if (c.is_64) {
        tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
    } else {
        tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
    }
    free_compare(&c);

    r1 = get_field(s->fields, r1);
    a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
    switch (s->insn->data) {
    case 1: /* STOCG */
        tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
        break;
    case 0: /* STOC */
        tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
        break;
    case 2: /* STOCFH */
        h = tcg_temp_new_i64();
        tcg_gen_shri_i64(h, regs[r1], 32);
        tcg_gen_qemu_st32(h, a, get_mem_index(s));
        tcg_temp_free_i64(h);
        break;
    default:
        g_assert_not_reached();
    }
    tcg_temp_free_i64(a);

    gen_set_label(lab);
    return DISAS_NEXT;
}
static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
{
    uint64_t sign = 1ull << s->insn->data;
    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged.  */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return DISAS_NEXT;
}

static DisasJumpType op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
{
    gen_helper_sfpc(cpu_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
{
    gen_helper_sfas(cpu_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_srnm(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s->fields, b2);
    int d2 = get_field(s->fields, d2);
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    int mask, pos, len;

    switch (s->fields->op2) {
    case 0x99: /* SRNM */
        pos = 0, len = 2;
        break;
    case 0xb8: /* SRNMB */
        pos = 0, len = 3;
        break;
    case 0xb9: /* SRNMT */
        pos = 4, len = 3;
        break;
    default:
        g_assert_not_reached();
    }
    mask = (1 << len) - 1;

    /* Insert the value into the appropriate field of the FPC.  */
    if (b2 == 0) {
        tcg_gen_movi_i64(t1, d2 & mask);
    } else {
        tcg_gen_addi_i64(t1, regs[b2], d2);
        tcg_gen_andi_i64(t1, t1, mask);
    }
    tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_deposit_i64(t2, t2, t1, pos, len);
    tcg_temp_free_i64(t1);

    /* Then install the new FPC to set the rounding mode in fpu_status.  */
    gen_helper_sfpc(cpu_env, t2);
    tcg_temp_free_i64(t2);
    return DISAS_NEXT;
}

static DisasJumpType op_spm(DisasContext *s, DisasOps *o)
{
    tcg_gen_extrl_i64_i32(cc_op, o->in1);
    tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
    set_cc_static(s);

    tcg_gen_shri_i64(o->in1, o->in1, 24);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
    return DISAS_NEXT;
}
static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
{
    int b1 = get_field(s->fields, b1);
    int d1 = get_field(s->fields, d1);
    int b2 = get_field(s->fields, b2);
    int d2 = get_field(s->fields, d2);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* fetch all operands first */
    o->in1 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in1, regs[b1], d1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in2, regs[b2], d2);
    o->addr1 = get_address(s, 0, r3, 0);

    /* load the third operand into r3 before modifying anything */
    tcg_gen_qemu_ld64(regs[r3], o->addr1, get_mem_index(s));

    /* subtract CPU timer from first operand and store in GR0 */
    gen_helper_stpt(tmp, cpu_env);
    tcg_gen_sub_i64(regs[0], o->in1, tmp);

    /* store second operand in GR1 */
    tcg_gen_mov_i64(regs[1], o->in2);

    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}

#ifndef CONFIG_USER_ONLY
static DisasJumpType op_spka(DisasContext *s, DisasOps *o)
{
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
    return DISAS_NEXT;
}

static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
{
    gen_helper_sske(cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
{
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return DISAS_PC_STALE_NOCHAIN;
}

static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, core_id));
    return DISAS_NEXT;
}
#endif

static DisasJumpType op_stck(DisasContext *s, DisasOps *o)
{
    gen_helper_stck(o->out, cpu_env);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}
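/*
 * Note: STCKE stores a 16-byte value: a zero epoch byte, the 64-bit TOD
 * clock shifted into bytes 1-8, then filler with one bit set (so the
 * extended value always compares nonequal to a plain STCK result for
 * the same clock tick) and the TOD programmable field in the last two
 * bytes.
 */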
static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
{
    TCGv_i64 c1 = tcg_temp_new_i64();
    TCGv_i64 c2 = tcg_temp_new_i64();
    TCGv_i64 todpr = tcg_temp_new_i64();
    gen_helper_stck(c1, cpu_env);
    /* 16 bit value store in an uint32_t (only valid bits set) */
    tcg_gen_ld32u_i64(todpr, cpu_env, offsetof(CPUS390XState, todpr));
    /* Shift the 64-bit value into its place as a zero-extended
       104-bit value.  Note that "bit positions 64-103 are always
       non-zero so that they compare differently to STCK"; we set
       the least significant bit to 1.  */
    tcg_gen_shli_i64(c2, c1, 56);
    tcg_gen_shri_i64(c1, c1, 8);
    tcg_gen_ori_i64(c2, c2, 0x10000);
    tcg_gen_or_i64(c2, c2, todpr);
    tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
    tcg_temp_free_i64(c1);
    tcg_temp_free_i64(c2);
    tcg_temp_free_i64(todpr);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}

#ifndef CONFIG_USER_ONLY
static DisasJumpType op_sck(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
    gen_helper_sck(cc_op, cpu_env, o->in1);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_sckc(DisasContext *s, DisasOps *o)
{
    gen_helper_sckc(cpu_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o)
{
    gen_helper_sckpf(cpu_env, regs[0]);
    return DISAS_NEXT;
}

static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
{
    gen_helper_stckc(o->out, cpu_env);
    return DISAS_NEXT;
}

static DisasJumpType op_stctg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    gen_helper_stctg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}

static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    gen_helper_stctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}

static DisasJumpType op_stidp(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));
    return DISAS_NEXT;
}

static DisasJumpType op_spt(DisasContext *s, DisasOps *o)
{
    gen_helper_spt(cpu_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_stfl(DisasContext *s, DisasOps *o)
{
    gen_helper_stfl(cpu_env);
    return DISAS_NEXT;
}

static DisasJumpType op_stpt(DisasContext *s, DisasOps *o)
{
    gen_helper_stpt(o->out, cpu_env);
    return DISAS_NEXT;
}

static DisasJumpType op_stsi(DisasContext *s, DisasOps *o)
{
    gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_spx(DisasContext *s, DisasOps *o)
{
    gen_helper_spx(cpu_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_xsch(DisasContext *s, DisasOps *o)
{
    gen_helper_xsch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_csch(DisasContext *s, DisasOps *o)
{
    gen_helper_csch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_hsch(DisasContext *s, DisasOps *o)
{
    gen_helper_hsch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_msch(DisasContext *s, DisasOps *o)
{
    gen_helper_msch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_rchp(DisasContext *s, DisasOps *o)
{
    gen_helper_rchp(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_rsch(DisasContext *s, DisasOps *o)
{
    gen_helper_rsch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_sal(DisasContext *s, DisasOps *o)
{
    gen_helper_sal(cpu_env, regs[1]);
    return DISAS_NEXT;
}

static DisasJumpType op_schm(DisasContext *s, DisasOps *o)
{
    gen_helper_schm(cpu_env, regs[1], regs[2], o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_siga(DisasContext *s, DisasOps *o)
{
    /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
    gen_op_movi_cc(s, 3);
    return DISAS_NEXT;
}

static DisasJumpType op_stcps(DisasContext *s, DisasOps *o)
{
    /* The instruction is suppressed if not provided. */
    return DISAS_NEXT;
}

static DisasJumpType op_ssch(DisasContext *s, DisasOps *o)
{
    gen_helper_ssch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_stsch(DisasContext *s, DisasOps *o)
{
    gen_helper_stsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o)
{
    gen_helper_stcrw(cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_tpi(DisasContext *s, DisasOps *o)
{
    gen_helper_tpi(cc_op, cpu_env, o->addr1);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_tsch(DisasContext *s, DisasOps *o)
{
    gen_helper_tsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_chsc(DisasContext *s, DisasOps *o)
{
    gen_helper_chsc(cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_stpx(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
    return DISAS_NEXT;
}

static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s->fields, i2);
    TCGv_i64 t;

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place.  */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
    tcg_temp_free_i64(t);

    if (s->fields->op == 0xac) {
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }

    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return DISAS_PC_STALE_NOCHAIN;
}

static DisasJumpType op_stura(DisasContext *s, DisasOps *o)
{
    gen_helper_stura(cpu_env, o->in2, o->in1);
    return DISAS_NEXT;
}

static DisasJumpType op_sturg(DisasContext *s, DisasOps *o)
{
    gen_helper_sturg(cpu_env, o->in2, o->in1);
    return DISAS_NEXT;
}
#endif

static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
{
    gen_helper_stfle(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_stam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    gen_helper_stam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}
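/*
 * Note: for STCM the m3 mask selects which bytes of R1 are stored to
 * successive byte addresses.  Contiguous masks become a single 8/16/32-bit
 * store; e.g. m3 = 0xa (binary 1010) falls through to the generic walk,
 * storing bytes 0 and 2 of the field to two consecutive locations.
 */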
static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int pos, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();

    pos = base + ctz32(m3) * 8;
    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
        break;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
        break;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
        break;

    default:
        /* This is going to be a sequence of shifts and stores.  */
        pos = base + 32 - 8;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_shri_i64(tmp, o->in1, pos);
                tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
                tcg_gen_addi_i64(o->in2, o->in2, 1);
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}

static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int size = s->insn->data;
    TCGv_i64 tsize = tcg_const_i64(size);

    while (1) {
        if (size == 8) {
            tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
        } else {
            tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
        }
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, tsize);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(tsize);
    return DISAS_NEXT;
}

static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_const_i64(4);
    TCGv_i64 t32 = tcg_const_i64(32);

    while (1) {
        tcg_gen_shl_i64(t, regs[r1], t32);
        tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t);
    tcg_temp_free_i64(t4);
    tcg_temp_free_i64(t32);
    return DISAS_NEXT;
}

static DisasJumpType op_stpq(DisasContext *s, DisasOps *o)
{
    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        gen_helper_stpq_parallel(cpu_env, o->in2, o->out2, o->out);
    } else {
        gen_helper_stpq(cpu_env, o->in2, o->out2, o->out);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_srst(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));

    gen_helper_srst(cpu_env, r1, r2);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_srstu(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));

    gen_helper_srstu(cpu_env, r1, r2);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
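/*
 * Note: for SUBTRACT WITH BORROW the borrow is not available directly;
 * CC values 0 and 1 mean "borrow" and 2/3 mean "no borrow", so asking
 * disas_jcc for the CC=0|CC=1 mask (8|4) yields a 0/1 borrow flag that
 * is then subtracted from the difference.
 */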
static DisasJumpType op_subb(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 borrow;

    tcg_gen_sub_i64(o->out, o->in1, o->in2);

    /* The !borrow flag is the msb of CC.  Since we want the inverse of
       that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4.  */
    disas_jcc(s, &cmp, 8 | 4);
    borrow = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(borrow, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_sub_i64(o->out, o->out, borrow);
    tcg_temp_free_i64(borrow);
    return DISAS_NEXT;
}
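/*
 * Note: SUPERVISOR CALL cannot be resolved at translation time; the
 * code below saves the SVC interruption code and instruction length in
 * the CPU state and raises EXCP_SVC, leaving the actual syscall or
 * interrupt delivery to the exception handler.
 */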
static DisasJumpType op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    update_psw_addr(s);
    update_cc_op(s);

    t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
    tcg_temp_free_i32(t);

    t = tcg_const_i32(s->ilen);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
    tcg_temp_free_i32(t);

    gen_exception(EXCP_SVC);
    return DISAS_NORETURN;
}

static DisasJumpType op_tam(DisasContext *s, DisasOps *o)
{
    int cc = 0;

    cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0;
    cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0;
    gen_op_movi_cc(s, cc);
    return DISAS_NEXT;
}

static DisasJumpType op_tceb(DisasContext *s, DisasOps *o)
{
    gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcxb(cc_op, cpu_env, o->out, o->out2, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

#ifndef CONFIG_USER_ONLY

static DisasJumpType op_testblock(DisasContext *s, DisasOps *o)
{
    gen_helper_testblock(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_tprot(DisasContext *s, DisasOps *o)
{
    gen_helper_tprot(cc_op, cpu_env, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

#endif

static DisasJumpType op_tp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l1 = tcg_const_i32(get_field(s->fields, l1) + 1);
    gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
    tcg_temp_free_i32(l1);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_tr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_tr(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_tre(DisasContext *s, DisasOps *o)
{
    gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_trt(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_trtr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}
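/*
 * Note: the TRANSLATE ONE/TWO TO ONE/TWO insns share op_trXX; the low
 * bits of the opcode encode the source/destination element sizes, and
 * with the ETF2-enhancement facility m3 bit 0 requests that the test
 * byte in r0 be ignored (tst = -1).
 */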
static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
    TCGv_i32 sizes = tcg_const_i32(s->insn->opc & 3);
    TCGv_i32 tst = tcg_temp_new_i32();
    int m3 = get_field(s->fields, m3);

    if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
        m3 = 0;
    }
    if (m3 & 1) {
        tcg_gen_movi_i32(tst, -1);
    } else {
        tcg_gen_extrl_i64_i32(tst, regs[0]);
        if (s->insn->opc & 3) {
            tcg_gen_ext8u_i32(tst, tst);
        } else {
            tcg_gen_ext16u_i32(tst, tst);
        }
    }
    gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    tcg_temp_free_i32(sizes);
    tcg_temp_free_i32(tst);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_ts(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_const_i32(0xff);
    tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
    tcg_gen_extract_i32(cc_op, t1, 7, 1);
    tcg_temp_free_i32(t1);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_unpk(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

static DisasJumpType op_unpka(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s->fields, l1) + 1;
    TCGv_i32 l;

    /* The length must not exceed 32 bytes.  */
    if (l1 > 32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_const_i32(l1);
    gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_unpku(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s->fields, l1) + 1;
    TCGv_i32 l;

    /* The length must be even and should not exceed 64 bytes.  */
    if ((l1 & 1) || (l1 > 64)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_const_i32(l1);
    gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}
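/*
 * Note: XC with both operands designating the same field is the
 * idiomatic s390 memset-to-zero, so short cases are inlined below as
 * direct stores of zero; everything else defers to the helper, which
 * processes the field left to right byte-wise as the architecture
 * requires for overlapping operands.
 */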
static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
{
    int d1 = get_field(s->fields, d1);
    int d2 = get_field(s->fields, d2);
    int b1 = get_field(s->fields, b1);
    int b2 = get_field(s->fields, b2);
    int l = get_field(s->fields, l1);
    TCGv_i32 t32;

    o->addr1 = get_address(s, 0, b1, d1);

    /* If the addresses are identical, this is a store/memset of zero.  */
    if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
        o->in2 = tcg_const_i64(0);

        l++;
        while (l >= 8) {
            tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
            l -= 8;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 8);
            }
        }
        if (l >= 4) {
            tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
            l -= 4;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 4);
            }
        }
        if (l >= 2) {
            tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
            l -= 2;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 2);
            }
        }
        if (l) {
            tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
        }
        gen_op_movi_cc(s, 0);
        return DISAS_NEXT;
    }

    /* But in general we'll defer to a helper.  */
    o->in2 = get_address(s, 0, b2, d2);
    t32 = tcg_const_i32(l);
    gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
    tcg_temp_free_i32(t32);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}

static DisasJumpType op_xi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory.  */
        tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC.  */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    return DISAS_NEXT;
}

static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    o->out2 = o->out;
    o->g_out2 = true;
    return DISAS_NEXT;
}

#ifndef CONFIG_USER_ONLY
static DisasJumpType op_clp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));

    gen_helper_clp(cpu_env, r2);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));

    gen_helper_pcilg(cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));

    gen_helper_pcistg(cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));

    gen_helper_stpcifc(cpu_env, r1, o->addr1, ar);
    tcg_temp_free_i32(ar);
    tcg_temp_free_i32(r1);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_sic(DisasContext *s, DisasOps *o)
{
    gen_helper_sic(cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));

    gen_helper_rpcit(cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));

    gen_helper_pcistb(cpu_env, r1, r3, o->addr1, ar);
    tcg_temp_free_i32(ar);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));

    gen_helper_mpcifc(cpu_env, r1, o->addr1, ar);
    tcg_temp_free_i32(ar);
    tcg_temp_free_i32(r1);
    set_cc_static(s);
    return DISAS_NEXT;
}
#endif
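/*
 * Note: the cout generators below do not compute a condition code;
 * they only record a CC_OP_* and its operands in the cc_* globals, and
 * the value is folded down to 0-3 lazily (gen_op_calc_cc) when
 * something actually consumes the CC, which keeps straight-line
 * arithmetic cheap.
 */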

/* ====================================================================== */
/* The "Cc OUTput" generators.  Given the generated output (and in some cases
   the original inputs), update the various cc data structures in order to
   be able to compute the new condition code.  */

static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}

static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}

static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}

static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}

static void cout_addu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
}

static void cout_addu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
}

static void cout_addc32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
}

static void cout_addc64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
}

static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}

static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}

static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}

static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}

static void cout_f32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
}

static void cout_f64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
}

static void cout_f128(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
}

static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}

static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}

static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}

static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}

static void cout_nz32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}

static void cout_nz64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
}

static void cout_s32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
}

static void cout_s64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
}

static void cout_subs32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
}

static void cout_subs64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
}

static void cout_subu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
}

static void cout_subu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
}

static void cout_subb32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
}

static void cout_subb64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
}

static void cout_tm32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
}

static void cout_tm64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
}
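
/*
 * Illustration (comment only, not part of the build): none of the cout
 * helpers above computes a condition code.  For "ar %r1,%r2",
 * cout_adds32() merely records in1/in2/out plus CC_OP_ADD_32 in the
 * cc_src/cc_dst/cc_vr globals; the CC is materialized lazily, only when
 * a later insn actually consumes it.  Sketched from the usual s390x CC
 * rules for signed add, the deferred evaluation is roughly:
 *
 *     static uint32_t cc_for_adds32(int32_t a1, int32_t a2, int32_t ar)
 *     {
 *         if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0)) {
 *             return 3;                            // signed overflow
 *         }
 *         return ar == 0 ? 0 : (ar < 0 ? 1 : 2);  // zero, <0, >0
 *     }
 */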

/* ====================================================================== */
/* The "PREParation" generators.  These initialize the DisasOps.OUT fields
   with the TCG register to which we will write.  Used in combination with
   the "wout" generators, in some cases we need a new temporary, and in
   some cases we can write to a TCG global.  */

static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}
#define SPEC_prep_new 0

static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}
#define SPEC_prep_new_P 0

static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = regs[get_field(f, r1)];
    o->g_out = true;
}
#define SPEC_prep_r1 0

static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = regs[r1];
    o->out2 = regs[r1 + 1];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_r1_P SPEC_r1_even

static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = fregs[get_field(f, r1)];
    o->g_out = true;
}
#define SPEC_prep_f1 0

static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[r1 + 2];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_x1 SPEC_r1_f128
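
/*
 * Note on ownership: prep_new() hands the operation a scratch temporary
 * that the common cleanup in translate_one() later frees, while
 * prep_r1(), prep_r1_P(), prep_f1() and prep_x1() alias TCG globals and
 * set g_out/g_out2 so that the same cleanup skips them:
 *
 *     if (o.out && !o.g_out) {
 *         tcg_temp_free_i64(o.out);
 *     }
 *
 * (See translate_one() below.)  Freeing a global would invalidate the
 * register-file mapping, hence the flags.
 */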

/* ====================================================================== */
/* The "Write OUTput" generators.  These generally perform some non-trivial
   copy of data to TCG globals, or to main memory.  The trivial cases are
   generally handled by having a "prep" generator install the TCG global
   as the destination of the operation.  */

static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->out);
}
#define SPEC_wout_r1 0

static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}
#define SPEC_wout_r1_8 0

static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}
#define SPEC_wout_r1_16 0

static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_r1_32 0

static void wout_r1_32h(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32h_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_r1_32h 0

static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64(r1 + 1, o->out2);
}
#define SPEC_wout_r1_P32 SPEC_r1_even

static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1 + 1, o->out);
    tcg_gen_shri_i64(o->out, o->out, 32);
    store_reg32_i64(r1, o->out);
}
#define SPEC_wout_r1_D32 SPEC_r1_even

static void wout_r3_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    store_reg32_i64(r3, o->out);
    store_reg32_i64(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P32 SPEC_r3_even

static void wout_r3_P64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    store_reg(r3, o->out);
    store_reg(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P64 SPEC_r3_even

static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_e1 0

static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg(get_field(f, r1), o->out);
}
#define SPEC_wout_f1 0

static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int f1 = get_field(s->fields, r1);
    store_freg(f1, o->out);
    store_freg(f1 + 2, o->out2);
}
#define SPEC_wout_x1 SPEC_r1_f128

static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_reg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_r1r2_32 0

static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_freg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_e1e2 0

static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_8 0

static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_16 0

#ifndef CONFIG_USER_ONLY
static void wout_m1_16a(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN);
}
#define SPEC_wout_m1_16a 0
#endif

static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_32 0

#ifndef CONFIG_USER_ONLY
static void wout_m1_32a(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_wout_m1_32a 0
#endif

static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_64 0

#ifndef CONFIG_USER_ONLY
static void wout_m1_64a(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
}
#define SPEC_wout_m1_64a 0
#endif

static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
}
#define SPEC_wout_m2_32 0

static void wout_in2_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->in2);
}
#define SPEC_wout_in2_r1 0

static void wout_in2_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->in2);
}
#define SPEC_wout_in2_r1_32 0
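
/*
 * Worked example for wout_r1_D32() above: with r1 = 2 and
 * o->out = 0x1111222233334444ull, store_reg32_i64(3, out) puts
 * 0x33334444 into the low half of r3, the shift then exposes
 * 0x11112222, and the second store puts that into the low half of r2.
 * SPEC_r1_even (checked in translate_one) guarantees that r1 really
 * names the even register of the pair.
 */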

/* ====================================================================== */
/* The "INput 1" generators.  These load the first operand to an insn.  */

static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1));
}
#define SPEC_in1_r1 0

static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_r1_o 0

static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32s 0

static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32u 0

static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
}
#define SPEC_in1_r1_sr32 0

static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1) + 1);
}
#define SPEC_in1_r1p1 SPEC_r1_even

static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32s SPEC_r1_even

static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32u SPEC_r1_even

static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}
#define SPEC_in1_r1_D32 SPEC_r1_even

static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r2));
}
#define SPEC_in1_r2 0

static void in1_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r2)], 32);
}
#define SPEC_in1_r2_sr32 0

static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r3));
}
#define SPEC_in1_r3 0

static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_r3_o 0

static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32s 0

static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32u 0

static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
}
#define SPEC_in1_r3_D32 SPEC_r3_even

static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(f, r1));
}
#define SPEC_in1_e1 0

static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_f1_o 0

static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[r1 + 2];
    o->g_out = o->g_out2 = true;
}
#define SPEC_in1_x1_o SPEC_r1_f128

static void in1_f3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_f3_o 0

static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
}
#define SPEC_in1_la1 0

static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in1_la2 0

static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_8u 0

static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16s 0

static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16u 0

static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32s 0

static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32u 0

static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_64 0
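
/*
 * Illustration: each in1_m1_* helper above is just address generation
 * plus a typed load.  Hand-expanding in1_m1_32s() for a
 * base+displacement operand gives:
 *
 *     o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
 *     o->in1 = tcg_temp_new_i64();
 *     tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
 *
 * addr1 is kept around as well, since the wout_m1_* writeback helpers
 * reuse the same effective address.
 */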

/* ====================================================================== */
/* The "INput 2" generators.  These load the second operand to an insn.  */

static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r1)];
    o->g_in2 = true;
}
#define SPEC_in2_r1_o 0

static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_16u 0

static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_32u 0

static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
}
#define SPEC_in2_r1_D32 SPEC_r1_even

static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r2));
}
#define SPEC_in2_r2 0

static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_r2_o 0

static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}
#define SPEC_in2_r2_nz 0

static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8s 0

static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8u 0

static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16s 0

static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16u 0

static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r3));
}
#define SPEC_in2_r3 0

static void in2_r3_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(f, r3)], 32);
}
#define SPEC_in2_r3_sr32 0

static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32s 0

static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32u 0

static void in2_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(f, r2)], 32);
}
#define SPEC_in2_r2_sr32 0

static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(f, r2));
}
#define SPEC_in2_e2 0

static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = fregs[get_field(f, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_f2_o 0

static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    o->in1 = fregs[r2];
    o->in2 = fregs[r2 + 2];
    o->g_in1 = o->g_in2 = true;
}
#define SPEC_in2_x2_o SPEC_r2_f128

static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = get_address(s, 0, get_field(f, r2), 0);
}
#define SPEC_in2_ra2 0

static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in2_a2 0

static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->base.pc_next + (int64_t)get_field(f, i2) * 2);
}
#define SPEC_in2_ri2 0

static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 31);
}
#define SPEC_in2_sh32 0

static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 63);
}
#define SPEC_in2_sh64 0

static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_8u 0

static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16s 0

static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16u 0

static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32s 0

static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32u 0

#ifndef CONFIG_USER_ONLY
static void in2_m2_32ua(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_in2_m2_32ua 0
#endif

static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_64 0

#ifndef CONFIG_USER_ONLY
static void in2_m2_64a(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEQ | MO_ALIGN);
}
#define SPEC_in2_m2_64a 0
#endif

static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_16u 0

static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32s 0

static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32u 0

static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_64 0

static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(get_field(f, i2));
}
#define SPEC_in2_i2 0

static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
}
#define SPEC_in2_i2_8u 0

static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
}
#define SPEC_in2_i2_16u 0

static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
}
#define SPEC_in2_i2_32u 0

static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0

static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_32u_shl 0

#ifndef CONFIG_USER_ONLY
static void in2_insn(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->fields->raw_insn);
}
#define SPEC_in2_insn 0
#endif
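
/*
 * Worked example for in2_ri2() above: relative-immediate operands count
 * halfwords.  For an insn at 0x10000 with i2 = 0x100, the operand is
 * 0x10000 + 0x100 * 2 = 0x10200; the in2_mri2_* variants then load
 * through that address, which is how the PC-relative "load relative
 * long" style insns are built.
 */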

/* ====================================================================== */

/* Find opc within the table of insns.  This is formulated as a switch
   statement so that (1) we get compile-time notice of cut-paste errors
   for duplicated opcodes, and (2) the compiler generates the binary
   search tree, rather than us having to post-process the table.  */

#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0)

#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0)

#define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \
    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL)

#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM,

enum DisasInsnEnum {
#include "insn-data.def"
};

#undef E
#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) {                   \
    .opc = OPC,                                                             \
    .flags = FL,                                                            \
    .fmt = FMT_##FT,                                                        \
    .fac = FAC_##FC,                                                        \
    .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
    .name = #NM,                                                            \
    .help_in1 = in1_##I1,                                                   \
    .help_in2 = in2_##I2,                                                   \
    .help_prep = prep_##P,                                                  \
    .help_wout = wout_##W,                                                  \
    .help_cout = cout_##CC,                                                 \
    .help_op = op_##OP,                                                     \
    .data = D                                                               \
},

/* Allow 0 to be used for NULL in the table below.  */
#define in1_0   NULL
#define in2_0   NULL
#define prep_0  NULL
#define wout_0  NULL
#define cout_0  NULL
#define op_0    NULL

#define SPEC_in1_0 0
#define SPEC_in2_0 0
#define SPEC_prep_0 0
#define SPEC_wout_0 0

/* Give smaller names to the various facilities.  */
#define FAC_Z           S390_FEAT_ZARCH
#define FAC_CASS        S390_FEAT_COMPARE_AND_SWAP_AND_STORE
#define FAC_DFP         S390_FEAT_DFP
#define FAC_DFPR        S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* DFP-rounding */
#define FAC_DO          S390_FEAT_STFLE_45 /* distinct-operands */
#define FAC_EE          S390_FEAT_EXECUTE_EXT
#define FAC_EI          S390_FEAT_EXTENDED_IMMEDIATE
#define FAC_FPE         S390_FEAT_FLOATING_POINT_EXT
#define FAC_FPSSH       S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPS-sign-handling */
#define FAC_FPRGR       S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPR-GR-transfer */
#define FAC_GIE         S390_FEAT_GENERAL_INSTRUCTIONS_EXT
#define FAC_HFP_MA      S390_FEAT_HFP_MADDSUB
#define FAC_HW          S390_FEAT_STFLE_45 /* high-word */
#define FAC_IEEEE_SIM   S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* IEEE-exception-simulation */
#define FAC_MIE         S390_FEAT_STFLE_49 /* misc-instruction-extensions */
#define FAC_LAT         S390_FEAT_STFLE_49 /* load-and-trap */
#define FAC_LOC         S390_FEAT_STFLE_45 /* load/store on condition 1 */
#define FAC_LOC2        S390_FEAT_STFLE_53 /* load/store on condition 2 */
#define FAC_LD          S390_FEAT_LONG_DISPLACEMENT
#define FAC_PC          S390_FEAT_STFLE_45 /* population count */
#define FAC_SCF         S390_FEAT_STORE_CLOCK_FAST
#define FAC_SFLE        S390_FEAT_STFLE
#define FAC_ILA         S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
#define FAC_MVCOS       S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
#define FAC_LPP         S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
#define FAC_DAT_ENH     S390_FEAT_DAT_ENH
#define FAC_E2          S390_FEAT_EXTENDED_TRANSLATION_2
#define FAC_EH          S390_FEAT_STFLE_49 /* execution-hint */
#define FAC_PPA         S390_FEAT_STFLE_49 /* processor-assist */
#define FAC_LZRB        S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
#define FAC_ETF3        S390_FEAT_EXTENDED_TRANSLATION_3
#define FAC_MSA         S390_FEAT_MSA /* message-security-assist facility */
#define FAC_MSA3        S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
#define FAC_MSA4        S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
#define FAC_MSA5        S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
#define FAC_ECT         S390_FEAT_EXTRACT_CPU_TIME
#define FAC_PCI         S390_FEAT_ZPCI /* z/PCI facility */
#define FAC_AIS         S390_FEAT_ADAPTER_INT_SUPPRESSION

static const DisasInsn insn_info[] = {
#include "insn-data.def"
};

#undef E
#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \
    case OPC: return &insn_info[insn_ ## NM];

static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}

#undef F
#undef E
#undef D
#undef C
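
/*
 * Illustration of the triple expansion above.  Assuming insn-data.def
 * carries the ADD register entry
 *
 *     C(0x1a00, AR, RR_a, Z, r1, r2_32s, new, r1_32, add, adds32)
 *
 * the first inclusion generates the enumerator insn_AR, the second a
 * DisasInsn initializer wiring .help_in1 = in1_r1, .help_in2 =
 * in2_r2_32s, .help_prep = prep_new, .help_wout = wout_r1_32,
 * .help_op = op_add and .help_cout = cout_adds32, and the third a
 * "case 0x1a00: return &insn_info[insn_AR];" arm inside lookup_opc().
 */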

/* Extract a field from the insn.  The INSN should be left-aligned in
   the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principles of Operation.  */

static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
{
    uint32_t r, m;

    if (f->size == 0) {
        return;
    }

    /* Zero extract the field from the insn.  */
    r = (insn << f->beg) >> (64 - f->size);

    /* Sign-extend, or un-swap the field as necessary.  */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        assert(f->size <= 32);
        m = 1u << (f->size - 1);
        r = (r ^ m) - m;
        break;
    case 2: /* dl+dh split, signed 20 bit. */
        r = ((int8_t)r << 12) | (r >> 8);
        break;
    default:
        abort();
    }

    /* Validate that the "compressed" encoding we selected above is valid.
       I.e. we haven't made two different original fields overlap.  */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;

    o->c[f->indexC] = r;
}
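
/*
 * Worked example: "ar %r1,%r2" encodes as 0x1a12, left-aligned as
 * insn = 0x1a12000000000000.  The 4-bit r1 field begins at big-endian
 * bit 8, so the extraction above computes
 *
 *     r = (insn << 8) >> (64 - 4) = 0x1
 *
 * For the 20-bit long displacements, DL (12 bits) precedes DH (8 bits)
 * in the encoding, so the raw extract arrives with DH in its low byte;
 * ((int8_t)r << 12) | (r >> 8) moves DH above DL, the int8_t cast
 * supplying the sign extension.
 */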

/* Lookup the insn at the current PC, extracting the operands into O and
   returning the info struct for the insn.  Returns NULL for invalid insn.  */

static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
                                     DisasFields *f)
{
    uint64_t insn, pc = s->base.pc_next;
    int op, op2, ilen;
    const DisasInsn *info;

    if (unlikely(s->ex_value)) {
        /* Drop the EX data now, so that it's clear on exception paths.  */
        TCGv_i64 zero = tcg_const_i64(0);
        tcg_gen_st_i64(zero, cpu_env, offsetof(CPUS390XState, ex_value));
        tcg_temp_free_i64(zero);

        /* Extract the values saved by EXECUTE.  */
        insn = s->ex_value & 0xffffffffffff0000ull;
        ilen = s->ex_value & 0xf;
        op = insn >> 56;
    } else {
        insn = ld_code2(env, pc);
        op = (insn >> 8) & 0xff;
        ilen = get_ilen(op);
        switch (ilen) {
        case 2:
            insn = insn << 48;
            break;
        case 4:
            insn = ld_code4(env, pc) << 32;
            break;
        case 6:
            insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
            break;
        default:
            g_assert_not_reached();
        }
    }
    s->pc_tmp = s->base.pc_next + ilen;
    s->ilen = ilen;

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0. */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE, IE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        op2 = (insn << 12) >> 60;
        break;
    case 0xc5: /* MII */
    case 0xc7: /* SMI */
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(f, 0, sizeof(*f));
    f->raw_insn = insn;
    f->op = op;
    f->op2 = op2;

    /* Lookup the instruction.  */
    info = lookup_opc(op << 8 | op2);

    /* If we found it, extract the operands.  */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(f, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
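
/*
 * Worked example: for STCK (opcode 0xb205, an S-format insn),
 * ld_code2() fetches 0xb205 and get_ilen() reports 4 bytes, so insn
 * becomes 0xb205xxxx00000000 after the 4-byte reload.  op = 0xb2
 * selects the second-byte group, giving op2 = (insn << 8) >> 56 = 0x05,
 * and lookup_opc(0xb205) locates the entry.  For the SS-style formats
 * listed with op2 = 0 the combined opcode is simply op << 8.
 */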

static bool is_afp_reg(int reg)
{
    return reg % 2 || reg > 6;
}

static bool is_fp_pair(int reg)
{
    /* 0,1,4,5,8,9,12,13: to exclude the others, check for single bit */
    return !(reg & 0x2);
}
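
/*
 * Illustration: 128-bit FP values occupy register pairs (f0,f2),
 * (f1,f3), (f4,f6), (f5,f7), ... so the valid first registers are
 * exactly those with bit 1 clear: is_fp_pair(4) -> !(4 & 2) -> true,
 * while is_fp_pair(2) -> false, f2 being the second half of the
 * (f0,f2) pair.  Likewise is_afp_reg() flags any register outside
 * {0, 2, 4, 6}, the only FPRs that exist without the
 * additional-floating-point facility.
 */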

static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    DisasJumpType ret = DISAS_NEXT;
    DisasFields f;
    DisasOps o;

    /* Search for the insn in the table.  */
    insn = extract_insn(env, s, &f);

    /* Not found means unimplemented/illegal opcode.  */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      f.op, f.op2);
        gen_illegal_opcode(s);
        return DISAS_NORETURN;
    }

#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 addr = tcg_const_i64(s->base.pc_next);
        gen_helper_per_ifetch(cpu_env, addr);
        tcg_temp_free_i64(addr);
    }
#endif

    /* process flags */
    if (insn->flags) {
        /* privileged instruction */
        if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) {
            gen_program_exception(s, PGM_PRIVILEGED);
            return DISAS_NORETURN;
        }

        /* if AFP is not enabled, instructions and registers are forbidden */
        if (!(s->base.tb->flags & FLAG_MASK_AFP)) {
            uint8_t dxc = 0;

            if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(&f, r1))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(&f, r2))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(&f, r3))) {
                dxc = 1;
            }
            if (insn->flags & IF_BFP) {
                dxc = 2;
            }
            if (insn->flags & IF_DFP) {
                dxc = 3;
            }
            if (dxc) {
                gen_data_exception(dxc);
                return DISAS_NORETURN;
            }
        }
    }

    /* Check for insn specification exceptions.  */
    if (insn->spec) {
        if ((insn->spec & SPEC_r1_even && get_field(&f, r1) & 1) ||
            (insn->spec & SPEC_r2_even && get_field(&f, r2) & 1) ||
            (insn->spec & SPEC_r3_even && get_field(&f, r3) & 1) ||
            (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(&f, r1))) ||
            (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(&f, r2)))) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
    }

    /* Set up the structures we use to communicate with the helpers. */
    s->insn = insn;
    s->fields = &f;
    o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
    o.out = NULL;
    o.out2 = NULL;
    o.in1 = NULL;
    o.in2 = NULL;
    o.addr1 = NULL;

    /* Implement the instruction.  */
    if (insn->help_in1) {
        insn->help_in1(s, &f, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &f, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &f, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (insn->help_wout) {
        insn->help_wout(s, &f, &o);
    }
    if (insn->help_cout) {
        insn->help_cout(s, &o);
    }

    /* Free any temporaries created by the helpers.  */
    if (o.out && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (o.out2 && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (o.in1 && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (o.in2 && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (o.addr1) {
        tcg_temp_free_i64(o.addr1);
    }

#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        /* An exception might be triggered, save PSW if not already done.  */
        if (ret == DISAS_NEXT || ret == DISAS_PC_STALE) {
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
        }

        /* Call the helper to check for a possible PER exception.  */
        gen_helper_per_check_exception(cpu_env);
    }
#endif

    /* Advance to the next instruction.  */
    s->base.pc_next = s->pc_tmp;
    return ret;
}
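
/*
 * Illustration of the dispatch above for one insn.  Assuming the AR
 * table entry named earlier, translating "ar %r1,%r2" runs, in order:
 *
 *     in1_r1:      o.in1 = load_reg(1);
 *     in2_r2_32s:  o.in2 = sign-extended low 32 bits of r2;
 *     prep_new:    o.out = tcg_temp_new_i64();
 *     op_add:      tcg_gen_add_i64(o.out, o.in1, o.in2);
 *     wout_r1_32:  low 32 bits of o.out written back to r1;
 *     cout_adds32: operands recorded for lazy CC_OP_ADD_32.
 *
 * None of the g_* flags were set, so the cleanup block then frees all
 * three temporaries.
 */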

static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* 31-bit mode */
    if (!(dc->base.tb->flags & FLAG_MASK_64)) {
        dc->base.pc_first &= 0x7fffffff;
        dc->base.pc_next = dc->base.pc_first;
    }

    dc->cc_op = CC_OP_DYNAMIC;
    dc->ex_value = dc->base.tb->cs_base;
    dc->do_debug = dc->base.singlestep_enabled;
}

static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}

static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(dc->base.pc_next, dc->cc_op);
}

static bool s390x_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
                                      const CPUBreakpoint *bp)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    dc->base.is_jmp = DISAS_PC_STALE;
    dc->do_debug = true;
    /* The address covered by the breakpoint must be included in
       [tb->pc, tb->pc + tb->size) in order for it to be properly
       cleared -- thus we increment the PC here so that the logic
       setting tb->size does the right thing.  */
    dc->base.pc_next += 2;
    return true;
}

static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    CPUS390XState *env = cs->env_ptr;
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    dc->base.is_jmp = translate_one(env, dc);
    if (dc->base.is_jmp == DISAS_NEXT) {
        uint64_t page_start;

        page_start = dc->base.pc_first & TARGET_PAGE_MASK;
        if (dc->base.pc_next - page_start >= TARGET_PAGE_SIZE || dc->ex_value) {
            dc->base.is_jmp = DISAS_TOO_MANY;
        }
    }
}
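
/*
 * Worked example: with pc_first = 0x10ffa and 4K pages, page_start is
 * 0x10000; after translating a 6-byte insn, pc_next = 0x11000 and
 * 0x11000 - 0x10000 >= TARGET_PAGE_SIZE, so the TB is closed with
 * DISAS_TOO_MANY.  TBs therefore never start an insn beyond their
 * first guest page, and an EXECUTE target always gets a one-insn TB.
 */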

static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    switch (dc->base.is_jmp) {
    case DISAS_GOTO_TB:
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
    case DISAS_PC_STALE:
    case DISAS_PC_STALE_NOCHAIN:
        update_psw_addr(dc);
        /* FALLTHRU */
    case DISAS_PC_UPDATED:
        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
           cc op type is in env */
        update_cc_op(dc);
        /* FALLTHRU */
    case DISAS_PC_CC_UPDATED:
        /* Exit the TB, either by raising a debug exception or by return.  */
        if (dc->do_debug) {
            gen_exception(EXCP_DEBUG);
        } else if (use_exit_tb(dc) ||
                   dc->base.is_jmp == DISAS_PC_STALE_NOCHAIN) {
            tcg_gen_exit_tb(NULL, 0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        break;
    default:
        g_assert_not_reached();
    }
}

static void s390x_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (unlikely(dc->ex_value)) {
        /* ??? Unfortunately log_target_disas can't use host memory.  */
        qemu_log("IN: EXECUTE %016" PRIx64, dc->ex_value);
    } else {
        qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
        log_target_disas(cs, dc->base.pc_first, dc->base.tb->size);
    }
}

static const TranslatorOps s390x_tr_ops = {
    .init_disas_context = s390x_tr_init_disas_context,
    .tb_start           = s390x_tr_tb_start,
    .insn_start         = s390x_tr_insn_start,
    .breakpoint_check   = s390x_tr_breakpoint_check,
    .translate_insn     = s390x_tr_translate_insn,
    .tb_stop            = s390x_tr_tb_stop,
    .disas_log          = s390x_tr_disas_log,
};

void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
{
    DisasContext dc;

    translator_loop(&s390x_tr_ops, &dc.base, cs, tb);
}

void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    int cc_op = data[1];
    env->psw.addr = data[0];
    if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
        env->cc_op = cc_op;
    }
}