qapi: drop the sentinel in enum array
[qemu/armbru.git] / target / s390x / translate.c
blob4b0db7b7bd27a5fc83f472b8d0f85317ca9838b2
1 /*
2 * S/390 translation
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
/* #define DEBUG_INLINE_BRANCHES */
#define S390X_DEBUG_DISAS
/* #define S390X_DEBUG_DISAS_VERBOSE */

/* Verbose disassembly tracing: compiled out to a no-op unless
   S390X_DEBUG_DISAS_VERBOSE is defined above. */
#ifdef S390X_DEBUG_DISAS_VERBOSE
#  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
31 #include "qemu/osdep.h"
32 #include "cpu.h"
33 #include "internal.h"
34 #include "disas/disas.h"
35 #include "exec/exec-all.h"
36 #include "tcg-op.h"
37 #include "qemu/log.h"
38 #include "qemu/host-utils.h"
39 #include "exec/cpu_ldst.h"
/* global register indexes */
/* TCG handle for the CPU environment pointer; initialized once in
   s390x_translate_init() and used by every generated helper call. */
static TCGv_env cpu_env;
44 #include "exec/gen-icount.h"
45 #include "exec/helper-proto.h"
46 #include "exec/helper-gen.h"
48 #include "trace-tcg.h"
49 #include "exec/log.h"
/* Information that (most) every instruction needs to manipulate. */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;

/* Per-translation-block decoder state. */
struct DisasContext {
    struct TranslationBlock *tb;   /* TB currently being translated */
    const DisasInsn *insn;         /* decode-table entry for current insn */
    DisasFields *fields;           /* decoded operand fields of current insn */
    uint64_t ex_value;             /* NOTE(review): presumably the insn image
                                      supplied by EXECUTE; confirm at use sites */
    uint64_t pc, next_pc;          /* guest address of current / next insn */
    uint32_t ilen;                 /* length in bytes of current insn */
    enum cc_op cc_op;              /* how the condition code is to be computed */
    bool singlestep_enabled;
};

/* Information carried about a condition to be evaluated. */
typedef struct {
    TCGCond cond:8;
    bool is_64;                    /* true: use u.s64, else u.s32 */
    bool g1;                       /* a is a TCG global; do not free it */
    bool g2;                       /* b is a TCG global; do not free it */
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;
/* Disassembly exit status used by the main translation loop. */
#define DISAS_EXCP 4

/* Per-cc_op counters of branches that could / could not be emitted as
   inline TCG comparisons; only built for debugging. */
#ifdef DEBUG_INLINE_BRANCHES
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif
87 static uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
89 if (!(s->tb->flags & FLAG_MASK_64)) {
90 if (s->tb->flags & FLAG_MASK_32) {
91 return pc | 0x80000000;
94 return pc;
/* TCG globals mirroring fields of CPUS390XState; created once in
   s390x_translate_init(). */
static TCGv_i64 psw_addr;          /* psw.addr */
static TCGv_i64 psw_mask;          /* psw.mask */
static TCGv_i64 gbea;              /* PER breaking-event address */

static TCGv_i32 cc_op;             /* current cc computation method / value */
static TCGv_i64 cc_src;            /* cc operand 1 */
static TCGv_i64 cc_dst;            /* cc operand 2 */
static TCGv_i64 cc_vr;             /* cc result value */

/* Names must outlive the TCG context, hence static storage. */
static char cpu_reg_names[32][4];  /* "r0".."r15" then "f0".."f15" */
static TCGv_i64 regs[16];          /* general registers */
static TCGv_i64 fregs[16];         /* floating point registers */
/* One-time creation of all TCG globals used by the translator.
   Must run before any code generation. */
void s390x_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    tcg_ctx.tcg_env = cpu_env;
    psw_addr = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");
    gbea = tcg_global_mem_new_i64(cpu_env,
                                  offsetof(CPUS390XState, gbea),
                                  "gbea");

    cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    /* General registers r0-r15. */
    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }

    /* FP registers f0-f15 alias the first doubleword of each vector reg. */
    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
        fregs[i] = tcg_global_mem_new(cpu_env,
                                      offsetof(CPUS390XState, vregs[i][0].d),
                                      cpu_reg_names[i + 16]);
    }
}
/* Return a fresh temp holding a copy of general register REG.
   Caller owns (and must free) the returned temp. */
static TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}

/* Return a fresh temp with the 32-bit float value of FP register REG
   (its high word) shifted down into the low 32 bits. */
static TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_shri_i64(r, fregs[reg], 32);
    return r;
}
/* Store the full 64-bit value V into general register REG. */
static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

/* Store the full 64-bit value V into FP register REG. */
static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(fregs[reg], v);
}

static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

/* Write V into the high 32 bits of register REG, keeping the low half. */
static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

/* Store a 32-bit float value into the high word of FP register REG. */
static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
}

/* Fetch the low half of a 128-bit helper result from env->retxl. */
static void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}
/* Synchronize the architectural PSW address with the translator's PC. */
static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->pc);
}
/* Record a taken branch for PER (Program Event Recording).
   Always updates the breaking-event address; additionally raises a PER
   branch event when the TB runs with PER enabled.  TO_NEXT selects the
   fall-through address (s->next_pc) as the branch target, otherwise the
   already-updated psw_addr is used. */
static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i64(gbea, s->pc);

    if (s->tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_const_i64(s->next_pc) : psw_addr;
        gen_helper_per_branch(cpu_env, gbea, next_pc);
        if (to_next) {
            /* Only free the temp we allocated; psw_addr is a global. */
            tcg_temp_free_i64(next_pc);
        }
    }
#endif
}
/* As per_branch, but for a conditional branch: the PER bookkeeping is
   only performed when COND(ARG1, ARG2) holds.  With PER enabled this is
   done under a branch; otherwise gbea is updated with a movcond so no
   helper call is needed. */
static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        /* Skip the event when the condition does NOT hold. */
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->pc);
        gen_helper_per_branch(cpu_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        TCGv_i64 pc = tcg_const_i64(s->pc);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
        tcg_temp_free_i64(pc);
    }
#endif
}
/* Record the current PC as the PER breaking-event address. */
static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->pc);
}
/* Flush the translator's notion of how cc is computed into env->cc_op.
   DYNAMIC/STATIC already live in the env, so nothing to do for those. */
static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}
/* Before an operation that may fault, make the guest state (PSW address
   and cc method) consistent so the exception sees correct values. */
static void potential_page_fault(DisasContext *s)
{
    update_psw_addr(s);
    update_cc_op(s);
}
/* Fetch 2 bytes of instruction text at PC, zero-extended to 64 bits. */
static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)cpu_lduw_code(env, pc);
}

/* Fetch 4 bytes of instruction text at PC, zero-extended to 64 bits.
   The intermediate uint32_t cast avoids sign extension of ldl's int. */
static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
}
/* Map the PSW address-space-control bits of this TB to a softmmu
   memory index (0=primary, 1=secondary, 2=home). */
static int get_mem_index(DisasContext *s)
{
    switch (s->tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        return 0;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        return 1;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        return 2;
    default:
        /* Access-register mode is not handled here. */
        tcg_abort();
        break;
    }
}
/* Emit code raising CPU exception EXCP; does not return to the TB. */
static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}
/* Emit code raising program exception CODE at the current insn.
   Stores the exception code and instruction length into the env,
   synchronizes PSW address and cc, then raises EXCP_PGM. */
static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was. */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    tmp = tcg_const_i32(s->ilen);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* update the psw */
    update_psw_addr(s);

    /* Save off cc. */
    update_cc_op(s);

    /* Trigger exception. */
    gen_exception(EXCP_PGM);
}
/* Raise an operation (illegal opcode) program exception. */
static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}

/* Raise a data program exception with DXC = 0xff recorded in the FPC. */
static inline void gen_trap(DisasContext *s)
{
    TCGv_i32 t;

    /* Set DXC to 0xff. */
    t = tcg_temp_new_i32();
    tcg_gen_ld_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_ori_i32(t, t, 0xff00);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_temp_free_i32(t);

    gen_program_exception(s, PGM_DATA);
}
#ifndef CONFIG_USER_ONLY
/* Raise a privileged-operation exception when executing in the
   problem state (user mode). */
static void check_privileged(DisasContext *s)
{
    if (s->tb->flags & FLAG_MASK_PSTATE) {
        gen_program_exception(s, PGM_PRIVILEGED);
    }
}
#endif
/* Compute the effective address base(B2) + index(X2) + displacement D2
   into a fresh temp, applying 31-bit address masking when the TB is not
   in 64-bit mode.  Register number 0 means "no register" for B2/X2. */
static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    bool need_31 = !(s->tb->flags & FLAG_MASK_64);

    /* Note that d2 is limited to 20 bits, signed.  If we crop negative
       displacements early we create larger immedate addends. */

    /* Note that addi optimizes the imm==0 case. */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        tcg_gen_addi_i64(tmp, tmp, d2);
    } else if (b2) {
        tcg_gen_addi_i64(tmp, regs[b2], d2);
    } else if (x2) {
        tcg_gen_addi_i64(tmp, regs[x2], d2);
    } else {
        if (need_31) {
            /* Constant address: mask at translate time, skip the AND. */
            d2 &= 0x7fffffff;
            need_31 = false;
        }
        tcg_gen_movi_i64(tmp, d2);
    }
    if (need_31) {
        tcg_gen_andi_i64(tmp, tmp, 0x7fffffff);
    }

    return tmp;
}
/* True if cc_src/cc_dst/cc_vr currently hold live data that computing
   the cc would consume.  Relies on the enum layout: values <= 3 are the
   CC_OP_CONST* cases, which carry the cc in the opcode itself. */
static inline bool live_cc_data(DisasContext *s)
{
    return (s->cc_op != CC_OP_DYNAMIC
            && s->cc_op != CC_OP_STATIC
            && s->cc_op > 3);
}
/* Set the cc to the constant VAL (0-3); discards any pending cc data. */
static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}

/* Defer a one-operand cc computation: cc = OP(dst). */
static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

/* Defer a two-operand cc computation: cc = OP(src, dst). */
static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

/* Defer a three-operand cc computation: cc = OP(src, dst, vr). */
static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}
/* cc from a 64-bit integer result (zero / non-zero). */
static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

/* cc from a 32-bit float result. */
static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
}

/* cc from a 64-bit float result. */
static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
}

/* cc from a 128-bit float result (high/low halves). */
static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
}

/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}
/* calculates cc into cc_op */
/* Materialize any deferred cc computation by calling the calc_cc
   helper with the appropriate number of arguments, then mark the cc
   as STATIC.  The first switch sets up helper arguments (a constant
   cc_op for known ops, a dummy for unused operand slots); the second
   dispatches on argument count. */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op;
    TCGv_i64 dummy;

    TCGV_UNUSED_I32(local_cc_op);
    TCGV_UNUSED_I64(dummy);
    switch (s->cc_op) {
    default:
        dummy = tcg_const_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        local_cc_op = tcg_const_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        /* No helper call needed, or cc_op already lives in the env. */
        break;
    }

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA_32:
    case CC_OP_SLA_64:
    case CC_OP_NZ_F128:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    if (!TCGV_IS_UNUSED_I32(local_cc_op)) {
        tcg_temp_free_i32(local_cc_op);
    }
    if (!TCGV_IS_UNUSED_I64(dummy)) {
        tcg_temp_free_i64(dummy);
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
/* True when the TB must end with a full exit to the main loop
   (single-stepping, pending I/O instruction, or PER active). */
static bool use_exit_tb(DisasContext *s)
{
    return (s->singlestep_enabled ||
            (s->tb->cflags & CF_LAST_IO) ||
            (s->tb->flags & FLAG_MASK_PER));
}

/* True when a direct goto_tb to DEST is permitted.  In softmmu, DEST
   must lie on the same guest page as the TB start or the current insn
   so the chained TB is invalidated together with this one. */
static bool use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (unlikely(use_exit_tb(s))) {
        return false;
    }
#ifndef CONFIG_USER_ONLY
    return (dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK) ||
           (dest & TARGET_PAGE_MASK) == (s->pc & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
/* Debug statistics: count a branch that could NOT be inlined. */
static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

/* Debug statistics: count a branch that WAS inlined. */
static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}
/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.  Indexed by the 4-bit branch
   mask; bit 3 = CC0(EQ), bit 2 = CC1(LT), bit 1 = CC2(GT), bit 0 unused
   (pairs of identical entries cover the don't-care low bit). */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,      /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,         /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,         /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,         /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,         /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,         /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,         /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,     /* EQ | LT | GT | x */
};

/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible. */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,      /*    |    | x | x */
    TCG_COND_NEVER,  TCG_COND_NEVER,
    TCG_COND_NE,     TCG_COND_NE,         /*    | NE | x | x */
    TCG_COND_NE,     TCG_COND_NE,
    TCG_COND_EQ,     TCG_COND_EQ,         /* EQ |    | x | x */
    TCG_COND_EQ,     TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,     /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.
   Phase 1 maps (cc_op, mask) to a TCG condition when the cc method is
   known at translate time; otherwise it falls back to do_dynamic,
   computing the cc into cc_op and comparing against that.  Phase 2
   loads the comparison operands, setting g1/g2 when they alias TCG
   globals that must not be freed by free_compare(). */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    /* Branch-always / branch-never need no operands at all. */
    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op. */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU_32:
    case CC_OP_ADDU_64:
        switch (mask) {
        case 8 | 2: /* vr == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* vr != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* no carry -> vr >= src */
            cond = TCG_COND_GEU;
            break;
        case 2 | 1: /* carry -> vr < src */
            cond = TCG_COND_LTU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_SUBU_32:
    case CC_OP_SUBU_64:
        /* Note that CC=0 is impossible; treat it as dont-care. */
        switch (mask & 7) {
        case 2: /* zero -> op1 == op2 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* !zero -> op1 != op2 */
            cond = TCG_COND_NE;
            break;
        case 4: /* borrow (!carry) -> op1 < op2 */
            cond = TCG_COND_LTU;
            break;
        case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
            cond = TCG_COND_GEU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
    case CC_OP_SUBU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
    case CC_OP_SUBU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        /* Compare (src & dst) against zero. */
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_vr);
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            tcg_gen_movi_i32(c->u.s32.b, 0);
        } else {
            tcg_gen_extrl_i64_i32(c->u.s32.b, cc_src);
        }
        break;

    case CC_OP_ADDU_64:
        c->u.s64.a = cc_vr;
        c->g1 = true;
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            c->u.s64.b = tcg_const_i64(0);
        } else {
            c->u.s64.b = cc_src;
            c->g2 = true;
        }
        break;

    case CC_OP_STATIC:
        /* The cc value itself is in cc_op; pick a comparison against it
           (or against (8 >> cc) & mask in the general case). */
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask.  */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
904 static void free_compare(DisasCompare *c)
906 if (!c->g1) {
907 if (c->is_64) {
908 tcg_temp_free_i64(c->u.s64.a);
909 } else {
910 tcg_temp_free_i32(c->u.s32.a);
913 if (!c->g2) {
914 if (c->is_64) {
915 tcg_temp_free_i64(c->u.s64.b);
916 } else {
917 tcg_temp_free_i32(c->u.s32.b);
/* ====================================================================== */
/* Define the insn format enumeration.  Each F<n> macro expands one line
   of insn-format.def into an FMT_* enumerator; the field arguments are
   ignored here and reused below to build format_info[]. */
#define F0(N)                     FMT_##N,
#define F1(N, X1)                 F0(N)
#define F2(N, X1, X2)             F0(N)
#define F3(N, X1, X2, X3)         F0(N)
#define F4(N, X1, X2, X3, X4)     F0(N)
#define F5(N, X1, X2, X3, X4, X5) F0(N)

typedef enum {
#include "insn-format.def"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
/* Define a structure to hold the decoded fields.  We'll store each inside
   an array indexed by an enum.  In order to conserve memory, we'll arrange
   for fields that do not exist at the same time to overlap, thus the "C"
   for compact.  For checking purposes there is an "O" for original index
   as well that will be applied to availability bitmaps.  */

/* Original (non-overlapping) field indices; used as bit positions in
   DisasFields.presentO. */
enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5
};
/* Compact field indices: mutually-exclusive fields share a slot in
   DisasFields.c[], so only NUM_C_FIELD entries are needed. */
enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,

    FLD_C_d2 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,

    NUM_C_FIELD = 7
};
/* Decoded fields of a single instruction. */
struct DisasFields {
    uint64_t raw_insn;     /* raw instruction bytes, left-aligned */
    unsigned op:8;         /* primary opcode */
    unsigned op2:8;        /* secondary opcode */
    unsigned presentC:16;  /* bitmap of populated compact slots */
    unsigned int presentO; /* bitmap over enum DisasFieldIndexO */
    int c[NUM_C_FIELD];    /* field values, compact-indexed */
};

/* This is the way fields are to be accessed out of DisasFields.  */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)

/* True if original field C was present in the decoded insn. */
static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
{
    return (f->presentO >> c) & 1;
}

/* Fetch field value; asserts the field is actually present. */
static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(f, o));
    return f->c[c];
}
/* Describe the layout of each field in each format.  */
typedef struct DisasField {
    unsigned int beg:8;        /* bit offset of field within the insn */
    unsigned int size:8;       /* field width in bits */
    unsigned int type:2;       /* extraction type (see the R/I/BD macros) */
    unsigned int indexC:6;     /* slot in DisasFields.c[] */
    enum DisasFieldIndexO indexO:8;  /* original field index */
} DisasField;

/* Up to NUM_C_FIELD field descriptors per instruction format. */
typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;
/* Field-layout helper macros: each expands to DisasField initializers
   for a register, mask, base+displacement, indexed, or immediate field.
   Type code 0 = unsigned, 1 = signed immediate, 2 = long displacement
   (NOTE(review): type semantics inferred from usage; confirm against
   the field-extraction code). */
#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }

/* Re-expand insn-format.def, this time into the per-format field table. */
#define F0(N)                     { { } },
#define F1(N, X1)                 { { X1 } },
#define F2(N, X1, X2)             { { X1, X2 } },
#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.def"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef R
#undef M
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L
/* Generally, we'll extract operands into this structures, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */
typedef struct {
    bool g_out, g_out2, g_in1, g_in2;  /* which fields alias TCG globals */
    TCGv_i64 out, out2, in1, in2;      /* operand values */
    TCGv_i64 addr1;                    /* computed effective address */
} DisasOps;

/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */

/* Operand-constraint bits (even register pair, f128 register pair). */
#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16
/* Return values from translate_one, indicating the state of the TB.  */
typedef enum {
    /* Continue the TB.  */
    NO_EXIT,
    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,
    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,
    /* We have updated the PC and CC values.  */
    EXIT_PC_CC_UPDATED,
    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,
    /* We are exiting the TB to the main loop.  */
    EXIT_PC_STALE_NOCHAIN,
    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;
/* One decode-table entry: opcode, format, facility requirement,
   operand constraints, and the helper pipeline that implements the
   instruction (load inputs -> prep -> op -> write outputs -> cc). */
struct DisasInsn {
    unsigned opc:16;       /* opcode bytes */
    DisasFormat fmt:8;     /* instruction format */
    unsigned fac:8;        /* required facility */
    unsigned spec:8;       /* SPEC_* operand constraints */

    const char *name;      /* mnemonic, for logging */

    void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);
    ExitStatus (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;         /* per-insn constant passed via s->insn */
};
/* ====================================================================== */
/* Miscellaneous helpers, used by several operations.  */

/* Load in2 with a shift count: either the displacement itself (no base
   register) or base+displacement, masked to the valid count range. */
static void help_l2_shift(DisasContext *s, DisasFields *f,
                          DisasOps *o, int mask)
{
    int b2 = get_field(f, b2);
    int d2 = get_field(f, d2);

    if (b2 == 0) {
        o->in2 = tcg_const_i64(d2 & mask);
    } else {
        o->in2 = get_address(s, 0, b2, d2);
        tcg_gen_andi_i64(o->in2, o->in2, mask);
    }
}
/* Emit an unconditional direct branch to DEST.  A branch to the next
   insn is a fall-through; an in-page target uses goto_tb chaining;
   otherwise the PSW address is set and the TB exits. */
static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->next_pc) {
        per_branch(s, true);
        return NO_EXIT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        per_breaking_event(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb((uintptr_t)s->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return EXIT_PC_UPDATED;
    }
}
/* Emit a (possibly conditional) branch.  IS_IMM selects a relative
   target (s->pc + 2*IMM) over an indirect target in CDEST.  Three code
   shapes are used, best first: both edges chained with goto_tb, only
   the fall-through chained, or a movcond on psw_addr with a full exit.
   Consumes C (free_compare) on all paths. */
static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
                              bool is_imm, int imm, TCGv_i64 cdest)
{
    ExitStatus ret;
    uint64_t dest = s->pc + 2 * imm;
    TCGLabel *lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = NO_EXIT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->next_pc) {
            /* Branch to next.  */
            per_branch(s, true);
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (TCGV_IS_UNUSED_I64(cdest)) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = EXIT_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->next_pc)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((uintptr_t)s->tb + 0);

            /* Branch taken.  */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb((uintptr_t)s->tb + 1);

            ret = EXIT_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((uintptr_t)s->tb + 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = EXIT_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_const_i64(s->next_pc);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            /* Widen the 32-bit comparison result so a 64-bit movcond
               can select the new PSW address. */
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = EXIT_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}
1310 /* ====================================================================== */
1311 /* The operations. These perform the bulk of the work for any insn,
1312 usually after the operands have been loaded and output initialized. */
/* LOAD POSITIVE: out = |in2|, computed branch-free with movcond.  */
static ExitStatus op_abs(DisasContext *s, DisasOps *o)
{
    TCGv_i64 z, n;
    z = tcg_const_i64(0);
    n = tcg_temp_new_i64();
    tcg_gen_neg_i64(n, o->in2);
    /* out = (in2 < 0) ? -in2 : in2 */
    tcg_gen_movcond_i64(TCG_COND_LT, o->out, o->in2, z, n, o->in2);
    tcg_temp_free_i64(n);
    tcg_temp_free_i64(z);
    return NO_EXIT;
}
/* Float absolute value: keep only the magnitude bits of the float32
   held in the low half of the 64-bit register (clears bit 31 and the
   unused high half).  */
static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return NO_EXIT;
}

/* Double absolute value: clear the sign bit (bit 63).  */
static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return NO_EXIT;
}

/* 128-bit float absolute value: clear the sign bit in the high
   doubleword (in1), pass the low doubleword (in2) through.  */
static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
/* Integer add: out = in1 + in2.  CC, if any, is set by the insn table.  */
static ExitStatus op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* ADD WITH CARRY: out = in1 + in2 + carry-from-current-CC.  */
static ExitStatus op_addc(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 carry;

    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* The carry flag is the msb of CC, therefore the branch mask that would
       create that comparison is 3.  Feeding the generated comparison to
       setcond produces the carry flag that we desire.  */
    disas_jcc(s, &cmp, 3);
    carry = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        /* 32-bit comparison: widen the 0/1 result to 64 bits.  */
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(carry, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_add_i64(o->out, o->out, carry);
    tcg_temp_free_i64(carry);
    return NO_EXIT;
}
/* Float add (short BFP): helper does the arithmetic and FP exceptions.  */
static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* Float add (long BFP).  */
static ExitStatus op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* Float add (extended BFP): 128-bit operands are passed as
   high:low doubleword pairs; the low result comes back via
   return_low128.  */
static ExitStatus op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
/* Bitwise AND: out = in1 & in2.  */
static ExitStatus op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* AND immediate against one field of the register (NIHH et al).
   s->insn->data packs the field position: low byte = shift,
   high bits = field size in bits.  Bits outside the field are
   forced to 1 in the mask so they pass through unchanged.  */
static ExitStatus op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
/* Branch-and-save (register form): store the link information in r1,
   then branch to in2 unless in2 is unset (BASR with R2=0 -> no branch).  */
static ExitStatus op_bas(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    if (!TCGV_IS_UNUSED_I64(o->in2)) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return EXIT_PC_UPDATED;
    } else {
        return NO_EXIT;
    }
}

/* Branch-and-save with immediate displacement (BRAS-style): save the
   link info, then take a direct branch to pc + 2*i2.  */
static ExitStatus op_basi(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
}
/* BRANCH ON CONDITION (BC/BCR/BRC): evaluate the condition from mask m1
   and hand off to the common branch helper.  BCR with R2=0 never
   branches; masks 14/15 there act as serialization points.  */
static ExitStatus op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s->fields, m1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    /* BCR with R2 = 0 causes no branching */
    if (have_field(s->fields, r2) && get_field(s->fields, r2) == 0) {
        if (m1 == 14) {
            /* Perform serialization */
            /* FIXME: check for fast-BCR-serialization facility */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        if (m1 == 15) {
            /* Perform serialization */
            /* FIXME: perform checkpoint-synchronisation */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        return NO_EXIT;
    }

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* BRANCH ON COUNT (32-bit): decrement the low word of r1 and branch
   if the new value is non-zero.  */
static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    /* Write back only the low 32 bits of r1.  */
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* BRANCH ON COUNT HIGH: decrement the high word of r1 and branch if
   non-zero.  Always immediate-form (relative branch only).  */
static ExitStatus op_bcth(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int imm = get_field(s->fields, i2);
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    /* Extract, decrement, and write back the high 32 bits of r1.  */
    tcg_gen_shri_i64(t, regs[r1], 32);
    tcg_gen_subi_i64(t, t, 1);
    store_reg32h_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, 1, imm, o->in2);
}
/* BRANCH ON COUNT (64-bit): decrement r1 in place and branch if
   non-zero.  The comparison uses the global register directly (g1).  */
static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* BRANCH ON INDEX (32-bit, BXH/BXLE): r1 += r3, then compare the new
   32-bit value against the comparand in r3|1 (the odd register of the
   pair, or r3 itself if r3 is odd).  insn->data selects low-or-equal
   (LE) vs high (GT).  */
static ExitStatus op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* BRANCH ON INDEX (64-bit, BXHG/BXLEG).  If r1 aliases the comparand
   register (r1 == r3|1), snapshot the comparand before r1 is updated;
   otherwise the global register can be used directly.  */
static ExitStatus op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    if (r1 == (r3 | 1)) {
        /* Copy the comparand: the add below would clobber it.  */
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* COMPARE AND BRANCH family: compare in1 against in2 with the condition
   selected by mask m3 (insn->data != 0 selects the unsigned variants),
   then branch either to a relative immediate (i4) or to the computed
   address b4+d4.  */
static ExitStatus op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s->fields, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s->fields, i4);
    if (is_imm) {
        imm = get_field(s->fields, i4);
    } else {
        imm = 0;
        o->out = get_address(s, 0, get_field(s->fields, b4),
                             get_field(s->fields, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}
/* Float compare (short BFP): helper sets the condition code.  */
static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* Float compare (long BFP).  */
static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* Float compare (extended BFP): 128-bit operands as doubleword pairs.  */
static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* Float <-> integer conversions.  Naming follows the instruction
   mnemonics: c{f,g}{e,d,x}b = float->int (f=32-bit, g=64-bit result;
   e/d/x = short/long/extended source), cl... = unsigned variants,
   c{e,d,x}{g,lg}b = int->float.  m3 carries the rounding mode; the
   float->int forms also set the condition code from the source.  */

/* CONVERT TO FIXED (short BFP -> int32).  */
static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

/* CONVERT TO FIXED (long BFP -> int32).  */
static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

/* CONVERT TO FIXED (extended BFP -> int32).  */
static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

/* CONVERT TO FIXED (short BFP -> int64).  */
static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

/* CONVERT TO FIXED (long BFP -> int64).  */
static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

/* CONVERT TO FIXED (extended BFP -> int64).  */
static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

/* CONVERT TO LOGICAL (short BFP -> uint32).  */
static ExitStatus op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

/* CONVERT TO LOGICAL (long BFP -> uint32).  */
static ExitStatus op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

/* CONVERT TO LOGICAL (extended BFP -> uint32).  */
static ExitStatus op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

/* CONVERT TO LOGICAL (short BFP -> uint64).  */
static ExitStatus op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

/* CONVERT TO LOGICAL (long BFP -> uint64).  */
static ExitStatus op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

/* CONVERT TO LOGICAL (extended BFP -> uint64).  */
static ExitStatus op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

/* CONVERT FROM FIXED (int64 -> short BFP).  No CC change.  */
static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cegb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* CONVERT FROM FIXED (int64 -> long BFP).  */
static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* CONVERT FROM FIXED (int64 -> extended BFP).  */
static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}

/* CONVERT FROM LOGICAL (uint64 -> short BFP).  */
static ExitStatus op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_celgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* CONVERT FROM LOGICAL (uint64 -> long BFP).  */
static ExitStatus op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* CONVERT FROM LOGICAL (uint64 -> extended BFP).  */
static ExitStatus op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}
/* CHECKSUM: the helper returns the processed length in 'len' and the
   checksum via the low-128 mechanism; afterwards advance the address
   register r2 and decrement the length register r2+1 accordingly.  */
static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    return_low128(o->out);

    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);

    return NO_EXIT;
}
/* COMPARE LOGICAL (character): for power-of-two lengths up to 8 bytes,
   inline the two loads and let the CC machinery do an unsigned compare;
   otherwise fall back to the byte-loop helper.  Note l1 encodes
   length - 1.  */
static ExitStatus op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s->fields, l1);
    TCGv_i32 vl;

    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        /* Odd length: the helper computes the CC itself.  */
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return NO_EXIT;
    }
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return NO_EXIT;
}
/* COMPARE LOGICAL LONG (CLCL): operands are even/odd register pairs,
   so odd register numbers raise a specification exception.  */
static ExitStatus op_clcl(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i32 t1, t2;

    /* r1 and r2 must be even. */
    if (r1 & 1 || r2 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t2 = tcg_const_i32(r2);
    gen_helper_clcl(cc_op, cpu_env, t1, t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    set_cc_static(s);
    return NO_EXIT;
}

/* COMPARE LOGICAL LONG EXTENDED (CLCLE): like CLCL but the padding
   byte comes from the second operand address (in2).  */
static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even. */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return NO_EXIT;
}

/* COMPARE LOGICAL LONG UNICODE (CLCLU): two-byte-unit variant.  */
static ExitStatus op_clclu(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even. */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return NO_EXIT;
}
/* COMPARE LOGICAL CHARACTERS UNDER MASK: truncate r1 (in1) to 32 bits
   and let the helper compare the bytes selected by m3 against memory
   at in2, setting the CC.  */
static ExitStatus op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, o->in1);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
/* COMPARE LOGICAL STRING: regs[0] holds the terminator byte; the helper
   returns the final first-operand address directly and the final
   second-operand address via the low-128 mechanism.  */
static ExitStatus op_clst(DisasContext *s, DisasOps *o)
{
    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
/* Copy sign: out = magnitude of in2 with the sign bit taken from in1.  */
static ExitStatus op_cps(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t = tcg_temp_new_i64();
    tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);
    return NO_EXIT;
}
/* COMPARE AND SWAP: an aligned atomic cmpxchg at b2+d2; the CC is 0 on
   match (swap performed) and 1 on mismatch.  */
static ExitStatus op_cs(DisasContext *s, DisasOps *o)
{
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    TCGv_i64 addr, cc;

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value).  */

    addr = get_address(s, 0, b2, d2);
    tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
                               get_mem_index(s), s->insn->data | MO_ALIGN);
    tcg_temp_free_i64(addr);

    /* Are the memory and expected values (un)equal?  Note that this setcond
       produces the output CC value, thus the NE sense of the test.  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
    tcg_gen_extrl_i64_i32(cc_op, cc);
    tcg_temp_free_i64(cc);
    set_cc_static(s);

    return NO_EXIT;
}
/* COMPARE DOUBLE AND SWAP (128-bit, CDSG): the 16-byte cmpxchg is done
   entirely in the helper, which also updates the register pairs and CC.  */
static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    TCGv_i64 addr;
    TCGv_i32 t_r1, t_r3;

    /* Note that R1:R1+1 = expected value and R3:R3+1 = new value.  */
    addr = get_address(s, 0, b2, d2);
    t_r1 = tcg_const_i32(r1);
    t_r3 = tcg_const_i32(r3);
    gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
    tcg_temp_free_i64(addr);
    tcg_temp_free_i32(t_r1);
    tcg_temp_free_i32(t_r3);

    set_cc_static(s);
    return NO_EXIT;
}
/* COMPARE AND SWAP AND STORE: fully implemented in the helper, which
   takes the parameter-list address (in1) and second-operand address
   (in2) and sets the CC.  */
static ExitStatus op_csst(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t_r3 = tcg_const_i32(r3);

    gen_helper_csst(cc_op, cpu_env, t_r3, o->in1, o->in2);
    tcg_temp_free_i32(t_r3);

    set_cc_static(s);
    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
/* COMPARE AND SWAP AND PURGE (privileged): atomic cmpxchg at the
   (size-aligned) address in in2; on a successful compare with the low
   bit of R2 set, purge the TLB on all CPUs.  insn->data carries the
   access size as a TCGMemOp.  */
static ExitStatus op_csp(DisasContext *s, DisasOps *o)
{
    TCGMemOp mop = s->insn->data;
    TCGv_i64 addr, old, cc;
    TCGLabel *lab = gen_new_label();

    /* Note that in1 = R1 (zero-extended expected value),
       out = R1 (original reg), out2 = R1+1 (new value).  */

    check_privileged(s);
    addr = tcg_temp_new_i64();
    old = tcg_temp_new_i64();
    /* Mask the address down to the operand-size alignment.  */
    tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
    tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
                               get_mem_index(s), mop | MO_ALIGN);
    tcg_temp_free_i64(addr);

    /* Are the memory and expected values (un)equal?  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
    tcg_gen_extrl_i64_i32(cc_op, cc);

    /* Write back the output now, so that it happens before the
       following branch, so that we don't need local temps.  */
    if ((mop & MO_SIZE) == MO_32) {
        tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
    } else {
        tcg_gen_mov_i64(o->out, old);
    }
    tcg_temp_free_i64(old);

    /* If the comparison was equal, and the LSB of R2 was set,
       then we need to flush the TLB (for all cpus).  */
    tcg_gen_xori_i64(cc, cc, 1);
    tcg_gen_and_i64(cc, cc, o->in2);
    tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
    tcg_temp_free_i64(cc);

    gen_helper_purge(cpu_env);
    gen_set_label(lab);

    return NO_EXIT;
}
#endif
/* CONVERT TO DECIMAL: the helper converts the 32-bit value from in1 to
   packed decimal, which is then stored as 8 bytes at address in2.  */
static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i32 t2 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t2, o->in1);
    gen_helper_cvd(t1, t2);
    tcg_temp_free_i32(t2);
    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
/* COMPARE AND TRAP: trap when the m3-selected comparison of in1 vs in2
   holds; the generated branch skips the trap on the inverted condition.
   insn->data != 0 selects the unsigned comparison variants.  */
static ExitStatus op_ct(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    TCGLabel *lab = gen_new_label();
    TCGCond c;

    c = tcg_invert_cond(ltgt_cond[m3]);
    if (s->insn->data) {
        c = tcg_unsigned_cond(c);
    }
    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);

    /* Trap.  */
    gen_trap(s);

    gen_set_label(lab);
    return NO_EXIT;
}
/* Unicode conversion family (CU12/CU14/CU21/CU24/CU41/CU42): the
   source/destination encoding pair is encoded in insn->data.  R1 and
   R2 are even/odd register pairs; the m3 well-formedness check flag is
   honored only with the ETF3 enhancement facility.  */
static ExitStatus op_cuXX(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i32 tr1, tr2, chk;

    /* R1 and R2 must both be even.  */
    if ((r1 | r2) & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }
    if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
        m3 = 0;
    }

    tr1 = tcg_const_i32(r1);
    tr2 = tcg_const_i32(r2);
    chk = tcg_const_i32(m3);

    switch (s->insn->data) {
    case 12:
        gen_helper_cu12(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 14:
        gen_helper_cu14(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 21:
        gen_helper_cu21(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 24:
        gen_helper_cu24(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 41:
        gen_helper_cu41(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 42:
        gen_helper_cu42(cc_op, cpu_env, tr1, tr2, chk);
        break;
    default:
        g_assert_not_reached();
    }

    tcg_temp_free_i32(tr1);
    tcg_temp_free_i32(tr2);
    tcg_temp_free_i32(chk);
    set_cc_static(s);
    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
/* DIAGNOSE (privileged hypercall interface): the PSW and CC must be
   up to date before entering the helper, since the helper may inspect
   or alter machine state.  */
static ExitStatus op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    TCGv_i32 func_code = tcg_const_i32(get_field(s->fields, i2));

    check_privileged(s);
    update_psw_addr(s);
    gen_op_calc_cc(s);

    gen_helper_diag(cpu_env, r1, r3, func_code);

    tcg_temp_free_i32(func_code);
    tcg_temp_free_i32(r3);
    tcg_temp_free_i32(r1);
    return NO_EXIT;
}
#endif
/* Integer division helpers.  Each helper returns the remainder
   directly and the quotient via the low-128 mechanism (so out2 gets
   the remainder and out the quotient after return_low128).  */

/* Signed 64/32 -> 32 divide.  */
static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* Unsigned 64/32 -> 32 divide.  */
static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* Signed 64/64 -> 64 divide.  */
static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
{
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* Unsigned 128/64 -> 64 divide: the 128-bit dividend is passed as the
   out:out2 register pair.  */
static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
{
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
/* Float divide (short BFP).  */
static ExitStatus op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* Float divide (long BFP).  */
static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* Float divide (extended BFP): 128-bit operands as doubleword pairs,
   low result via return_low128.  */
static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
/* EXTRACT ACCESS REGISTER: load the 32-bit access register r2.  */
static ExitStatus op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
    return NO_EXIT;
}

/* EXTRACT CPU ATTRIBUTE: we model no cache topology, so return -1.  */
static ExitStatus op_ecag(DisasContext *s, DisasOps *o)
{
    /* No cache information provided.  */
    tcg_gen_movi_i64(o->out, -1);
    return NO_EXIT;
}

/* EXTRACT FPC: load the floating-point control register.  */
static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return NO_EXIT;
}
/* EXTRACT PSW: store the high half of the PSW mask into r1 and, if
   r2 != 0, the low half into r2.  */
static ExitStatus op_epsw(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i64 t = tcg_temp_new_i64();

    /* Note the "subsequently" in the PoO, which implies a defined result
       if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
    tcg_gen_shri_i64(t, psw_mask, 32);
    store_reg32_i64(r1, t);
    if (r2 != 0) {
        store_reg32_i64(r2, psw_mask);
    }

    tcg_temp_free_i64(t);
    return NO_EXIT;
}
/* EXECUTE: run the single instruction at address in2, with its byte 1
   modified by r1 (r1 == 0 means no modification).  The helper sets up
   s->ex_value for the next translation round; nesting is forbidden.  */
static ExitStatus op_ex(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    TCGv_i32 ilen;
    TCGv_i64 v1;

    /* Nested EXECUTE is not allowed.  */
    if (unlikely(s->ex_value)) {
        gen_program_exception(s, PGM_EXECUTE);
        return EXIT_NORETURN;
    }

    update_psw_addr(s);
    update_cc_op(s);

    if (r1 == 0) {
        v1 = tcg_const_i64(0);
    } else {
        v1 = regs[r1];
    }

    ilen = tcg_const_i32(s->ilen);
    gen_helper_ex(cpu_env, ilen, v1, o->in2);
    tcg_temp_free_i32(ilen);

    if (r1 == 0) {
        tcg_temp_free_i64(v1);
    }

    return EXIT_PC_CC_UPDATED;
}
/* LOAD FP INTEGER (short BFP): round to integer per mode m3.  */
static ExitStatus op_fieb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fieb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* LOAD FP INTEGER (long BFP).  */
static ExitStatus op_fidb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fidb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* LOAD FP INTEGER (extended BFP): low result via return_low128.  */
static ExitStatus op_fixb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m3);
    return_low128(o->out2);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
/* FIND LEFTMOST ONE: R1 = number of leading zeros (64 if input is 0),
   R1+1 = input with the found bit cleared.  */
static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation.  */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64.  */
    tcg_gen_clzi_i64(o->out, o->in2, 64);

    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing.  */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return NO_EXIT;
}
2298 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
2300 int m3 = get_field(s->fields, m3);
2301 int pos, len, base = s->insn->data;
2302 TCGv_i64 tmp = tcg_temp_new_i64();
2303 uint64_t ccm;
2305 switch (m3) {
2306 case 0xf:
2307 /* Effectively a 32-bit load. */
2308 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2309 len = 32;
2310 goto one_insert;
2312 case 0xc:
2313 case 0x6:
2314 case 0x3:
2315 /* Effectively a 16-bit load. */
2316 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2317 len = 16;
2318 goto one_insert;
2320 case 0x8:
2321 case 0x4:
2322 case 0x2:
2323 case 0x1:
2324 /* Effectively an 8-bit load. */
2325 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2326 len = 8;
2327 goto one_insert;
2329 one_insert:
2330 pos = base + ctz32(m3) * 8;
2331 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2332 ccm = ((1ull << len) - 1) << pos;
2333 break;
2335 default:
2336 /* This is going to be a sequence of loads and inserts. */
2337 pos = base + 32 - 8;
2338 ccm = 0;
2339 while (m3) {
2340 if (m3 & 0x8) {
2341 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2342 tcg_gen_addi_i64(o->in2, o->in2, 1);
2343 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2344 ccm |= 0xff << pos;
2346 m3 = (m3 << 1) & 0xf;
2347 pos -= 8;
2349 break;
2352 tcg_gen_movi_i64(tmp, ccm);
2353 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2354 tcg_temp_free_i64(tmp);
2355 return NO_EXIT;
/* Insert immediate into a register field: out = in1 with in2 deposited
   at the bit position/width packed into s->insn->data (low byte =
   shift, high bits = size).  */
static ExitStatus op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return NO_EXIT;
}
/* INSERT PROGRAM MASK: build bits 24-31 of r1 from the program mask
   (psw_mask bits extracted via the shifts) and the condition code.  */
static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1;

    gen_op_calc_cc(s);
    /* Clear the destination byte first.  */
    tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);

    /* Program mask: isolate 4 bits of psw_mask into bits 24-27.  */
    t1 = tcg_temp_new_i64();
    tcg_gen_shli_i64(t1, psw_mask, 20);
    tcg_gen_shri_i64(t1, t1, 36);
    tcg_gen_or_i64(o->out, o->out, t1);

    /* Condition code into bits 28-29 (cc value shifted to bit 28).  */
    tcg_gen_extu_i32_i64(t1, cc_op);
    tcg_gen_shli_i64(t1, t1, 28);
    tcg_gen_or_i64(o->out, o->out, t1);
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
/* INVALIDATE DAT TABLE ENTRY (privileged): the m4 local-clearing option
   is only meaningful with the local-TLB-clearing facility.  */
static ExitStatus op_idte(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m4;

    check_privileged(s);
    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
        m4 = tcg_const_i32(get_field(s->fields, m4));
    } else {
        m4 = tcg_const_i32(0);
    }
    gen_helper_idte(cpu_env, o->in1, o->in2, m4);
    tcg_temp_free_i32(m4);
    return NO_EXIT;
}

/* INVALIDATE PAGE TABLE ENTRY (privileged): same m4 handling as IDTE.  */
static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m4;

    check_privileged(s);
    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
        m4 = tcg_const_i32(get_field(s->fields, m4));
    } else {
        m4 = tcg_const_i32(0);
    }
    gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
    tcg_temp_free_i32(m4);
    return NO_EXIT;
}

/* INSERT STORAGE KEY EXTENDED (privileged).  */
static ExitStatus op_iske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_iske(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
#endif
/* Float compare-and-signal (short BFP): like compare, but the helper
   also raises the signaling semantics; sets CC.  */
static ExitStatus op_keb(DisasContext *s, DisasOps *o)
{
    gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* Float compare-and-signal (long BFP).  */
static ExitStatus op_kdb(DisasContext *s, DisasOps *o)
{
    gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* Float compare-and-signal (extended BFP).  */
static ExitStatus op_kxb(DisasContext *s, DisasOps *o)
{
    gen_helper_kxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* LOAD AND ADD (atomic fetch-add): in2 receives the original memory
   value (the architected result for r1); the sum is recomputed locally
   so the CC can be set from it.  */
static ExitStatus op_laa(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory;
       the atomic op updates memory with in1 + old.  */
    tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the addition for setting CC.  */
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* LOAD AND AND (atomic fetch-and); CC from the recomputed AND.  */
static ExitStatus op_lan(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory.  */
    tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* LOAD AND OR (atomic fetch-or); CC from the recomputed OR.  */
static ExitStatus op_lao(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory.  */
    tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* LOAD AND EXCLUSIVE OR (atomic fetch-xor); CC from the recomputed XOR.  */
static ExitStatus op_lax(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory.  */
    tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* LDEB: Load Lengthened, short BFP -> long BFP. */
2489 static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
2491 gen_helper_ldeb(o->out, cpu_env, o->in2);
2492 return NO_EXIT;
/* LEDB: Load Rounded, long BFP -> short BFP. */
2495 static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
2497 gen_helper_ledb(o->out, cpu_env, o->in2);
2498 return NO_EXIT;
/* LDXB: Load Rounded, extended BFP -> long BFP (both input halves passed). */
2501 static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
2503 gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
2504 return NO_EXIT;
/* LEXB: Load Rounded, extended BFP -> short BFP. */
2507 static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
2509 gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
2510 return NO_EXIT;
/* LXDB: Load Lengthened, long BFP -> extended BFP (128-bit result). */
2513 static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
2515 gen_helper_lxdb(o->out, cpu_env, o->in2);
2516 return_low128(o->out2);
2517 return NO_EXIT;
/* LXEB: Load Lengthened, short BFP -> extended BFP (128-bit result). */
2520 static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
2522 gen_helper_lxeb(o->out, cpu_env, o->in2);
2523 return_low128(o->out2);
2524 return NO_EXIT;
/* LLGT: Load Logical Thirty One Bits — clear bits 0-32 of the source. */
2527 static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
2529 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2530 return NO_EXIT;
/* Generic memory loads of various widths/signedness; in2 holds the address. */
2533 static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
2535 tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
2536 return NO_EXIT;
2539 static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
2541 tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
2542 return NO_EXIT;
2545 static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
2547 tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
2548 return NO_EXIT;
2551 static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
2553 tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
2554 return NO_EXIT;
2557 static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
2559 tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
2560 return NO_EXIT;
2563 static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
2565 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2566 return NO_EXIT;
2569 static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
2571 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2572 return NO_EXIT;
/* LAT: Load And Trap — trap (special-operation style exception via gen_trap)
   if the loaded 32-bit value is zero. */
2575 static ExitStatus op_lat(DisasContext *s, DisasOps *o)
2577 TCGLabel *lab = gen_new_label();
2578 store_reg32_i64(get_field(s->fields, r1), o->in2);
2579 /* The value is stored even in case of trap. */
2580 tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2581 gen_trap(s);
2582 gen_set_label(lab);
2583 return NO_EXIT;
/* LGAT: Load And Trap, 64-bit load variant. */
2586 static ExitStatus op_lgat(DisasContext *s, DisasOps *o)
2588 TCGLabel *lab = gen_new_label();
2589 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2590 /* The value is stored even in case of trap. */
2591 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2592 gen_trap(s);
2593 gen_set_label(lab);
2594 return NO_EXIT;
/* LFHAT: Load High And Trap — target is the high half of r1. */
2597 static ExitStatus op_lfhat(DisasContext *s, DisasOps *o)
2599 TCGLabel *lab = gen_new_label();
2600 store_reg32h_i64(get_field(s->fields, r1), o->in2);
2601 /* The value is stored even in case of trap. */
2602 tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2603 gen_trap(s);
2604 gen_set_label(lab);
2605 return NO_EXIT;
/* LLGFAT: Load Logical And Trap, 32-bit zero-extended load. */
2608 static ExitStatus op_llgfat(DisasContext *s, DisasOps *o)
2610 TCGLabel *lab = gen_new_label();
2611 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2612 /* The value is stored even in case of trap. */
2613 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2614 gen_trap(s);
2615 gen_set_label(lab);
2616 return NO_EXIT;
/* LLGTAT: Load Logical Thirty One Bits And Trap. */
2619 static ExitStatus op_llgtat(DisasContext *s, DisasOps *o)
2621 TCGLabel *lab = gen_new_label();
2622 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2623 /* The value is stored even in case of trap. */
2624 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2625 gen_trap(s);
2626 gen_set_label(lab);
2627 return NO_EXIT;
/* LOC/LOCG etc.: Load On Condition — select in2 or in1 based on the
   condition mask in m3, using movcond so no branch is generated. */
2630 static ExitStatus op_loc(DisasContext *s, DisasOps *o)
2632 DisasCompare c;
2634 disas_jcc(s, &c, get_field(s->fields, m3));
2636 if (c.is_64) {
2637 tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
2638 o->in2, o->in1);
2639 free_compare(&c);
2640 } else {
2641 TCGv_i32 t32 = tcg_temp_new_i32();
2642 TCGv_i64 t, z;
2644 tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
2645 free_compare(&c);
/* Widen the 32-bit condition result so a 64-bit movcond can be used. */
2647 t = tcg_temp_new_i64();
2648 tcg_gen_extu_i32_i64(t, t32);
2649 tcg_temp_free_i32(t32);
2651 z = tcg_const_i64(0);
2652 tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
2653 tcg_temp_free_i64(t);
2654 tcg_temp_free_i64(z);
2657 return NO_EXIT;
2660 #ifndef CONFIG_USER_ONLY
/* LCTL: Load Control registers r1..r3 from memory (privileged). */
2661 static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
2663 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2664 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2665 check_privileged(s);
2666 gen_helper_lctl(cpu_env, r1, o->in2, r3);
2667 tcg_temp_free_i32(r1);
2668 tcg_temp_free_i32(r3);
2669 return NO_EXIT;
/* LCTLG: 64-bit form of Load Control (privileged). */
2672 static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
2674 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2675 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2676 check_privileged(s);
2677 gen_helper_lctlg(cpu_env, r1, o->in2, r3);
2678 tcg_temp_free_i32(r1);
2679 tcg_temp_free_i32(r3);
2680 return NO_EXIT;
/* LRA: Load Real Address (privileged); helper sets cc. */
2683 static ExitStatus op_lra(DisasContext *s, DisasOps *o)
2685 check_privileged(s);
2686 gen_helper_lra(o->out, cpu_env, o->in2);
2687 set_cc_static(s);
2688 return NO_EXIT;
/* LPP: Load Program Parameter (privileged) — store in2 into env->pp. */
2691 static ExitStatus op_lpp(DisasContext *s, DisasOps *o)
2693 check_privileged(s);
2695 tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
2696 return NO_EXIT;
/* LPSW: Load (short, ESA-format) PSW from memory; ends the TB. */
2699 static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
2701 TCGv_i64 t1, t2;
2703 check_privileged(s);
2704 per_breaking_event(s);
2706 t1 = tcg_temp_new_i64();
2707 t2 = tcg_temp_new_i64();
2708 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2709 tcg_gen_addi_i64(o->in2, o->in2, 4);
2710 tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
2711 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2712 tcg_gen_shli_i64(t1, t1, 32);
2713 gen_helper_load_psw(cpu_env, t1, t2);
2714 tcg_temp_free_i64(t1);
2715 tcg_temp_free_i64(t2);
2716 return EXIT_NORETURN;
/* LPSWE: Load (extended, z-format) 16-byte PSW from memory; ends the TB. */
2719 static ExitStatus op_lpswe(DisasContext *s, DisasOps *o)
2721 TCGv_i64 t1, t2;
2723 check_privileged(s);
2724 per_breaking_event(s);
2726 t1 = tcg_temp_new_i64();
2727 t2 = tcg_temp_new_i64();
2728 tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
2729 tcg_gen_addi_i64(o->in2, o->in2, 8);
2730 tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
2731 gen_helper_load_psw(cpu_env, t1, t2);
2732 tcg_temp_free_i64(t1);
2733 tcg_temp_free_i64(t2);
2734 return EXIT_NORETURN;
2736 #endif
/* LAM: Load Access Multiple — access registers r1..r3 from memory. */
2738 static ExitStatus op_lam(DisasContext *s, DisasOps *o)
2740 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2741 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2742 gen_helper_lam(cpu_env, r1, o->in2, r3);
2743 tcg_temp_free_i32(r1);
2744 tcg_temp_free_i32(r3);
2745 return NO_EXIT;
/* LM: Load Multiple (32-bit) — registers r1..r3 (mod 16) from memory.
   The first and last loads come first so any page fault happens before
   any register has been irrevocably overwritten by the fill loop. */
2748 static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
2750 int r1 = get_field(s->fields, r1);
2751 int r3 = get_field(s->fields, r3);
2752 TCGv_i64 t1, t2;
2754 /* Only one register to read. */
2755 t1 = tcg_temp_new_i64();
2756 if (unlikely(r1 == r3)) {
2757 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2758 store_reg32_i64(r1, t1);
2759 tcg_temp_free(t1);
2760 return NO_EXIT;
2763 /* First load the values of the first and last registers to trigger
2764 possible page faults. */
2765 t2 = tcg_temp_new_i64();
2766 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2767 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
2768 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
2769 store_reg32_i64(r1, t1);
2770 store_reg32_i64(r3, t2);
2772 /* Only two registers to read. */
2773 if (((r1 + 1) & 15) == r3) {
2774 tcg_temp_free(t2);
2775 tcg_temp_free(t1);
2776 return NO_EXIT;
2779 /* Then load the remaining registers. Page fault can't occur. */
2780 r3 = (r3 - 1) & 15;
2781 tcg_gen_movi_i64(t2, 4);
2782 while (r1 != r3) {
2783 r1 = (r1 + 1) & 15;
2784 tcg_gen_add_i64(o->in2, o->in2, t2);
2785 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2786 store_reg32_i64(r1, t1);
2788 tcg_temp_free(t2);
2789 tcg_temp_free(t1);
2791 return NO_EXIT;
/* LMH: Load Multiple High — same structure as op_lm32, but the loaded
   words go to the high halves of the 64-bit registers. */
2794 static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
2796 int r1 = get_field(s->fields, r1);
2797 int r3 = get_field(s->fields, r3);
2798 TCGv_i64 t1, t2;
2800 /* Only one register to read. */
2801 t1 = tcg_temp_new_i64();
2802 if (unlikely(r1 == r3)) {
2803 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2804 store_reg32h_i64(r1, t1);
2805 tcg_temp_free(t1);
2806 return NO_EXIT;
2809 /* First load the values of the first and last registers to trigger
2810 possible page faults. */
2811 t2 = tcg_temp_new_i64();
2812 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2813 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
2814 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
2815 store_reg32h_i64(r1, t1);
2816 store_reg32h_i64(r3, t2);
2818 /* Only two registers to read. */
2819 if (((r1 + 1) & 15) == r3) {
2820 tcg_temp_free(t2);
2821 tcg_temp_free(t1);
2822 return NO_EXIT;
2825 /* Then load the remaining registers. Page fault can't occur. */
2826 r3 = (r3 - 1) & 15;
2827 tcg_gen_movi_i64(t2, 4);
2828 while (r1 != r3) {
2829 r1 = (r1 + 1) & 15;
2830 tcg_gen_add_i64(o->in2, o->in2, t2);
2831 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2832 store_reg32h_i64(r1, t1);
2834 tcg_temp_free(t2);
2835 tcg_temp_free(t1);
2837 return NO_EXIT;
/* LMG: Load Multiple (64-bit) — registers r1..r3 (mod 16) from memory.
   First/last loads are issued first so page faults precede clobbering. */
2840 static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
2842 int r1 = get_field(s->fields, r1);
2843 int r3 = get_field(s->fields, r3);
2844 TCGv_i64 t1, t2;
2846 /* Only one register to read. */
2847 if (unlikely(r1 == r3)) {
2848 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2849 return NO_EXIT;
2852 /* First load the values of the first and last registers to trigger
2853 possible page faults. */
2854 t1 = tcg_temp_new_i64();
2855 t2 = tcg_temp_new_i64();
2856 tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
2857 tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
2858 tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
2859 tcg_gen_mov_i64(regs[r1], t1);
2860 tcg_temp_free(t2);
2862 /* Only two registers to read. */
2863 if (((r1 + 1) & 15) == r3) {
2864 tcg_temp_free(t1);
2865 return NO_EXIT;
2868 /* Then load the remaining registers. Page fault can't occur. */
2869 r3 = (r3 - 1) & 15;
2870 tcg_gen_movi_i64(t1, 8);
2871 while (r1 != r3) {
2872 r1 = (r1 + 1) & 15;
2873 tcg_gen_add_i64(o->in2, o->in2, t1);
2874 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2876 tcg_temp_free(t1);
2878 return NO_EXIT;
/* LPD/LPDG: Load Pair Disjoint — two interlocked loads from distinct
   addresses.  Only implemented serially; in parallel mode we punt to
   the exclusive-execution slow path. */
2881 static ExitStatus op_lpd(DisasContext *s, DisasOps *o)
2883 TCGv_i64 a1, a2;
2884 TCGMemOp mop = s->insn->data;
2886 /* In a parallel context, stop the world and single step. */
2887 if (parallel_cpus) {
2888 potential_page_fault(s);
2889 gen_exception(EXCP_ATOMIC);
2890 return EXIT_NORETURN;
2893 /* In a serial context, perform the two loads ... */
2894 a1 = get_address(s, 0, get_field(s->fields, b1), get_field(s->fields, d1));
2895 a2 = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
2896 tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
2897 tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
2898 tcg_temp_free_i64(a1);
2899 tcg_temp_free_i64(a2);
2901 /* ... and indicate that we performed them while interlocked. */
2902 gen_op_movi_cc(s, 0);
2903 return NO_EXIT;
/* LPQ: Load Pair from Quadword (128-bit atomic load via helper). */
2906 static ExitStatus op_lpq(DisasContext *s, DisasOps *o)
2908 gen_helper_lpq(o->out, cpu_env, o->in2);
2909 return_low128(o->out2);
2910 return NO_EXIT;
2913 #ifndef CONFIG_USER_ONLY
/* LURA: Load Using Real Address, 32-bit (privileged). */
2914 static ExitStatus op_lura(DisasContext *s, DisasOps *o)
2916 check_privileged(s);
2917 potential_page_fault(s);
2918 gen_helper_lura(o->out, cpu_env, o->in2);
2919 return NO_EXIT;
/* LURAG: Load Using Real Address, 64-bit (privileged). */
2922 static ExitStatus op_lurag(DisasContext *s, DisasOps *o)
2924 check_privileged(s);
2925 potential_page_fault(s);
2926 gen_helper_lurag(o->out, cpu_env, o->in2);
2927 return NO_EXIT;
2929 #endif
/* LZRB-family: load with rightmost byte zeroed. */
2931 static ExitStatus op_lzrb(DisasContext *s, DisasOps *o)
2933 tcg_gen_andi_i64(o->out, o->in2, -256);
2934 return NO_EXIT;
/* Generic register move: steal in2 as the output to avoid a copy.
   in2 is marked unused so the common cleanup code doesn't free it twice. */
2937 static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
2939 o->out = o->in2;
2940 o->g_out = o->g_in2;
2941 TCGV_UNUSED_I64(o->in2);
2942 o->g_in2 = false;
2943 return NO_EXIT;
/* Move with ALET update (LAE/LAEY): also set access register 1
   according to the current address-space-control mode. */
2946 static ExitStatus op_mov2e(DisasContext *s, DisasOps *o)
2948 int b2 = get_field(s->fields, b2);
2949 TCGv ar1 = tcg_temp_new_i64();
2951 o->out = o->in2;
2952 o->g_out = o->g_in2;
2953 TCGV_UNUSED_I64(o->in2);
2954 o->g_in2 = false;
2956 switch (s->tb->flags & FLAG_MASK_ASC) {
2957 case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
2958 tcg_gen_movi_i64(ar1, 0);
2959 break;
2960 case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
2961 tcg_gen_movi_i64(ar1, 1);
2962 break;
2963 case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
2964 if (b2) {
2965 tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
2966 } else {
2967 tcg_gen_movi_i64(ar1, 0);
2969 break;
2970 case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
2971 tcg_gen_movi_i64(ar1, 2);
2972 break;
2975 tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
2976 tcg_temp_free_i64(ar1);
2978 return NO_EXIT;
/* 128-bit register move: steal both input halves as the outputs. */
2981 static ExitStatus op_movx(DisasContext *s, DisasOps *o)
2983 o->out = o->in1;
2984 o->out2 = o->in2;
2985 o->g_out = o->g_in1;
2986 o->g_out2 = o->g_in2;
2987 TCGV_UNUSED_I64(o->in1);
2988 TCGV_UNUSED_I64(o->in2);
2989 o->g_in1 = o->g_in2 = false;
2990 return NO_EXIT;
/* MVC: Move Characters, length l1+1 bytes, via helper. */
2993 static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
2995 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2996 gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
2997 tcg_temp_free_i32(l);
2998 return NO_EXIT;
/* MVCIN: Move Inverse (bytes copied in descending address order). */
3001 static ExitStatus op_mvcin(DisasContext *s, DisasOps *o)
3003 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3004 gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
3005 tcg_temp_free_i32(l);
3006 return NO_EXIT;
/* MVCL: Move Long.  r1/r2 designate even-odd register pairs, so odd
   register numbers raise a specification exception. */
3009 static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
3011 int r1 = get_field(s->fields, r1);
3012 int r2 = get_field(s->fields, r2);
3013 TCGv_i32 t1, t2;
3015 /* r1 and r2 must be even. */
3016 if (r1 & 1 || r2 & 1) {
3017 gen_program_exception(s, PGM_SPECIFICATION);
3018 return EXIT_NORETURN;
3021 t1 = tcg_const_i32(r1);
3022 t2 = tcg_const_i32(r2);
3023 gen_helper_mvcl(cc_op, cpu_env, t1, t2);
3024 tcg_temp_free_i32(t1);
3025 tcg_temp_free_i32(t2);
3026 set_cc_static(s);
3027 return NO_EXIT;
/* MVCLE: Move Long Extended; same even-pair requirement for r1/r3. */
3030 static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
3032 int r1 = get_field(s->fields, r1);
3033 int r3 = get_field(s->fields, r3);
3034 TCGv_i32 t1, t3;
3036 /* r1 and r3 must be even. */
3037 if (r1 & 1 || r3 & 1) {
3038 gen_program_exception(s, PGM_SPECIFICATION);
3039 return EXIT_NORETURN;
3042 t1 = tcg_const_i32(r1);
3043 t3 = tcg_const_i32(r3);
3044 gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
3045 tcg_temp_free_i32(t1);
3046 tcg_temp_free_i32(t3);
3047 set_cc_static(s);
3048 return NO_EXIT;
/* MVCLU: Move Long Unicode; same even-pair requirement for r1/r3. */
3051 static ExitStatus op_mvclu(DisasContext *s, DisasOps *o)
3053 int r1 = get_field(s->fields, r1);
3054 int r3 = get_field(s->fields, r3);
3055 TCGv_i32 t1, t3;
3057 /* r1 and r3 must be even. */
3058 if (r1 & 1 || r3 & 1) {
3059 gen_program_exception(s, PGM_SPECIFICATION);
3060 return EXIT_NORETURN;
3063 t1 = tcg_const_i32(r1);
3064 t3 = tcg_const_i32(r3);
3065 gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
3066 tcg_temp_free_i32(t1);
3067 tcg_temp_free_i32(t3);
3068 set_cc_static(s);
3069 return NO_EXIT;
/* MVCOS: Move With Optional Specifications; key/length come from r3. */
3072 static ExitStatus op_mvcos(DisasContext *s, DisasOps *o)
3074 int r3 = get_field(s->fields, r3);
3075 gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);
3076 set_cc_static(s);
3077 return NO_EXIT;
3080 #ifndef CONFIG_USER_ONLY
/* MVCP: Move To Primary (privileged); l1 field holds the key register. */
3081 static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
3083 int r1 = get_field(s->fields, l1);
3084 check_privileged(s);
3085 gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
3086 set_cc_static(s);
3087 return NO_EXIT;
/* MVCS: Move To Secondary (privileged); l1 field holds the key register. */
3090 static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
3092 int r1 = get_field(s->fields, l1);
3093 check_privileged(s);
3094 gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
3095 set_cc_static(s);
3096 return NO_EXIT;
3098 #endif
/* MVN: Move Numerics (low nibbles only). */
3100 static ExitStatus op_mvn(DisasContext *s, DisasOps *o)
3102 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3103 gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
3104 tcg_temp_free_i32(l);
3105 return NO_EXIT;
/* MVO: Move With Offset (packed-decimal nibble shuffle). */
3108 static ExitStatus op_mvo(DisasContext *s, DisasOps *o)
3110 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3111 gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
3112 tcg_temp_free_i32(l);
3113 return NO_EXIT;
/* MVPG: Move Page; r0 carries the function control bits. */
3116 static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
3118 gen_helper_mvpg(cc_op, cpu_env, regs[0], o->in1, o->in2);
3119 set_cc_static(s);
3120 return NO_EXIT;
/* MVST: Move String; terminator byte is in r0. */
3123 static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
3125 gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
3126 set_cc_static(s);
3127 return_low128(o->in2);
3128 return NO_EXIT;
/* MVZ: Move Zones (high nibbles only). */
3131 static ExitStatus op_mvz(DisasContext *s, DisasOps *o)
3133 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3134 gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
3135 tcg_temp_free_i32(l);
3136 return NO_EXIT;
/* Integer multiply, low 64 bits. */
3139 static ExitStatus op_mul(DisasContext *s, DisasOps *o)
3141 tcg_gen_mul_i64(o->out, o->in1, o->in2);
3142 return NO_EXIT;
/* Unsigned 64x64 -> 128-bit multiply (MLG and friends). */
3145 static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
3147 tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
3148 return NO_EXIT;
/* MEEB/MDEB/MDB: BFP multiply, short / short->long / long. */
3151 static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
3153 gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
3154 return NO_EXIT;
3157 static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
3159 gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
3160 return NO_EXIT;
3163 static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
3165 gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
3166 return NO_EXIT;
/* MXB: extended BFP multiply; 128-bit result returned in out/out2. */
3169 static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
3171 gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
3172 return_low128(o->out2);
3173 return NO_EXIT;
/* MXDB: long -> extended BFP multiply. */
3176 static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
3178 gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
3179 return_low128(o->out2);
3180 return NO_EXIT;
/* MAEB/MADB: BFP multiply-and-add, short/long. */
3183 static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
3185 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
3186 gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
3187 tcg_temp_free_i64(r3);
3188 return NO_EXIT;
3191 static ExitStatus op_madb(DisasContext *s, DisasOps *o)
3193 int r3 = get_field(s->fields, r3);
3194 gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
3195 return NO_EXIT;
/* MSEB/MSDB: BFP multiply-and-subtract, short/long. */
3198 static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
3200 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
3201 gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
3202 tcg_temp_free_i64(r3);
3203 return NO_EXIT;
3206 static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
3208 int r3 = get_field(s->fields, r3);
3209 gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
3210 return NO_EXIT;
/* LNR-family: Load Negative — out = -|in2|, via branchless movcond. */
3213 static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
3215 TCGv_i64 z, n;
3216 z = tcg_const_i64(0);
3217 n = tcg_temp_new_i64();
3218 tcg_gen_neg_i64(n, o->in2);
3219 tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
3220 tcg_temp_free_i64(n);
3221 tcg_temp_free_i64(z);
3222 return NO_EXIT;
/* FP negative-absolute: force the sign bit on (short/long/extended). */
3225 static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
3227 tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
3228 return NO_EXIT;
3231 static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
3233 tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
3234 return NO_EXIT;
3237 static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
3239 tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
3240 tcg_gen_mov_i64(o->out2, o->in2);
3241 return NO_EXIT;
/* NC: AND Characters in storage; helper computes cc. */
3244 static ExitStatus op_nc(DisasContext *s, DisasOps *o)
3246 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3247 gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
3248 tcg_temp_free_i32(l);
3249 set_cc_static(s);
3250 return NO_EXIT;
/* LCR-family: Load Complement — two's-complement negate. */
3253 static ExitStatus op_neg(DisasContext *s, DisasOps *o)
3255 tcg_gen_neg_i64(o->out, o->in2);
3256 return NO_EXIT;
/* FP negate: flip the sign bit (short/long/extended). */
3259 static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
3261 tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
3262 return NO_EXIT;
3265 static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
3267 tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
3268 return NO_EXIT;
3271 static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
3273 tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
3274 tcg_gen_mov_i64(o->out2, o->in2);
3275 return NO_EXIT;
/* OC: OR Characters in storage; helper computes cc. */
3278 static ExitStatus op_oc(DisasContext *s, DisasOps *o)
3280 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3281 gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
3282 tcg_temp_free_i32(l);
3283 set_cc_static(s);
3284 return NO_EXIT;
/* Register OR. */
3287 static ExitStatus op_or(DisasContext *s, DisasOps *o)
3289 tcg_gen_or_i64(o->out, o->in1, o->in2);
3290 return NO_EXIT;
/* OILL/OIHH etc.: OR Immediate into one halfword/word field; insn->data
   encodes (size << 8) | shift of the affected field. */
3293 static ExitStatus op_ori(DisasContext *s, DisasOps *o)
3295 int shift = s->insn->data & 0xff;
3296 int size = s->insn->data >> 8;
3297 uint64_t mask = ((1ull << size) - 1) << shift;
3299 assert(!o->g_in2);
3300 tcg_gen_shli_i64(o->in2, o->in2, shift);
3301 tcg_gen_or_i64(o->out, o->in1, o->in2);
3303 /* Produce the CC from only the bits manipulated. */
3304 tcg_gen_andi_i64(cc_dst, o->out, mask);
3305 set_cc_nz_u64(s, cc_dst);
3306 return NO_EXIT;
/* PACK: convert zoned decimal to packed decimal, length l1+1. */
3309 static ExitStatus op_pack(DisasContext *s, DisasOps *o)
3311 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3312 gen_helper_pack(cpu_env, l, o->addr1, o->in2);
3313 tcg_temp_free_i32(l);
3314 return NO_EXIT;
/* PKA: Pack ASCII; second-operand length must be at most 32 bytes. */
3317 static ExitStatus op_pka(DisasContext *s, DisasOps *o)
3319 int l2 = get_field(s->fields, l2) + 1;
3320 TCGv_i32 l;
3322 /* The length must not exceed 32 bytes. */
3323 if (l2 > 32) {
3324 gen_program_exception(s, PGM_SPECIFICATION);
3325 return EXIT_NORETURN;
3327 l = tcg_const_i32(l2);
3328 gen_helper_pka(cpu_env, o->addr1, o->in2, l);
3329 tcg_temp_free_i32(l);
3330 return NO_EXIT;
/* PKU: Pack Unicode; length must be even and at most 64 bytes. */
3333 static ExitStatus op_pku(DisasContext *s, DisasOps *o)
3335 int l2 = get_field(s->fields, l2) + 1;
3336 TCGv_i32 l;
3338 /* The length must be even and should not exceed 64 bytes. */
3339 if ((l2 & 1) || (l2 > 64)) {
3340 gen_program_exception(s, PGM_SPECIFICATION);
3341 return EXIT_NORETURN;
3343 l = tcg_const_i32(l2);
3344 gen_helper_pku(cpu_env, o->addr1, o->in2, l);
3345 tcg_temp_free_i32(l);
3346 return NO_EXIT;
/* POPCNT: per-byte population count via helper. */
3349 static ExitStatus op_popcnt(DisasContext *s, DisasOps *o)
3351 gen_helper_popcnt(o->out, o->in2);
3352 return NO_EXIT;
3355 #ifndef CONFIG_USER_ONLY
/* PTLB: Purge TLB (privileged). */
3356 static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
3358 check_privileged(s);
3359 gen_helper_ptlb(cpu_env);
3360 return NO_EXIT;
3362 #endif
/* RISBG/RISBHG/RISBLG: Rotate then Insert Selected Bits.  The i3/i4
   fields select the destination bit range, i5 the rotate amount; bit
   0x80 of i4 requests zeroing of the unselected bits.  Where possible
   the operation is lowered to a single extract or deposit TCG op. */
3364 static ExitStatus op_risbg(DisasContext *s, DisasOps *o)
3366 int i3 = get_field(s->fields, i3);
3367 int i4 = get_field(s->fields, i4);
3368 int i5 = get_field(s->fields, i5);
3369 int do_zero = i4 & 0x80;
3370 uint64_t mask, imask, pmask;
3371 int pos, len, rot;
3373 /* Adjust the arguments for the specific insn. */
3374 switch (s->fields->op2) {
3375 case 0x55: /* risbg */
3376 i3 &= 63;
3377 i4 &= 63;
3378 pmask = ~0;
3379 break;
3380 case 0x5d: /* risbhg */
3381 i3 &= 31;
3382 i4 &= 31;
3383 pmask = 0xffffffff00000000ull;
3384 break;
3385 case 0x51: /* risblg */
3386 i3 &= 31;
3387 i4 &= 31;
3388 pmask = 0x00000000ffffffffull;
3389 break;
3390 default:
3391 abort();
3394 /* MASK is the set of bits to be inserted from R2.
3395 Take care for I3/I4 wraparound. */
3396 mask = pmask >> i3;
3397 if (i3 <= i4) {
3398 mask ^= pmask >> i4 >> 1;
3399 } else {
3400 mask |= ~(pmask >> i4 >> 1);
3402 mask &= pmask;
3404 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
3405 insns, we need to keep the other half of the register. */
3406 imask = ~mask | ~pmask;
3407 if (do_zero) {
3408 if (s->fields->op2 == 0x55) {
3409 imask = 0;
3410 } else {
3411 imask = ~pmask;
3415 len = i4 - i3 + 1;
3416 pos = 63 - i4;
3417 rot = i5 & 63;
3418 if (s->fields->op2 == 0x5d) {
3419 pos += 32;
3422 /* In some cases we can implement this with extract. */
3423 if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
3424 tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
3425 return NO_EXIT;
3428 /* In some cases we can implement this with deposit. */
3429 if (len > 0 && (imask == 0 || ~mask == imask)) {
3430 /* Note that we rotate the bits to be inserted to the lsb, not to
3431 the position as described in the PoO. */
3432 rot = (rot - pos) & 63;
3433 } else {
3434 pos = -1;
3437 /* Rotate the input as necessary. */
3438 tcg_gen_rotli_i64(o->in2, o->in2, rot);
3440 /* Insert the selected bits into the output. */
3441 if (pos >= 0) {
3442 if (imask == 0) {
3443 tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
3444 } else {
3445 tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
/* Fallback: mask-and-merge when no extract/deposit form applies. */
3447 } else if (imask == 0) {
3448 tcg_gen_andi_i64(o->out, o->in2, mask);
3449 } else {
3450 tcg_gen_andi_i64(o->in2, o->in2, mask);
3451 tcg_gen_andi_i64(o->out, o->out, imask);
3452 tcg_gen_or_i64(o->out, o->out, o->in2);
3454 return NO_EXIT;
/* RNSBG/ROSBG/RXSBG: Rotate then AND/OR/XOR Selected Bits.  i3 bit 0x80
   selects the test-only form which discards the result but still sets CC. */
3457 static ExitStatus op_rosbg(DisasContext *s, DisasOps *o)
3459 int i3 = get_field(s->fields, i3);
3460 int i4 = get_field(s->fields, i4);
3461 int i5 = get_field(s->fields, i5);
3462 uint64_t mask;
3464 /* If this is a test-only form, arrange to discard the result. */
3465 if (i3 & 0x80) {
3466 o->out = tcg_temp_new_i64();
3467 o->g_out = false;
3470 i3 &= 63;
3471 i4 &= 63;
3472 i5 &= 63;
3474 /* MASK is the set of bits to be operated on from R2.
3475 Take care for I3/I4 wraparound. */
3476 mask = ~0ull >> i3;
3477 if (i3 <= i4) {
3478 mask ^= ~0ull >> i4 >> 1;
3479 } else {
3480 mask |= ~(~0ull >> i4 >> 1);
3483 /* Rotate the input as necessary. */
3484 tcg_gen_rotli_i64(o->in2, o->in2, i5);
3486 /* Operate. */
3487 switch (s->fields->op2) {
3488 case 0x55: /* AND */
3489 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3490 tcg_gen_and_i64(o->out, o->out, o->in2);
3491 break;
3492 case 0x56: /* OR */
3493 tcg_gen_andi_i64(o->in2, o->in2, mask);
3494 tcg_gen_or_i64(o->out, o->out, o->in2);
3495 break;
3496 case 0x57: /* XOR */
3497 tcg_gen_andi_i64(o->in2, o->in2, mask);
3498 tcg_gen_xor_i64(o->out, o->out, o->in2);
3499 break;
3500 default:
3501 abort();
3504 /* Set the CC. */
3505 tcg_gen_andi_i64(cc_dst, o->out, mask);
3506 set_cc_nz_u64(s, cc_dst);
3507 return NO_EXIT;
/* Byte-swap ops of various widths (LRVR family). */
3510 static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
3512 tcg_gen_bswap16_i64(o->out, o->in2);
3513 return NO_EXIT;
3516 static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
3518 tcg_gen_bswap32_i64(o->out, o->in2);
3519 return NO_EXIT;
3522 static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
3524 tcg_gen_bswap64_i64(o->out, o->in2);
3525 return NO_EXIT;
/* RLL: Rotate Left Logical, 32-bit — done on i32 temporaries then
   zero-extended back into the 64-bit output. */
3528 static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
3530 TCGv_i32 t1 = tcg_temp_new_i32();
3531 TCGv_i32 t2 = tcg_temp_new_i32();
3532 TCGv_i32 to = tcg_temp_new_i32();
3533 tcg_gen_extrl_i64_i32(t1, o->in1);
3534 tcg_gen_extrl_i64_i32(t2, o->in2);
3535 tcg_gen_rotl_i32(to, t1, t2);
3536 tcg_gen_extu_i32_i64(o->out, to);
3537 tcg_temp_free_i32(t1);
3538 tcg_temp_free_i32(t2);
3539 tcg_temp_free_i32(to);
3540 return NO_EXIT;
/* RLLG: Rotate Left Logical, 64-bit. */
3543 static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
3545 tcg_gen_rotl_i64(o->out, o->in1, o->in2);
3546 return NO_EXIT;
3549 #ifndef CONFIG_USER_ONLY
/* RRBE: Reset Reference Bit Extended (privileged); helper sets cc. */
3550 static ExitStatus op_rrbe(DisasContext *s, DisasOps *o)
3552 check_privileged(s);
3553 gen_helper_rrbe(cc_op, cpu_env, o->in2);
3554 set_cc_static(s);
3555 return NO_EXIT;
/* SACF: Set Address Space Control Fast (privileged). */
3558 static ExitStatus op_sacf(DisasContext *s, DisasOps *o)
3560 check_privileged(s);
3561 gen_helper_sacf(cpu_env, o->in2);
3562 /* Addressing mode has changed, so end the block. */
3563 return EXIT_PC_STALE;
3565 #endif
/* SAM24/SAM31/SAM64: Set Addressing Mode; insn->data selects the mode. */
3567 static ExitStatus op_sam(DisasContext *s, DisasOps *o)
3569 int sam = s->insn->data;
3570 TCGv_i64 tsam;
3571 uint64_t mask;
3573 switch (sam) {
3574 case 0:
3575 mask = 0xffffff;
3576 break;
3577 case 1:
3578 mask = 0x7fffffff;
3579 break;
3580 default:
3581 mask = -1;
3582 break;
3585 /* Bizarre but true, we check the address of the current insn for the
3586 specification exception, not the next to be executed. Thus the PoO
3587 documents that Bad Things Happen two bytes before the end. */
3588 if (s->pc & ~mask) {
3589 gen_program_exception(s, PGM_SPECIFICATION);
3590 return EXIT_NORETURN;
3592 s->next_pc &= mask;
/* Update the addressing-mode bits (31-32) of the PSW mask. */
3594 tsam = tcg_const_i64(sam);
3595 tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
3596 tcg_temp_free_i64(tsam);
3598 /* Always exit the TB, since we (may have) changed execution mode. */
3599 return EXIT_PC_STALE;
/* SAR: Set Access Register r1 from in2. */
3602 static ExitStatus op_sar(DisasContext *s, DisasOps *o)
3604 int r1 = get_field(s->fields, r1);
3605 tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
3606 return NO_EXIT;
/* SEB/SDB/SXB: BFP subtract, short/long/extended. */
3609 static ExitStatus op_seb(DisasContext *s, DisasOps *o)
3611 gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
3612 return NO_EXIT;
3615 static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
3617 gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
3618 return NO_EXIT;
3621 static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
3623 gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
3624 return_low128(o->out2);
3625 return NO_EXIT;
/* SQEB/SQDB/SQXB: BFP square root, short/long/extended. */
3628 static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
3630 gen_helper_sqeb(o->out, cpu_env, o->in2);
3631 return NO_EXIT;
3634 static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
3636 gen_helper_sqdb(o->out, cpu_env, o->in2);
3637 return NO_EXIT;
3640 static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
3642 gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
3643 return_low128(o->out2);
3644 return NO_EXIT;
3647 #ifndef CONFIG_USER_ONLY
/* SERVC: Service Call (SCLP) — privileged; helper sets cc. */
3648 static ExitStatus op_servc(DisasContext *s, DisasOps *o)
3650 check_privileged(s);
3651 potential_page_fault(s);
3652 gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
3653 set_cc_static(s);
3654 return NO_EXIT;
/* SIGP: Signal Processor — privileged; helper sets cc. */
3657 static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
3659 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3660 check_privileged(s);
3661 potential_page_fault(s);
3662 gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
3663 set_cc_static(s);
3664 tcg_temp_free_i32(r1);
3665 return NO_EXIT;
3667 #endif
/* STOC/STOCG/STOCFH: Store On Condition; insn->data selects the width.
   Implemented by branching AROUND the store when the condition fails. */
3669 static ExitStatus op_soc(DisasContext *s, DisasOps *o)
3671 DisasCompare c;
3672 TCGv_i64 a, h;
3673 TCGLabel *lab;
3674 int r1;
3676 disas_jcc(s, &c, get_field(s->fields, m3));
3678 /* We want to store when the condition is fulfilled, so branch
3679 out when it's not */
3680 c.cond = tcg_invert_cond(c.cond);
3682 lab = gen_new_label();
3683 if (c.is_64) {
3684 tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
3685 } else {
3686 tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
3688 free_compare(&c);
3690 r1 = get_field(s->fields, r1);
3691 a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
3692 switch (s->insn->data) {
3693 case 1: /* STOCG */
3694 tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
3695 break;
3696 case 0: /* STOC */
3697 tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
3698 break;
3699 case 2: /* STOCFH */
3700 h = tcg_temp_new_i64();
3701 tcg_gen_shri_i64(h, regs[r1], 32);
3702 tcg_gen_qemu_st32(h, a, get_mem_index(s));
3703 tcg_temp_free_i64(h);
3704 break;
3705 default:
3706 g_assert_not_reached();
3708 tcg_temp_free_i64(a);
3710 gen_set_label(lab);
3711 return NO_EXIT;
/* SLA/SLAG: Shift Left Single (arithmetic); insn->data is the sign-bit
   position (31 or 63), which also selects the CC computation. */
3714 static ExitStatus op_sla(DisasContext *s, DisasOps *o)
3716 uint64_t sign = 1ull << s->insn->data;
3717 enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
3718 gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
3719 tcg_gen_shl_i64(o->out, o->in1, o->in2);
3720 /* The arithmetic left shift is curious in that it does not affect
3721 the sign bit. Copy that over from the source unchanged. */
3722 tcg_gen_andi_i64(o->out, o->out, ~sign);
3723 tcg_gen_andi_i64(o->in1, o->in1, sign);
3724 tcg_gen_or_i64(o->out, o->out, o->in1);
3725 return NO_EXIT;
/* Logical and arithmetic shifts (SLL/SRA/SRL families). */
3728 static ExitStatus op_sll(DisasContext *s, DisasOps *o)
3730 tcg_gen_shl_i64(o->out, o->in1, o->in2);
3731 return NO_EXIT;
3734 static ExitStatus op_sra(DisasContext *s, DisasOps *o)
3736 tcg_gen_sar_i64(o->out, o->in1, o->in2);
3737 return NO_EXIT;
3740 static ExitStatus op_srl(DisasContext *s, DisasOps *o)
3742 tcg_gen_shr_i64(o->out, o->in1, o->in2);
3743 return NO_EXIT;
/* SFPC: Set FP Control register. */
3746 static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
3748 gen_helper_sfpc(cpu_env, o->in2);
3749 return NO_EXIT;
/* SFASR: Set FPC And Signal. */
3752 static ExitStatus op_sfas(DisasContext *s, DisasOps *o)
3754 gen_helper_sfas(cpu_env, o->in2);
3755 return NO_EXIT;
3758 static ExitStatus op_srnm(DisasContext *s, DisasOps *o)
3760 int b2 = get_field(s->fields, b2);
3761 int d2 = get_field(s->fields, d2);
3762 TCGv_i64 t1 = tcg_temp_new_i64();
3763 TCGv_i64 t2 = tcg_temp_new_i64();
3764 int mask, pos, len;
3766 switch (s->fields->op2) {
3767 case 0x99: /* SRNM */
3768 pos = 0, len = 2;
3769 break;
3770 case 0xb8: /* SRNMB */
3771 pos = 0, len = 3;
3772 break;
3773 case 0xb9: /* SRNMT */
3774 pos = 4, len = 3;
3775 break;
3776 default:
3777 tcg_abort();
3779 mask = (1 << len) - 1;
3781 /* Insert the value into the appropriate field of the FPC. */
3782 if (b2 == 0) {
3783 tcg_gen_movi_i64(t1, d2 & mask);
3784 } else {
3785 tcg_gen_addi_i64(t1, regs[b2], d2);
3786 tcg_gen_andi_i64(t1, t1, mask);
3788 tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
3789 tcg_gen_deposit_i64(t2, t2, t1, pos, len);
3790 tcg_temp_free_i64(t1);
3792 /* Then install the new FPC to set the rounding mode in fpu_status. */
3793 gen_helper_sfpc(cpu_env, t2);
3794 tcg_temp_free_i64(t2);
3795 return NO_EXIT;
3798 #ifndef CONFIG_USER_ONLY
3799 static ExitStatus op_spka(DisasContext *s, DisasOps *o)
3801 check_privileged(s);
3802 tcg_gen_shri_i64(o->in2, o->in2, 4);
3803 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
3804 return NO_EXIT;
3807 static ExitStatus op_sske(DisasContext *s, DisasOps *o)
3809 check_privileged(s);
3810 gen_helper_sske(cpu_env, o->in1, o->in2);
3811 return NO_EXIT;
3814 static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
3816 check_privileged(s);
3817 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
3818 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
3819 return EXIT_PC_STALE_NOCHAIN;
3822 static ExitStatus op_stap(DisasContext *s, DisasOps *o)
3824 check_privileged(s);
3825 /* ??? Surely cpu address != cpu number. In any case the previous
3826 version of this stored more than the required half-word, so it
3827 is unlikely this has ever been tested. */
3828 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
3829 return NO_EXIT;
3832 static ExitStatus op_stck(DisasContext *s, DisasOps *o)
3834 gen_helper_stck(o->out, cpu_env);
3835 /* ??? We don't implement clock states. */
3836 gen_op_movi_cc(s, 0);
3837 return NO_EXIT;
3840 static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
3842 TCGv_i64 c1 = tcg_temp_new_i64();
3843 TCGv_i64 c2 = tcg_temp_new_i64();
3844 gen_helper_stck(c1, cpu_env);
3845 /* Shift the 64-bit value into its place as a zero-extended
3846 104-bit value. Note that "bit positions 64-103 are always
3847 non-zero so that they compare differently to STCK"; we set
3848 the least significant bit to 1. */
3849 tcg_gen_shli_i64(c2, c1, 56);
3850 tcg_gen_shri_i64(c1, c1, 8);
3851 tcg_gen_ori_i64(c2, c2, 0x10000);
3852 tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
3853 tcg_gen_addi_i64(o->in2, o->in2, 8);
3854 tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
3855 tcg_temp_free_i64(c1);
3856 tcg_temp_free_i64(c2);
3857 /* ??? We don't implement clock states. */
3858 gen_op_movi_cc(s, 0);
3859 return NO_EXIT;
3862 static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
3864 check_privileged(s);
3865 gen_helper_sckc(cpu_env, o->in2);
3866 return NO_EXIT;
3869 static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
3871 check_privileged(s);
3872 gen_helper_stckc(o->out, cpu_env);
3873 return NO_EXIT;
3876 static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
3878 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3879 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3880 check_privileged(s);
3881 gen_helper_stctg(cpu_env, r1, o->in2, r3);
3882 tcg_temp_free_i32(r1);
3883 tcg_temp_free_i32(r3);
3884 return NO_EXIT;
3887 static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
3889 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3890 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3891 check_privileged(s);
3892 gen_helper_stctl(cpu_env, r1, o->in2, r3);
3893 tcg_temp_free_i32(r1);
3894 tcg_temp_free_i32(r3);
3895 return NO_EXIT;
3898 static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
3900 check_privileged(s);
3901 tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));
3902 tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
3903 return NO_EXIT;
3906 static ExitStatus op_spt(DisasContext *s, DisasOps *o)
3908 check_privileged(s);
3909 gen_helper_spt(cpu_env, o->in2);
3910 return NO_EXIT;
3913 static ExitStatus op_stfl(DisasContext *s, DisasOps *o)
3915 check_privileged(s);
3916 gen_helper_stfl(cpu_env);
3917 return NO_EXIT;
3920 static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
3922 check_privileged(s);
3923 gen_helper_stpt(o->out, cpu_env);
3924 return NO_EXIT;
3927 static ExitStatus op_stsi(DisasContext *s, DisasOps *o)
3929 check_privileged(s);
3930 potential_page_fault(s);
3931 gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
3932 set_cc_static(s);
3933 return NO_EXIT;
3936 static ExitStatus op_spx(DisasContext *s, DisasOps *o)
3938 check_privileged(s);
3939 gen_helper_spx(cpu_env, o->in2);
3940 return NO_EXIT;
3943 static ExitStatus op_xsch(DisasContext *s, DisasOps *o)
3945 check_privileged(s);
3946 potential_page_fault(s);
3947 gen_helper_xsch(cpu_env, regs[1]);
3948 set_cc_static(s);
3949 return NO_EXIT;
3952 static ExitStatus op_csch(DisasContext *s, DisasOps *o)
3954 check_privileged(s);
3955 potential_page_fault(s);
3956 gen_helper_csch(cpu_env, regs[1]);
3957 set_cc_static(s);
3958 return NO_EXIT;
3961 static ExitStatus op_hsch(DisasContext *s, DisasOps *o)
3963 check_privileged(s);
3964 potential_page_fault(s);
3965 gen_helper_hsch(cpu_env, regs[1]);
3966 set_cc_static(s);
3967 return NO_EXIT;
3970 static ExitStatus op_msch(DisasContext *s, DisasOps *o)
3972 check_privileged(s);
3973 potential_page_fault(s);
3974 gen_helper_msch(cpu_env, regs[1], o->in2);
3975 set_cc_static(s);
3976 return NO_EXIT;
3979 static ExitStatus op_rchp(DisasContext *s, DisasOps *o)
3981 check_privileged(s);
3982 potential_page_fault(s);
3983 gen_helper_rchp(cpu_env, regs[1]);
3984 set_cc_static(s);
3985 return NO_EXIT;
3988 static ExitStatus op_rsch(DisasContext *s, DisasOps *o)
3990 check_privileged(s);
3991 potential_page_fault(s);
3992 gen_helper_rsch(cpu_env, regs[1]);
3993 set_cc_static(s);
3994 return NO_EXIT;
3997 static ExitStatus op_ssch(DisasContext *s, DisasOps *o)
3999 check_privileged(s);
4000 potential_page_fault(s);
4001 gen_helper_ssch(cpu_env, regs[1], o->in2);
4002 set_cc_static(s);
4003 return NO_EXIT;
4006 static ExitStatus op_stsch(DisasContext *s, DisasOps *o)
4008 check_privileged(s);
4009 potential_page_fault(s);
4010 gen_helper_stsch(cpu_env, regs[1], o->in2);
4011 set_cc_static(s);
4012 return NO_EXIT;
4015 static ExitStatus op_tsch(DisasContext *s, DisasOps *o)
4017 check_privileged(s);
4018 potential_page_fault(s);
4019 gen_helper_tsch(cpu_env, regs[1], o->in2);
4020 set_cc_static(s);
4021 return NO_EXIT;
4024 static ExitStatus op_chsc(DisasContext *s, DisasOps *o)
4026 check_privileged(s);
4027 potential_page_fault(s);
4028 gen_helper_chsc(cpu_env, o->in2);
4029 set_cc_static(s);
4030 return NO_EXIT;
4033 static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
4035 check_privileged(s);
4036 tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
4037 tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
4038 return NO_EXIT;
4041 static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
4043 uint64_t i2 = get_field(s->fields, i2);
4044 TCGv_i64 t;
4046 check_privileged(s);
4048 /* It is important to do what the instruction name says: STORE THEN.
4049 If we let the output hook perform the store then if we fault and
4050 restart, we'll have the wrong SYSTEM MASK in place. */
4051 t = tcg_temp_new_i64();
4052 tcg_gen_shri_i64(t, psw_mask, 56);
4053 tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
4054 tcg_temp_free_i64(t);
4056 if (s->fields->op == 0xac) {
4057 tcg_gen_andi_i64(psw_mask, psw_mask,
4058 (i2 << 56) | 0x00ffffffffffffffull);
4059 } else {
4060 tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
4063 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
4064 return EXIT_PC_STALE_NOCHAIN;
4067 static ExitStatus op_stura(DisasContext *s, DisasOps *o)
4069 check_privileged(s);
4070 potential_page_fault(s);
4071 gen_helper_stura(cpu_env, o->in2, o->in1);
4072 return NO_EXIT;
4075 static ExitStatus op_sturg(DisasContext *s, DisasOps *o)
4077 check_privileged(s);
4078 potential_page_fault(s);
4079 gen_helper_sturg(cpu_env, o->in2, o->in1);
4080 return NO_EXIT;
4082 #endif
4084 static ExitStatus op_stfle(DisasContext *s, DisasOps *o)
4086 potential_page_fault(s);
4087 gen_helper_stfle(cc_op, cpu_env, o->in2);
4088 set_cc_static(s);
4089 return NO_EXIT;
4092 static ExitStatus op_st8(DisasContext *s, DisasOps *o)
4094 tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
4095 return NO_EXIT;
4098 static ExitStatus op_st16(DisasContext *s, DisasOps *o)
4100 tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
4101 return NO_EXIT;
4104 static ExitStatus op_st32(DisasContext *s, DisasOps *o)
4106 tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
4107 return NO_EXIT;
4110 static ExitStatus op_st64(DisasContext *s, DisasOps *o)
4112 tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
4113 return NO_EXIT;
4116 static ExitStatus op_stam(DisasContext *s, DisasOps *o)
4118 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4119 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
4120 gen_helper_stam(cpu_env, r1, o->in2, r3);
4121 tcg_temp_free_i32(r1);
4122 tcg_temp_free_i32(r3);
4123 return NO_EXIT;
4126 static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
4128 int m3 = get_field(s->fields, m3);
4129 int pos, base = s->insn->data;
4130 TCGv_i64 tmp = tcg_temp_new_i64();
4132 pos = base + ctz32(m3) * 8;
4133 switch (m3) {
4134 case 0xf:
4135 /* Effectively a 32-bit store. */
4136 tcg_gen_shri_i64(tmp, o->in1, pos);
4137 tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
4138 break;
4140 case 0xc:
4141 case 0x6:
4142 case 0x3:
4143 /* Effectively a 16-bit store. */
4144 tcg_gen_shri_i64(tmp, o->in1, pos);
4145 tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
4146 break;
4148 case 0x8:
4149 case 0x4:
4150 case 0x2:
4151 case 0x1:
4152 /* Effectively an 8-bit store. */
4153 tcg_gen_shri_i64(tmp, o->in1, pos);
4154 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
4155 break;
4157 default:
4158 /* This is going to be a sequence of shifts and stores. */
4159 pos = base + 32 - 8;
4160 while (m3) {
4161 if (m3 & 0x8) {
4162 tcg_gen_shri_i64(tmp, o->in1, pos);
4163 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
4164 tcg_gen_addi_i64(o->in2, o->in2, 1);
4166 m3 = (m3 << 1) & 0xf;
4167 pos -= 8;
4169 break;
4171 tcg_temp_free_i64(tmp);
4172 return NO_EXIT;
4175 static ExitStatus op_stm(DisasContext *s, DisasOps *o)
4177 int r1 = get_field(s->fields, r1);
4178 int r3 = get_field(s->fields, r3);
4179 int size = s->insn->data;
4180 TCGv_i64 tsize = tcg_const_i64(size);
4182 while (1) {
4183 if (size == 8) {
4184 tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
4185 } else {
4186 tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
4188 if (r1 == r3) {
4189 break;
4191 tcg_gen_add_i64(o->in2, o->in2, tsize);
4192 r1 = (r1 + 1) & 15;
4195 tcg_temp_free_i64(tsize);
4196 return NO_EXIT;
4199 static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
4201 int r1 = get_field(s->fields, r1);
4202 int r3 = get_field(s->fields, r3);
4203 TCGv_i64 t = tcg_temp_new_i64();
4204 TCGv_i64 t4 = tcg_const_i64(4);
4205 TCGv_i64 t32 = tcg_const_i64(32);
4207 while (1) {
4208 tcg_gen_shl_i64(t, regs[r1], t32);
4209 tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
4210 if (r1 == r3) {
4211 break;
4213 tcg_gen_add_i64(o->in2, o->in2, t4);
4214 r1 = (r1 + 1) & 15;
4217 tcg_temp_free_i64(t);
4218 tcg_temp_free_i64(t4);
4219 tcg_temp_free_i64(t32);
4220 return NO_EXIT;
4223 static ExitStatus op_stpq(DisasContext *s, DisasOps *o)
4225 gen_helper_stpq(cpu_env, o->in2, o->out2, o->out);
4226 return NO_EXIT;
4229 static ExitStatus op_srst(DisasContext *s, DisasOps *o)
4231 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4232 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4234 gen_helper_srst(cpu_env, r1, r2);
4236 tcg_temp_free_i32(r1);
4237 tcg_temp_free_i32(r2);
4238 set_cc_static(s);
4239 return NO_EXIT;
4242 static ExitStatus op_srstu(DisasContext *s, DisasOps *o)
4244 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4245 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4247 gen_helper_srstu(cpu_env, r1, r2);
4249 tcg_temp_free_i32(r1);
4250 tcg_temp_free_i32(r2);
4251 set_cc_static(s);
4252 return NO_EXIT;
4255 static ExitStatus op_sub(DisasContext *s, DisasOps *o)
4257 tcg_gen_sub_i64(o->out, o->in1, o->in2);
4258 return NO_EXIT;
4261 static ExitStatus op_subb(DisasContext *s, DisasOps *o)
4263 DisasCompare cmp;
4264 TCGv_i64 borrow;
4266 tcg_gen_sub_i64(o->out, o->in1, o->in2);
4268 /* The !borrow flag is the msb of CC. Since we want the inverse of
4269 that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
4270 disas_jcc(s, &cmp, 8 | 4);
4271 borrow = tcg_temp_new_i64();
4272 if (cmp.is_64) {
4273 tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
4274 } else {
4275 TCGv_i32 t = tcg_temp_new_i32();
4276 tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
4277 tcg_gen_extu_i32_i64(borrow, t);
4278 tcg_temp_free_i32(t);
4280 free_compare(&cmp);
4282 tcg_gen_sub_i64(o->out, o->out, borrow);
4283 tcg_temp_free_i64(borrow);
4284 return NO_EXIT;
4287 static ExitStatus op_svc(DisasContext *s, DisasOps *o)
4289 TCGv_i32 t;
4291 update_psw_addr(s);
4292 update_cc_op(s);
4294 t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
4295 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
4296 tcg_temp_free_i32(t);
4298 t = tcg_const_i32(s->ilen);
4299 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
4300 tcg_temp_free_i32(t);
4302 gen_exception(EXCP_SVC);
4303 return EXIT_NORETURN;
4306 static ExitStatus op_tam(DisasContext *s, DisasOps *o)
4308 int cc = 0;
4310 cc |= (s->tb->flags & FLAG_MASK_64) ? 2 : 0;
4311 cc |= (s->tb->flags & FLAG_MASK_32) ? 1 : 0;
4312 gen_op_movi_cc(s, cc);
4313 return NO_EXIT;
4316 static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
4318 gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
4319 set_cc_static(s);
4320 return NO_EXIT;
4323 static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
4325 gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
4326 set_cc_static(s);
4327 return NO_EXIT;
4330 static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
4332 gen_helper_tcxb(cc_op, cpu_env, o->out, o->out2, o->in2);
4333 set_cc_static(s);
4334 return NO_EXIT;
4337 #ifndef CONFIG_USER_ONLY
4339 static ExitStatus op_testblock(DisasContext *s, DisasOps *o)
4341 check_privileged(s);
4342 gen_helper_testblock(cc_op, cpu_env, o->in2);
4343 set_cc_static(s);
4344 return NO_EXIT;
4347 static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
4349 gen_helper_tprot(cc_op, o->addr1, o->in2);
4350 set_cc_static(s);
4351 return NO_EXIT;
4354 #endif
4356 static ExitStatus op_tp(DisasContext *s, DisasOps *o)
4358 TCGv_i32 l1 = tcg_const_i32(get_field(s->fields, l1) + 1);
4359 gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
4360 tcg_temp_free_i32(l1);
4361 set_cc_static(s);
4362 return NO_EXIT;
4365 static ExitStatus op_tr(DisasContext *s, DisasOps *o)
4367 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4368 gen_helper_tr(cpu_env, l, o->addr1, o->in2);
4369 tcg_temp_free_i32(l);
4370 set_cc_static(s);
4371 return NO_EXIT;
4374 static ExitStatus op_tre(DisasContext *s, DisasOps *o)
4376 gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
4377 return_low128(o->out2);
4378 set_cc_static(s);
4379 return NO_EXIT;
4382 static ExitStatus op_trt(DisasContext *s, DisasOps *o)
4384 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4385 gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
4386 tcg_temp_free_i32(l);
4387 set_cc_static(s);
4388 return NO_EXIT;
4391 static ExitStatus op_trtr(DisasContext *s, DisasOps *o)
4393 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4394 gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2);
4395 tcg_temp_free_i32(l);
4396 set_cc_static(s);
4397 return NO_EXIT;
4400 static ExitStatus op_trXX(DisasContext *s, DisasOps *o)
4402 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4403 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4404 TCGv_i32 sizes = tcg_const_i32(s->insn->opc & 3);
4405 TCGv_i32 tst = tcg_temp_new_i32();
4406 int m3 = get_field(s->fields, m3);
4408 if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
4409 m3 = 0;
4411 if (m3 & 1) {
4412 tcg_gen_movi_i32(tst, -1);
4413 } else {
4414 tcg_gen_extrl_i64_i32(tst, regs[0]);
4415 if (s->insn->opc & 3) {
4416 tcg_gen_ext8u_i32(tst, tst);
4417 } else {
4418 tcg_gen_ext16u_i32(tst, tst);
4421 gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);
4423 tcg_temp_free_i32(r1);
4424 tcg_temp_free_i32(r2);
4425 tcg_temp_free_i32(sizes);
4426 tcg_temp_free_i32(tst);
4427 set_cc_static(s);
4428 return NO_EXIT;
4431 static ExitStatus op_ts(DisasContext *s, DisasOps *o)
4433 TCGv_i32 t1 = tcg_const_i32(0xff);
4434 tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
4435 tcg_gen_extract_i32(cc_op, t1, 7, 1);
4436 tcg_temp_free_i32(t1);
4437 set_cc_static(s);
4438 return NO_EXIT;
4441 static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
4443 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4444 gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
4445 tcg_temp_free_i32(l);
4446 return NO_EXIT;
4449 static ExitStatus op_unpka(DisasContext *s, DisasOps *o)
4451 int l1 = get_field(s->fields, l1) + 1;
4452 TCGv_i32 l;
4454 /* The length must not exceed 32 bytes. */
4455 if (l1 > 32) {
4456 gen_program_exception(s, PGM_SPECIFICATION);
4457 return EXIT_NORETURN;
4459 l = tcg_const_i32(l1);
4460 gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
4461 tcg_temp_free_i32(l);
4462 set_cc_static(s);
4463 return NO_EXIT;
4466 static ExitStatus op_unpku(DisasContext *s, DisasOps *o)
4468 int l1 = get_field(s->fields, l1) + 1;
4469 TCGv_i32 l;
4471 /* The length must be even and should not exceed 64 bytes. */
4472 if ((l1 & 1) || (l1 > 64)) {
4473 gen_program_exception(s, PGM_SPECIFICATION);
4474 return EXIT_NORETURN;
4476 l = tcg_const_i32(l1);
4477 gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
4478 tcg_temp_free_i32(l);
4479 set_cc_static(s);
4480 return NO_EXIT;
4484 static ExitStatus op_xc(DisasContext *s, DisasOps *o)
4486 int d1 = get_field(s->fields, d1);
4487 int d2 = get_field(s->fields, d2);
4488 int b1 = get_field(s->fields, b1);
4489 int b2 = get_field(s->fields, b2);
4490 int l = get_field(s->fields, l1);
4491 TCGv_i32 t32;
4493 o->addr1 = get_address(s, 0, b1, d1);
4495 /* If the addresses are identical, this is a store/memset of zero. */
4496 if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
4497 o->in2 = tcg_const_i64(0);
4499 l++;
4500 while (l >= 8) {
4501 tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
4502 l -= 8;
4503 if (l > 0) {
4504 tcg_gen_addi_i64(o->addr1, o->addr1, 8);
4507 if (l >= 4) {
4508 tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
4509 l -= 4;
4510 if (l > 0) {
4511 tcg_gen_addi_i64(o->addr1, o->addr1, 4);
4514 if (l >= 2) {
4515 tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
4516 l -= 2;
4517 if (l > 0) {
4518 tcg_gen_addi_i64(o->addr1, o->addr1, 2);
4521 if (l) {
4522 tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
4524 gen_op_movi_cc(s, 0);
4525 return NO_EXIT;
4528 /* But in general we'll defer to a helper. */
4529 o->in2 = get_address(s, 0, b2, d2);
4530 t32 = tcg_const_i32(l);
4531 gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
4532 tcg_temp_free_i32(t32);
4533 set_cc_static(s);
4534 return NO_EXIT;
4537 static ExitStatus op_xor(DisasContext *s, DisasOps *o)
4539 tcg_gen_xor_i64(o->out, o->in1, o->in2);
4540 return NO_EXIT;
4543 static ExitStatus op_xori(DisasContext *s, DisasOps *o)
4545 int shift = s->insn->data & 0xff;
4546 int size = s->insn->data >> 8;
4547 uint64_t mask = ((1ull << size) - 1) << shift;
4549 assert(!o->g_in2);
4550 tcg_gen_shli_i64(o->in2, o->in2, shift);
4551 tcg_gen_xor_i64(o->out, o->in1, o->in2);
4553 /* Produce the CC from only the bits manipulated. */
4554 tcg_gen_andi_i64(cc_dst, o->out, mask);
4555 set_cc_nz_u64(s, cc_dst);
4556 return NO_EXIT;
4559 static ExitStatus op_zero(DisasContext *s, DisasOps *o)
4561 o->out = tcg_const_i64(0);
4562 return NO_EXIT;
4565 static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
4567 o->out = tcg_const_i64(0);
4568 o->out2 = o->out;
4569 o->g_out2 = true;
4570 return NO_EXIT;
4573 /* ====================================================================== */
4574 /* The "Cc OUTput" generators. Given the generated output (and in some cases
4575 the original inputs), update the various cc data structures in order to
4576 be able to compute the new condition code. */
4578 static void cout_abs32(DisasContext *s, DisasOps *o)
4580 gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
4583 static void cout_abs64(DisasContext *s, DisasOps *o)
4585 gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
4588 static void cout_adds32(DisasContext *s, DisasOps *o)
4590 gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
4593 static void cout_adds64(DisasContext *s, DisasOps *o)
4595 gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
4598 static void cout_addu32(DisasContext *s, DisasOps *o)
4600 gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
4603 static void cout_addu64(DisasContext *s, DisasOps *o)
4605 gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
4608 static void cout_addc32(DisasContext *s, DisasOps *o)
4610 gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
4613 static void cout_addc64(DisasContext *s, DisasOps *o)
4615 gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
4618 static void cout_cmps32(DisasContext *s, DisasOps *o)
4620 gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
4623 static void cout_cmps64(DisasContext *s, DisasOps *o)
4625 gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
4628 static void cout_cmpu32(DisasContext *s, DisasOps *o)
4630 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
4633 static void cout_cmpu64(DisasContext *s, DisasOps *o)
4635 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
4638 static void cout_f32(DisasContext *s, DisasOps *o)
4640 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
4643 static void cout_f64(DisasContext *s, DisasOps *o)
4645 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
4648 static void cout_f128(DisasContext *s, DisasOps *o)
4650 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
4653 static void cout_nabs32(DisasContext *s, DisasOps *o)
4655 gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
4658 static void cout_nabs64(DisasContext *s, DisasOps *o)
4660 gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
4663 static void cout_neg32(DisasContext *s, DisasOps *o)
4665 gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
4668 static void cout_neg64(DisasContext *s, DisasOps *o)
4670 gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
4673 static void cout_nz32(DisasContext *s, DisasOps *o)
4675 tcg_gen_ext32u_i64(cc_dst, o->out);
4676 gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
4679 static void cout_nz64(DisasContext *s, DisasOps *o)
4681 gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
4684 static void cout_s32(DisasContext *s, DisasOps *o)
4686 gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
4689 static void cout_s64(DisasContext *s, DisasOps *o)
4691 gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
4694 static void cout_subs32(DisasContext *s, DisasOps *o)
4696 gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
4699 static void cout_subs64(DisasContext *s, DisasOps *o)
4701 gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
4704 static void cout_subu32(DisasContext *s, DisasOps *o)
4706 gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
4709 static void cout_subu64(DisasContext *s, DisasOps *o)
4711 gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
4714 static void cout_subb32(DisasContext *s, DisasOps *o)
4716 gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
4719 static void cout_subb64(DisasContext *s, DisasOps *o)
4721 gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
4724 static void cout_tm32(DisasContext *s, DisasOps *o)
4726 gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
4729 static void cout_tm64(DisasContext *s, DisasOps *o)
4731 gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
4734 /* ====================================================================== */
4735 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
4736 with the TCG register to which we will write. Used in combination with
4737 the "wout" generators, in some cases we need a new temporary, and in
4738 some cases we can write to a TCG global. */
4740 static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
4742 o->out = tcg_temp_new_i64();
4744 #define SPEC_prep_new 0
4746 static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
4748 o->out = tcg_temp_new_i64();
4749 o->out2 = tcg_temp_new_i64();
4751 #define SPEC_prep_new_P 0
4753 static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
4755 o->out = regs[get_field(f, r1)];
4756 o->g_out = true;
4758 #define SPEC_prep_r1 0
4760 static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
4762 int r1 = get_field(f, r1);
4763 o->out = regs[r1];
4764 o->out2 = regs[r1 + 1];
4765 o->g_out = o->g_out2 = true;
4767 #define SPEC_prep_r1_P SPEC_r1_even
4769 static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
4771 o->out = fregs[get_field(f, r1)];
4772 o->g_out = true;
4774 #define SPEC_prep_f1 0
4776 static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
4778 int r1 = get_field(f, r1);
4779 o->out = fregs[r1];
4780 o->out2 = fregs[r1 + 2];
4781 o->g_out = o->g_out2 = true;
4783 #define SPEC_prep_x1 SPEC_r1_f128
4785 /* ====================================================================== */
4786 /* The "Write OUTput" generators. These generally perform some non-trivial
4787 copy of data to TCG globals, or to main memory. The trivial cases are
4788 generally handled by having a "prep" generator install the TCG global
4789 as the destination of the operation. */
4791 static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
4793 store_reg(get_field(f, r1), o->out);
4795 #define SPEC_wout_r1 0
4797 static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
4799 int r1 = get_field(f, r1);
4800 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
4802 #define SPEC_wout_r1_8 0
4804 static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
4806 int r1 = get_field(f, r1);
4807 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
4809 #define SPEC_wout_r1_16 0
4811 static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
4813 store_reg32_i64(get_field(f, r1), o->out);
4815 #define SPEC_wout_r1_32 0
4817 static void wout_r1_32h(DisasContext *s, DisasFields *f, DisasOps *o)
4819 store_reg32h_i64(get_field(f, r1), o->out);
4821 #define SPEC_wout_r1_32h 0
4823 static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
4825 int r1 = get_field(f, r1);
4826 store_reg32_i64(r1, o->out);
4827 store_reg32_i64(r1 + 1, o->out2);
4829 #define SPEC_wout_r1_P32 SPEC_r1_even
4831 static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4833 int r1 = get_field(f, r1);
4834 store_reg32_i64(r1 + 1, o->out);
4835 tcg_gen_shri_i64(o->out, o->out, 32);
4836 store_reg32_i64(r1, o->out);
4838 #define SPEC_wout_r1_D32 SPEC_r1_even
4840 static void wout_r3_P32(DisasContext *s, DisasFields *f, DisasOps *o)
4842 int r3 = get_field(f, r3);
4843 store_reg32_i64(r3, o->out);
4844 store_reg32_i64(r3 + 1, o->out2);
4846 #define SPEC_wout_r3_P32 SPEC_r3_even
4848 static void wout_r3_P64(DisasContext *s, DisasFields *f, DisasOps *o)
4850 int r3 = get_field(f, r3);
4851 store_reg(r3, o->out);
4852 store_reg(r3 + 1, o->out2);
4854 #define SPEC_wout_r3_P64 SPEC_r3_even
4856 static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
4858 store_freg32_i64(get_field(f, r1), o->out);
4860 #define SPEC_wout_e1 0
4862 static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
4864 store_freg(get_field(f, r1), o->out);
4866 #define SPEC_wout_f1 0
4868 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
4870 int f1 = get_field(s->fields, r1);
4871 store_freg(f1, o->out);
4872 store_freg(f1 + 2, o->out2);
4874 #define SPEC_wout_x1 SPEC_r1_f128
4876 static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
4878 if (get_field(f, r1) != get_field(f, r2)) {
4879 store_reg32_i64(get_field(f, r1), o->out);
4882 #define SPEC_wout_cond_r1r2_32 0
4884 static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
4886 if (get_field(f, r1) != get_field(f, r2)) {
4887 store_freg32_i64(get_field(f, r1), o->out);
4890 #define SPEC_wout_cond_e1e2 0
4892 static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
4894 tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
4896 #define SPEC_wout_m1_8 0
4898 static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
4900 tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
4902 #define SPEC_wout_m1_16 0
4904 static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
4906 tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
4908 #define SPEC_wout_m1_32 0
4910 static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
4912 tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
4914 #define SPEC_wout_m1_64 0
4916 static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
4918 tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
4920 #define SPEC_wout_m2_32 0
4922 static void wout_in2_r1(DisasContext *s, DisasFields *f, DisasOps *o)
4924 store_reg(get_field(f, r1), o->in2);
4926 #define SPEC_wout_in2_r1 0
4928 static void wout_in2_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
4930 store_reg32_i64(get_field(f, r1), o->in2);
4932 #define SPEC_wout_in2_r1_32 0
/* ====================================================================== */
/* The "INput 1" generators.  These load the first operand to an insn.
   Each generator is paired with a SPEC_* mask of the specification
   checks the users of the generator require (0 = none).  Generators
   named *_o alias a global TCG value directly and set g_in1 so the
   caller knows not to free it.  */

static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1));
}
#define SPEC_in1_r1 0

static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_r1_o 0

/* Sign-extend the low 32 bits of r1.  */
static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32s 0

/* Zero-extend the low 32 bits of r1.  */
static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32u 0

/* Use the high 32 bits of r1 (shifted right into the low half).  */
static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
}
#define SPEC_in1_r1_sr32 0

/* The odd register of an even/odd pair; insns using this require
   r1 to be even, hence SPEC_r1_even.  */
static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1) + 1);
}
#define SPEC_in1_r1p1 SPEC_r1_even

static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32s SPEC_r1_even

static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32u SPEC_r1_even

/* Concatenate the 32-bit halves of the even/odd pair r1:r1+1 into one
   64-bit value (r1 supplies the high half).  */
static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}
#define SPEC_in1_r1_D32 SPEC_r1_even

static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r2));
}
#define SPEC_in1_r2 0

static void in1_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r2)], 32);
}
#define SPEC_in1_r2_sr32 0

static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r3));
}
#define SPEC_in1_r3 0

static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_r3_o 0

static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32s 0

static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32u 0

static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
}
#define SPEC_in1_r3_D32 SPEC_r3_even

/* Short (32-bit) fp operand from f[r1].  */
static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(f, r1));
}
#define SPEC_in1_e1 0

static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_f1_o 0

/* 128-bit fp operand in the register pair (r1, r1+2).  Note this
   deliberately fills out/out2 (marked global), not in1 -- the extended
   value is used in place.  SPEC_r1_f128 enforces a valid pair.  */
static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[r1 + 2];
    o->g_out = o->g_out2 = true;
}
#define SPEC_in1_x1_o SPEC_r1_f128

static void in1_f3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_f3_o 0

/* Effective address from the b1/d1 fields (no index register).  */
static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
}
#define SPEC_in1_la1 0

/* Effective address from x2/b2/d2; x2 is optional in some formats.  */
static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in1_la2 0

/* Memory loads through addr1, in the various widths/extensions.  */
static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_8u 0

static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16s 0

static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16u 0

static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32s 0

static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32u 0

static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_64 0
/* ====================================================================== */
/* The "INput 2" generators.  These load the second operand to an insn.
   Same conventions as the in1 generators above: *_o variants alias a
   global and set g_in2; SPEC_* masks carry required specification
   checks.  */

static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r1)];
    o->g_in2 = true;
}
#define SPEC_in2_r1_o 0

static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_16u 0

static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_32u 0

/* Concatenate the even/odd pair r1:r1+1 (r1 high, r1+1 low).  */
static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
}
#define SPEC_in2_r1_D32 SPEC_r1_even

static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r2));
}
#define SPEC_in2_r2 0

static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_r2_o 0

/* Load r2 only when it is non-zero; r2 == 0 leaves in2 unset (used by
   insns where register 0 means "no operand").  */
static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}
#define SPEC_in2_r2_nz 0

static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8s 0

static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8u 0

static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16s 0

static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16u 0

static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r3));
}
#define SPEC_in2_r3 0

static void in2_r3_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(f, r3)], 32);
}
#define SPEC_in2_r3_sr32 0

static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32s 0

static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32u 0

static void in2_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(f, r2)], 32);
}
#define SPEC_in2_r2_sr32 0

/* Short (32-bit) fp operand from f[r2].  */
static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(f, r2));
}
#define SPEC_in2_e2 0

static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = fregs[get_field(f, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_f2_o 0

/* 128-bit fp operand in the pair (r2, r2+2); fills in1/in2 as globals.  */
static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    o->in1 = fregs[r2];
    o->in2 = fregs[r2 + 2];
    o->g_in1 = o->g_in2 = true;
}
#define SPEC_in2_x2_o SPEC_r2_f128

/* Address formed from register r2 as a base with no index/displacement.  */
static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = get_address(s, 0, get_field(f, r2), 0);
}
#define SPEC_in2_ra2 0

/* Effective address from x2/b2/d2; x2 is optional in some formats.  */
static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in2_a2 0

/* PC-relative address: i2 is a signed halfword offset from the insn.  */
static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
}
#define SPEC_in2_ri2 0

/* Shift amounts, masked to the operation width (31 or 63 bits).  */
static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 31);
}
#define SPEC_in2_sh32 0

static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 63);
}
#define SPEC_in2_sh64 0

/* Memory loads through the a2 effective address; the address in in2 is
   overwritten in place with the loaded value.  */
static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_8u 0

static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16s 0

static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16u 0

static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32s 0

static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32u 0

static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_64 0

/* Memory loads through the PC-relative (ri2) address.  */
static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_16u 0

static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32s 0

static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32u 0

static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_64 0

/* Immediate operands, with the various extensions/shifts applied at
   translate time.  */
static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(get_field(f, i2));
}
#define SPEC_in2_i2 0

static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
}
#define SPEC_in2_i2_8u 0

static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
}
#define SPEC_in2_i2_16u 0

static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
}
#define SPEC_in2_i2_32u 0

/* Unsigned immediate shifted left by the insn-specific amount in
   insn->data.  */
static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0

static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_32u_shl 0

#ifndef CONFIG_USER_ONLY
/* The raw instruction image itself, as saved during decode.  */
static void in2_insn(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->fields->raw_insn);
}
#define SPEC_in2_insn 0
#endif
/* ====================================================================== */

/* Find opc within the table of insns.  This is formulated as a switch
   statement so that (1) we get compile-time notice of cut-paste errors
   for duplicated opcodes, and (2) the compiler generates the binary
   search tree, rather than us having to post-process the table.  */

#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)

/* First expansion of insn-data.def: one enumerator per insn, used to
   index insn_info[] below.  */
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,

enum DisasInsnEnum {
#include "insn-data.def"
};

/* Second expansion: a DisasInsn initializer per insn, wiring up the
   format, facility, specification checks, and helper callbacks.  */
#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) {                       \
    .opc = OPC,                                                             \
    .fmt = FMT_##FT,                                                        \
    .fac = FAC_##FC,                                                        \
    .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
    .name = #NM,                                                            \
    .help_in1 = in1_##I1,                                                   \
    .help_in2 = in2_##I2,                                                   \
    .help_prep = prep_##P,                                                  \
    .help_wout = wout_##W,                                                  \
    .help_cout = cout_##CC,                                                 \
    .help_op = op_##OP,                                                     \
    .data = D                                                               \
},

/* Allow 0 to be used for NULL in the table below.  */
#define in1_0  NULL
#define in2_0  NULL
#define prep_0  NULL
#define wout_0  NULL
#define cout_0  NULL
#define op_0  NULL

#define SPEC_in1_0 0
#define SPEC_in2_0 0
#define SPEC_prep_0 0
#define SPEC_wout_0 0
/* Give smaller names to the various facilities.
   NOTE(review): "SUPPPORT" below matches the spelling of the
   S390_FEAT_* identifier declared elsewhere; do not "fix" it here.  */
#define FAC_Z           S390_FEAT_ZARCH
#define FAC_CASS        S390_FEAT_COMPARE_AND_SWAP_AND_STORE
#define FAC_DFP         S390_FEAT_DFP
#define FAC_DFPR        S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* DFP-rounding */
#define FAC_DO          S390_FEAT_STFLE_45 /* distinct-operands */
#define FAC_EE          S390_FEAT_EXECUTE_EXT
#define FAC_EI          S390_FEAT_EXTENDED_IMMEDIATE
#define FAC_FPE         S390_FEAT_FLOATING_POINT_EXT
#define FAC_FPSSH       S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPS-sign-handling */
#define FAC_FPRGR       S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPR-GR-transfer */
#define FAC_GIE         S390_FEAT_GENERAL_INSTRUCTIONS_EXT
#define FAC_HFP_MA      S390_FEAT_HFP_MADDSUB
#define FAC_HW          S390_FEAT_STFLE_45 /* high-word */
#define FAC_IEEEE_SIM   S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* IEEE-exception-simulation */
#define FAC_MIE         S390_FEAT_STFLE_49 /* misc-instruction-extensions */
#define FAC_LAT         S390_FEAT_STFLE_49 /* load-and-trap */
#define FAC_LOC         S390_FEAT_STFLE_45 /* load/store on condition 1 */
#define FAC_LOC2        S390_FEAT_STFLE_53 /* load/store on condition 2 */
#define FAC_LD          S390_FEAT_LONG_DISPLACEMENT
#define FAC_PC          S390_FEAT_STFLE_45 /* population count */
#define FAC_SCF         S390_FEAT_STORE_CLOCK_FAST
#define FAC_SFLE        S390_FEAT_STFLE
#define FAC_ILA         S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
#define FAC_MVCOS       S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
#define FAC_LPP         S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
#define FAC_DAT_ENH     S390_FEAT_DAT_ENH
#define FAC_E2          S390_FEAT_EXTENDED_TRANSLATION_2
#define FAC_EH          S390_FEAT_STFLE_49 /* execution-hint */
#define FAC_PPA         S390_FEAT_STFLE_49 /* processor-assist */
#define FAC_LZRB        S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
#define FAC_ETF3        S390_FEAT_EXTENDED_TRANSLATION_3
/* The fully-populated insn table, indexed by enum DisasInsnEnum.  */
static const DisasInsn insn_info[] = {
#include "insn-data.def"
};

/* Third expansion: one switch case per opcode, returning the matching
   insn_info entry.  */
#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    case OPC: return &insn_info[insn_ ## NM];

/* Map a 16-bit (primary << 8 | secondary) opcode to its DisasInsn,
   or NULL if the opcode is unknown/unimplemented.  */
static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}

#undef D
#undef C
/* Extract a field from the insn.  The INSN should be left-aligned in
   the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principles of Operation.  */

static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
{
    uint32_t r, m;

    /* A zero-sized field means this operand slot is unused.  */
    if (f->size == 0) {
        return;
    }

    /* Zero extract the field from the insn.  */
    r = (insn << f->beg) >> (64 - f->size);

    /* Sign-extend, or un-swap the field as necessary.  */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        assert(f->size <= 32);
        m = 1u << (f->size - 1);
        r = (r ^ m) - m;
        break;
    case 2: /* dl+dh split, signed 20 bit. */
        r = ((int8_t)r << 12) | (r >> 8);
        break;
    default:
        abort();
    }

    /* Validate that the "compressed" encoding we selected above is valid.
       I.e. we haven't made two different original fields overlap.  */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;

    o->c[f->indexC] = r;
}
/* Lookup the insn at the current PC, extracting the operands into O and
   returning the info struct for the insn.  Returns NULL for invalid insn.  */

static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
                                     DisasFields *f)
{
    uint64_t insn, pc = s->pc;
    int op, op2, ilen;
    const DisasInsn *info;

    if (unlikely(s->ex_value)) {
        /* Drop the EX data now, so that it's clear on exception paths.  */
        TCGv_i64 zero = tcg_const_i64(0);
        tcg_gen_st_i64(zero, cpu_env, offsetof(CPUS390XState, ex_value));
        tcg_temp_free_i64(zero);

        /* Extract the values saved by EXECUTE.  */
        insn = s->ex_value & 0xffffffffffff0000ull;
        ilen = s->ex_value & 0xf;
        op = insn >> 56;
    } else {
        /* Read the first halfword; the top byte selects the length.  */
        insn = ld_code2(env, pc);
        op = (insn >> 8) & 0xff;
        ilen = get_ilen(op);
        switch (ilen) {
        case 2:
            insn = insn << 48;
            break;
        case 4:
            insn = ld_code4(env, pc) << 32;
            break;
        case 6:
            insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
            break;
        default:
            g_assert_not_reached();
        }
    }
    s->next_pc = s->pc + ilen;
    s->ilen = ilen;

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0.  */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE, IE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        op2 = (insn << 12) >> 60;
        break;
    case 0xc5: /* MII */
    case 0xc7: /* SMI */
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(f, 0, sizeof(*f));
    f->raw_insn = insn;
    f->op = op;
    f->op2 = op2;

    /* Lookup the instruction.  */
    info = lookup_opc(op << 8 | op2);

    /* If we found it, extract the operands.  */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(f, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
/* Translate a single guest instruction at s->pc: decode it, check
   specification exceptions, run the in/prep/op/wout/cout helper
   pipeline, and advance s->pc.  Returns the ExitStatus from the op
   helper (NO_EXIT to keep translating).  */
static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    ExitStatus ret = NO_EXIT;
    DisasFields f;
    DisasOps o;

    /* Search for the insn in the table.  */
    insn = extract_insn(env, s, &f);

    /* Not found means unimplemented/illegal opcode.  */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      f.op, f.op2);
        gen_illegal_opcode(s);
        return EXIT_NORETURN;
    }

#ifndef CONFIG_USER_ONLY
    if (s->tb->flags & FLAG_MASK_PER) {
        /* PER instruction-fetch event tracing.  */
        TCGv_i64 addr = tcg_const_i64(s->pc);
        gen_helper_per_ifetch(cpu_env, addr);
        tcg_temp_free_i64(addr);
    }
#endif

    /* Check for insn specification exceptions.  */
    if (insn->spec) {
        int spec = insn->spec, excp = 0, r;

        if (spec & SPEC_r1_even) {
            r = get_field(&f, r1);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r2_even) {
            r = get_field(&f, r2);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r3_even) {
            r = get_field(&f, r3);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        /* An f128 register pair is valid only for r <= 13.  */
        if (spec & SPEC_r1_f128) {
            r = get_field(&f, r1);
            if (r > 13) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r2_f128) {
            r = get_field(&f, r2);
            if (r > 13) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (excp) {
            gen_program_exception(s, excp);
            return EXIT_NORETURN;
        }
    }

    /* Set up the structures we use to communicate with the helpers.  */
    s->insn = insn;
    s->fields = &f;
    o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
    TCGV_UNUSED_I64(o.out);
    TCGV_UNUSED_I64(o.out2);
    TCGV_UNUSED_I64(o.in1);
    TCGV_UNUSED_I64(o.in2);
    TCGV_UNUSED_I64(o.addr1);

    /* Implement the instruction.  Each stage is optional; the order
       in1 -> in2 -> prep -> op -> wout -> cout is load-bearing.  */
    if (insn->help_in1) {
        insn->help_in1(s, &f, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &f, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &f, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (insn->help_wout) {
        insn->help_wout(s, &f, &o);
    }
    if (insn->help_cout) {
        insn->help_cout(s, &o);
    }

    /* Free any temporaries created by the helpers; g_* flags mark
       values that alias globals and must not be freed.  */
    if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (!TCGV_IS_UNUSED_I64(o.addr1)) {
        tcg_temp_free_i64(o.addr1);
    }

#ifndef CONFIG_USER_ONLY
    if (s->tb->flags & FLAG_MASK_PER) {
        /* An exception might be triggered, save PSW if not already done.  */
        if (ret == NO_EXIT || ret == EXIT_PC_STALE) {
            tcg_gen_movi_i64(psw_addr, s->next_pc);
        }

        /* Save off cc.  */
        update_cc_op(s);

        /* Call the helper to check for a possible PER exception.  */
        gen_helper_per_check_exception(cpu_env);
    }
#endif

    /* Advance to the next instruction.  */
    s->pc = s->next_pc;
    return ret;
}
/* Translate a whole TranslationBlock: repeatedly call translate_one
   until a terminating condition (page boundary, branch, single-step,
   insn budget) is hit, then emit the TB epilogue.  */
void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
{
    CPUS390XState *env = cs->env_ptr;
    DisasContext dc;
    target_ulong pc_start;
    uint64_t next_page_start;
    int num_insns, max_insns;
    ExitStatus status;
    bool do_debug;

    pc_start = tb->pc;

    /* 31-bit mode */
    if (!(tb->flags & FLAG_MASK_64)) {
        pc_start &= 0x7fffffff;
    }

    dc.tb = tb;
    dc.pc = pc_start;
    dc.cc_op = CC_OP_DYNAMIC;
    /* cs_base carries the EXECUTE state, if any.  */
    dc.ex_value = tb->cs_base;
    do_debug = dc.singlestep_enabled = cs->singlestep_enabled;

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;

    /* Clamp the per-TB instruction budget.  */
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);

    do {
        tcg_gen_insn_start(dc.pc, dc.cc_op);
        num_insns++;

        if (unlikely(cpu_breakpoint_test(cs, dc.pc, BP_ANY))) {
            status = EXIT_PC_STALE;
            do_debug = true;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing.  */
            dc.pc += 2;
            break;
        }

        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        status = translate_one(env, &dc);

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation.  */
        if (status == NO_EXIT
            && (dc.pc >= next_page_start
                || tcg_op_buf_full()
                || num_insns >= max_insns
                || singlestep
                || cs->singlestep_enabled
                || dc.ex_value)) {
            status = EXIT_PC_STALE;
        }
    } while (status == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    switch (status) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        break;
    case EXIT_PC_STALE:
    case EXIT_PC_STALE_NOCHAIN:
        update_psw_addr(&dc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
           cc op type is in env */
        update_cc_op(&dc);
        /* FALLTHRU */
    case EXIT_PC_CC_UPDATED:
        /* Exit the TB, either by raising a debug exception or by return.  */
        if (do_debug) {
            gen_exception(EXCP_DEBUG);
        } else if (use_exit_tb(&dc) || status == EXIT_PC_STALE_NOCHAIN) {
            tcg_gen_exit_tb(0);
        } else {
            tcg_gen_lookup_and_goto_ptr(psw_addr);
        }
        break;
    default:
        g_assert_not_reached();
    }

    gen_tb_end(tb, num_insns);

    tb->size = dc.pc - pc_start;
    tb->icount = num_insns;

#if defined(S390X_DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        qemu_log_lock();
        if (unlikely(dc.ex_value)) {
            /* ??? Unfortunately log_target_disas can't use host memory.  */
            qemu_log("IN: EXECUTE %016" PRIx64 "\n", dc.ex_value);
        } else {
            qemu_log("IN: %s\n", lookup_symbol(pc_start));
            log_target_disas(cs, pc_start, dc.pc - pc_start, 1);
            qemu_log("\n");
        }
        qemu_log_unlock();
    }
#endif
}
5920 void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
5921 target_ulong *data)
5923 int cc_op = data[1];
5924 env->psw.addr = data[0];
5925 if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
5926 env->cc_op = cc_op;