/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2009 Ulrich Hecht <uli@suse.de>
 * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
 * Copyright (c) 2010 Richard Henderson <rth@twiddle.net>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
27 /* We only support generating code for 64-bit mode. */
28 #if TCG_TARGET_REG_BITS != 64
29 #error "unsupported code generation mode"
32 #include "../tcg-pool.inc.c"
/* ??? The translation blocks produced by TCG are generally small enough to
   be entirely reachable with a 16-bit displacement.  Leaving the option for
   a 32-bit displacement here Just In Case.  */
#define USE_LONG_BRANCHES 0

/* Constant-constraint classes tested by tcg_target_const_match.  */
#define TCG_CT_CONST_S16   0x100
#define TCG_CT_CONST_S32   0x200
#define TCG_CT_CONST_S33   0x400
#define TCG_CT_CONST_ZERO  0x800

/* Several places within the instruction set 0 means "no register"
   rather than TCG_REG_R0.  */
#define TCG_REG_NONE 0

/* A scratch register that may be used throughout the backend.  */
#define TCG_TMP0 TCG_REG_R1

/* A scratch register that holds a pointer to the beginning of the TB.
   We don't need this when we have pc-relative loads with the general
   instructions extension facility.  */
#define TCG_REG_TB     TCG_REG_R12
#define USE_REG_TB     (!(s390_facilities & FACILITY_GEN_INST_EXT))

#ifndef CONFIG_SOFTMMU
#define TCG_GUEST_BASE_REG TCG_REG_R13
#endif
62 /* All of the following instructions are prefixed with their instruction
63 format, and are defined as 8- or 16-bit quantities, even when the two
64 halves of the 16-bit quantity may appear 32 bits apart in the insn.
65 This makes it easy to copy the values from the tables in Appendix B. */
66 typedef enum S390Opcode
{
#ifdef CONFIG_DEBUG_TCG
/* Names for each GPR, used when dumping the register allocator state.
   Note: the original lacked commas between "%r10".."%r15", which would
   have silently concatenated those literals into a single entry.  */
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
    "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
};
#endif
263 /* Since R6 is a potential argument register, choose it last of the
264 call-saved registers. Likewise prefer the call-clobbered registers
265 in reverse order to maximize the chance of avoiding the arguments. */
266 static const int tcg_target_reg_alloc_order
[] = {
267 /* Call saved registers. */
276 /* Call clobbered registers. */
280 /* Argument registers, in reverse order of allocation. */
287 static const int tcg_target_call_iarg_regs
[] = {
295 static const int tcg_target_call_oarg_regs
[] = {
303 #define S390_CC_NE (S390_CC_LT | S390_CC_GT)
304 #define S390_CC_LE (S390_CC_LT | S390_CC_EQ)
305 #define S390_CC_GE (S390_CC_GT | S390_CC_EQ)
306 #define S390_CC_NEVER 0
307 #define S390_CC_ALWAYS 15
309 /* Condition codes that result from a COMPARE and COMPARE LOGICAL. */
310 static const uint8_t tcg_cond_to_s390_cond
[] = {
311 [TCG_COND_EQ
] = S390_CC_EQ
,
312 [TCG_COND_NE
] = S390_CC_NE
,
313 [TCG_COND_LT
] = S390_CC_LT
,
314 [TCG_COND_LE
] = S390_CC_LE
,
315 [TCG_COND_GT
] = S390_CC_GT
,
316 [TCG_COND_GE
] = S390_CC_GE
,
317 [TCG_COND_LTU
] = S390_CC_LT
,
318 [TCG_COND_LEU
] = S390_CC_LE
,
319 [TCG_COND_GTU
] = S390_CC_GT
,
320 [TCG_COND_GEU
] = S390_CC_GE
,
323 /* Condition codes that result from a LOAD AND TEST. Here, we have no
324 unsigned instruction variation, however since the test is vs zero we
325 can re-map the outcomes appropriately. */
326 static const uint8_t tcg_cond_to_ltr_cond
[] = {
327 [TCG_COND_EQ
] = S390_CC_EQ
,
328 [TCG_COND_NE
] = S390_CC_NE
,
329 [TCG_COND_LT
] = S390_CC_LT
,
330 [TCG_COND_LE
] = S390_CC_LE
,
331 [TCG_COND_GT
] = S390_CC_GT
,
332 [TCG_COND_GE
] = S390_CC_GE
,
333 [TCG_COND_LTU
] = S390_CC_NEVER
,
334 [TCG_COND_LEU
] = S390_CC_EQ
,
335 [TCG_COND_GTU
] = S390_CC_NE
,
336 [TCG_COND_GEU
] = S390_CC_ALWAYS
,
#ifdef CONFIG_SOFTMMU
/* Slow-path load helpers, indexed by MemOp (size/sign/endian) bits.  */
static void * const qemu_ld_helpers[16] = {
    [MO_UB]   = helper_ret_ldub_mmu,
    [MO_SB]   = helper_ret_ldsb_mmu,
    [MO_LEUW] = helper_le_lduw_mmu,
    [MO_LESW] = helper_le_ldsw_mmu,
    [MO_LEUL] = helper_le_ldul_mmu,
    [MO_LESL] = helper_le_ldsl_mmu,
    [MO_LEQ]  = helper_le_ldq_mmu,
    [MO_BEUW] = helper_be_lduw_mmu,
    [MO_BESW] = helper_be_ldsw_mmu,
    [MO_BEUL] = helper_be_ldul_mmu,
    [MO_BESL] = helper_be_ldsl_mmu,
    [MO_BEQ]  = helper_be_ldq_mmu,
};

/* Slow-path store helpers; stores need no sign-extended variants.  */
static void * const qemu_st_helpers[16] = {
    [MO_UB]   = helper_ret_stb_mmu,
    [MO_LEUW] = helper_le_stw_mmu,
    [MO_LEUL] = helper_le_stl_mmu,
    [MO_LEQ]  = helper_le_stq_mmu,
    [MO_BEUW] = helper_be_stw_mmu,
    [MO_BEUL] = helper_be_stl_mmu,
    [MO_BEQ]  = helper_be_stq_mmu,
};
#endif
366 static tcg_insn_unit
*tb_ret_addr
;
367 uint64_t s390_facilities
;
369 static bool patch_reloc(tcg_insn_unit
*code_ptr
, int type
,
370 intptr_t value
, intptr_t addend
)
376 pcrel2
= (tcg_insn_unit
*)value
- code_ptr
;
380 if (pcrel2
== (int16_t)pcrel2
) {
381 tcg_patch16(code_ptr
, pcrel2
);
386 if (pcrel2
== (int32_t)pcrel2
) {
387 tcg_patch32(code_ptr
, pcrel2
);
392 if (value
== sextract64(value
, 0, 20)) {
393 old
= *(uint32_t *)code_ptr
& 0xf00000ff;
394 old
|= ((value
& 0xfff) << 16) | ((value
& 0xff000) >> 4);
395 tcg_patch32(code_ptr
, old
);
400 g_assert_not_reached();
405 /* parse target specific constraints */
406 static const char *target_parse_constraint(TCGArgConstraint
*ct
,
407 const char *ct_str
, TCGType type
)
410 case 'r': /* all registers */
411 ct
->ct
|= TCG_CT_REG
;
414 case 'L': /* qemu_ld/st constraint */
415 ct
->ct
|= TCG_CT_REG
;
417 tcg_regset_reset_reg(ct
->u
.regs
, TCG_REG_R2
);
418 tcg_regset_reset_reg(ct
->u
.regs
, TCG_REG_R3
);
419 tcg_regset_reset_reg(ct
->u
.regs
, TCG_REG_R4
);
421 case 'a': /* force R2 for division */
422 ct
->ct
|= TCG_CT_REG
;
424 tcg_regset_set_reg(ct
->u
.regs
, TCG_REG_R2
);
426 case 'b': /* force R3 for division */
427 ct
->ct
|= TCG_CT_REG
;
429 tcg_regset_set_reg(ct
->u
.regs
, TCG_REG_R3
);
432 ct
->ct
|= TCG_CT_CONST_S33
;
435 ct
->ct
|= TCG_CT_CONST_S16
;
438 ct
->ct
|= TCG_CT_CONST_S32
;
441 ct
->ct
|= TCG_CT_CONST_ZERO
;
449 /* Test if a constant matches the constraint. */
450 static int tcg_target_const_match(tcg_target_long val
, TCGType type
,
451 const TCGArgConstraint
*arg_ct
)
455 if (ct
& TCG_CT_CONST
) {
459 if (type
== TCG_TYPE_I32
) {
463 /* The following are mutually exclusive. */
464 if (ct
& TCG_CT_CONST_S16
) {
465 return val
== (int16_t)val
;
466 } else if (ct
& TCG_CT_CONST_S32
) {
467 return val
== (int32_t)val
;
468 } else if (ct
& TCG_CT_CONST_S33
) {
469 return val
>= -0xffffffffll
&& val
<= 0xffffffffll
;
470 } else if (ct
& TCG_CT_CONST_ZERO
) {
477 /* Emit instructions according to the given instruction format. */
479 static void tcg_out_insn_RR(TCGContext
*s
, S390Opcode op
, TCGReg r1
, TCGReg r2
)
481 tcg_out16(s
, (op
<< 8) | (r1
<< 4) | r2
);
484 static void tcg_out_insn_RRE(TCGContext
*s
, S390Opcode op
,
485 TCGReg r1
, TCGReg r2
)
487 tcg_out32(s
, (op
<< 16) | (r1
<< 4) | r2
);
490 static void tcg_out_insn_RRF(TCGContext
*s
, S390Opcode op
,
491 TCGReg r1
, TCGReg r2
, int m3
)
493 tcg_out32(s
, (op
<< 16) | (m3
<< 12) | (r1
<< 4) | r2
);
496 static void tcg_out_insn_RI(TCGContext
*s
, S390Opcode op
, TCGReg r1
, int i2
)
498 tcg_out32(s
, (op
<< 16) | (r1
<< 20) | (i2
& 0xffff));
501 static void tcg_out_insn_RIE(TCGContext
*s
, S390Opcode op
, TCGReg r1
,
504 tcg_out16(s
, (op
& 0xff00) | (r1
<< 4) | m3
);
505 tcg_out32(s
, (i2
<< 16) | (op
& 0xff));
508 static void tcg_out_insn_RIL(TCGContext
*s
, S390Opcode op
, TCGReg r1
, int i2
)
510 tcg_out16(s
, op
| (r1
<< 4));
514 static void tcg_out_insn_RS(TCGContext
*s
, S390Opcode op
, TCGReg r1
,
515 TCGReg b2
, TCGReg r3
, int disp
)
517 tcg_out32(s
, (op
<< 24) | (r1
<< 20) | (r3
<< 16) | (b2
<< 12)
521 static void tcg_out_insn_RSY(TCGContext
*s
, S390Opcode op
, TCGReg r1
,
522 TCGReg b2
, TCGReg r3
, int disp
)
524 tcg_out16(s
, (op
& 0xff00) | (r1
<< 4) | r3
);
525 tcg_out32(s
, (op
& 0xff) | (b2
<< 28)
526 | ((disp
& 0xfff) << 16) | ((disp
& 0xff000) >> 4));
529 #define tcg_out_insn_RX tcg_out_insn_RS
530 #define tcg_out_insn_RXY tcg_out_insn_RSY
532 /* Emit an opcode with "type-checking" of the format. */
533 #define tcg_out_insn(S, FMT, OP, ...) \
534 glue(tcg_out_insn_,FMT)(S, glue(glue(FMT,_),OP), ## __VA_ARGS__)
537 /* emit 64-bit shifts */
538 static void tcg_out_sh64(TCGContext
* s
, S390Opcode op
, TCGReg dest
,
539 TCGReg src
, TCGReg sh_reg
, int sh_imm
)
541 tcg_out_insn_RSY(s
, op
, dest
, sh_reg
, src
, sh_imm
);
544 /* emit 32-bit shifts */
545 static void tcg_out_sh32(TCGContext
* s
, S390Opcode op
, TCGReg dest
,
546 TCGReg sh_reg
, int sh_imm
)
548 tcg_out_insn_RS(s
, op
, dest
, sh_reg
, 0, sh_imm
);
551 static bool tcg_out_mov(TCGContext
*s
, TCGType type
, TCGReg dst
, TCGReg src
)
554 if (type
== TCG_TYPE_I32
) {
555 tcg_out_insn(s
, RR
, LR
, dst
, src
);
557 tcg_out_insn(s
, RRE
, LGR
, dst
, src
);
563 static const S390Opcode lli_insns
[4] = {
564 RI_LLILL
, RI_LLILH
, RI_LLIHL
, RI_LLIHH
567 static bool maybe_out_small_movi(TCGContext
*s
, TCGType type
,
568 TCGReg ret
, tcg_target_long sval
)
570 tcg_target_ulong uval
= sval
;
573 if (type
== TCG_TYPE_I32
) {
574 uval
= (uint32_t)sval
;
575 sval
= (int32_t)sval
;
578 /* Try all 32-bit insns that can load it in one go. */
579 if (sval
>= -0x8000 && sval
< 0x8000) {
580 tcg_out_insn(s
, RI
, LGHI
, ret
, sval
);
584 for (i
= 0; i
< 4; i
++) {
585 tcg_target_long mask
= 0xffffull
<< i
*16;
586 if ((uval
& mask
) == uval
) {
587 tcg_out_insn_RI(s
, lli_insns
[i
], ret
, uval
>> i
*16);
595 /* load a register with an immediate value */
596 static void tcg_out_movi_int(TCGContext
*s
, TCGType type
, TCGReg ret
,
597 tcg_target_long sval
, bool in_prologue
)
599 tcg_target_ulong uval
;
601 /* Try all 32-bit insns that can load it in one go. */
602 if (maybe_out_small_movi(s
, type
, ret
, sval
)) {
607 if (type
== TCG_TYPE_I32
) {
608 uval
= (uint32_t)sval
;
609 sval
= (int32_t)sval
;
612 /* Try all 48-bit insns that can load it in one go. */
613 if (s390_facilities
& FACILITY_EXT_IMM
) {
614 if (sval
== (int32_t)sval
) {
615 tcg_out_insn(s
, RIL
, LGFI
, ret
, sval
);
618 if (uval
<= 0xffffffff) {
619 tcg_out_insn(s
, RIL
, LLILF
, ret
, uval
);
622 if ((uval
& 0xffffffff) == 0) {
623 tcg_out_insn(s
, RIL
, LLIHF
, ret
, uval
>> 32);
628 /* Try for PC-relative address load. For odd addresses,
629 attempt to use an offset from the start of the TB. */
630 if ((sval
& 1) == 0) {
631 ptrdiff_t off
= tcg_pcrel_diff(s
, (void *)sval
) >> 1;
632 if (off
== (int32_t)off
) {
633 tcg_out_insn(s
, RIL
, LARL
, ret
, off
);
636 } else if (USE_REG_TB
&& !in_prologue
) {
637 ptrdiff_t off
= sval
- (uintptr_t)s
->code_gen_ptr
;
638 if (off
== sextract64(off
, 0, 20)) {
639 /* This is certain to be an address within TB, and therefore
640 OFF will be negative; don't try RX_LA. */
641 tcg_out_insn(s
, RXY
, LAY
, ret
, TCG_REG_TB
, TCG_REG_NONE
, off
);
646 /* A 32-bit unsigned value can be loaded in 2 insns. And given
647 that LLILL, LLIHL, LLILF above did not succeed, we know that
648 both insns are required. */
649 if (uval
<= 0xffffffff) {
650 tcg_out_insn(s
, RI
, LLILL
, ret
, uval
);
651 tcg_out_insn(s
, RI
, IILH
, ret
, uval
>> 16);
655 /* Otherwise, stuff it in the constant pool. */
656 if (s390_facilities
& FACILITY_GEN_INST_EXT
) {
657 tcg_out_insn(s
, RIL
, LGRL
, ret
, 0);
658 new_pool_label(s
, sval
, R_390_PC32DBL
, s
->code_ptr
- 2, 2);
659 } else if (USE_REG_TB
&& !in_prologue
) {
660 tcg_out_insn(s
, RXY
, LG
, ret
, TCG_REG_TB
, TCG_REG_NONE
, 0);
661 new_pool_label(s
, sval
, R_390_20
, s
->code_ptr
- 2,
662 -(intptr_t)s
->code_gen_ptr
);
664 TCGReg base
= ret
? ret
: TCG_TMP0
;
665 tcg_out_insn(s
, RIL
, LARL
, base
, 0);
666 new_pool_label(s
, sval
, R_390_PC32DBL
, s
->code_ptr
- 2, 2);
667 tcg_out_insn(s
, RXY
, LG
, ret
, base
, TCG_REG_NONE
, 0);
671 static void tcg_out_movi(TCGContext
*s
, TCGType type
,
672 TCGReg ret
, tcg_target_long sval
)
674 tcg_out_movi_int(s
, type
, ret
, sval
, false);
677 /* Emit a load/store type instruction. Inputs are:
678 DATA: The register to be loaded or stored.
679 BASE+OFS: The effective address.
680 OPC_RX: If the operation has an RX format opcode (e.g. STC), otherwise 0.
681 OPC_RXY: The RXY format opcode for the operation (e.g. STCY). */
683 static void tcg_out_mem(TCGContext
*s
, S390Opcode opc_rx
, S390Opcode opc_rxy
,
684 TCGReg data
, TCGReg base
, TCGReg index
,
687 if (ofs
< -0x80000 || ofs
>= 0x80000) {
688 /* Combine the low 20 bits of the offset with the actual load insn;
689 the high 44 bits must come from an immediate load. */
690 tcg_target_long low
= ((ofs
& 0xfffff) ^ 0x80000) - 0x80000;
691 tcg_out_movi(s
, TCG_TYPE_PTR
, TCG_TMP0
, ofs
- low
);
694 /* If we were already given an index register, add it in. */
695 if (index
!= TCG_REG_NONE
) {
696 tcg_out_insn(s
, RRE
, AGR
, TCG_TMP0
, index
);
701 if (opc_rx
&& ofs
>= 0 && ofs
< 0x1000) {
702 tcg_out_insn_RX(s
, opc_rx
, data
, base
, index
, ofs
);
704 tcg_out_insn_RXY(s
, opc_rxy
, data
, base
, index
, ofs
);
709 /* load data without address translation or endianness conversion */
710 static inline void tcg_out_ld(TCGContext
*s
, TCGType type
, TCGReg data
,
711 TCGReg base
, intptr_t ofs
)
713 if (type
== TCG_TYPE_I32
) {
714 tcg_out_mem(s
, RX_L
, RXY_LY
, data
, base
, TCG_REG_NONE
, ofs
);
716 tcg_out_mem(s
, 0, RXY_LG
, data
, base
, TCG_REG_NONE
, ofs
);
720 static inline void tcg_out_st(TCGContext
*s
, TCGType type
, TCGReg data
,
721 TCGReg base
, intptr_t ofs
)
723 if (type
== TCG_TYPE_I32
) {
724 tcg_out_mem(s
, RX_ST
, RXY_STY
, data
, base
, TCG_REG_NONE
, ofs
);
726 tcg_out_mem(s
, 0, RXY_STG
, data
, base
, TCG_REG_NONE
, ofs
);
730 static inline bool tcg_out_sti(TCGContext
*s
, TCGType type
, TCGArg val
,
731 TCGReg base
, intptr_t ofs
)
736 /* load data from an absolute host address */
737 static void tcg_out_ld_abs(TCGContext
*s
, TCGType type
, TCGReg dest
, void *abs
)
739 intptr_t addr
= (intptr_t)abs
;
741 if ((s390_facilities
& FACILITY_GEN_INST_EXT
) && !(addr
& 1)) {
742 ptrdiff_t disp
= tcg_pcrel_diff(s
, abs
) >> 1;
743 if (disp
== (int32_t)disp
) {
744 if (type
== TCG_TYPE_I32
) {
745 tcg_out_insn(s
, RIL
, LRL
, dest
, disp
);
747 tcg_out_insn(s
, RIL
, LGRL
, dest
, disp
);
753 ptrdiff_t disp
= abs
- (void *)s
->code_gen_ptr
;
754 if (disp
== sextract64(disp
, 0, 20)) {
755 tcg_out_ld(s
, type
, dest
, TCG_REG_TB
, disp
);
760 tcg_out_movi(s
, TCG_TYPE_PTR
, dest
, addr
& ~0xffff);
761 tcg_out_ld(s
, type
, dest
, dest
, addr
& 0xffff);
764 static inline void tcg_out_risbg(TCGContext
*s
, TCGReg dest
, TCGReg src
,
765 int msb
, int lsb
, int ofs
, int z
)
768 tcg_out16(s
, (RIE_RISBG
& 0xff00) | (dest
<< 4) | src
);
769 tcg_out16(s
, (msb
<< 8) | (z
<< 7) | lsb
);
770 tcg_out16(s
, (ofs
<< 8) | (RIE_RISBG
& 0xff));
773 static void tgen_ext8s(TCGContext
*s
, TCGType type
, TCGReg dest
, TCGReg src
)
775 if (s390_facilities
& FACILITY_EXT_IMM
) {
776 tcg_out_insn(s
, RRE
, LGBR
, dest
, src
);
780 if (type
== TCG_TYPE_I32
) {
782 tcg_out_sh32(s
, RS_SLL
, dest
, TCG_REG_NONE
, 24);
784 tcg_out_sh64(s
, RSY_SLLG
, dest
, src
, TCG_REG_NONE
, 24);
786 tcg_out_sh32(s
, RS_SRA
, dest
, TCG_REG_NONE
, 24);
788 tcg_out_sh64(s
, RSY_SLLG
, dest
, src
, TCG_REG_NONE
, 56);
789 tcg_out_sh64(s
, RSY_SRAG
, dest
, dest
, TCG_REG_NONE
, 56);
793 static void tgen_ext8u(TCGContext
*s
, TCGType type
, TCGReg dest
, TCGReg src
)
795 if (s390_facilities
& FACILITY_EXT_IMM
) {
796 tcg_out_insn(s
, RRE
, LLGCR
, dest
, src
);
801 tcg_out_movi(s
, type
, TCG_TMP0
, 0xff);
804 tcg_out_movi(s
, type
, dest
, 0xff);
806 if (type
== TCG_TYPE_I32
) {
807 tcg_out_insn(s
, RR
, NR
, dest
, src
);
809 tcg_out_insn(s
, RRE
, NGR
, dest
, src
);
813 static void tgen_ext16s(TCGContext
*s
, TCGType type
, TCGReg dest
, TCGReg src
)
815 if (s390_facilities
& FACILITY_EXT_IMM
) {
816 tcg_out_insn(s
, RRE
, LGHR
, dest
, src
);
820 if (type
== TCG_TYPE_I32
) {
822 tcg_out_sh32(s
, RS_SLL
, dest
, TCG_REG_NONE
, 16);
824 tcg_out_sh64(s
, RSY_SLLG
, dest
, src
, TCG_REG_NONE
, 16);
826 tcg_out_sh32(s
, RS_SRA
, dest
, TCG_REG_NONE
, 16);
828 tcg_out_sh64(s
, RSY_SLLG
, dest
, src
, TCG_REG_NONE
, 48);
829 tcg_out_sh64(s
, RSY_SRAG
, dest
, dest
, TCG_REG_NONE
, 48);
833 static void tgen_ext16u(TCGContext
*s
, TCGType type
, TCGReg dest
, TCGReg src
)
835 if (s390_facilities
& FACILITY_EXT_IMM
) {
836 tcg_out_insn(s
, RRE
, LLGHR
, dest
, src
);
841 tcg_out_movi(s
, type
, TCG_TMP0
, 0xffff);
844 tcg_out_movi(s
, type
, dest
, 0xffff);
846 if (type
== TCG_TYPE_I32
) {
847 tcg_out_insn(s
, RR
, NR
, dest
, src
);
849 tcg_out_insn(s
, RRE
, NGR
, dest
, src
);
853 static inline void tgen_ext32s(TCGContext
*s
, TCGReg dest
, TCGReg src
)
855 tcg_out_insn(s
, RRE
, LGFR
, dest
, src
);
858 static inline void tgen_ext32u(TCGContext
*s
, TCGReg dest
, TCGReg src
)
860 tcg_out_insn(s
, RRE
, LLGFR
, dest
, src
);
/* Accept bit patterns that consist of a single contiguous run of ones,
   possibly wrapping around from bit 63 to bit 0 -- i.e. masks with at
   most two 0/1 transitions -- since those are expressible as a RISBG
   insert range.  All-zeros and all-ones are rejected.
   Copied from gcc sources.  */
static inline bool risbg_mask(uint64_t c)
{
    uint64_t lsb;
    /* We don't change the number of transitions by inverting,
       so make sure we start with the LSB zero.  */
    if (c & 1) {
        c = ~c;
    }
    /* Reject all zeros or all ones.  */
    if (c == 0) {
        return false;
    }
    /* Find the first transition.  */
    lsb = c & -c;
    /* Invert to look for a second transition.  */
    c = ~c;
    /* Erase the first transition.  */
    c &= -lsb;
    /* Find the second transition, if any.  */
    lsb = c & -c;
    /* Match if all the bits are 1's, or if c is zero.  */
    return c == -lsb;
}
893 static void tgen_andi_risbg(TCGContext
*s
, TCGReg out
, TCGReg in
, uint64_t val
)
896 if ((val
& 0x8000000000000001ull
) == 0x8000000000000001ull
) {
897 /* Achieve wraparound by swapping msb and lsb. */
898 msb
= 64 - ctz64(~val
);
899 lsb
= clz64(~val
) - 1;
902 lsb
= 63 - ctz64(val
);
904 tcg_out_risbg(s
, out
, in
, msb
, lsb
, 0, 1);
907 static void tgen_andi(TCGContext
*s
, TCGType type
, TCGReg dest
, uint64_t val
)
909 static const S390Opcode ni_insns
[4] = {
910 RI_NILL
, RI_NILH
, RI_NIHL
, RI_NIHH
912 static const S390Opcode nif_insns
[2] = {
915 uint64_t valid
= (type
== TCG_TYPE_I32
? 0xffffffffull
: -1ull);
918 /* Look for the zero-extensions. */
919 if ((val
& valid
) == 0xffffffff) {
920 tgen_ext32u(s
, dest
, dest
);
923 if (s390_facilities
& FACILITY_EXT_IMM
) {
924 if ((val
& valid
) == 0xff) {
925 tgen_ext8u(s
, TCG_TYPE_I64
, dest
, dest
);
928 if ((val
& valid
) == 0xffff) {
929 tgen_ext16u(s
, TCG_TYPE_I64
, dest
, dest
);
934 /* Try all 32-bit insns that can perform it in one go. */
935 for (i
= 0; i
< 4; i
++) {
936 tcg_target_ulong mask
= ~(0xffffull
<< i
*16);
937 if (((val
| ~valid
) & mask
) == mask
) {
938 tcg_out_insn_RI(s
, ni_insns
[i
], dest
, val
>> i
*16);
943 /* Try all 48-bit insns that can perform it in one go. */
944 if (s390_facilities
& FACILITY_EXT_IMM
) {
945 for (i
= 0; i
< 2; i
++) {
946 tcg_target_ulong mask
= ~(0xffffffffull
<< i
*32);
947 if (((val
| ~valid
) & mask
) == mask
) {
948 tcg_out_insn_RIL(s
, nif_insns
[i
], dest
, val
>> i
*32);
953 if ((s390_facilities
& FACILITY_GEN_INST_EXT
) && risbg_mask(val
)) {
954 tgen_andi_risbg(s
, dest
, dest
, val
);
958 /* Use the constant pool if USE_REG_TB, but not for small constants. */
960 if (!maybe_out_small_movi(s
, type
, TCG_TMP0
, val
)) {
961 tcg_out_insn(s
, RXY
, NG
, dest
, TCG_REG_TB
, TCG_REG_NONE
, 0);
962 new_pool_label(s
, val
& valid
, R_390_20
, s
->code_ptr
- 2,
963 -(intptr_t)s
->code_gen_ptr
);
967 tcg_out_movi(s
, type
, TCG_TMP0
, val
);
969 if (type
== TCG_TYPE_I32
) {
970 tcg_out_insn(s
, RR
, NR
, dest
, TCG_TMP0
);
972 tcg_out_insn(s
, RRE
, NGR
, dest
, TCG_TMP0
);
976 static void tgen_ori(TCGContext
*s
, TCGType type
, TCGReg dest
, uint64_t val
)
978 static const S390Opcode oi_insns
[4] = {
979 RI_OILL
, RI_OILH
, RI_OIHL
, RI_OIHH
981 static const S390Opcode oif_insns
[2] = {
987 /* Look for no-op. */
988 if (unlikely(val
== 0)) {
992 /* Try all 32-bit insns that can perform it in one go. */
993 for (i
= 0; i
< 4; i
++) {
994 tcg_target_ulong mask
= (0xffffull
<< i
*16);
995 if ((val
& mask
) != 0 && (val
& ~mask
) == 0) {
996 tcg_out_insn_RI(s
, oi_insns
[i
], dest
, val
>> i
*16);
1001 /* Try all 48-bit insns that can perform it in one go. */
1002 if (s390_facilities
& FACILITY_EXT_IMM
) {
1003 for (i
= 0; i
< 2; i
++) {
1004 tcg_target_ulong mask
= (0xffffffffull
<< i
*32);
1005 if ((val
& mask
) != 0 && (val
& ~mask
) == 0) {
1006 tcg_out_insn_RIL(s
, oif_insns
[i
], dest
, val
>> i
*32);
1012 /* Use the constant pool if USE_REG_TB, but not for small constants. */
1013 if (maybe_out_small_movi(s
, type
, TCG_TMP0
, val
)) {
1014 if (type
== TCG_TYPE_I32
) {
1015 tcg_out_insn(s
, RR
, OR
, dest
, TCG_TMP0
);
1017 tcg_out_insn(s
, RRE
, OGR
, dest
, TCG_TMP0
);
1019 } else if (USE_REG_TB
) {
1020 tcg_out_insn(s
, RXY
, OG
, dest
, TCG_REG_TB
, TCG_REG_NONE
, 0);
1021 new_pool_label(s
, val
, R_390_20
, s
->code_ptr
- 2,
1022 -(intptr_t)s
->code_gen_ptr
);
1024 /* Perform the OR via sequential modifications to the high and
1025 low parts. Do this via recursion to handle 16-bit vs 32-bit
1026 masks in each half. */
1027 tcg_debug_assert(s390_facilities
& FACILITY_EXT_IMM
);
1028 tgen_ori(s
, type
, dest
, val
& 0x00000000ffffffffull
);
1029 tgen_ori(s
, type
, dest
, val
& 0xffffffff00000000ull
);
1033 static void tgen_xori(TCGContext
*s
, TCGType type
, TCGReg dest
, uint64_t val
)
1035 /* Try all 48-bit insns that can perform it in one go. */
1036 if (s390_facilities
& FACILITY_EXT_IMM
) {
1037 if ((val
& 0xffffffff00000000ull
) == 0) {
1038 tcg_out_insn(s
, RIL
, XILF
, dest
, val
);
1041 if ((val
& 0x00000000ffffffffull
) == 0) {
1042 tcg_out_insn(s
, RIL
, XIHF
, dest
, val
>> 32);
1047 /* Use the constant pool if USE_REG_TB, but not for small constants. */
1048 if (maybe_out_small_movi(s
, type
, TCG_TMP0
, val
)) {
1049 if (type
== TCG_TYPE_I32
) {
1050 tcg_out_insn(s
, RR
, XR
, dest
, TCG_TMP0
);
1052 tcg_out_insn(s
, RRE
, XGR
, dest
, TCG_TMP0
);
1054 } else if (USE_REG_TB
) {
1055 tcg_out_insn(s
, RXY
, XG
, dest
, TCG_REG_TB
, TCG_REG_NONE
, 0);
1056 new_pool_label(s
, val
, R_390_20
, s
->code_ptr
- 2,
1057 -(intptr_t)s
->code_gen_ptr
);
1059 /* Perform the xor by parts. */
1060 tcg_debug_assert(s390_facilities
& FACILITY_EXT_IMM
);
1061 if (val
& 0xffffffff) {
1062 tcg_out_insn(s
, RIL
, XILF
, dest
, val
);
1064 if (val
> 0xffffffff) {
1065 tcg_out_insn(s
, RIL
, XIHF
, dest
, val
>> 32);
1070 static int tgen_cmp(TCGContext
*s
, TCGType type
, TCGCond c
, TCGReg r1
,
1071 TCGArg c2
, bool c2const
, bool need_carry
)
1073 bool is_unsigned
= is_unsigned_cond(c
);
1078 if (!(is_unsigned
&& need_carry
)) {
1079 if (type
== TCG_TYPE_I32
) {
1080 tcg_out_insn(s
, RR
, LTR
, r1
, r1
);
1082 tcg_out_insn(s
, RRE
, LTGR
, r1
, r1
);
1084 return tcg_cond_to_ltr_cond
[c
];
1088 if (!is_unsigned
&& c2
== (int16_t)c2
) {
1089 op
= (type
== TCG_TYPE_I32
? RI_CHI
: RI_CGHI
);
1090 tcg_out_insn_RI(s
, op
, r1
, c2
);
1094 if (s390_facilities
& FACILITY_EXT_IMM
) {
1095 if (type
== TCG_TYPE_I32
) {
1096 op
= (is_unsigned
? RIL_CLFI
: RIL_CFI
);
1097 tcg_out_insn_RIL(s
, op
, r1
, c2
);
1099 } else if (c2
== (is_unsigned
? (uint32_t)c2
: (int32_t)c2
)) {
1100 op
= (is_unsigned
? RIL_CLGFI
: RIL_CGFI
);
1101 tcg_out_insn_RIL(s
, op
, r1
, c2
);
1106 /* Use the constant pool, but not for small constants. */
1107 if (maybe_out_small_movi(s
, type
, TCG_TMP0
, c2
)) {
1109 /* fall through to reg-reg */
1110 } else if (USE_REG_TB
) {
1111 if (type
== TCG_TYPE_I32
) {
1112 op
= (is_unsigned
? RXY_CLY
: RXY_CY
);
1113 tcg_out_insn_RXY(s
, op
, r1
, TCG_REG_TB
, TCG_REG_NONE
, 0);
1114 new_pool_label(s
, (uint32_t)c2
, R_390_20
, s
->code_ptr
- 2,
1115 4 - (intptr_t)s
->code_gen_ptr
);
1117 op
= (is_unsigned
? RXY_CLG
: RXY_CG
);
1118 tcg_out_insn_RXY(s
, op
, r1
, TCG_REG_TB
, TCG_REG_NONE
, 0);
1119 new_pool_label(s
, c2
, R_390_20
, s
->code_ptr
- 2,
1120 -(intptr_t)s
->code_gen_ptr
);
1124 if (type
== TCG_TYPE_I32
) {
1125 op
= (is_unsigned
? RIL_CLRL
: RIL_CRL
);
1126 tcg_out_insn_RIL(s
, op
, r1
, 0);
1127 new_pool_label(s
, (uint32_t)c2
, R_390_PC32DBL
,
1128 s
->code_ptr
- 2, 2 + 4);
1130 op
= (is_unsigned
? RIL_CLGRL
: RIL_CGRL
);
1131 tcg_out_insn_RIL(s
, op
, r1
, 0);
1132 new_pool_label(s
, c2
, R_390_PC32DBL
, s
->code_ptr
- 2, 2);
1138 if (type
== TCG_TYPE_I32
) {
1139 op
= (is_unsigned
? RR_CLR
: RR_CR
);
1140 tcg_out_insn_RR(s
, op
, r1
, c2
);
1142 op
= (is_unsigned
? RRE_CLGR
: RRE_CGR
);
1143 tcg_out_insn_RRE(s
, op
, r1
, c2
);
1147 return tcg_cond_to_s390_cond
[c
];
1150 static void tgen_setcond(TCGContext
*s
, TCGType type
, TCGCond cond
,
1151 TCGReg dest
, TCGReg c1
, TCGArg c2
, int c2const
)
1156 /* With LOC2, we can always emit the minimum 3 insns. */
1157 if (s390_facilities
& FACILITY_LOAD_ON_COND2
) {
1158 /* Emit: d = 0, d = (cc ? 1 : d). */
1159 cc
= tgen_cmp(s
, type
, cond
, c1
, c2
, c2const
, false);
1160 tcg_out_movi(s
, TCG_TYPE_I64
, dest
, 0);
1161 tcg_out_insn(s
, RIE
, LOCGHI
, dest
, 1, cc
);
1165 have_loc
= (s390_facilities
& FACILITY_LOAD_ON_COND
) != 0;
1167 /* For HAVE_LOC, only the paths through GTU/GT/LEU/LE are smaller. */
1171 /* X != 0 is X > 0. */
1172 if (c2const
&& c2
== 0) {
1173 cond
= TCG_COND_GTU
;
1181 /* The result of a compare has CC=2 for GT and CC=3 unused.
1182 ADD LOGICAL WITH CARRY considers (CC & 2) the carry bit. */
1183 tgen_cmp(s
, type
, cond
, c1
, c2
, c2const
, true);
1184 tcg_out_movi(s
, type
, dest
, 0);
1185 tcg_out_insn(s
, RRE
, ALCGR
, dest
, dest
);
1189 /* X == 0 is X <= 0. */
1190 if (c2const
&& c2
== 0) {
1191 cond
= TCG_COND_LEU
;
1199 /* As above, but we're looking for borrow, or !carry.
1200 The second insn computes d - d - borrow, or -1 for true
1201 and 0 for false. So we must mask to 1 bit afterward. */
1202 tgen_cmp(s
, type
, cond
, c1
, c2
, c2const
, true);
1203 tcg_out_insn(s
, RRE
, SLBGR
, dest
, dest
);
1204 tgen_andi(s
, type
, dest
, 1);
1211 /* Swap operands so that we can use LEU/GTU/GT/LE. */
1216 tcg_out_movi(s
, type
, TCG_TMP0
, c2
);
1225 cond
= tcg_swap_cond(cond
);
1229 g_assert_not_reached();
1232 cc
= tgen_cmp(s
, type
, cond
, c1
, c2
, c2const
, false);
1234 /* Emit: d = 0, t = 1, d = (cc ? t : d). */
1235 tcg_out_movi(s
, TCG_TYPE_I64
, dest
, 0);
1236 tcg_out_movi(s
, TCG_TYPE_I64
, TCG_TMP0
, 1);
1237 tcg_out_insn(s
, RRF
, LOCGR
, dest
, TCG_TMP0
, cc
);
1239 /* Emit: d = 1; if (cc) goto over; d = 0; over: */
1240 tcg_out_movi(s
, type
, dest
, 1);
1241 tcg_out_insn(s
, RI
, BRC
, cc
, (4 + 4) >> 1);
1242 tcg_out_movi(s
, type
, dest
, 0);
1246 static void tgen_movcond(TCGContext
*s
, TCGType type
, TCGCond c
, TCGReg dest
,
1247 TCGReg c1
, TCGArg c2
, int c2const
,
1248 TCGArg v3
, int v3const
)
1251 if (s390_facilities
& FACILITY_LOAD_ON_COND
) {
1252 cc
= tgen_cmp(s
, type
, c
, c1
, c2
, c2const
, false);
1254 tcg_out_insn(s
, RIE
, LOCGHI
, dest
, v3
, cc
);
1256 tcg_out_insn(s
, RRF
, LOCGR
, dest
, v3
, cc
);
1259 c
= tcg_invert_cond(c
);
1260 cc
= tgen_cmp(s
, type
, c
, c1
, c2
, c2const
, false);
1262 /* Emit: if (cc) goto over; dest = r3; over: */
1263 tcg_out_insn(s
, RI
, BRC
, cc
, (4 + 4) >> 1);
1264 tcg_out_insn(s
, RRE
, LGR
, dest
, v3
);
1268 static void tgen_clz(TCGContext
*s
, TCGReg dest
, TCGReg a1
,
1269 TCGArg a2
, int a2const
)
1271 /* Since this sets both R and R+1, we have no choice but to store the
1272 result into R0, allowing R1 == TCG_TMP0 to be clobbered as well. */
1273 QEMU_BUILD_BUG_ON(TCG_TMP0
!= TCG_REG_R1
);
1274 tcg_out_insn(s
, RRE
, FLOGR
, TCG_REG_R0
, a1
);
1276 if (a2const
&& a2
== 64) {
1277 tcg_out_mov(s
, TCG_TYPE_I64
, dest
, TCG_REG_R0
);
1280 tcg_out_movi(s
, TCG_TYPE_I64
, dest
, a2
);
1282 tcg_out_mov(s
, TCG_TYPE_I64
, dest
, a2
);
1284 if (s390_facilities
& FACILITY_LOAD_ON_COND
) {
1285 /* Emit: if (one bit found) dest = r0. */
1286 tcg_out_insn(s
, RRF
, LOCGR
, dest
, TCG_REG_R0
, 2);
1288 /* Emit: if (no one bit found) goto over; dest = r0; over: */
1289 tcg_out_insn(s
, RI
, BRC
, 8, (4 + 4) >> 1);
1290 tcg_out_insn(s
, RRE
, LGR
, dest
, TCG_REG_R0
);
1295 static void tgen_deposit(TCGContext
*s
, TCGReg dest
, TCGReg src
,
1296 int ofs
, int len
, int z
)
1298 int lsb
= (63 - ofs
);
1299 int msb
= lsb
- (len
- 1);
1300 tcg_out_risbg(s
, dest
, src
, msb
, lsb
, ofs
, z
);
1303 static void tgen_extract(TCGContext
*s
, TCGReg dest
, TCGReg src
,
1306 tcg_out_risbg(s
, dest
, src
, 64 - len
, 63, 64 - ofs
, 1);
1309 static void tgen_gotoi(TCGContext
*s
, int cc
, tcg_insn_unit
*dest
)
1311 ptrdiff_t off
= dest
- s
->code_ptr
;
1312 if (off
== (int16_t)off
) {
1313 tcg_out_insn(s
, RI
, BRC
, cc
, off
);
1314 } else if (off
== (int32_t)off
) {
1315 tcg_out_insn(s
, RIL
, BRCL
, cc
, off
);
1317 tcg_out_movi(s
, TCG_TYPE_PTR
, TCG_TMP0
, (uintptr_t)dest
);
1318 tcg_out_insn(s
, RR
, BCR
, cc
, TCG_TMP0
);
1322 static void tgen_branch(TCGContext
*s
, int cc
, TCGLabel
*l
)
1325 tgen_gotoi(s
, cc
, l
->u
.value_ptr
);
1326 } else if (USE_LONG_BRANCHES
) {
1327 tcg_out16(s
, RIL_BRCL
| (cc
<< 4));
1328 tcg_out_reloc(s
, s
->code_ptr
, R_390_PC32DBL
, l
, 2);
1331 tcg_out16(s
, RI_BRC
| (cc
<< 4));
1332 tcg_out_reloc(s
, s
->code_ptr
, R_390_PC16DBL
, l
, 2);
1337 static void tgen_compare_branch(TCGContext
*s
, S390Opcode opc
, int cc
,
1338 TCGReg r1
, TCGReg r2
, TCGLabel
*l
)
1343 off
= l
->u
.value_ptr
- s
->code_ptr
;
1344 tcg_debug_assert(off
== (int16_t)off
);
1346 tcg_out_reloc(s
, s
->code_ptr
+ 1, R_390_PC16DBL
, l
, 2);
1349 tcg_out16(s
, (opc
& 0xff00) | (r1
<< 4) | r2
);
1351 tcg_out16(s
, cc
<< 12 | (opc
& 0xff));
1354 static void tgen_compare_imm_branch(TCGContext
*s
, S390Opcode opc
, int cc
,
1355 TCGReg r1
, int i2
, TCGLabel
*l
)
1357 tcg_target_long off
= 0;
1360 off
= l
->u
.value_ptr
- s
->code_ptr
;
1361 tcg_debug_assert(off
== (int16_t)off
);
1363 tcg_out_reloc(s
, s
->code_ptr
+ 1, R_390_PC16DBL
, l
, 2);
1366 tcg_out16(s
, (opc
& 0xff00) | (r1
<< 4) | cc
);
1368 tcg_out16(s
, (i2
<< 8) | (opc
& 0xff));
1371 static void tgen_brcond(TCGContext
*s
, TCGType type
, TCGCond c
,
1372 TCGReg r1
, TCGArg c2
, int c2const
, TCGLabel
*l
)
1376 if (s390_facilities
& FACILITY_GEN_INST_EXT
) {
1377 bool is_unsigned
= is_unsigned_cond(c
);
1381 cc
= tcg_cond_to_s390_cond
[c
];
1384 opc
= (type
== TCG_TYPE_I32
1385 ? (is_unsigned
? RIE_CLRJ
: RIE_CRJ
)
1386 : (is_unsigned
? RIE_CLGRJ
: RIE_CGRJ
));
1387 tgen_compare_branch(s
, opc
, cc
, r1
, c2
, l
);
1391 /* COMPARE IMMEDIATE AND BRANCH RELATIVE has an 8-bit immediate field.
1392 If the immediate we've been given does not fit that range, we'll
1393 fall back to separate compare and branch instructions using the
1394 larger comparison range afforded by COMPARE IMMEDIATE. */
1395 if (type
== TCG_TYPE_I32
) {
1398 in_range
= (uint32_t)c2
== (uint8_t)c2
;
1401 in_range
= (int32_t)c2
== (int8_t)c2
;
1406 in_range
= (uint64_t)c2
== (uint8_t)c2
;
1409 in_range
= (int64_t)c2
== (int8_t)c2
;
1413 tgen_compare_imm_branch(s
, opc
, cc
, r1
, c2
, l
);
1418 cc
= tgen_cmp(s
, type
, c
, r1
, c2
, c2const
, false);
1419 tgen_branch(s
, cc
, l
);
1422 static void tcg_out_call(TCGContext
*s
, tcg_insn_unit
*dest
)
1424 ptrdiff_t off
= dest
- s
->code_ptr
;
1425 if (off
== (int32_t)off
) {
1426 tcg_out_insn(s
, RIL
, BRASL
, TCG_REG_R14
, off
);
1428 tcg_out_movi(s
, TCG_TYPE_PTR
, TCG_TMP0
, (uintptr_t)dest
);
1429 tcg_out_insn(s
, RR
, BASR
, TCG_REG_R14
, TCG_TMP0
);
1433 static void tcg_out_qemu_ld_direct(TCGContext
*s
, MemOp opc
, TCGReg data
,
1434 TCGReg base
, TCGReg index
, int disp
)
1436 switch (opc
& (MO_SSIZE
| MO_BSWAP
)) {
1438 tcg_out_insn(s
, RXY
, LLGC
, data
, base
, index
, disp
);
1441 tcg_out_insn(s
, RXY
, LGB
, data
, base
, index
, disp
);
1444 case MO_UW
| MO_BSWAP
:
1445 /* swapped unsigned halfword load with upper bits zeroed */
1446 tcg_out_insn(s
, RXY
, LRVH
, data
, base
, index
, disp
);
1447 tgen_ext16u(s
, TCG_TYPE_I64
, data
, data
);
1450 tcg_out_insn(s
, RXY
, LLGH
, data
, base
, index
, disp
);
1453 case MO_SW
| MO_BSWAP
:
1454 /* swapped sign-extended halfword load */
1455 tcg_out_insn(s
, RXY
, LRVH
, data
, base
, index
, disp
);
1456 tgen_ext16s(s
, TCG_TYPE_I64
, data
, data
);
1459 tcg_out_insn(s
, RXY
, LGH
, data
, base
, index
, disp
);
1462 case MO_UL
| MO_BSWAP
:
1463 /* swapped unsigned int load with upper bits zeroed */
1464 tcg_out_insn(s
, RXY
, LRV
, data
, base
, index
, disp
);
1465 tgen_ext32u(s
, data
, data
);
1468 tcg_out_insn(s
, RXY
, LLGF
, data
, base
, index
, disp
);
1471 case MO_SL
| MO_BSWAP
:
1472 /* swapped sign-extended int load */
1473 tcg_out_insn(s
, RXY
, LRV
, data
, base
, index
, disp
);
1474 tgen_ext32s(s
, data
, data
);
1477 tcg_out_insn(s
, RXY
, LGF
, data
, base
, index
, disp
);
1480 case MO_Q
| MO_BSWAP
:
1481 tcg_out_insn(s
, RXY
, LRVG
, data
, base
, index
, disp
);
1484 tcg_out_insn(s
, RXY
, LG
, data
, base
, index
, disp
);
1492 static void tcg_out_qemu_st_direct(TCGContext
*s
, MemOp opc
, TCGReg data
,
1493 TCGReg base
, TCGReg index
, int disp
)
1495 switch (opc
& (MO_SIZE
| MO_BSWAP
)) {
1497 if (disp
>= 0 && disp
< 0x1000) {
1498 tcg_out_insn(s
, RX
, STC
, data
, base
, index
, disp
);
1500 tcg_out_insn(s
, RXY
, STCY
, data
, base
, index
, disp
);
1504 case MO_UW
| MO_BSWAP
:
1505 tcg_out_insn(s
, RXY
, STRVH
, data
, base
, index
, disp
);
1508 if (disp
>= 0 && disp
< 0x1000) {
1509 tcg_out_insn(s
, RX
, STH
, data
, base
, index
, disp
);
1511 tcg_out_insn(s
, RXY
, STHY
, data
, base
, index
, disp
);
1515 case MO_UL
| MO_BSWAP
:
1516 tcg_out_insn(s
, RXY
, STRV
, data
, base
, index
, disp
);
1519 if (disp
>= 0 && disp
< 0x1000) {
1520 tcg_out_insn(s
, RX
, ST
, data
, base
, index
, disp
);
1522 tcg_out_insn(s
, RXY
, STY
, data
, base
, index
, disp
);
1526 case MO_Q
| MO_BSWAP
:
1527 tcg_out_insn(s
, RXY
, STRVG
, data
, base
, index
, disp
);
1530 tcg_out_insn(s
, RXY
, STG
, data
, base
, index
, disp
);
1538 #if defined(CONFIG_SOFTMMU)
1539 #include "../tcg-ldst.inc.c"
1541 /* We're expecting to use a 20-bit negative offset on the tlb memory ops. */
1542 QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
1543 QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 19));
1545 /* Load and compare a TLB entry, leaving the flags set. Loads the TLB
1546 addend into R2. Returns a register with the santitized guest address. */
1547 static TCGReg
tcg_out_tlb_read(TCGContext
*s
, TCGReg addr_reg
, MemOp opc
,
1548 int mem_index
, bool is_ld
)
1550 unsigned s_bits
= opc
& MO_SIZE
;
1551 unsigned a_bits
= get_alignment_bits(opc
);
1552 unsigned s_mask
= (1 << s_bits
) - 1;
1553 unsigned a_mask
= (1 << a_bits
) - 1;
1554 int fast_off
= TLB_MASK_TABLE_OFS(mem_index
);
1555 int mask_off
= fast_off
+ offsetof(CPUTLBDescFast
, mask
);
1556 int table_off
= fast_off
+ offsetof(CPUTLBDescFast
, table
);
1560 tcg_out_sh64(s
, RSY_SRLG
, TCG_REG_R2
, addr_reg
, TCG_REG_NONE
,
1561 TARGET_PAGE_BITS
- CPU_TLB_ENTRY_BITS
);
1562 tcg_out_insn(s
, RXY
, NG
, TCG_REG_R2
, TCG_AREG0
, TCG_REG_NONE
, mask_off
);
1563 tcg_out_insn(s
, RXY
, AG
, TCG_REG_R2
, TCG_AREG0
, TCG_REG_NONE
, table_off
);
1565 /* For aligned accesses, we check the first byte and include the alignment
1566 bits within the address. For unaligned access, we check that we don't
1567 cross pages using the address of the last byte of the access. */
1568 a_off
= (a_bits
>= s_bits
? 0 : s_mask
- a_mask
);
1569 tlb_mask
= (uint64_t)TARGET_PAGE_MASK
| a_mask
;
1570 if ((s390_facilities
& FACILITY_GEN_INST_EXT
) && a_off
== 0) {
1571 tgen_andi_risbg(s
, TCG_REG_R3
, addr_reg
, tlb_mask
);
1573 tcg_out_insn(s
, RX
, LA
, TCG_REG_R3
, addr_reg
, TCG_REG_NONE
, a_off
);
1574 tgen_andi(s
, TCG_TYPE_TL
, TCG_REG_R3
, tlb_mask
);
1578 ofs
= offsetof(CPUTLBEntry
, addr_read
);
1580 ofs
= offsetof(CPUTLBEntry
, addr_write
);
1582 if (TARGET_LONG_BITS
== 32) {
1583 tcg_out_insn(s
, RX
, C
, TCG_REG_R3
, TCG_REG_R2
, TCG_REG_NONE
, ofs
);
1585 tcg_out_insn(s
, RXY
, CG
, TCG_REG_R3
, TCG_REG_R2
, TCG_REG_NONE
, ofs
);
1588 tcg_out_insn(s
, RXY
, LG
, TCG_REG_R2
, TCG_REG_R2
, TCG_REG_NONE
,
1589 offsetof(CPUTLBEntry
, addend
));
1591 if (TARGET_LONG_BITS
== 32) {
1592 tgen_ext32u(s
, TCG_REG_R3
, addr_reg
);
1598 static void add_qemu_ldst_label(TCGContext
*s
, bool is_ld
, TCGMemOpIdx oi
,
1599 TCGReg data
, TCGReg addr
,
1600 tcg_insn_unit
*raddr
, tcg_insn_unit
*label_ptr
)
1602 TCGLabelQemuLdst
*label
= new_ldst_label(s
);
1604 label
->is_ld
= is_ld
;
1606 label
->datalo_reg
= data
;
1607 label
->addrlo_reg
= addr
;
1608 label
->raddr
= raddr
;
1609 label
->label_ptr
[0] = label_ptr
;
1612 static bool tcg_out_qemu_ld_slow_path(TCGContext
*s
, TCGLabelQemuLdst
*lb
)
1614 TCGReg addr_reg
= lb
->addrlo_reg
;
1615 TCGReg data_reg
= lb
->datalo_reg
;
1616 TCGMemOpIdx oi
= lb
->oi
;
1617 MemOp opc
= get_memop(oi
);
1619 if (!patch_reloc(lb
->label_ptr
[0], R_390_PC16DBL
,
1620 (intptr_t)s
->code_ptr
, 2)) {
1624 tcg_out_mov(s
, TCG_TYPE_PTR
, TCG_REG_R2
, TCG_AREG0
);
1625 if (TARGET_LONG_BITS
== 64) {
1626 tcg_out_mov(s
, TCG_TYPE_I64
, TCG_REG_R3
, addr_reg
);
1628 tcg_out_movi(s
, TCG_TYPE_I32
, TCG_REG_R4
, oi
);
1629 tcg_out_movi(s
, TCG_TYPE_PTR
, TCG_REG_R5
, (uintptr_t)lb
->raddr
);
1630 tcg_out_call(s
, qemu_ld_helpers
[opc
& (MO_BSWAP
| MO_SSIZE
)]);
1631 tcg_out_mov(s
, TCG_TYPE_I64
, data_reg
, TCG_REG_R2
);
1633 tgen_gotoi(s
, S390_CC_ALWAYS
, lb
->raddr
);
1637 static bool tcg_out_qemu_st_slow_path(TCGContext
*s
, TCGLabelQemuLdst
*lb
)
1639 TCGReg addr_reg
= lb
->addrlo_reg
;
1640 TCGReg data_reg
= lb
->datalo_reg
;
1641 TCGMemOpIdx oi
= lb
->oi
;
1642 MemOp opc
= get_memop(oi
);
1644 if (!patch_reloc(lb
->label_ptr
[0], R_390_PC16DBL
,
1645 (intptr_t)s
->code_ptr
, 2)) {
1649 tcg_out_mov(s
, TCG_TYPE_PTR
, TCG_REG_R2
, TCG_AREG0
);
1650 if (TARGET_LONG_BITS
== 64) {
1651 tcg_out_mov(s
, TCG_TYPE_I64
, TCG_REG_R3
, addr_reg
);
1653 switch (opc
& MO_SIZE
) {
1655 tgen_ext8u(s
, TCG_TYPE_I64
, TCG_REG_R4
, data_reg
);
1658 tgen_ext16u(s
, TCG_TYPE_I64
, TCG_REG_R4
, data_reg
);
1661 tgen_ext32u(s
, TCG_REG_R4
, data_reg
);
1664 tcg_out_mov(s
, TCG_TYPE_I64
, TCG_REG_R4
, data_reg
);
1669 tcg_out_movi(s
, TCG_TYPE_I32
, TCG_REG_R5
, oi
);
1670 tcg_out_movi(s
, TCG_TYPE_PTR
, TCG_REG_R6
, (uintptr_t)lb
->raddr
);
1671 tcg_out_call(s
, qemu_st_helpers
[opc
& (MO_BSWAP
| MO_SIZE
)]);
1673 tgen_gotoi(s
, S390_CC_ALWAYS
, lb
->raddr
);
1677 static void tcg_prepare_user_ldst(TCGContext
*s
, TCGReg
*addr_reg
,
1678 TCGReg
*index_reg
, tcg_target_long
*disp
)
1680 if (TARGET_LONG_BITS
== 32) {
1681 tgen_ext32u(s
, TCG_TMP0
, *addr_reg
);
1682 *addr_reg
= TCG_TMP0
;
1684 if (guest_base
< 0x80000) {
1685 *index_reg
= TCG_REG_NONE
;
1688 *index_reg
= TCG_GUEST_BASE_REG
;
1692 #endif /* CONFIG_SOFTMMU */
1694 static void tcg_out_qemu_ld(TCGContext
* s
, TCGReg data_reg
, TCGReg addr_reg
,
1697 MemOp opc
= get_memop(oi
);
1698 #ifdef CONFIG_SOFTMMU
1699 unsigned mem_index
= get_mmuidx(oi
);
1700 tcg_insn_unit
*label_ptr
;
1703 base_reg
= tcg_out_tlb_read(s
, addr_reg
, opc
, mem_index
, 1);
1705 tcg_out16(s
, RI_BRC
| (S390_CC_NE
<< 4));
1706 label_ptr
= s
->code_ptr
;
1709 tcg_out_qemu_ld_direct(s
, opc
, data_reg
, base_reg
, TCG_REG_R2
, 0);
1711 add_qemu_ldst_label(s
, 1, oi
, data_reg
, addr_reg
, s
->code_ptr
, label_ptr
);
1714 tcg_target_long disp
;
1716 tcg_prepare_user_ldst(s
, &addr_reg
, &index_reg
, &disp
);
1717 tcg_out_qemu_ld_direct(s
, opc
, data_reg
, addr_reg
, index_reg
, disp
);
1721 static void tcg_out_qemu_st(TCGContext
* s
, TCGReg data_reg
, TCGReg addr_reg
,
1724 MemOp opc
= get_memop(oi
);
1725 #ifdef CONFIG_SOFTMMU
1726 unsigned mem_index
= get_mmuidx(oi
);
1727 tcg_insn_unit
*label_ptr
;
1730 base_reg
= tcg_out_tlb_read(s
, addr_reg
, opc
, mem_index
, 0);
1732 tcg_out16(s
, RI_BRC
| (S390_CC_NE
<< 4));
1733 label_ptr
= s
->code_ptr
;
1736 tcg_out_qemu_st_direct(s
, opc
, data_reg
, base_reg
, TCG_REG_R2
, 0);
1738 add_qemu_ldst_label(s
, 0, oi
, data_reg
, addr_reg
, s
->code_ptr
, label_ptr
);
1741 tcg_target_long disp
;
1743 tcg_prepare_user_ldst(s
, &addr_reg
, &index_reg
, &disp
);
1744 tcg_out_qemu_st_direct(s
, opc
, data_reg
, addr_reg
, index_reg
, disp
);
1748 # define OP_32_64(x) \
1749 case glue(glue(INDEX_op_,x),_i32): \
1750 case glue(glue(INDEX_op_,x),_i64)
1752 static inline void tcg_out_op(TCGContext
*s
, TCGOpcode opc
,
1753 const TCGArg
*args
, const int *const_args
)
1759 case INDEX_op_exit_tb
:
1760 /* Reuse the zeroing that exists for goto_ptr. */
1763 tgen_gotoi(s
, S390_CC_ALWAYS
, s
->code_gen_epilogue
);
1765 tcg_out_movi(s
, TCG_TYPE_PTR
, TCG_REG_R2
, a0
);
1766 tgen_gotoi(s
, S390_CC_ALWAYS
, tb_ret_addr
);
1770 case INDEX_op_goto_tb
:
1772 if (s
->tb_jmp_insn_offset
) {
1773 /* branch displacement must be aligned for atomic patching;
1774 * see if we need to add extra nop before branch
1776 if (!QEMU_PTR_IS_ALIGNED(s
->code_ptr
+ 1, 4)) {
1779 tcg_debug_assert(!USE_REG_TB
);
1780 tcg_out16(s
, RIL_BRCL
| (S390_CC_ALWAYS
<< 4));
1781 s
->tb_jmp_insn_offset
[a0
] = tcg_current_code_size(s
);
1784 /* load address stored at s->tb_jmp_target_addr + a0 */
1785 tcg_out_ld_abs(s
, TCG_TYPE_PTR
, TCG_REG_TB
,
1786 s
->tb_jmp_target_addr
+ a0
);
1788 tcg_out_insn(s
, RR
, BCR
, S390_CC_ALWAYS
, TCG_REG_TB
);
1790 set_jmp_reset_offset(s
, a0
);
1792 /* For the unlinked path of goto_tb, we need to reset
1793 TCG_REG_TB to the beginning of this TB. */
1795 int ofs
= -tcg_current_code_size(s
);
1796 assert(ofs
== (int16_t)ofs
);
1797 tcg_out_insn(s
, RI
, AGHI
, TCG_REG_TB
, ofs
);
1801 case INDEX_op_goto_ptr
:
1804 tcg_out_mov(s
, TCG_TYPE_PTR
, TCG_REG_TB
, a0
);
1806 tcg_out_insn(s
, RR
, BCR
, S390_CC_ALWAYS
, a0
);
1810 /* ??? LLC (RXY format) is only present with the extended-immediate
1811 facility, whereas LLGC is always present. */
1812 tcg_out_mem(s
, 0, RXY_LLGC
, args
[0], args
[1], TCG_REG_NONE
, args
[2]);
1816 /* ??? LB is no smaller than LGB, so no point to using it. */
1817 tcg_out_mem(s
, 0, RXY_LGB
, args
[0], args
[1], TCG_REG_NONE
, args
[2]);
1821 /* ??? LLH (RXY format) is only present with the extended-immediate
1822 facility, whereas LLGH is always present. */
1823 tcg_out_mem(s
, 0, RXY_LLGH
, args
[0], args
[1], TCG_REG_NONE
, args
[2]);
1826 case INDEX_op_ld16s_i32
:
1827 tcg_out_mem(s
, RX_LH
, RXY_LHY
, args
[0], args
[1], TCG_REG_NONE
, args
[2]);
1830 case INDEX_op_ld_i32
:
1831 tcg_out_ld(s
, TCG_TYPE_I32
, args
[0], args
[1], args
[2]);
1835 tcg_out_mem(s
, RX_STC
, RXY_STCY
, args
[0], args
[1],
1836 TCG_REG_NONE
, args
[2]);
1840 tcg_out_mem(s
, RX_STH
, RXY_STHY
, args
[0], args
[1],
1841 TCG_REG_NONE
, args
[2]);
1844 case INDEX_op_st_i32
:
1845 tcg_out_st(s
, TCG_TYPE_I32
, args
[0], args
[1], args
[2]);
1848 case INDEX_op_add_i32
:
1849 a0
= args
[0], a1
= args
[1], a2
= (int32_t)args
[2];
1850 if (const_args
[2]) {
1853 if (a2
== (int16_t)a2
) {
1854 tcg_out_insn(s
, RI
, AHI
, a0
, a2
);
1857 if (s390_facilities
& FACILITY_EXT_IMM
) {
1858 tcg_out_insn(s
, RIL
, AFI
, a0
, a2
);
1862 tcg_out_mem(s
, RX_LA
, RXY_LAY
, a0
, a1
, TCG_REG_NONE
, a2
);
1863 } else if (a0
== a1
) {
1864 tcg_out_insn(s
, RR
, AR
, a0
, a2
);
1866 tcg_out_insn(s
, RX
, LA
, a0
, a1
, a2
, 0);
1869 case INDEX_op_sub_i32
:
1870 a0
= args
[0], a1
= args
[1], a2
= (int32_t)args
[2];
1871 if (const_args
[2]) {
1874 } else if (a0
== a1
) {
1875 tcg_out_insn(s
, RR
, SR
, a0
, a2
);
1877 tcg_out_insn(s
, RRF
, SRK
, a0
, a1
, a2
);
1881 case INDEX_op_and_i32
:
1882 a0
= args
[0], a1
= args
[1], a2
= (uint32_t)args
[2];
1883 if (const_args
[2]) {
1884 tcg_out_mov(s
, TCG_TYPE_I32
, a0
, a1
);
1885 tgen_andi(s
, TCG_TYPE_I32
, a0
, a2
);
1886 } else if (a0
== a1
) {
1887 tcg_out_insn(s
, RR
, NR
, a0
, a2
);
1889 tcg_out_insn(s
, RRF
, NRK
, a0
, a1
, a2
);
1892 case INDEX_op_or_i32
:
1893 a0
= args
[0], a1
= args
[1], a2
= (uint32_t)args
[2];
1894 if (const_args
[2]) {
1895 tcg_out_mov(s
, TCG_TYPE_I32
, a0
, a1
);
1896 tgen_ori(s
, TCG_TYPE_I32
, a0
, a2
);
1897 } else if (a0
== a1
) {
1898 tcg_out_insn(s
, RR
, OR
, a0
, a2
);
1900 tcg_out_insn(s
, RRF
, ORK
, a0
, a1
, a2
);
1903 case INDEX_op_xor_i32
:
1904 a0
= args
[0], a1
= args
[1], a2
= (uint32_t)args
[2];
1905 if (const_args
[2]) {
1906 tcg_out_mov(s
, TCG_TYPE_I32
, a0
, a1
);
1907 tgen_xori(s
, TCG_TYPE_I32
, a0
, a2
);
1908 } else if (a0
== a1
) {
1909 tcg_out_insn(s
, RR
, XR
, args
[0], args
[2]);
1911 tcg_out_insn(s
, RRF
, XRK
, a0
, a1
, a2
);
1915 case INDEX_op_neg_i32
:
1916 tcg_out_insn(s
, RR
, LCR
, args
[0], args
[1]);
1919 case INDEX_op_mul_i32
:
1920 if (const_args
[2]) {
1921 if ((int32_t)args
[2] == (int16_t)args
[2]) {
1922 tcg_out_insn(s
, RI
, MHI
, args
[0], args
[2]);
1924 tcg_out_insn(s
, RIL
, MSFI
, args
[0], args
[2]);
1927 tcg_out_insn(s
, RRE
, MSR
, args
[0], args
[2]);
1931 case INDEX_op_div2_i32
:
1932 tcg_out_insn(s
, RR
, DR
, TCG_REG_R2
, args
[4]);
1934 case INDEX_op_divu2_i32
:
1935 tcg_out_insn(s
, RRE
, DLR
, TCG_REG_R2
, args
[4]);
1938 case INDEX_op_shl_i32
:
1942 a0
= args
[0], a1
= args
[1], a2
= (int32_t)args
[2];
1944 if (const_args
[2]) {
1945 tcg_out_sh32(s
, op
, a0
, TCG_REG_NONE
, a2
);
1947 tcg_out_sh32(s
, op
, a0
, a2
, 0);
1950 /* Using tcg_out_sh64 here for the format; it is a 32-bit shift. */
1951 if (const_args
[2]) {
1952 tcg_out_sh64(s
, op2
, a0
, a1
, TCG_REG_NONE
, a2
);
1954 tcg_out_sh64(s
, op2
, a0
, a1
, a2
, 0);
1958 case INDEX_op_shr_i32
:
1962 case INDEX_op_sar_i32
:
1967 case INDEX_op_rotl_i32
:
1968 /* ??? Using tcg_out_sh64 here for the format; it is a 32-bit rol. */
1969 if (const_args
[2]) {
1970 tcg_out_sh64(s
, RSY_RLL
, args
[0], args
[1], TCG_REG_NONE
, args
[2]);
1972 tcg_out_sh64(s
, RSY_RLL
, args
[0], args
[1], args
[2], 0);
1975 case INDEX_op_rotr_i32
:
1976 if (const_args
[2]) {
1977 tcg_out_sh64(s
, RSY_RLL
, args
[0], args
[1],
1978 TCG_REG_NONE
, (32 - args
[2]) & 31);
1980 tcg_out_insn(s
, RR
, LCR
, TCG_TMP0
, args
[2]);
1981 tcg_out_sh64(s
, RSY_RLL
, args
[0], args
[1], TCG_TMP0
, 0);
1985 case INDEX_op_ext8s_i32
:
1986 tgen_ext8s(s
, TCG_TYPE_I32
, args
[0], args
[1]);
1988 case INDEX_op_ext16s_i32
:
1989 tgen_ext16s(s
, TCG_TYPE_I32
, args
[0], args
[1]);
1991 case INDEX_op_ext8u_i32
:
1992 tgen_ext8u(s
, TCG_TYPE_I32
, args
[0], args
[1]);
1994 case INDEX_op_ext16u_i32
:
1995 tgen_ext16u(s
, TCG_TYPE_I32
, args
[0], args
[1]);
1999 /* The TCG bswap definition requires bits 0-47 already be zero.
2000 Thus we don't need the G-type insns to implement bswap16_i64. */
2001 tcg_out_insn(s
, RRE
, LRVR
, args
[0], args
[1]);
2002 tcg_out_sh32(s
, RS_SRL
, args
[0], TCG_REG_NONE
, 16);
2005 tcg_out_insn(s
, RRE
, LRVR
, args
[0], args
[1]);
2008 case INDEX_op_add2_i32
:
2009 if (const_args
[4]) {
2010 tcg_out_insn(s
, RIL
, ALFI
, args
[0], args
[4]);
2012 tcg_out_insn(s
, RR
, ALR
, args
[0], args
[4]);
2014 tcg_out_insn(s
, RRE
, ALCR
, args
[1], args
[5]);
2016 case INDEX_op_sub2_i32
:
2017 if (const_args
[4]) {
2018 tcg_out_insn(s
, RIL
, SLFI
, args
[0], args
[4]);
2020 tcg_out_insn(s
, RR
, SLR
, args
[0], args
[4]);
2022 tcg_out_insn(s
, RRE
, SLBR
, args
[1], args
[5]);
2026 tgen_branch(s
, S390_CC_ALWAYS
, arg_label(args
[0]));
2029 case INDEX_op_brcond_i32
:
2030 tgen_brcond(s
, TCG_TYPE_I32
, args
[2], args
[0],
2031 args
[1], const_args
[1], arg_label(args
[3]));
2033 case INDEX_op_setcond_i32
:
2034 tgen_setcond(s
, TCG_TYPE_I32
, args
[3], args
[0], args
[1],
2035 args
[2], const_args
[2]);
2037 case INDEX_op_movcond_i32
:
2038 tgen_movcond(s
, TCG_TYPE_I32
, args
[5], args
[0], args
[1],
2039 args
[2], const_args
[2], args
[3], const_args
[3]);
2042 case INDEX_op_qemu_ld_i32
:
2043 /* ??? Technically we can use a non-extending instruction. */
2044 case INDEX_op_qemu_ld_i64
:
2045 tcg_out_qemu_ld(s
, args
[0], args
[1], args
[2]);
2047 case INDEX_op_qemu_st_i32
:
2048 case INDEX_op_qemu_st_i64
:
2049 tcg_out_qemu_st(s
, args
[0], args
[1], args
[2]);
2052 case INDEX_op_ld16s_i64
:
2053 tcg_out_mem(s
, 0, RXY_LGH
, args
[0], args
[1], TCG_REG_NONE
, args
[2]);
2055 case INDEX_op_ld32u_i64
:
2056 tcg_out_mem(s
, 0, RXY_LLGF
, args
[0], args
[1], TCG_REG_NONE
, args
[2]);
2058 case INDEX_op_ld32s_i64
:
2059 tcg_out_mem(s
, 0, RXY_LGF
, args
[0], args
[1], TCG_REG_NONE
, args
[2]);
2061 case INDEX_op_ld_i64
:
2062 tcg_out_ld(s
, TCG_TYPE_I64
, args
[0], args
[1], args
[2]);
2065 case INDEX_op_st32_i64
:
2066 tcg_out_st(s
, TCG_TYPE_I32
, args
[0], args
[1], args
[2]);
2068 case INDEX_op_st_i64
:
2069 tcg_out_st(s
, TCG_TYPE_I64
, args
[0], args
[1], args
[2]);
2072 case INDEX_op_add_i64
:
2073 a0
= args
[0], a1
= args
[1], a2
= args
[2];
2074 if (const_args
[2]) {
2077 if (a2
== (int16_t)a2
) {
2078 tcg_out_insn(s
, RI
, AGHI
, a0
, a2
);
2081 if (s390_facilities
& FACILITY_EXT_IMM
) {
2082 if (a2
== (int32_t)a2
) {
2083 tcg_out_insn(s
, RIL
, AGFI
, a0
, a2
);
2085 } else if (a2
== (uint32_t)a2
) {
2086 tcg_out_insn(s
, RIL
, ALGFI
, a0
, a2
);
2088 } else if (-a2
== (uint32_t)-a2
) {
2089 tcg_out_insn(s
, RIL
, SLGFI
, a0
, -a2
);
2094 tcg_out_mem(s
, RX_LA
, RXY_LAY
, a0
, a1
, TCG_REG_NONE
, a2
);
2095 } else if (a0
== a1
) {
2096 tcg_out_insn(s
, RRE
, AGR
, a0
, a2
);
2098 tcg_out_insn(s
, RX
, LA
, a0
, a1
, a2
, 0);
2101 case INDEX_op_sub_i64
:
2102 a0
= args
[0], a1
= args
[1], a2
= args
[2];
2103 if (const_args
[2]) {
2106 } else if (a0
== a1
) {
2107 tcg_out_insn(s
, RRE
, SGR
, a0
, a2
);
2109 tcg_out_insn(s
, RRF
, SGRK
, a0
, a1
, a2
);
2113 case INDEX_op_and_i64
:
2114 a0
= args
[0], a1
= args
[1], a2
= args
[2];
2115 if (const_args
[2]) {
2116 tcg_out_mov(s
, TCG_TYPE_I64
, a0
, a1
);
2117 tgen_andi(s
, TCG_TYPE_I64
, args
[0], args
[2]);
2118 } else if (a0
== a1
) {
2119 tcg_out_insn(s
, RRE
, NGR
, args
[0], args
[2]);
2121 tcg_out_insn(s
, RRF
, NGRK
, a0
, a1
, a2
);
2124 case INDEX_op_or_i64
:
2125 a0
= args
[0], a1
= args
[1], a2
= args
[2];
2126 if (const_args
[2]) {
2127 tcg_out_mov(s
, TCG_TYPE_I64
, a0
, a1
);
2128 tgen_ori(s
, TCG_TYPE_I64
, a0
, a2
);
2129 } else if (a0
== a1
) {
2130 tcg_out_insn(s
, RRE
, OGR
, a0
, a2
);
2132 tcg_out_insn(s
, RRF
, OGRK
, a0
, a1
, a2
);
2135 case INDEX_op_xor_i64
:
2136 a0
= args
[0], a1
= args
[1], a2
= args
[2];
2137 if (const_args
[2]) {
2138 tcg_out_mov(s
, TCG_TYPE_I64
, a0
, a1
);
2139 tgen_xori(s
, TCG_TYPE_I64
, a0
, a2
);
2140 } else if (a0
== a1
) {
2141 tcg_out_insn(s
, RRE
, XGR
, a0
, a2
);
2143 tcg_out_insn(s
, RRF
, XGRK
, a0
, a1
, a2
);
2147 case INDEX_op_neg_i64
:
2148 tcg_out_insn(s
, RRE
, LCGR
, args
[0], args
[1]);
2150 case INDEX_op_bswap64_i64
:
2151 tcg_out_insn(s
, RRE
, LRVGR
, args
[0], args
[1]);
2154 case INDEX_op_mul_i64
:
2155 if (const_args
[2]) {
2156 if (args
[2] == (int16_t)args
[2]) {
2157 tcg_out_insn(s
, RI
, MGHI
, args
[0], args
[2]);
2159 tcg_out_insn(s
, RIL
, MSGFI
, args
[0], args
[2]);
2162 tcg_out_insn(s
, RRE
, MSGR
, args
[0], args
[2]);
2166 case INDEX_op_div2_i64
:
2167 /* ??? We get an unnecessary sign-extension of the dividend
2168 into R3 with this definition, but as we do in fact always
2169 produce both quotient and remainder using INDEX_op_div_i64
2170 instead requires jumping through even more hoops. */
2171 tcg_out_insn(s
, RRE
, DSGR
, TCG_REG_R2
, args
[4]);
2173 case INDEX_op_divu2_i64
:
2174 tcg_out_insn(s
, RRE
, DLGR
, TCG_REG_R2
, args
[4]);
2176 case INDEX_op_mulu2_i64
:
2177 tcg_out_insn(s
, RRE
, MLGR
, TCG_REG_R2
, args
[3]);
2180 case INDEX_op_shl_i64
:
2183 if (const_args
[2]) {
2184 tcg_out_sh64(s
, op
, args
[0], args
[1], TCG_REG_NONE
, args
[2]);
2186 tcg_out_sh64(s
, op
, args
[0], args
[1], args
[2], 0);
2189 case INDEX_op_shr_i64
:
2192 case INDEX_op_sar_i64
:
2196 case INDEX_op_rotl_i64
:
2197 if (const_args
[2]) {
2198 tcg_out_sh64(s
, RSY_RLLG
, args
[0], args
[1],
2199 TCG_REG_NONE
, args
[2]);
2201 tcg_out_sh64(s
, RSY_RLLG
, args
[0], args
[1], args
[2], 0);
2204 case INDEX_op_rotr_i64
:
2205 if (const_args
[2]) {
2206 tcg_out_sh64(s
, RSY_RLLG
, args
[0], args
[1],
2207 TCG_REG_NONE
, (64 - args
[2]) & 63);
2209 /* We can use the smaller 32-bit negate because only the
2210 low 6 bits are examined for the rotate. */
2211 tcg_out_insn(s
, RR
, LCR
, TCG_TMP0
, args
[2]);
2212 tcg_out_sh64(s
, RSY_RLLG
, args
[0], args
[1], TCG_TMP0
, 0);
2216 case INDEX_op_ext8s_i64
:
2217 tgen_ext8s(s
, TCG_TYPE_I64
, args
[0], args
[1]);
2219 case INDEX_op_ext16s_i64
:
2220 tgen_ext16s(s
, TCG_TYPE_I64
, args
[0], args
[1]);
2222 case INDEX_op_ext_i32_i64
:
2223 case INDEX_op_ext32s_i64
:
2224 tgen_ext32s(s
, args
[0], args
[1]);
2226 case INDEX_op_ext8u_i64
:
2227 tgen_ext8u(s
, TCG_TYPE_I64
, args
[0], args
[1]);
2229 case INDEX_op_ext16u_i64
:
2230 tgen_ext16u(s
, TCG_TYPE_I64
, args
[0], args
[1]);
2232 case INDEX_op_extu_i32_i64
:
2233 case INDEX_op_ext32u_i64
:
2234 tgen_ext32u(s
, args
[0], args
[1]);
2237 case INDEX_op_add2_i64
:
2238 if (const_args
[4]) {
2239 if ((int64_t)args
[4] >= 0) {
2240 tcg_out_insn(s
, RIL
, ALGFI
, args
[0], args
[4]);
2242 tcg_out_insn(s
, RIL
, SLGFI
, args
[0], -args
[4]);
2245 tcg_out_insn(s
, RRE
, ALGR
, args
[0], args
[4]);
2247 tcg_out_insn(s
, RRE
, ALCGR
, args
[1], args
[5]);
2249 case INDEX_op_sub2_i64
:
2250 if (const_args
[4]) {
2251 if ((int64_t)args
[4] >= 0) {
2252 tcg_out_insn(s
, RIL
, SLGFI
, args
[0], args
[4]);
2254 tcg_out_insn(s
, RIL
, ALGFI
, args
[0], -args
[4]);
2257 tcg_out_insn(s
, RRE
, SLGR
, args
[0], args
[4]);
2259 tcg_out_insn(s
, RRE
, SLBGR
, args
[1], args
[5]);
2262 case INDEX_op_brcond_i64
:
2263 tgen_brcond(s
, TCG_TYPE_I64
, args
[2], args
[0],
2264 args
[1], const_args
[1], arg_label(args
[3]));
2266 case INDEX_op_setcond_i64
:
2267 tgen_setcond(s
, TCG_TYPE_I64
, args
[3], args
[0], args
[1],
2268 args
[2], const_args
[2]);
2270 case INDEX_op_movcond_i64
:
2271 tgen_movcond(s
, TCG_TYPE_I64
, args
[5], args
[0], args
[1],
2272 args
[2], const_args
[2], args
[3], const_args
[3]);
2276 a0
= args
[0], a1
= args
[1], a2
= args
[2];
2277 if (const_args
[1]) {
2278 tgen_deposit(s
, a0
, a2
, args
[3], args
[4], 1);
2280 /* Since we can't support "0Z" as a constraint, we allow a1 in
2281 any register. Fix things up as if a matching constraint. */
2283 TCGType type
= (opc
== INDEX_op_deposit_i64
);
2285 tcg_out_mov(s
, type
, TCG_TMP0
, a2
);
2288 tcg_out_mov(s
, type
, a0
, a1
);
2290 tgen_deposit(s
, a0
, a2
, args
[3], args
[4], 0);
2295 tgen_extract(s
, args
[0], args
[1], args
[2], args
[3]);
2298 case INDEX_op_clz_i64
:
2299 tgen_clz(s
, args
[0], args
[1], args
[2], const_args
[2]);
2303 /* The host memory model is quite strong, we simply need to
2304 serialize the instruction stream. */
2305 if (args
[0] & TCG_MO_ST_LD
) {
2306 tcg_out_insn(s
, RR
, BCR
,
2307 s390_facilities
& FACILITY_FAST_BCR_SER
? 14 : 15, 0);
2311 case INDEX_op_mov_i32
: /* Always emitted via tcg_out_mov. */
2312 case INDEX_op_mov_i64
:
2313 case INDEX_op_movi_i32
: /* Always emitted via tcg_out_movi. */
2314 case INDEX_op_movi_i64
:
2315 case INDEX_op_call
: /* Always emitted via tcg_out_call. */
2321 static const TCGTargetOpDef
*tcg_target_op_def(TCGOpcode op
)
2323 static const TCGTargetOpDef r
= { .args_ct_str
= { "r" } };
2324 static const TCGTargetOpDef r_r
= { .args_ct_str
= { "r", "r" } };
2325 static const TCGTargetOpDef r_L
= { .args_ct_str
= { "r", "L" } };
2326 static const TCGTargetOpDef L_L
= { .args_ct_str
= { "L", "L" } };
2327 static const TCGTargetOpDef r_ri
= { .args_ct_str
= { "r", "ri" } };
2328 static const TCGTargetOpDef r_r_ri
= { .args_ct_str
= { "r", "r", "ri" } };
2329 static const TCGTargetOpDef r_0_ri
= { .args_ct_str
= { "r", "0", "ri" } };
2330 static const TCGTargetOpDef r_0_rI
= { .args_ct_str
= { "r", "0", "rI" } };
2331 static const TCGTargetOpDef r_0_rJ
= { .args_ct_str
= { "r", "0", "rJ" } };
2332 static const TCGTargetOpDef a2_r
2333 = { .args_ct_str
= { "r", "r", "0", "1", "r", "r" } };
2334 static const TCGTargetOpDef a2_ri
2335 = { .args_ct_str
= { "r", "r", "0", "1", "ri", "r" } };
2336 static const TCGTargetOpDef a2_rA
2337 = { .args_ct_str
= { "r", "r", "0", "1", "rA", "r" } };
2340 case INDEX_op_goto_ptr
:
2343 case INDEX_op_ld8u_i32
:
2344 case INDEX_op_ld8u_i64
:
2345 case INDEX_op_ld8s_i32
:
2346 case INDEX_op_ld8s_i64
:
2347 case INDEX_op_ld16u_i32
:
2348 case INDEX_op_ld16u_i64
:
2349 case INDEX_op_ld16s_i32
:
2350 case INDEX_op_ld16s_i64
:
2351 case INDEX_op_ld_i32
:
2352 case INDEX_op_ld32u_i64
:
2353 case INDEX_op_ld32s_i64
:
2354 case INDEX_op_ld_i64
:
2355 case INDEX_op_st8_i32
:
2356 case INDEX_op_st8_i64
:
2357 case INDEX_op_st16_i32
:
2358 case INDEX_op_st16_i64
:
2359 case INDEX_op_st_i32
:
2360 case INDEX_op_st32_i64
:
2361 case INDEX_op_st_i64
:
2364 case INDEX_op_add_i32
:
2365 case INDEX_op_add_i64
:
2367 case INDEX_op_sub_i32
:
2368 case INDEX_op_sub_i64
:
2369 case INDEX_op_and_i32
:
2370 case INDEX_op_and_i64
:
2371 case INDEX_op_or_i32
:
2372 case INDEX_op_or_i64
:
2373 case INDEX_op_xor_i32
:
2374 case INDEX_op_xor_i64
:
2375 return (s390_facilities
& FACILITY_DISTINCT_OPS
? &r_r_ri
: &r_0_ri
);
2377 case INDEX_op_mul_i32
:
2378 /* If we have the general-instruction-extensions, then we have
2379 MULTIPLY SINGLE IMMEDIATE with a signed 32-bit, otherwise we
2380 have only MULTIPLY HALFWORD IMMEDIATE, with a signed 16-bit. */
2381 return (s390_facilities
& FACILITY_GEN_INST_EXT
? &r_0_ri
: &r_0_rI
);
2382 case INDEX_op_mul_i64
:
2383 return (s390_facilities
& FACILITY_GEN_INST_EXT
? &r_0_rJ
: &r_0_rI
);
2385 case INDEX_op_shl_i32
:
2386 case INDEX_op_shr_i32
:
2387 case INDEX_op_sar_i32
:
2388 return (s390_facilities
& FACILITY_DISTINCT_OPS
? &r_r_ri
: &r_0_ri
);
2390 case INDEX_op_shl_i64
:
2391 case INDEX_op_shr_i64
:
2392 case INDEX_op_sar_i64
:
2395 case INDEX_op_rotl_i32
:
2396 case INDEX_op_rotl_i64
:
2397 case INDEX_op_rotr_i32
:
2398 case INDEX_op_rotr_i64
:
2401 case INDEX_op_brcond_i32
:
2402 case INDEX_op_brcond_i64
:
2405 case INDEX_op_bswap16_i32
:
2406 case INDEX_op_bswap16_i64
:
2407 case INDEX_op_bswap32_i32
:
2408 case INDEX_op_bswap32_i64
:
2409 case INDEX_op_bswap64_i64
:
2410 case INDEX_op_neg_i32
:
2411 case INDEX_op_neg_i64
:
2412 case INDEX_op_ext8s_i32
:
2413 case INDEX_op_ext8s_i64
:
2414 case INDEX_op_ext8u_i32
:
2415 case INDEX_op_ext8u_i64
:
2416 case INDEX_op_ext16s_i32
:
2417 case INDEX_op_ext16s_i64
:
2418 case INDEX_op_ext16u_i32
:
2419 case INDEX_op_ext16u_i64
:
2420 case INDEX_op_ext32s_i64
:
2421 case INDEX_op_ext32u_i64
:
2422 case INDEX_op_ext_i32_i64
:
2423 case INDEX_op_extu_i32_i64
:
2424 case INDEX_op_extract_i32
:
2425 case INDEX_op_extract_i64
:
2428 case INDEX_op_clz_i64
:
2429 case INDEX_op_setcond_i32
:
2430 case INDEX_op_setcond_i64
:
2433 case INDEX_op_qemu_ld_i32
:
2434 case INDEX_op_qemu_ld_i64
:
2436 case INDEX_op_qemu_st_i64
:
2437 case INDEX_op_qemu_st_i32
:
2440 case INDEX_op_deposit_i32
:
2441 case INDEX_op_deposit_i64
:
2443 static const TCGTargetOpDef dep
2444 = { .args_ct_str
= { "r", "rZ", "r" } };
2447 case INDEX_op_movcond_i32
:
2448 case INDEX_op_movcond_i64
:
2450 static const TCGTargetOpDef movc
2451 = { .args_ct_str
= { "r", "r", "ri", "r", "0" } };
2452 static const TCGTargetOpDef movc_l
2453 = { .args_ct_str
= { "r", "r", "ri", "rI", "0" } };
2454 return (s390_facilities
& FACILITY_LOAD_ON_COND2
? &movc_l
: &movc
);
2456 case INDEX_op_div2_i32
:
2457 case INDEX_op_div2_i64
:
2458 case INDEX_op_divu2_i32
:
2459 case INDEX_op_divu2_i64
:
2461 static const TCGTargetOpDef div2
2462 = { .args_ct_str
= { "b", "a", "0", "1", "r" } };
2465 case INDEX_op_mulu2_i64
:
2467 static const TCGTargetOpDef mul2
2468 = { .args_ct_str
= { "b", "a", "0", "r" } };
2472 case INDEX_op_add2_i32
:
2473 case INDEX_op_sub2_i32
:
2474 return (s390_facilities
& FACILITY_EXT_IMM
? &a2_ri
: &a2_r
);
2475 case INDEX_op_add2_i64
:
2476 case INDEX_op_sub2_i64
:
2477 return (s390_facilities
& FACILITY_EXT_IMM
? &a2_rA
: &a2_r
);
2485 static void query_s390_facilities(void)
2487 unsigned long hwcap
= qemu_getauxval(AT_HWCAP
);
2489 /* Is STORE FACILITY LIST EXTENDED available? Honestly, I believe this
2490 is present on all 64-bit systems, but let's check for it anyway. */
2491 if (hwcap
& HWCAP_S390_STFLE
) {
2492 register int r0
__asm__("0");
2493 register void *r1
__asm__("1");
2496 r1
= &s390_facilities
;
2497 asm volatile(".word 0xb2b0,0x1000"
2498 : "=r"(r0
) : "0"(0), "r"(r1
) : "memory", "cc");
2502 static void tcg_target_init(TCGContext
*s
)
2504 query_s390_facilities();
2506 tcg_target_available_regs
[TCG_TYPE_I32
] = 0xffff;
2507 tcg_target_available_regs
[TCG_TYPE_I64
] = 0xffff;
2509 tcg_target_call_clobber_regs
= 0;
2510 tcg_regset_set_reg(tcg_target_call_clobber_regs
, TCG_REG_R0
);
2511 tcg_regset_set_reg(tcg_target_call_clobber_regs
, TCG_REG_R1
);
2512 tcg_regset_set_reg(tcg_target_call_clobber_regs
, TCG_REG_R2
);
2513 tcg_regset_set_reg(tcg_target_call_clobber_regs
, TCG_REG_R3
);
2514 tcg_regset_set_reg(tcg_target_call_clobber_regs
, TCG_REG_R4
);
2515 tcg_regset_set_reg(tcg_target_call_clobber_regs
, TCG_REG_R5
);
2516 /* The r6 register is technically call-saved, but it's also a parameter
2517 register, so it can get killed by setup for the qemu_st helper. */
2518 tcg_regset_set_reg(tcg_target_call_clobber_regs
, TCG_REG_R6
);
2519 /* The return register can be considered call-clobbered. */
2520 tcg_regset_set_reg(tcg_target_call_clobber_regs
, TCG_REG_R14
);
2522 s
->reserved_regs
= 0;
2523 tcg_regset_set_reg(s
->reserved_regs
, TCG_TMP0
);
2524 /* XXX many insns can't be used with R0, so we better avoid it for now */
2525 tcg_regset_set_reg(s
->reserved_regs
, TCG_REG_R0
);
2526 tcg_regset_set_reg(s
->reserved_regs
, TCG_REG_CALL_STACK
);
2528 tcg_regset_set_reg(s
->reserved_regs
, TCG_REG_TB
);
2532 #define FRAME_SIZE ((int)(TCG_TARGET_CALL_STACK_OFFSET \
2533 + TCG_STATIC_CALL_ARGS_SIZE \
2534 + CPU_TEMP_BUF_NLONGS * sizeof(long)))
2536 static void tcg_target_qemu_prologue(TCGContext
*s
)
2538 /* stmg %r6,%r15,48(%r15) (save registers) */
2539 tcg_out_insn(s
, RXY
, STMG
, TCG_REG_R6
, TCG_REG_R15
, TCG_REG_R15
, 48);
2541 /* aghi %r15,-frame_size */
2542 tcg_out_insn(s
, RI
, AGHI
, TCG_REG_R15
, -FRAME_SIZE
);
2544 tcg_set_frame(s
, TCG_REG_CALL_STACK
,
2545 TCG_STATIC_CALL_ARGS_SIZE
+ TCG_TARGET_CALL_STACK_OFFSET
,
2546 CPU_TEMP_BUF_NLONGS
* sizeof(long));
2548 #ifndef CONFIG_SOFTMMU
2549 if (guest_base
>= 0x80000) {
2550 tcg_out_movi_int(s
, TCG_TYPE_PTR
, TCG_GUEST_BASE_REG
, guest_base
, true);
2551 tcg_regset_set_reg(s
->reserved_regs
, TCG_GUEST_BASE_REG
);
2555 tcg_out_mov(s
, TCG_TYPE_PTR
, TCG_AREG0
, tcg_target_call_iarg_regs
[0]);
2557 tcg_out_mov(s
, TCG_TYPE_PTR
, TCG_REG_TB
,
2558 tcg_target_call_iarg_regs
[1]);
2561 /* br %r3 (go to TB) */
2562 tcg_out_insn(s
, RR
, BCR
, S390_CC_ALWAYS
, tcg_target_call_iarg_regs
[1]);
2565 * Return path for goto_ptr. Set return value to 0, a-la exit_tb,
2566 * and fall through to the rest of the epilogue.
2568 s
->code_gen_epilogue
= s
->code_ptr
;
2569 tcg_out_movi(s
, TCG_TYPE_PTR
, TCG_REG_R2
, 0);
2572 tb_ret_addr
= s
->code_ptr
;
2574 /* lmg %r6,%r15,fs+48(%r15) (restore registers) */
2575 tcg_out_insn(s
, RXY
, LMG
, TCG_REG_R6
, TCG_REG_R15
, TCG_REG_R15
,
2578 /* br %r14 (return) */
2579 tcg_out_insn(s
, RR
, BCR
, S390_CC_ALWAYS
, TCG_REG_R14
);
2582 static void tcg_out_nop_fill(tcg_insn_unit
*p
, int count
)
2584 memset(p
, 0x07, count
* sizeof(tcg_insn_unit
));
2589 uint8_t fde_def_cfa
[4];
2590 uint8_t fde_reg_ofs
[18];
2593 /* We're expecting a 2 byte uleb128 encoded value. */
2594 QEMU_BUILD_BUG_ON(FRAME_SIZE
>= (1 << 14));
2596 #define ELF_HOST_MACHINE EM_S390
2598 static const DebugFrame debug_frame
= {
2599 .h
.cie
.len
= sizeof(DebugFrameCIE
)-4, /* length after .len member */
2602 .h
.cie
.code_align
= 1,
2603 .h
.cie
.data_align
= 8, /* sleb128 8 */
2604 .h
.cie
.return_column
= TCG_REG_R14
,
2606 /* Total FDE size does not include the "len" member. */
2607 .h
.fde
.len
= sizeof(DebugFrame
) - offsetof(DebugFrame
, h
.fde
.cie_offset
),
2610 12, TCG_REG_CALL_STACK
, /* DW_CFA_def_cfa %r15, ... */
2611 (FRAME_SIZE
& 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */
2615 0x86, 6, /* DW_CFA_offset, %r6, 48 */
2616 0x87, 7, /* DW_CFA_offset, %r7, 56 */
2617 0x88, 8, /* DW_CFA_offset, %r8, 64 */
2618 0x89, 9, /* DW_CFA_offset, %r92, 72 */
2619 0x8a, 10, /* DW_CFA_offset, %r10, 80 */
2620 0x8b, 11, /* DW_CFA_offset, %r11, 88 */
2621 0x8c, 12, /* DW_CFA_offset, %r12, 96 */
2622 0x8d, 13, /* DW_CFA_offset, %r13, 104 */
2623 0x8e, 14, /* DW_CFA_offset, %r14, 112 */
2627 void tcg_register_jit(void *buf
, size_t buf_size
)
2629 tcg_register_jit_int(buf
, buf_size
, &debug_frame
, sizeof(debug_frame
));