/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2009 Ulrich Hecht <uli@suse.de>
 * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
 * Copyright (c) 2010 Richard Henderson <rth@twiddle.net>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
/* We only support generating code for 64-bit mode.  */
#if TCG_TARGET_REG_BITS != 64
#error "unsupported code generation mode"
#endif
/* ??? The translation blocks produced by TCG are generally small enough to
   be entirely reachable with a 16-bit displacement.  Leaving the option for
   a 32-bit displacement here Just In Case.  */
#define USE_LONG_BRANCHES 0
#define TCG_CT_CONST_32    0x0100
#define TCG_CT_CONST_MULI  0x0800
#define TCG_CT_CONST_ORI   0x2000
#define TCG_CT_CONST_XORI  0x4000
#define TCG_CT_CONST_CMPI  0x8000
/* In several places within the instruction set, 0 means "no register"
   rather than TCG_REG_R0.  */
#define TCG_REG_NONE    0
/* A scratch register that may be used throughout the backend.  */
#define TCG_TMP0        TCG_REG_R14
#ifdef CONFIG_USE_GUEST_BASE
#define TCG_GUEST_BASE_REG TCG_REG_R13
#else
#define TCG_GUEST_BASE_REG TCG_REG_R0
#endif
/* All of the following instructions are prefixed with their instruction
   format, and are defined as 8- or 16-bit quantities, even when the two
   halves of the 16-bit quantity may appear 32 bits apart in the insn.
   This makes it easy to copy the values from the tables in Appendix B.  */
typedef enum S390Opcode {
#define LD_SIGNED      0x04
#define LD_UINT8       0x00
#define LD_INT8        (LD_UINT8 | LD_SIGNED)
#define LD_UINT16      0x01
#define LD_INT16       (LD_UINT16 | LD_SIGNED)
#define LD_UINT32      0x02
#define LD_INT32       (LD_UINT32 | LD_SIGNED)
#define LD_UINT64      0x03
#define LD_INT64       (LD_UINT64 | LD_SIGNED)
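/* Note how the values above pack the access size into the low two bits
   (so s_bits == opc & 3 indexes the byte/halfword/word/doubleword helper
   tables below) and the signedness into bit 2; e.g. LD_INT16 is
   0x01 | 0x04 == 0x05: a halfword access, sign-extended.  */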
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
    "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15"
};
/* Since R6 is a potential argument register, choose it last of the
   call-saved registers.  Likewise prefer the call-clobbered registers
   in reverse order to maximize the chance of avoiding the arguments.  */
static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R13,
    TCG_REG_R12,
    TCG_REG_R11,
    TCG_REG_R10,
    TCG_REG_R9,
    TCG_REG_R8,
    TCG_REG_R7,
    TCG_REG_R6,
    TCG_REG_R14,
    TCG_REG_R0,
    TCG_REG_R1,
    TCG_REG_R5,
    TCG_REG_R4,
    TCG_REG_R3,
    TCG_REG_R2,
};
static const int tcg_target_call_iarg_regs[] = {
    TCG_REG_R2,
    TCG_REG_R3,
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
};

static const int tcg_target_call_oarg_regs[] = {
    TCG_REG_R2,
};
#define S390_CC_EQ      8
#define S390_CC_LT      4
#define S390_CC_GT      2
#define S390_CC_OV      1
#define S390_CC_NE      (S390_CC_LT | S390_CC_GT)
#define S390_CC_LE      (S390_CC_LT | S390_CC_EQ)
#define S390_CC_GE      (S390_CC_GT | S390_CC_EQ)
#define S390_CC_NEVER   0
#define S390_CC_ALWAYS  15
/* Condition codes that result from a COMPARE and COMPARE LOGICAL.  */
static const uint8_t tcg_cond_to_s390_cond[] = {
    [TCG_COND_EQ]  = S390_CC_EQ,
    [TCG_COND_NE]  = S390_CC_NE,
    [TCG_COND_LT]  = S390_CC_LT,
    [TCG_COND_LE]  = S390_CC_LE,
    [TCG_COND_GT]  = S390_CC_GT,
    [TCG_COND_GE]  = S390_CC_GE,
    [TCG_COND_LTU] = S390_CC_LT,
    [TCG_COND_LEU] = S390_CC_LE,
    [TCG_COND_GTU] = S390_CC_GT,
    [TCG_COND_GEU] = S390_CC_GE,
};
/* Condition codes that result from a LOAD AND TEST.  Here we have no
   unsigned instruction variation; however, since the test is against
   zero we can re-map the outcomes appropriately.  */
static const uint8_t tcg_cond_to_ltr_cond[] = {
    [TCG_COND_EQ]  = S390_CC_EQ,
    [TCG_COND_NE]  = S390_CC_NE,
    [TCG_COND_LT]  = S390_CC_LT,
    [TCG_COND_LE]  = S390_CC_LE,
    [TCG_COND_GT]  = S390_CC_GT,
    [TCG_COND_GE]  = S390_CC_GE,
    [TCG_COND_LTU] = S390_CC_NEVER,
    [TCG_COND_LEU] = S390_CC_EQ,
    [TCG_COND_GTU] = S390_CC_NE,
    [TCG_COND_GEU] = S390_CC_ALWAYS,
};
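/* The remapping above works because LOAD AND TEST compares against zero:
   for an unsigned value, "< 0" can never be true and ">= 0" is always
   true, while "<= 0" and "> 0" collapse to "== 0" and "!= 0".  */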
#ifdef CONFIG_SOFTMMU

#include "exec/softmmu_defs.h"

/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
   int mmu_idx) */
static const void * const qemu_ld_helpers[4] = {
    helper_ldb_mmu,
    helper_ldw_mmu,
    helper_ldl_mmu,
    helper_ldq_mmu,
};

/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
   uintxx_t val, int mmu_idx) */
static const void * const qemu_st_helpers[4] = {
    helper_stb_mmu,
    helper_stw_mmu,
    helper_stl_mmu,
    helper_stq_mmu,
};
#endif

static uint8_t *tb_ret_addr;
/* A list of relevant facilities used by this translator.  Some of these
   are required for proper operation, and these are checked at startup.  */

#define FACILITY_ZARCH_ACTIVE	(1ULL << (63 - 2))
#define FACILITY_LONG_DISP	(1ULL << (63 - 18))
#define FACILITY_EXT_IMM	(1ULL << (63 - 21))
#define FACILITY_GEN_INST_EXT	(1ULL << (63 - 34))
#define FACILITY_LOAD_ON_COND   (1ULL << (63 - 45))
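/* The facility list is stored with facility bit 0 as the most-significant
   bit of the first doubleword, so on this big-endian host facility bit N
   is tested as (1ULL << (63 - N)); e.g. long-displacement is facility 18,
   hence (1ULL << (63 - 18)) above.  */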
static uint64_t facilities;
static void patch_reloc(uint8_t *code_ptr, int type,
                        tcg_target_long value, tcg_target_long addend)
{
    tcg_target_long code_ptr_tl = (tcg_target_long)code_ptr;
    tcg_target_long pcrel2;

    /* ??? Not the usual definition of "addend".  */
    pcrel2 = (value - (code_ptr_tl + addend)) >> 1;

    switch (type) {
    case R_390_PC16DBL:
        assert(pcrel2 == (int16_t)pcrel2);
        *(int16_t *)code_ptr = pcrel2;
        break;
    case R_390_PC32DBL:
        assert(pcrel2 == (int32_t)pcrel2);
        *(int32_t *)code_ptr = pcrel2;
        break;
    default:
        tcg_abort();
        break;
    }
}
/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str = *pct_str;

    switch (ct_str[0]) {
    case 'r':                  /* all registers */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffff);
        break;
    case 'R':                  /* not R0 */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        break;
    case 'L':                  /* qemu_ld/st constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R4);
        break;
    case 'a':                  /* force R2 for division */
        ct->ct |= TCG_CT_REG;
        tcg_regset_clear(ct->u.regs);
        tcg_regset_set_reg(ct->u.regs, TCG_REG_R2);
        break;
    case 'b':                  /* force R3 for division */
        ct->ct |= TCG_CT_REG;
        tcg_regset_clear(ct->u.regs);
        tcg_regset_set_reg(ct->u.regs, TCG_REG_R3);
        break;
    case 'W':                  /* force 32-bit ("word") immediate */
        ct->ct |= TCG_CT_CONST_32;
        break;
    case 'K':
        ct->ct |= TCG_CT_CONST_MULI;
        break;
    case 'O':
        ct->ct |= TCG_CT_CONST_ORI;
        break;
    case 'X':
        ct->ct |= TCG_CT_CONST_XORI;
        break;
    case 'C':
        ct->ct |= TCG_CT_CONST_CMPI;
        break;
    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;

    return 0;
}
/* Immediates to be used with logical OR.  This is an optimization only,
   since a full 64-bit immediate OR can always be performed with 4 sequential
   OI[LH][LH] instructions.  What we're looking for is immediates that we
   can load efficiently, and the immediate load plus the reg-reg OR is
   smaller than the sequential OI's.  */

static int tcg_match_ori(int ct, tcg_target_long val)
{
    if (facilities & FACILITY_EXT_IMM) {
        if (ct & TCG_CT_CONST_32) {
            /* All 32-bit ORs can be performed with 1 48-bit insn.  */
            return 1;
        }
    }

    /* Look for negative values.  These are best to load with LGHI.  */
    if (val < 0) {
        if (val == (int16_t)val) {
            return 0;
        }
        if (facilities & FACILITY_EXT_IMM) {
            if (val == (int32_t)val) {
                return 0;
            }
        }
    }

    return 1;
}
/* Immediates to be used with logical XOR.  This is almost, but not quite,
   only an optimization.  XOR with immediate is only supported with the
   extended-immediate facility.  That said, there are a few patterns for
   which it is better to load the value into a register first.  */

static int tcg_match_xori(int ct, tcg_target_long val)
{
    if ((facilities & FACILITY_EXT_IMM) == 0) {
        return 0;
    }

    if (ct & TCG_CT_CONST_32) {
        /* All 32-bit XORs can be performed with 1 48-bit insn.  */
        return 1;
    }

    /* Look for negative values.  These are best to load with LGHI.  */
    if (val < 0 && val == (int32_t)val) {
        return 0;
    }

    return 1;
}
/* Immediates to be used with comparisons.  */

static int tcg_match_cmpi(int ct, tcg_target_long val)
{
    if (facilities & FACILITY_EXT_IMM) {
        /* The COMPARE IMMEDIATE instruction is available.  */
        if (ct & TCG_CT_CONST_32) {
            /* We have a 32-bit immediate and can compare against anything.  */
            return 1;
        } else {
            /* ??? We have no insight here into whether the comparison is
               signed or unsigned.  The COMPARE IMMEDIATE insn uses a 32-bit
               signed immediate, and the COMPARE LOGICAL IMMEDIATE insn uses
               a 32-bit unsigned immediate.  If we were to use the (semi)
               obvious "val == (int32_t)val" we would be enabling unsigned
               comparisons vs very large numbers.  The only solution is to
               take the intersection of the ranges.  */
            /* ??? Another possible solution is to simply lie and allow all
               constants here and force the out-of-range values into a temp
               register in tgen_cmp when we have knowledge of the actual
               comparison code in use.  */
            return val >= 0 && val <= 0x7fffffff;
        }
    } else {
        /* Only the LOAD AND TEST instruction is available.  */
        return val == 0;
    }
}
/* Test if a constant matches the constraint.  */
static int tcg_target_const_match(tcg_target_long val,
                                  const TCGArgConstraint *arg_ct)
{
    int ct = arg_ct->ct;

    if (ct & TCG_CT_CONST) {
        return 1;
    }

    /* Handle the modifiers.  */
    if (ct & TCG_CT_CONST_32) {
        val = (int32_t)val;
    }

    /* The following are mutually exclusive.  */
    if (ct & TCG_CT_CONST_MULI) {
        /* Immediates that may be used with multiply.  If we have the
           general-instruction-extensions, then we have MULTIPLY SINGLE
           IMMEDIATE with a signed 32-bit, otherwise we have only
           MULTIPLY HALFWORD IMMEDIATE, with a signed 16-bit.  */
        if (facilities & FACILITY_GEN_INST_EXT) {
            return val == (int32_t)val;
        } else {
            return val == (int16_t)val;
        }
    } else if (ct & TCG_CT_CONST_ORI) {
        return tcg_match_ori(ct, val);
    } else if (ct & TCG_CT_CONST_XORI) {
        return tcg_match_xori(ct, val);
    } else if (ct & TCG_CT_CONST_CMPI) {
        return tcg_match_cmpi(ct, val);
    }

    return 0;
}
/* Emit instructions according to the given instruction format.  */

static void tcg_out_insn_RR(TCGContext *s, S390Opcode op, TCGReg r1, TCGReg r2)
{
    tcg_out16(s, (op << 8) | (r1 << 4) | r2);
}

static void tcg_out_insn_RRE(TCGContext *s, S390Opcode op,
                             TCGReg r1, TCGReg r2)
{
    tcg_out32(s, (op << 16) | (r1 << 4) | r2);
}

static void tcg_out_insn_RRF(TCGContext *s, S390Opcode op,
                             TCGReg r1, TCGReg r2, int m3)
{
    tcg_out32(s, (op << 16) | (m3 << 12) | (r1 << 4) | r2);
}

static void tcg_out_insn_RI(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
{
    tcg_out32(s, (op << 16) | (r1 << 20) | (i2 & 0xffff));
}

static void tcg_out_insn_RIL(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
{
    tcg_out16(s, op | (r1 << 4));
    tcg_out32(s, i2);
}

static void tcg_out_insn_RS(TCGContext *s, S390Opcode op, TCGReg r1,
                            TCGReg b2, TCGReg r3, int disp)
{
    tcg_out32(s, (op << 24) | (r1 << 20) | (r3 << 16) | (b2 << 12)
              | (disp & 0xfff));
}

static void tcg_out_insn_RSY(TCGContext *s, S390Opcode op, TCGReg r1,
                             TCGReg b2, TCGReg r3, int disp)
{
    tcg_out16(s, (op & 0xff00) | (r1 << 4) | r3);
    tcg_out32(s, (op & 0xff) | (b2 << 28)
              | ((disp & 0xfff) << 16) | ((disp & 0xff000) >> 4));
}

#define tcg_out_insn_RX   tcg_out_insn_RS
#define tcg_out_insn_RXY  tcg_out_insn_RSY

/* Emit an opcode with "type-checking" of the format.  */
#define tcg_out_insn(S, FMT, OP, ...) \
    glue(tcg_out_insn_,FMT)(S, glue(glue(FMT,_),OP), ## __VA_ARGS__)
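/* As an example of the macro above, tcg_out_insn(s, RI, AHI, r, i)
   expands to tcg_out_insn_RI(s, RI_AHI, r, i).  Pairing an opcode with
   the wrong format fails to compile, since the glued enumerator (say,
   RR_AHI) does not exist.  */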
/* emit 64-bit shifts */
static void tcg_out_sh64(TCGContext* s, S390Opcode op, TCGReg dest,
                         TCGReg src, TCGReg sh_reg, int sh_imm)
{
    tcg_out_insn_RSY(s, op, dest, sh_reg, src, sh_imm);
}

/* emit 32-bit shifts */
static void tcg_out_sh32(TCGContext* s, S390Opcode op, TCGReg dest,
                         TCGReg sh_reg, int sh_imm)
{
    tcg_out_insn_RS(s, op, dest, sh_reg, 0, sh_imm);
}
static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg dst, TCGReg src)
{
    if (type == TCG_TYPE_I32) {
        tcg_out_insn(s, RR, LR, dst, src);
    } else {
        tcg_out_insn(s, RRE, LGR, dst, src);
    }
}
/* load a register with an immediate value */
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long sval)
{
    static const S390Opcode lli_insns[4] = {
        RI_LLILL, RI_LLILH, RI_LLIHL, RI_LLIHH
    };

    tcg_target_ulong uval = sval;
    int i;

    if (type == TCG_TYPE_I32) {
        uval = (uint32_t)sval;
        sval = (int32_t)sval;
    }

    /* Try all 32-bit insns that can load it in one go.  */
    if (sval >= -0x8000 && sval < 0x8000) {
        tcg_out_insn(s, RI, LGHI, ret, sval);
        return;
    }

    for (i = 0; i < 4; i++) {
        tcg_target_long mask = 0xffffull << i*16;
        if ((uval & mask) == uval) {
            tcg_out_insn_RI(s, lli_insns[i], ret, uval >> i*16);
            return;
        }
    }

    /* Try all 48-bit insns that can load it in one go.  */
    if (facilities & FACILITY_EXT_IMM) {
        if (sval == (int32_t)sval) {
            tcg_out_insn(s, RIL, LGFI, ret, sval);
            return;
        }
        if (uval <= 0xffffffff) {
            tcg_out_insn(s, RIL, LLILF, ret, uval);
            return;
        }
        if ((uval & 0xffffffff) == 0) {
            tcg_out_insn(s, RIL, LLIHF, ret, uval >> 31 >> 1);
            return;
        }
    }

    /* Try for PC-relative address load.  */
    if ((sval & 1) == 0) {
        intptr_t off = (sval - (intptr_t)s->code_ptr) >> 1;
        if (off == (int32_t)off) {
            tcg_out_insn(s, RIL, LARL, ret, off);
            return;
        }
    }

    /* If extended immediates are not present, then we may have to issue
       several instructions to load the low 32 bits.  */
    if (!(facilities & FACILITY_EXT_IMM)) {
        /* A 32-bit unsigned value can be loaded in 2 insns.  And given
           that the lli_insns loop above did not succeed, we know that
           both insns are required.  */
        if (uval <= 0xffffffff) {
            tcg_out_insn(s, RI, LLILL, ret, uval);
            tcg_out_insn(s, RI, IILH, ret, uval >> 16);
            return;
        }

        /* If all high bits are set, the value can be loaded in 2 or 3 insns.
           We first want to make sure that all the high bits get set.  With
           luck the low 16-bits can be considered negative to perform that for
           free, otherwise we load an explicit -1.  */
        if (sval >> 31 >> 1 == -1) {
            if (uval & 0x8000) {
                tcg_out_insn(s, RI, LGHI, ret, uval);
            } else {
                tcg_out_insn(s, RI, LGHI, ret, -1);
                tcg_out_insn(s, RI, IILL, ret, uval);
            }
            tcg_out_insn(s, RI, IILH, ret, uval >> 16);
            return;
        }
    }

    /* If we get here, both the high and low parts have non-zero bits.  */

    /* Recurse to load the lower 32-bits.  */
    tcg_out_movi(s, TCG_TYPE_I64, ret, uval & 0xffffffff);

    /* Insert data into the high 32-bits.  */
    uval = uval >> 31 >> 1;
    if (facilities & FACILITY_EXT_IMM) {
        if (uval < 0x10000) {
            tcg_out_insn(s, RI, IIHL, ret, uval);
        } else if ((uval & 0xffff) == 0) {
            tcg_out_insn(s, RI, IIHH, ret, uval >> 16);
        } else {
            tcg_out_insn(s, RIL, IIHF, ret, uval);
        }
    } else {
        if (uval & 0xffff) {
            tcg_out_insn(s, RI, IIHL, ret, uval);
        }
        if (uval & 0xffff0000) {
            tcg_out_insn(s, RI, IIHH, ret, uval >> 16);
        }
    }
}
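/* Worked example, assuming the extended-immediate facility and a LARL
   displacement that is out of range: for sval = 0x123456789abcdef0 no
   single insn matches, so we recurse to load the low half (LLILF
   0x9abcdef0) and then insert the high half with IIHF 0x12345678.
   The "uval >> 31 >> 1" spelling is a shift by 32 written so that it
   stays well-defined even for an operand type only 32 bits wide.  */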
/* Emit a load/store type instruction.  Inputs are:
   DATA:     The register to be loaded or stored.
   BASE+OFS: The effective address.
   OPC_RX:   If the operation has an RX format opcode (e.g. STC), otherwise 0.
   OPC_RXY:  The RXY format opcode for the operation (e.g. STCY).  */

static void tcg_out_mem(TCGContext *s, S390Opcode opc_rx, S390Opcode opc_rxy,
                        TCGReg data, TCGReg base, TCGReg index,
                        tcg_target_long ofs)
{
    if (ofs < -0x80000 || ofs >= 0x80000) {
        /* Combine the low 20 bits of the offset with the actual load insn;
           the high 44 bits must come from an immediate load.  */
        tcg_target_long low = ((ofs & 0xfffff) ^ 0x80000) - 0x80000;
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, ofs - low);
        ofs = low;

        /* If we were already given an index register, add it in.  */
        if (index != TCG_REG_NONE) {
            tcg_out_insn(s, RRE, AGR, TCG_TMP0, index);
        }
        index = TCG_TMP0;
    }

    if (opc_rx && ofs >= 0 && ofs < 0x1000) {
        tcg_out_insn_RX(s, opc_rx, data, base, index, ofs);
    } else {
        tcg_out_insn_RXY(s, opc_rxy, data, base, index, ofs);
    }
}
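/* Illustration of the displacement split above: RXY displacements are
   signed 20-bit values (-0x80000 .. 0x7ffff).  The expression
   ((ofs & 0xfffff) ^ 0x80000) - 0x80000 sign-extends the low 20 bits,
   so e.g. ofs = 0x87654 gives low = -0x789ac; TCG_TMP0 is then loaded
   with 0x100000 and the insn itself carries the -0x789ac displacement.  */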
/* load data without address translation or endianness conversion */
static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg data,
                              TCGReg base, tcg_target_long ofs)
{
    if (type == TCG_TYPE_I32) {
        tcg_out_mem(s, RX_L, RXY_LY, data, base, TCG_REG_NONE, ofs);
    } else {
        tcg_out_mem(s, 0, RXY_LG, data, base, TCG_REG_NONE, ofs);
    }
}

static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg data,
                              TCGReg base, tcg_target_long ofs)
{
    if (type == TCG_TYPE_I32) {
        tcg_out_mem(s, RX_ST, RXY_STY, data, base, TCG_REG_NONE, ofs);
    } else {
        tcg_out_mem(s, 0, RXY_STG, data, base, TCG_REG_NONE, ofs);
    }
}
/* load data from an absolute host address */
static void tcg_out_ld_abs(TCGContext *s, TCGType type, TCGReg dest, void *abs)
{
    tcg_target_long addr = (tcg_target_long)abs;

    if (facilities & FACILITY_GEN_INST_EXT) {
        tcg_target_long disp = (addr - (tcg_target_long)s->code_ptr) >> 1;
        if (disp == (int32_t)disp) {
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RIL, LRL, dest, disp);
            } else {
                tcg_out_insn(s, RIL, LGRL, dest, disp);
            }
            return;
        }
    }

    tcg_out_movi(s, TCG_TYPE_PTR, dest, addr & ~0xffff);
    tcg_out_ld(s, type, dest, dest, addr & 0xffff);
}
static inline void tcg_out_risbg(TCGContext *s, TCGReg dest, TCGReg src,
                                 int msb, int lsb, int ofs, int z)
{
    tcg_out16(s, (RIE_RISBG & 0xff00) | (dest << 4) | src);
    tcg_out16(s, (msb << 8) | (z << 7) | lsb);
    tcg_out16(s, (ofs << 8) | (RIE_RISBG & 0xff));
}
static void tgen_ext8s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    if (facilities & FACILITY_EXT_IMM) {
        tcg_out_insn(s, RRE, LGBR, dest, src);
        return;
    }

    if (type == TCG_TYPE_I32) {
        if (dest == src) {
            tcg_out_sh32(s, RS_SLL, dest, TCG_REG_NONE, 24);
        } else {
            tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 24);
        }
        tcg_out_sh32(s, RS_SRA, dest, TCG_REG_NONE, 24);
    } else {
        tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 56);
        tcg_out_sh64(s, RSY_SRAG, dest, dest, TCG_REG_NONE, 56);
    }
}
static void tgen_ext8u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    if (facilities & FACILITY_EXT_IMM) {
        tcg_out_insn(s, RRE, LLGCR, dest, src);
        return;
    }

    if (dest == src) {
        tcg_out_movi(s, type, TCG_TMP0, 0xff);
        src = TCG_TMP0;
    } else {
        tcg_out_movi(s, type, dest, 0xff);
    }
    if (type == TCG_TYPE_I32) {
        tcg_out_insn(s, RR, NR, dest, src);
    } else {
        tcg_out_insn(s, RRE, NGR, dest, src);
    }
}
static void tgen_ext16s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    if (facilities & FACILITY_EXT_IMM) {
        tcg_out_insn(s, RRE, LGHR, dest, src);
        return;
    }

    if (type == TCG_TYPE_I32) {
        if (dest == src) {
            tcg_out_sh32(s, RS_SLL, dest, TCG_REG_NONE, 16);
        } else {
            tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 16);
        }
        tcg_out_sh32(s, RS_SRA, dest, TCG_REG_NONE, 16);
    } else {
        tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 48);
        tcg_out_sh64(s, RSY_SRAG, dest, dest, TCG_REG_NONE, 48);
    }
}
static void tgen_ext16u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    if (facilities & FACILITY_EXT_IMM) {
        tcg_out_insn(s, RRE, LLGHR, dest, src);
        return;
    }

    if (dest == src) {
        tcg_out_movi(s, type, TCG_TMP0, 0xffff);
        src = TCG_TMP0;
    } else {
        tcg_out_movi(s, type, dest, 0xffff);
    }
    if (type == TCG_TYPE_I32) {
        tcg_out_insn(s, RR, NR, dest, src);
    } else {
        tcg_out_insn(s, RRE, NGR, dest, src);
    }
}
static inline void tgen_ext32s(TCGContext *s, TCGReg dest, TCGReg src)
{
    tcg_out_insn(s, RRE, LGFR, dest, src);
}

static inline void tgen_ext32u(TCGContext *s, TCGReg dest, TCGReg src)
{
    tcg_out_insn(s, RRE, LLGFR, dest, src);
}
/* Accept bit patterns like these:
    0....01....1
    1....10....0
    1..10..01..1
    0..01..10..0
   Copied from gcc sources.  */
static inline bool risbg_mask(uint64_t c)
{
    uint64_t lsb;
    /* We don't change the number of transitions by inverting,
       so make sure we start with the LSB zero.  */
    if (c & 1) {
        c = ~c;
    }
    /* Reject all zeros or all ones.  */
    if (c == 0) {
        return false;
    }
    /* Find the first transition.  */
    lsb = c & -c;
    /* Invert to look for a second transition.  */
    c = ~c;
    /* Erase the first transition.  */
    c &= -lsb;
    /* Find the second transition, if any.  */
    lsb = c & -c;
    /* Match if all the bits are 1's, or if c is zero.  */
    return c == -lsb;
}
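/* Worked example: c = 0x0000ff00 starts with its LSB clear and is
   non-zero; the first transition yields lsb = 0x100, and after inverting
   and erasing the bits below it we are left with 0xffffffffffff0000,
   which equals the negation of its own lowest set bit -- i.e. exactly
   one contiguous (possibly wrapping) run of ones, as RISBG requires.  */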
static void tgen_andi(TCGContext *s, TCGType type, TCGReg dest, uint64_t val)
{
    static const S390Opcode ni_insns[4] = {
        RI_NILL, RI_NILH, RI_NIHL, RI_NIHH
    };
    static const S390Opcode nif_insns[2] = {
        RIL_NILF, RIL_NIHF
    };

    uint64_t valid = (type == TCG_TYPE_I32 ? 0xffffffffull : -1ull);
    int i;

    /* Look for the zero-extensions.  */
    if ((val & valid) == 0xffffffff) {
        tgen_ext32u(s, dest, dest);
        return;
    }

    if (facilities & FACILITY_EXT_IMM) {
        if ((val & valid) == 0xff) {
            tgen_ext8u(s, TCG_TYPE_I64, dest, dest);
            return;
        }
        if ((val & valid) == 0xffff) {
            tgen_ext16u(s, TCG_TYPE_I64, dest, dest);
            return;
        }
    }

    /* Try all 32-bit insns that can perform it in one go.  */
    for (i = 0; i < 4; i++) {
        tcg_target_ulong mask = ~(0xffffull << i*16);
        if (((val | ~valid) & mask) == mask) {
            tcg_out_insn_RI(s, ni_insns[i], dest, val >> i*16);
            return;
        }
    }

    /* Try all 48-bit insns that can perform it in one go.  */
    if (facilities & FACILITY_EXT_IMM) {
        for (i = 0; i < 2; i++) {
            tcg_target_ulong mask = ~(0xffffffffull << i*32);
            if (((val | ~valid) & mask) == mask) {
                tcg_out_insn_RIL(s, nif_insns[i], dest, val >> i*32);
                return;
            }
        }
    }

    if ((facilities & FACILITY_GEN_INST_EXT) && risbg_mask(val)) {
        int msb, lsb;
        if ((val & 0x8000000000000001ull) == 0x8000000000000001ull) {
            /* Achieve wraparound by swapping msb and lsb.  */
            msb = 63 - ctz64(~val);
            lsb = clz64(~val) + 1;
        } else {
            msb = clz64(val);
            lsb = 63 - ctz64(val);
        }
        tcg_out_risbg(s, dest, dest, msb, lsb, 0, 1);
        return;
    }

    /* Fall back to loading the constant.  */
    tcg_out_movi(s, type, TCG_TMP0, val);
    if (type == TCG_TYPE_I32) {
        tcg_out_insn(s, RR, NR, dest, TCG_TMP0);
    } else {
        tcg_out_insn(s, RRE, NGR, dest, TCG_TMP0);
    }
}
static void tgen64_ori(TCGContext *s, TCGReg dest, tcg_target_ulong val)
{
    static const S390Opcode oi_insns[4] = {
        RI_OILL, RI_OILH, RI_OIHL, RI_OIHH
    };
    static const S390Opcode nif_insns[2] = {
        RIL_OILF, RIL_OIHF
    };

    int i;

    /* Look for no-op.  */
    if (val == 0) {
        return;
    }

    if (facilities & FACILITY_EXT_IMM) {
        /* Try all 32-bit insns that can perform it in one go.  */
        for (i = 0; i < 4; i++) {
            tcg_target_ulong mask = (0xffffull << i*16);
            if ((val & mask) != 0 && (val & ~mask) == 0) {
                tcg_out_insn_RI(s, oi_insns[i], dest, val >> i*16);
                return;
            }
        }

        /* Try all 48-bit insns that can perform it in one go.  */
        for (i = 0; i < 2; i++) {
            tcg_target_ulong mask = (0xffffffffull << i*32);
            if ((val & mask) != 0 && (val & ~mask) == 0) {
                tcg_out_insn_RIL(s, nif_insns[i], dest, val >> i*32);
                return;
            }
        }

        /* Perform the OR via sequential modifications to the high and
           low parts.  Do this via recursion to handle 16-bit vs 32-bit
           masks in each half.  */
        tgen64_ori(s, dest, val & 0x00000000ffffffffull);
        tgen64_ori(s, dest, val & 0xffffffff00000000ull);
    } else {
        /* With no extended-immediate facility, we don't need to be so
           clever.  Just iterate over the insns and mask in the constant.  */
        for (i = 0; i < 4; i++) {
            tcg_target_ulong mask = (0xffffull << i*16);
            if ((val & mask) != 0) {
                tcg_out_insn_RI(s, oi_insns[i], dest, val >> i*16);
            }
        }
    }
}
static void tgen64_xori(TCGContext *s, TCGReg dest, tcg_target_ulong val)
{
    /* Perform the xor by parts.  */
    if (val & 0xffffffff) {
        tcg_out_insn(s, RIL, XILF, dest, val);
    }
    if (val > 0xffffffff) {
        tcg_out_insn(s, RIL, XIHF, dest, val >> 31 >> 1);
    }
}
static int tgen_cmp(TCGContext *s, TCGType type, TCGCond c, TCGReg r1,
                    TCGArg c2, int c2const)
{
    bool is_unsigned = is_unsigned_cond(c);
    if (c2const) {
        if (c2 == 0) {
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RR, LTR, r1, r1);
            } else {
                tcg_out_insn(s, RRE, LTGR, r1, r1);
            }
            return tcg_cond_to_ltr_cond[c];
        } else {
            if (is_unsigned) {
                if (type == TCG_TYPE_I32) {
                    tcg_out_insn(s, RIL, CLFI, r1, c2);
                } else {
                    tcg_out_insn(s, RIL, CLGFI, r1, c2);
                }
            } else {
                if (type == TCG_TYPE_I32) {
                    tcg_out_insn(s, RIL, CFI, r1, c2);
                } else {
                    tcg_out_insn(s, RIL, CGFI, r1, c2);
                }
            }
        }
    } else {
        if (is_unsigned) {
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RR, CLR, r1, c2);
            } else {
                tcg_out_insn(s, RRE, CLGR, r1, c2);
            }
        } else {
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RR, CR, r1, c2);
            } else {
                tcg_out_insn(s, RRE, CGR, r1, c2);
            }
        }
    }
    return tcg_cond_to_s390_cond[c];
}
static void tgen_setcond(TCGContext *s, TCGType type, TCGCond c,
                         TCGReg dest, TCGReg c1, TCGArg c2, int c2const)
{
    int cc = tgen_cmp(s, type, c, c1, c2, c2const);

    /* Emit: r1 = 1; if (cc) goto over; r1 = 0; over:  */
    tcg_out_movi(s, type, dest, 1);
    tcg_out_insn(s, RI, BRC, cc, (4 + 4) >> 1);
    tcg_out_movi(s, type, dest, 0);
}
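/* The (4 + 4) >> 1 branch offset skips the 4-byte BRC insn itself plus
   the following 4-byte load of zero; RI-format branch offsets are
   counted in halfwords relative to the branch instruction address.  */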
static void tgen_movcond(TCGContext *s, TCGType type, TCGCond c, TCGReg dest,
                         TCGReg c1, TCGArg c2, int c2const, TCGReg r3)
{
    int cc;
    if (facilities & FACILITY_LOAD_ON_COND) {
        cc = tgen_cmp(s, type, c, c1, c2, c2const);
        tcg_out_insn(s, RRF, LOCGR, dest, r3, cc);
    } else {
        c = tcg_invert_cond(c);
        cc = tgen_cmp(s, type, c, c1, c2, c2const);

        /* Emit: if (cc) goto over; dest = r3; over:  */
        tcg_out_insn(s, RI, BRC, cc, (4 + 4) >> 1);
        tcg_out_insn(s, RRE, LGR, dest, r3);
    }
}
bool tcg_target_deposit_valid(int ofs, int len)
{
    return (facilities & FACILITY_GEN_INST_EXT) != 0;
}
static void tgen_deposit(TCGContext *s, TCGReg dest, TCGReg src,
                         int ofs, int len)
{
    int lsb = (63 - ofs);
    int msb = lsb - (len - 1);
    tcg_out_risbg(s, dest, src, msb, lsb, ofs, 0);
}
static void tgen_gotoi(TCGContext *s, int cc, tcg_target_long dest)
{
    tcg_target_long off = (dest - (tcg_target_long)s->code_ptr) >> 1;
    if (off > -0x8000 && off < 0x7fff) {
        tcg_out_insn(s, RI, BRC, cc, off);
    } else if (off == (int32_t)off) {
        tcg_out_insn(s, RIL, BRCL, cc, off);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, dest);
        tcg_out_insn(s, RR, BCR, cc, TCG_TMP0);
    }
}
static void tgen_branch(TCGContext *s, int cc, int labelno)
{
    TCGLabel* l = &s->labels[labelno];
    if (l->has_value) {
        tgen_gotoi(s, cc, l->u.value);
    } else if (USE_LONG_BRANCHES) {
        tcg_out16(s, RIL_BRCL | (cc << 4));
        tcg_out_reloc(s, s->code_ptr, R_390_PC32DBL, labelno, -2);
        s->code_ptr += 4;
    } else {
        tcg_out16(s, RI_BRC | (cc << 4));
        tcg_out_reloc(s, s->code_ptr, R_390_PC16DBL, labelno, -2);
        s->code_ptr += 2;
    }
}
static void tgen_compare_branch(TCGContext *s, S390Opcode opc, int cc,
                                TCGReg r1, TCGReg r2, int labelno)
{
    TCGLabel* l = &s->labels[labelno];
    tcg_target_long off;

    if (l->has_value) {
        off = (l->u.value - (tcg_target_long)s->code_ptr) >> 1;
    } else {
        /* We need to keep the offset unchanged for retranslation.  */
        off = ((int16_t *)s->code_ptr)[1];
        tcg_out_reloc(s, s->code_ptr + 2, R_390_PC16DBL, labelno, -2);
    }

    tcg_out16(s, (opc & 0xff00) | (r1 << 4) | r2);
    tcg_out16(s, off);
    tcg_out16(s, cc << 12 | (opc & 0xff));
}
static void tgen_compare_imm_branch(TCGContext *s, S390Opcode opc, int cc,
                                    TCGReg r1, int i2, int labelno)
{
    TCGLabel* l = &s->labels[labelno];
    tcg_target_long off;

    if (l->has_value) {
        off = (l->u.value - (tcg_target_long)s->code_ptr) >> 1;
    } else {
        /* We need to keep the offset unchanged for retranslation.  */
        off = ((int16_t *)s->code_ptr)[1];
        tcg_out_reloc(s, s->code_ptr + 2, R_390_PC16DBL, labelno, -2);
    }

    tcg_out16(s, (opc & 0xff00) | (r1 << 4) | cc);
    tcg_out16(s, off);
    tcg_out16(s, (i2 << 8) | (opc & 0xff));
}
static void tgen_brcond(TCGContext *s, TCGType type, TCGCond c,
                        TCGReg r1, TCGArg c2, int c2const, int labelno)
{
    int cc;

    if (facilities & FACILITY_GEN_INST_EXT) {
        bool is_unsigned = is_unsigned_cond(c);
        bool in_range;
        S390Opcode opc;

        cc = tcg_cond_to_s390_cond[c];

        if (!c2const) {
            opc = (type == TCG_TYPE_I32
                   ? (is_unsigned ? RIE_CLRJ : RIE_CRJ)
                   : (is_unsigned ? RIE_CLGRJ : RIE_CGRJ));
            tgen_compare_branch(s, opc, cc, r1, c2, labelno);
            return;
        }

        /* COMPARE IMMEDIATE AND BRANCH RELATIVE has an 8-bit immediate field.
           If the immediate we've been given does not fit that range, we'll
           fall back to separate compare and branch instructions using the
           larger comparison range afforded by COMPARE IMMEDIATE.  */
        if (type == TCG_TYPE_I32) {
            if (is_unsigned) {
                opc = RIE_CLIJ;
                in_range = (uint32_t)c2 == (uint8_t)c2;
            } else {
                opc = RIE_CIJ;
                in_range = (int32_t)c2 == (int8_t)c2;
            }
        } else {
            if (is_unsigned) {
                opc = RIE_CLGIJ;
                in_range = (uint64_t)c2 == (uint8_t)c2;
            } else {
                opc = RIE_CGIJ;
                in_range = (int64_t)c2 == (int8_t)c2;
            }
        }
        if (in_range) {
            tgen_compare_imm_branch(s, opc, cc, r1, c2, labelno);
            return;
        }
    }

    cc = tgen_cmp(s, type, c, r1, c2, c2const);
    tgen_branch(s, cc, labelno);
}
static void tgen_calli(TCGContext *s, tcg_target_long dest)
{
    tcg_target_long off = (dest - (tcg_target_long)s->code_ptr) >> 1;
    if (off == (int32_t)off) {
        tcg_out_insn(s, RIL, BRASL, TCG_REG_R14, off);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, dest);
        tcg_out_insn(s, RR, BASR, TCG_REG_R14, TCG_TMP0);
    }
}
static void tcg_out_qemu_ld_direct(TCGContext *s, int opc, TCGReg data,
                                   TCGReg base, TCGReg index, int disp)
{
#ifdef TARGET_WORDS_BIGENDIAN
    const int bswap = 0;
#else
    const int bswap = 1;
#endif
    switch (opc) {
    case LD_UINT8:
        tcg_out_insn(s, RXY, LLGC, data, base, index, disp);
        break;
    case LD_INT8:
        tcg_out_insn(s, RXY, LGB, data, base, index, disp);
        break;
    case LD_UINT16:
        if (bswap) {
            /* swapped unsigned halfword load with upper bits zeroed */
            tcg_out_insn(s, RXY, LRVH, data, base, index, disp);
            tgen_ext16u(s, TCG_TYPE_I64, data, data);
        } else {
            tcg_out_insn(s, RXY, LLGH, data, base, index, disp);
        }
        break;
    case LD_INT16:
        if (bswap) {
            /* swapped sign-extended halfword load */
            tcg_out_insn(s, RXY, LRVH, data, base, index, disp);
            tgen_ext16s(s, TCG_TYPE_I64, data, data);
        } else {
            tcg_out_insn(s, RXY, LGH, data, base, index, disp);
        }
        break;
    case LD_UINT32:
        if (bswap) {
            /* swapped unsigned int load with upper bits zeroed */
            tcg_out_insn(s, RXY, LRV, data, base, index, disp);
            tgen_ext32u(s, data, data);
        } else {
            tcg_out_insn(s, RXY, LLGF, data, base, index, disp);
        }
        break;
    case LD_INT32:
        if (bswap) {
            /* swapped sign-extended int load */
            tcg_out_insn(s, RXY, LRV, data, base, index, disp);
            tgen_ext32s(s, data, data);
        } else {
            tcg_out_insn(s, RXY, LGF, data, base, index, disp);
        }
        break;
    case LD_UINT64:
        if (bswap) {
            tcg_out_insn(s, RXY, LRVG, data, base, index, disp);
        } else {
            tcg_out_insn(s, RXY, LG, data, base, index, disp);
        }
        break;
    default:
        tcg_abort();
    }
}
static void tcg_out_qemu_st_direct(TCGContext *s, int opc, TCGReg data,
                                   TCGReg base, TCGReg index, int disp)
{
#ifdef TARGET_WORDS_BIGENDIAN
    const int bswap = 0;
#else
    const int bswap = 1;
#endif
    switch (opc) {
    case LD_UINT8:
        if (disp >= 0 && disp < 0x1000) {
            tcg_out_insn(s, RX, STC, data, base, index, disp);
        } else {
            tcg_out_insn(s, RXY, STCY, data, base, index, disp);
        }
        break;
    case LD_UINT16:
        if (bswap) {
            tcg_out_insn(s, RXY, STRVH, data, base, index, disp);
        } else if (disp >= 0 && disp < 0x1000) {
            tcg_out_insn(s, RX, STH, data, base, index, disp);
        } else {
            tcg_out_insn(s, RXY, STHY, data, base, index, disp);
        }
        break;
    case LD_UINT32:
        if (bswap) {
            tcg_out_insn(s, RXY, STRV, data, base, index, disp);
        } else if (disp >= 0 && disp < 0x1000) {
            tcg_out_insn(s, RX, ST, data, base, index, disp);
        } else {
            tcg_out_insn(s, RXY, STY, data, base, index, disp);
        }
        break;
    case LD_UINT64:
        if (bswap) {
            tcg_out_insn(s, RXY, STRVG, data, base, index, disp);
        } else {
            tcg_out_insn(s, RXY, STG, data, base, index, disp);
        }
        break;
    default:
        tcg_abort();
    }
}
#if defined(CONFIG_SOFTMMU)
static TCGReg tcg_prepare_qemu_ldst(TCGContext* s, TCGReg data_reg,
                                    TCGReg addr_reg, int mem_index, int opc,
                                    uint16_t **label2_ptr_p, int is_store)
{
    const TCGReg arg0 = tcg_target_call_iarg_regs[0];
    const TCGReg arg1 = tcg_target_call_iarg_regs[1];
    const TCGReg arg2 = tcg_target_call_iarg_regs[2];
    const TCGReg arg3 = tcg_target_call_iarg_regs[3];
    int s_bits = opc & 3;
    uint16_t *label1_ptr;
    tcg_target_long ofs;

    if (TARGET_LONG_BITS == 32) {
        tgen_ext32u(s, arg1, addr_reg);
    } else {
        tcg_out_mov(s, TCG_TYPE_I64, arg1, addr_reg);
    }

    tcg_out_sh64(s, RSY_SRLG, arg2, addr_reg, TCG_REG_NONE,
                 TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);

    tgen_andi(s, TCG_TYPE_I64, arg1, TARGET_PAGE_MASK | ((1 << s_bits) - 1));
    tgen_andi(s, TCG_TYPE_I64, arg2, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);

    if (is_store) {
        ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addr_write);
    } else {
        ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addr_read);
    }
    assert(ofs < 0x80000);

    if (TARGET_LONG_BITS == 32) {
        tcg_out_mem(s, RX_C, RXY_CY, arg1, arg2, TCG_AREG0, ofs);
    } else {
        tcg_out_mem(s, 0, RXY_CG, arg1, arg2, TCG_AREG0, ofs);
    }

    if (TARGET_LONG_BITS == 32) {
        tgen_ext32u(s, arg1, addr_reg);
    } else {
        tcg_out_mov(s, TCG_TYPE_I64, arg1, addr_reg);
    }

    label1_ptr = (uint16_t*)s->code_ptr;

    /* je label1 (offset will be patched in later) */
    tcg_out_insn(s, RI, BRC, S390_CC_EQ, 0);

    /* call load/store helper */
    if (is_store) {
        /* Make sure to zero-extend the value to the full register
           for the calling convention.  */
        switch (opc) {
        case LD_UINT8:
            tgen_ext8u(s, TCG_TYPE_I64, arg2, data_reg);
            break;
        case LD_UINT16:
            tgen_ext16u(s, TCG_TYPE_I64, arg2, data_reg);
            break;
        case LD_UINT32:
            tgen_ext32u(s, arg2, data_reg);
            break;
        case LD_UINT64:
            tcg_out_mov(s, TCG_TYPE_I64, arg2, data_reg);
            break;
        default:
            tcg_abort();
        }
        tcg_out_movi(s, TCG_TYPE_I32, arg3, mem_index);
        tcg_out_mov(s, TCG_TYPE_I64, arg0, TCG_AREG0);
        tgen_calli(s, (tcg_target_ulong)qemu_st_helpers[s_bits]);
    } else {
        tcg_out_movi(s, TCG_TYPE_I32, arg2, mem_index);
        tcg_out_mov(s, TCG_TYPE_I64, arg0, TCG_AREG0);
        tgen_calli(s, (tcg_target_ulong)qemu_ld_helpers[s_bits]);

        /* sign extension */
        switch (opc) {
        case LD_INT8:
            tgen_ext8s(s, TCG_TYPE_I64, data_reg, TCG_REG_R2);
            break;
        case LD_INT16:
            tgen_ext16s(s, TCG_TYPE_I64, data_reg, TCG_REG_R2);
            break;
        case LD_INT32:
            tgen_ext32s(s, data_reg, TCG_REG_R2);
            break;
        default:
            /* unsigned -> just copy */
            tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_R2);
            break;
        }
    }

    /* jump to label2 (end) */
    *label2_ptr_p = (uint16_t*)s->code_ptr;

    tcg_out_insn(s, RI, BRC, S390_CC_ALWAYS, 0);

    /* this is label1, patch branch */
    *(label1_ptr + 1) = ((unsigned long)s->code_ptr -
                         (unsigned long)label1_ptr) >> 1;

    ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addend);
    assert(ofs < 0x80000);

    tcg_out_mem(s, 0, RXY_AG, arg1, arg2, TCG_AREG0, ofs);

    return arg1;
}
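/* A sketch of the TLB check emitted above: arg2 becomes the byte offset
   of the TLB entry -- (addr >> TARGET_PAGE_BITS) * sizeof(CPUTLBEntry),
   masked to the table size -- computed with a single shift by
   TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS.  arg1 holds the comparison tag
   addr & (TARGET_PAGE_MASK | access_size - 1); including the low bits in
   the tag also forces unaligned accesses into the slow path.  On a tag
   match, the entry's addend is added to yield the host address in arg1.  */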
static void tcg_finish_qemu_ldst(TCGContext* s, uint16_t *label2_ptr)
{
    /* patch branch */
    *(label2_ptr + 1) = ((unsigned long)s->code_ptr -
                         (unsigned long)label2_ptr) >> 1;
}
#else
static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg,
                                  TCGReg *index_reg, tcg_target_long *disp)
{
    if (TARGET_LONG_BITS == 32) {
        tgen_ext32u(s, TCG_TMP0, *addr_reg);
        *addr_reg = TCG_TMP0;
    }
    if (GUEST_BASE < 0x80000) {
        *index_reg = TCG_REG_NONE;
        *disp = GUEST_BASE;
    } else {
        *index_reg = TCG_GUEST_BASE_REG;
        *disp = 0;
    }
}
#endif /* CONFIG_SOFTMMU */
/* load data with address translation (if applicable)
   and endianness conversion */
static void tcg_out_qemu_ld(TCGContext* s, const TCGArg* args, int opc)
{
    TCGReg addr_reg, data_reg;
#if defined(CONFIG_SOFTMMU)
    int mem_index;
    uint16_t *label2_ptr;
#else
    TCGReg index_reg;
    tcg_target_long disp;
#endif

    data_reg = args[0];
    addr_reg = args[1];

#if defined(CONFIG_SOFTMMU)
    mem_index = args[2];

    addr_reg = tcg_prepare_qemu_ldst(s, data_reg, addr_reg, mem_index,
                                     opc, &label2_ptr, 0);

    tcg_out_qemu_ld_direct(s, opc, data_reg, addr_reg, TCG_REG_NONE, 0);

    tcg_finish_qemu_ldst(s, label2_ptr);
#else
    tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
    tcg_out_qemu_ld_direct(s, opc, data_reg, addr_reg, index_reg, disp);
#endif
}
static void tcg_out_qemu_st(TCGContext* s, const TCGArg* args, int opc)
{
    TCGReg addr_reg, data_reg;
#if defined(CONFIG_SOFTMMU)
    int mem_index;
    uint16_t *label2_ptr;
#else
    TCGReg index_reg;
    tcg_target_long disp;
#endif

    data_reg = args[0];
    addr_reg = args[1];

#if defined(CONFIG_SOFTMMU)
    mem_index = args[2];

    addr_reg = tcg_prepare_qemu_ldst(s, data_reg, addr_reg, mem_index,
                                     opc, &label2_ptr, 1);

    tcg_out_qemu_st_direct(s, opc, data_reg, addr_reg, TCG_REG_NONE, 0);

    tcg_finish_qemu_ldst(s, label2_ptr);
#else
    tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
    tcg_out_qemu_st_direct(s, opc, data_reg, addr_reg, index_reg, disp);
#endif
}
# define OP_32_64(x) \
        case glue(glue(INDEX_op_,x),_i32): \
        case glue(glue(INDEX_op_,x),_i64)
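/* E.g. OP_32_64(ld8u) expands to
     case INDEX_op_ld8u_i32:
     case INDEX_op_ld8u_i64:
   so one arm of the switch below serves both operand widths.  */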
static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
                const TCGArg *args, const int *const_args)
{
    S390Opcode op;
    TCGArg a0, a1, a2;

    switch (opc) {
    case INDEX_op_exit_tb:
        /* return value */
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, args[0]);
        tgen_gotoi(s, S390_CC_ALWAYS, (unsigned long)tb_ret_addr);
        break;

    case INDEX_op_goto_tb:
        if (s->tb_jmp_offset) {
            tcg_abort();
        } else {
            /* load address stored at s->tb_next + args[0] */
            tcg_out_ld_abs(s, TCG_TYPE_PTR, TCG_TMP0, s->tb_next + args[0]);
            /* and go there */
            tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_TMP0);
        }
        s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
        break;

    case INDEX_op_call:
        if (const_args[0]) {
            tgen_calli(s, args[0]);
        } else {
            tcg_out_insn(s, RR, BASR, TCG_REG_R14, args[0]);
        }
        break;

    case INDEX_op_mov_i32:
        tcg_out_mov(s, TCG_TYPE_I32, args[0], args[1]);
        break;
    case INDEX_op_movi_i32:
        tcg_out_movi(s, TCG_TYPE_I32, args[0], args[1]);
        break;

    OP_32_64(ld8u):
        /* ??? LLC (RXY format) is only present with the extended-immediate
           facility, whereas LLGC is always present.  */
        tcg_out_mem(s, 0, RXY_LLGC, args[0], args[1], TCG_REG_NONE, args[2]);
        break;

    OP_32_64(ld8s):
        /* ??? LB is no smaller than LGB, so no point to using it.  */
        tcg_out_mem(s, 0, RXY_LGB, args[0], args[1], TCG_REG_NONE, args[2]);
        break;

    OP_32_64(ld16u):
        /* ??? LLH (RXY format) is only present with the extended-immediate
           facility, whereas LLGH is always present.  */
        tcg_out_mem(s, 0, RXY_LLGH, args[0], args[1], TCG_REG_NONE, args[2]);
        break;

    case INDEX_op_ld16s_i32:
        tcg_out_mem(s, RX_LH, RXY_LHY, args[0], args[1], TCG_REG_NONE, args[2]);
        break;

    case INDEX_op_ld_i32:
        tcg_out_ld(s, TCG_TYPE_I32, args[0], args[1], args[2]);
        break;

    OP_32_64(st8):
        tcg_out_mem(s, RX_STC, RXY_STCY, args[0], args[1],
                    TCG_REG_NONE, args[2]);
        break;

    OP_32_64(st16):
        tcg_out_mem(s, RX_STH, RXY_STHY, args[0], args[1],
                    TCG_REG_NONE, args[2]);
        break;

    case INDEX_op_st_i32:
        tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
        break;

    case INDEX_op_add_i32:
        a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
        if (const_args[2]) {
        do_addi_32:
            if (a0 == a1) {
                if (a2 == (int16_t)a2) {
                    tcg_out_insn(s, RI, AHI, a0, a2);
                    break;
                }
                if (facilities & FACILITY_EXT_IMM) {
                    tcg_out_insn(s, RIL, AFI, a0, a2);
                    break;
                }
            }
            tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
        } else if (a0 == a1) {
            tcg_out_insn(s, RR, AR, a0, a2);
        } else {
            tcg_out_insn(s, RX, LA, a0, a1, a2, 0);
        }
        break;
    case INDEX_op_sub_i32:
        a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
        if (const_args[2]) {
            a2 = -a2;
            goto do_addi_32;
        }
        tcg_out_insn(s, RR, SR, args[0], args[2]);
        break;

    case INDEX_op_and_i32:
        if (const_args[2]) {
            tgen_andi(s, TCG_TYPE_I32, args[0], args[2]);
        } else {
            tcg_out_insn(s, RR, NR, args[0], args[2]);
        }
        break;
    case INDEX_op_or_i32:
        if (const_args[2]) {
            tgen64_ori(s, args[0], args[2] & 0xffffffff);
        } else {
            tcg_out_insn(s, RR, OR, args[0], args[2]);
        }
        break;
    case INDEX_op_xor_i32:
        if (const_args[2]) {
            tgen64_xori(s, args[0], args[2] & 0xffffffff);
        } else {
            tcg_out_insn(s, RR, XR, args[0], args[2]);
        }
        break;

    case INDEX_op_neg_i32:
        tcg_out_insn(s, RR, LCR, args[0], args[1]);
        break;

    case INDEX_op_mul_i32:
        if (const_args[2]) {
            if ((int32_t)args[2] == (int16_t)args[2]) {
                tcg_out_insn(s, RI, MHI, args[0], args[2]);
            } else {
                tcg_out_insn(s, RIL, MSFI, args[0], args[2]);
            }
        } else {
            tcg_out_insn(s, RRE, MSR, args[0], args[2]);
        }
        break;

    case INDEX_op_div2_i32:
        tcg_out_insn(s, RR, DR, TCG_REG_R2, args[4]);
        break;
    case INDEX_op_divu2_i32:
        tcg_out_insn(s, RRE, DLR, TCG_REG_R2, args[4]);
        break;

    case INDEX_op_shl_i32:
        op = RS_SLL;
    do_shift32:
        if (const_args[2]) {
            tcg_out_sh32(s, op, args[0], TCG_REG_NONE, args[2]);
        } else {
            tcg_out_sh32(s, op, args[0], args[2], 0);
        }
        break;
    case INDEX_op_shr_i32:
        op = RS_SRL;
        goto do_shift32;
    case INDEX_op_sar_i32:
        op = RS_SRA;
        goto do_shift32;

    case INDEX_op_rotl_i32:
        /* ??? Using tcg_out_sh64 here for the format; it is a 32-bit rol.  */
        if (const_args[2]) {
            tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_REG_NONE, args[2]);
        } else {
            tcg_out_sh64(s, RSY_RLL, args[0], args[1], args[2], 0);
        }
        break;
    case INDEX_op_rotr_i32:
        if (const_args[2]) {
            tcg_out_sh64(s, RSY_RLL, args[0], args[1],
                         TCG_REG_NONE, (32 - args[2]) & 31);
        } else {
            tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
            tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_TMP0, 0);
        }
        break;

    case INDEX_op_ext8s_i32:
        tgen_ext8s(s, TCG_TYPE_I32, args[0], args[1]);
        break;
    case INDEX_op_ext16s_i32:
        tgen_ext16s(s, TCG_TYPE_I32, args[0], args[1]);
        break;
    case INDEX_op_ext8u_i32:
        tgen_ext8u(s, TCG_TYPE_I32, args[0], args[1]);
        break;
    case INDEX_op_ext16u_i32:
        tgen_ext16u(s, TCG_TYPE_I32, args[0], args[1]);
        break;

    OP_32_64(bswap16):
        /* The TCG bswap definition requires bits 0-47 already be zero.
           Thus we don't need the G-type insns to implement bswap16_i64.  */
        tcg_out_insn(s, RRE, LRVR, args[0], args[1]);
        tcg_out_sh32(s, RS_SRL, args[0], TCG_REG_NONE, 16);
        break;
    OP_32_64(bswap32):
        tcg_out_insn(s, RRE, LRVR, args[0], args[1]);
        break;

    case INDEX_op_add2_i32:
        /* ??? Make use of ALFI.  */
        tcg_out_insn(s, RR, ALR, args[0], args[4]);
        tcg_out_insn(s, RRE, ALCR, args[1], args[5]);
        break;
    case INDEX_op_sub2_i32:
        /* ??? Make use of SLFI.  */
        tcg_out_insn(s, RR, SLR, args[0], args[4]);
        tcg_out_insn(s, RRE, SLBR, args[1], args[5]);
        break;

    case INDEX_op_br:
        tgen_branch(s, S390_CC_ALWAYS, args[0]);
        break;

    case INDEX_op_brcond_i32:
        tgen_brcond(s, TCG_TYPE_I32, args[2], args[0],
                    args[1], const_args[1], args[3]);
        break;
    case INDEX_op_setcond_i32:
        tgen_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1],
                     args[2], const_args[2]);
        break;
    case INDEX_op_movcond_i32:
        tgen_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1],
                     args[2], const_args[2], args[3]);
        break;

    case INDEX_op_qemu_ld8u:
        tcg_out_qemu_ld(s, args, LD_UINT8);
        break;
    case INDEX_op_qemu_ld8s:
        tcg_out_qemu_ld(s, args, LD_INT8);
        break;
    case INDEX_op_qemu_ld16u:
        tcg_out_qemu_ld(s, args, LD_UINT16);
        break;
    case INDEX_op_qemu_ld16s:
        tcg_out_qemu_ld(s, args, LD_INT16);
        break;
    case INDEX_op_qemu_ld32:
        /* ??? Technically we can use a non-extending instruction.  */
        tcg_out_qemu_ld(s, args, LD_UINT32);
        break;
    case INDEX_op_qemu_ld64:
        tcg_out_qemu_ld(s, args, LD_UINT64);
        break;

    case INDEX_op_qemu_st8:
        tcg_out_qemu_st(s, args, LD_UINT8);
        break;
    case INDEX_op_qemu_st16:
        tcg_out_qemu_st(s, args, LD_UINT16);
        break;
    case INDEX_op_qemu_st32:
        tcg_out_qemu_st(s, args, LD_UINT32);
        break;
    case INDEX_op_qemu_st64:
        tcg_out_qemu_st(s, args, LD_UINT64);
        break;

    case INDEX_op_mov_i64:
        tcg_out_mov(s, TCG_TYPE_I64, args[0], args[1]);
        break;
    case INDEX_op_movi_i64:
        tcg_out_movi(s, TCG_TYPE_I64, args[0], args[1]);
        break;

    case INDEX_op_ld16s_i64:
        tcg_out_mem(s, 0, RXY_LGH, args[0], args[1], TCG_REG_NONE, args[2]);
        break;
    case INDEX_op_ld32u_i64:
        tcg_out_mem(s, 0, RXY_LLGF, args[0], args[1], TCG_REG_NONE, args[2]);
        break;
    case INDEX_op_ld32s_i64:
        tcg_out_mem(s, 0, RXY_LGF, args[0], args[1], TCG_REG_NONE, args[2]);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ld(s, TCG_TYPE_I64, args[0], args[1], args[2]);
        break;

    case INDEX_op_st32_i64:
        tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i64:
        tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]);
        break;

    case INDEX_op_add_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
        do_addi_64:
            if (a0 == a1) {
                if (a2 == (int16_t)a2) {
                    tcg_out_insn(s, RI, AGHI, a0, a2);
                    break;
                }
                if (facilities & FACILITY_EXT_IMM) {
                    if (a2 == (int32_t)a2) {
                        tcg_out_insn(s, RIL, AGFI, a0, a2);
                        break;
                    } else if (a2 == (uint32_t)a2) {
                        tcg_out_insn(s, RIL, ALGFI, a0, a2);
                        break;
                    } else if (-a2 == (uint32_t)-a2) {
                        tcg_out_insn(s, RIL, SLGFI, a0, -a2);
                        break;
                    }
                }
            }
            tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
        } else if (a0 == a1) {
            tcg_out_insn(s, RRE, AGR, a0, a2);
        } else {
            tcg_out_insn(s, RX, LA, a0, a1, a2, 0);
        }
        break;
    case INDEX_op_sub_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            a2 = -a2;
            goto do_addi_64;
        }
        tcg_out_insn(s, RRE, SGR, args[0], args[2]);
        break;

    case INDEX_op_and_i64:
        if (const_args[2]) {
            tgen_andi(s, TCG_TYPE_I64, args[0], args[2]);
        } else {
            tcg_out_insn(s, RRE, NGR, args[0], args[2]);
        }
        break;
    case INDEX_op_or_i64:
        if (const_args[2]) {
            tgen64_ori(s, args[0], args[2]);
        } else {
            tcg_out_insn(s, RRE, OGR, args[0], args[2]);
        }
        break;
    case INDEX_op_xor_i64:
        if (const_args[2]) {
            tgen64_xori(s, args[0], args[2]);
        } else {
            tcg_out_insn(s, RRE, XGR, args[0], args[2]);
        }
        break;

    case INDEX_op_neg_i64:
        tcg_out_insn(s, RRE, LCGR, args[0], args[1]);
        break;
    case INDEX_op_bswap64_i64:
        tcg_out_insn(s, RRE, LRVGR, args[0], args[1]);
        break;

    case INDEX_op_mul_i64:
        if (const_args[2]) {
            if (args[2] == (int16_t)args[2]) {
                tcg_out_insn(s, RI, MGHI, args[0], args[2]);
            } else {
                tcg_out_insn(s, RIL, MSGFI, args[0], args[2]);
            }
        } else {
            tcg_out_insn(s, RRE, MSGR, args[0], args[2]);
        }
        break;

    case INDEX_op_div2_i64:
        /* ??? We get an unnecessary sign-extension of the dividend
           into R3 with this definition, but as we do in fact always
           produce both quotient and remainder using INDEX_op_div_i64
           instead requires jumping through even more hoops.  */
        tcg_out_insn(s, RRE, DSGR, TCG_REG_R2, args[4]);
        break;
    case INDEX_op_divu2_i64:
        tcg_out_insn(s, RRE, DLGR, TCG_REG_R2, args[4]);
        break;
    case INDEX_op_mulu2_i64:
        tcg_out_insn(s, RRE, MLGR, TCG_REG_R2, args[3]);
        break;

    case INDEX_op_shl_i64:
        op = RSY_SLLG;
    do_shift64:
        if (const_args[2]) {
            tcg_out_sh64(s, op, args[0], args[1], TCG_REG_NONE, args[2]);
        } else {
            tcg_out_sh64(s, op, args[0], args[1], args[2], 0);
        }
        break;
    case INDEX_op_shr_i64:
        op = RSY_SRLG;
        goto do_shift64;
    case INDEX_op_sar_i64:
        op = RSY_SRAG;
        goto do_shift64;

    case INDEX_op_rotl_i64:
        if (const_args[2]) {
            tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
                         TCG_REG_NONE, args[2]);
        } else {
            tcg_out_sh64(s, RSY_RLLG, args[0], args[1], args[2], 0);
        }
        break;
    case INDEX_op_rotr_i64:
        if (const_args[2]) {
            tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
                         TCG_REG_NONE, (64 - args[2]) & 63);
        } else {
            /* We can use the smaller 32-bit negate because only the
               low 6 bits are examined for the rotate.  */
            tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
            tcg_out_sh64(s, RSY_RLLG, args[0], args[1], TCG_TMP0, 0);
        }
        break;

    case INDEX_op_ext8s_i64:
        tgen_ext8s(s, TCG_TYPE_I64, args[0], args[1]);
        break;
    case INDEX_op_ext16s_i64:
        tgen_ext16s(s, TCG_TYPE_I64, args[0], args[1]);
        break;
    case INDEX_op_ext32s_i64:
        tgen_ext32s(s, args[0], args[1]);
        break;
    case INDEX_op_ext8u_i64:
        tgen_ext8u(s, TCG_TYPE_I64, args[0], args[1]);
        break;
    case INDEX_op_ext16u_i64:
        tgen_ext16u(s, TCG_TYPE_I64, args[0], args[1]);
        break;
    case INDEX_op_ext32u_i64:
        tgen_ext32u(s, args[0], args[1]);
        break;

    case INDEX_op_add2_i64:
        /* ??? Make use of ALGFI and SLGFI.  */
        tcg_out_insn(s, RRE, ALGR, args[0], args[4]);
        tcg_out_insn(s, RRE, ALCGR, args[1], args[5]);
        break;
    case INDEX_op_sub2_i64:
        /* ??? Make use of ALGFI and SLGFI.  */
        tcg_out_insn(s, RRE, SLGR, args[0], args[4]);
        tcg_out_insn(s, RRE, SLBGR, args[1], args[5]);
        break;

    case INDEX_op_brcond_i64:
        tgen_brcond(s, TCG_TYPE_I64, args[2], args[0],
                    args[1], const_args[1], args[3]);
        break;
    case INDEX_op_setcond_i64:
        tgen_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1],
                     args[2], const_args[2]);
        break;
    case INDEX_op_movcond_i64:
        tgen_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1],
                     args[2], const_args[2], args[3]);
        break;

    case INDEX_op_qemu_ld32u:
        tcg_out_qemu_ld(s, args, LD_UINT32);
        break;
    case INDEX_op_qemu_ld32s:
        tcg_out_qemu_ld(s, args, LD_INT32);
        break;

    OP_32_64(deposit):
        tgen_deposit(s, args[0], args[2], args[3], args[4]);
        break;

    default:
        fprintf(stderr, "unimplemented opc 0x%x\n", opc);
        tcg_abort();
    }
}
static const TCGTargetOpDef s390_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_call, { "ri" } },
    { INDEX_op_br, { } },

    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },

    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "r", "r" } },
    { INDEX_op_st16_i32, { "r", "r" } },
    { INDEX_op_st_i32, { "r", "r" } },

    { INDEX_op_add_i32, { "r", "r", "ri" } },
    { INDEX_op_sub_i32, { "r", "0", "ri" } },
    { INDEX_op_mul_i32, { "r", "0", "rK" } },

    { INDEX_op_div2_i32, { "b", "a", "0", "1", "r" } },
    { INDEX_op_divu2_i32, { "b", "a", "0", "1", "r" } },

    { INDEX_op_and_i32, { "r", "0", "ri" } },
    { INDEX_op_or_i32, { "r", "0", "rWO" } },
    { INDEX_op_xor_i32, { "r", "0", "rWX" } },

    { INDEX_op_neg_i32, { "r", "r" } },

    { INDEX_op_shl_i32, { "r", "0", "Ri" } },
    { INDEX_op_shr_i32, { "r", "0", "Ri" } },
    { INDEX_op_sar_i32, { "r", "0", "Ri" } },

    { INDEX_op_rotl_i32, { "r", "r", "Ri" } },
    { INDEX_op_rotr_i32, { "r", "r", "Ri" } },

    { INDEX_op_ext8s_i32, { "r", "r" } },
    { INDEX_op_ext8u_i32, { "r", "r" } },
    { INDEX_op_ext16s_i32, { "r", "r" } },
    { INDEX_op_ext16u_i32, { "r", "r" } },

    { INDEX_op_bswap16_i32, { "r", "r" } },
    { INDEX_op_bswap32_i32, { "r", "r" } },

    { INDEX_op_add2_i32, { "r", "r", "0", "1", "r", "r" } },
    { INDEX_op_sub2_i32, { "r", "r", "0", "1", "r", "r" } },

    { INDEX_op_brcond_i32, { "r", "rWC" } },
    { INDEX_op_setcond_i32, { "r", "r", "rWC" } },
    { INDEX_op_movcond_i32, { "r", "r", "rWC", "r", "0" } },
    { INDEX_op_deposit_i32, { "r", "0", "r" } },

    { INDEX_op_qemu_ld8u, { "r", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L" } },
    { INDEX_op_qemu_ld64, { "r", "L" } },

    { INDEX_op_qemu_st8, { "L", "L" } },
    { INDEX_op_qemu_st16, { "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L" } },

    { INDEX_op_mov_i64, { "r", "r" } },
    { INDEX_op_movi_i64, { "r" } },

    { INDEX_op_ld8u_i64, { "r", "r" } },
    { INDEX_op_ld8s_i64, { "r", "r" } },
    { INDEX_op_ld16u_i64, { "r", "r" } },
    { INDEX_op_ld16s_i64, { "r", "r" } },
    { INDEX_op_ld32u_i64, { "r", "r" } },
    { INDEX_op_ld32s_i64, { "r", "r" } },
    { INDEX_op_ld_i64, { "r", "r" } },

    { INDEX_op_st8_i64, { "r", "r" } },
    { INDEX_op_st16_i64, { "r", "r" } },
    { INDEX_op_st32_i64, { "r", "r" } },
    { INDEX_op_st_i64, { "r", "r" } },

    { INDEX_op_add_i64, { "r", "r", "ri" } },
    { INDEX_op_sub_i64, { "r", "0", "ri" } },
    { INDEX_op_mul_i64, { "r", "0", "rK" } },

    { INDEX_op_div2_i64, { "b", "a", "0", "1", "r" } },
    { INDEX_op_divu2_i64, { "b", "a", "0", "1", "r" } },
    { INDEX_op_mulu2_i64, { "b", "a", "0", "r" } },

    { INDEX_op_and_i64, { "r", "0", "ri" } },
    { INDEX_op_or_i64, { "r", "0", "rO" } },
    { INDEX_op_xor_i64, { "r", "0", "rX" } },

    { INDEX_op_neg_i64, { "r", "r" } },

    { INDEX_op_shl_i64, { "r", "r", "Ri" } },
    { INDEX_op_shr_i64, { "r", "r", "Ri" } },
    { INDEX_op_sar_i64, { "r", "r", "Ri" } },

    { INDEX_op_rotl_i64, { "r", "r", "Ri" } },
    { INDEX_op_rotr_i64, { "r", "r", "Ri" } },

    { INDEX_op_ext8s_i64, { "r", "r" } },
    { INDEX_op_ext8u_i64, { "r", "r" } },
    { INDEX_op_ext16s_i64, { "r", "r" } },
    { INDEX_op_ext16u_i64, { "r", "r" } },
    { INDEX_op_ext32s_i64, { "r", "r" } },
    { INDEX_op_ext32u_i64, { "r", "r" } },

    { INDEX_op_bswap16_i64, { "r", "r" } },
    { INDEX_op_bswap32_i64, { "r", "r" } },
    { INDEX_op_bswap64_i64, { "r", "r" } },

    { INDEX_op_add2_i64, { "r", "r", "0", "1", "r", "r" } },
    { INDEX_op_sub2_i64, { "r", "r", "0", "1", "r", "r" } },

    { INDEX_op_brcond_i64, { "r", "rC" } },
    { INDEX_op_setcond_i64, { "r", "r", "rC" } },
    { INDEX_op_movcond_i64, { "r", "r", "rC", "r", "0" } },
    { INDEX_op_deposit_i64, { "r", "0", "r" } },

    { INDEX_op_qemu_ld32u, { "r", "L" } },
    { INDEX_op_qemu_ld32s, { "r", "L" } },

    { -1 },
};
/* ??? Linux kernels provide an AUXV entry AT_HWCAP that provides most of
   this information.  However, getting at that entry is not easy this far
   away from main.  Our options are: start searching from environ, but
   that fails as soon as someone does a setenv in between.  Read the data
   from /proc/self/auxv.  Or do the probing ourselves.  The only thing
   extra that AT_HWCAP gives us is HWCAP_S390_HIGH_GPRS, which indicates
   that the kernel saves all 64 bits of the registers around traps while
   in 31-bit mode.  But this is true of all "recent" kernels (ought to dig
   back and see from when this might not be true).  */

#include <signal.h>

static volatile sig_atomic_t got_sigill;
static void sigill_handler(int sig)
{
    got_sigill = 1;
}
static void query_facilities(void)
{
    struct sigaction sa_old, sa_new;
    register int r0 __asm__("0");
    register void *r1 __asm__("1");
    int fail;

    memset(&sa_new, 0, sizeof(sa_new));
    sa_new.sa_handler = sigill_handler;
    sigaction(SIGILL, &sa_new, &sa_old);

    /* First, try STORE FACILITY LIST EXTENDED.  If this is present, then
       we need not do any more probing.  Unfortunately, this itself is an
       extension and the original STORE FACILITY LIST instruction is
       kernel-only, storing its results at absolute address 200.  */
    /* stfle 0(%r1) */
    r1 = &facilities;
    asm volatile(".word 0xb2b0,0x1000"
                 : "=r"(r0) : "0"(0), "r"(r1) : "memory", "cc");

    if (got_sigill) {
        /* STORE FACILITY LIST EXTENDED is not available.  Probe for one of
           each kind of instruction that we're interested in.  */
        /* ??? Possibly some of these are in practice never present unless
           the store-facility-extended facility is also present.  But since
           that isn't documented it's just better to probe for each.  */

        /* Test for z/Architecture.  Required even in 31-bit mode.  */
        got_sigill = 0;
        /* agr %r0,%r0 */
        asm volatile(".word 0xb908,0x0000" : "=r"(r0) : : "cc");
        if (!got_sigill) {
            facilities |= FACILITY_ZARCH_ACTIVE;
        }

        /* Test for long displacement.  */
        got_sigill = 0;
        /* ly %r0,0(%r1) */
        r1 = &facilities;
        asm volatile(".word 0xe300,0x1000,0x0058"
                     : "=r"(r0) : "r"(r1) : "cc");
        if (!got_sigill) {
            facilities |= FACILITY_LONG_DISP;
        }

        /* Test for extended immediates.  */
        got_sigill = 0;
        /* afi %r0,0 */
        asm volatile(".word 0xc209,0x0000,0x0000" : : : "cc");
        if (!got_sigill) {
            facilities |= FACILITY_EXT_IMM;
        }

        /* Test for general-instructions-extension.  */
        got_sigill = 0;
        /* msfi %r0,1 */
        asm volatile(".word 0xc201,0x0000,0x0001");
        if (!got_sigill) {
            facilities |= FACILITY_GEN_INST_EXT;
        }
    }

    sigaction(SIGILL, &sa_old, NULL);

    /* The translator currently uses these extensions unconditionally.
       Pruning this back to the base ESA/390 architecture doesn't seem
       worthwhile, since even the KVM target requires z/Arch.  */
    fail = 0;
    if ((facilities & FACILITY_ZARCH_ACTIVE) == 0) {
        fprintf(stderr, "TCG: z/Arch facility is required.\n");
        fprintf(stderr, "TCG: Boot with a 64-bit enabled kernel.\n");
        fail = 1;
    }
    if ((facilities & FACILITY_LONG_DISP) == 0) {
        fprintf(stderr, "TCG: long-displacement facility is required.\n");
        fail = 1;
    }

    /* So far there's just enough support for 31-bit mode to let the
       compile succeed.  This is good enough to run QEMU with KVM.  */
    if (sizeof(void *) != 8) {
        fprintf(stderr, "TCG: 31-bit mode is not supported.\n");
        fail = 1;
    }

    if (fail) {
        exit(-1);
    }
}
static void tcg_target_init(TCGContext *s)
{
#if !defined(CONFIG_USER_ONLY)
    /* fail safe */
    if ((1 << CPU_TLB_ENTRY_BITS) != sizeof(CPUTLBEntry)) {
        tcg_abort();
    }
#endif

    query_facilities();

    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff);
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffff);

    tcg_regset_clear(tcg_target_call_clobber_regs);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R5);
    /* The return register can be considered call-clobbered.  */
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14);

    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_TMP0);
    /* XXX many insns can't be used with R0, so we better avoid it for now */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);

    tcg_add_target_add_op_defs(s390_op_defs);
}
static void tcg_target_qemu_prologue(TCGContext *s)
{
    tcg_target_long frame_size;

    /* stmg %r6,%r15,48(%r15) (save registers) */
    tcg_out_insn(s, RXY, STMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15, 48);

    /* aghi %r15,-frame_size */
    frame_size = TCG_TARGET_CALL_STACK_OFFSET;
    frame_size += TCG_STATIC_CALL_ARGS_SIZE;
    frame_size += CPU_TEMP_BUF_NLONGS * sizeof(long);
    tcg_out_insn(s, RI, AGHI, TCG_REG_R15, -frame_size);

    tcg_set_frame(s, TCG_REG_CALL_STACK,
                  TCG_STATIC_CALL_ARGS_SIZE + TCG_TARGET_CALL_STACK_OFFSET,
                  CPU_TEMP_BUF_NLONGS * sizeof(long));

    if (GUEST_BASE >= 0x80000) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
    /* br %r3 (go to TB) */
    tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, tcg_target_call_iarg_regs[1]);

    tb_ret_addr = s->code_ptr;

    /* lmg %r6,%r15,fs+48(%r15) (restore registers) */
    tcg_out_insn(s, RXY, LMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15,
                 frame_size + 48);

    /* br %r14 (return) */
    tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_REG_R14);
}