/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2018 SiFive, Inc
 * Copyright (c) 2008-2009 Arnaud Patard <arnaud.patard@rtp-net.org>
 * Copyright (c) 2009 Aurelien Jarno <aurelien@aurel32.net>
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Based on i386/tcg-target.c and mips/tcg-target.c
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "../tcg-pool.inc.c"

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "zero", "ra", "sp",  "gp",  "tp", "t0", "t1", "t2",
    "s0",   "s1", "a0",  "a1",  "a2", "a3", "a4", "a5",
    "a6",   "a7", "s2",  "s3",  "s4", "s5", "s6", "s7",
    "s8",   "s9", "s10", "s11", "t3", "t4", "t5", "t6"
};
#endif

static const int tcg_target_reg_alloc_order[] = {
    /* Call saved registers */
    /* TCG_REG_S0 reserved for TCG_AREG0 */
    /* ... */
    /* Call clobbered registers */
    /* ... */
    /* Argument registers */
    /* ... */
};

static const int tcg_target_call_iarg_regs[] = {
    TCG_REG_A0, TCG_REG_A1, TCG_REG_A2, TCG_REG_A3,
    TCG_REG_A4, TCG_REG_A5, TCG_REG_A6, TCG_REG_A7,
};

static const int tcg_target_call_oarg_regs[] = {
    TCG_REG_A0, TCG_REG_A1,
};

#define TCG_CT_CONST_ZERO  0x100
#define TCG_CT_CONST_S12   0x200
#define TCG_CT_CONST_N12   0x400
#define TCG_CT_CONST_M12   0x800
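
/*
 * sextreg() below extracts a 'len'-bit field starting at 'pos' and
 * sign-extends it to tcg_target_long.  For example, sextreg(0xfff, 0, 12)
 * yields -1 while sextreg(0x7ff, 0, 12) yields 0x7ff, which is how the
 * "fits in a signed 12-bit immediate" checks throughout this file work.
 */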

static inline tcg_target_long sextreg(tcg_target_long val, int pos, int len)
{
    if (TCG_TARGET_REG_BITS == 32) {
        return sextract32(val, pos, len);
    } else {
        return sextract64(val, pos, len);
    }
}

/* parse target specific constraints */
static const char *target_parse_constraint(TCGArgConstraint *ct,
                                           const char *ct_str, TCGType type)
{
    switch (*ct_str++) {
    case 'r':
        ct->ct |= TCG_CT_REG;
        ct->u.regs = 0xffffffff;
        break;
    case 'L':
        /* qemu_ld/qemu_st constraint */
        ct->ct |= TCG_CT_REG;
        ct->u.regs = 0xffffffff;
        /* qemu_ld/qemu_st uses TCG_REG_TMP0 */
#if defined(CONFIG_SOFTMMU)
        tcg_regset_reset_reg(ct->u.regs, tcg_target_call_iarg_regs[0]);
        tcg_regset_reset_reg(ct->u.regs, tcg_target_call_iarg_regs[1]);
        tcg_regset_reset_reg(ct->u.regs, tcg_target_call_iarg_regs[2]);
        tcg_regset_reset_reg(ct->u.regs, tcg_target_call_iarg_regs[3]);
        tcg_regset_reset_reg(ct->u.regs, tcg_target_call_iarg_regs[4]);
#endif
        break;
    case 'I':
        ct->ct |= TCG_CT_CONST_S12;
        break;
    case 'N':
        ct->ct |= TCG_CT_CONST_N12;
        break;
    case 'M':
        ct->ct |= TCG_CT_CONST_M12;
        break;
    case 'Z':
        /* we can use a zero immediate as a zero register argument. */
        ct->ct |= TCG_CT_CONST_ZERO;
        break;
    default:
        return NULL;
    }
    return ct_str;
}

/* test if a constant matches the constraint */
static int tcg_target_const_match(tcg_target_long val, TCGType type,
                                  const TCGArgConstraint *arg_ct)
{
    int ct = arg_ct->ct;

    if (ct & TCG_CT_CONST) {
        return 1;
    }
    if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    }
    if ((ct & TCG_CT_CONST_S12) && val == sextreg(val, 0, 12)) {
        return 1;
    }
    if ((ct & TCG_CT_CONST_N12) && -val == sextreg(-val, 0, 12)) {
        return 1;
    }
    if ((ct & TCG_CT_CONST_M12) && val >= -0xfff && val <= 0xfff) {
        return 1;
    }
    return 0;
}

/*
 * RISC-V Base ISA opcodes (IM)
 */

typedef enum {
    /* ... */
    OPC_DIVU = 0x2005033,
    OPC_MULH = 0x2001033,
    OPC_MULHSU = 0x2002033,
    OPC_MULHU = 0x2003033,
    OPC_REMU = 0x2007033,
    OPC_SRA = 0x40005033,
    OPC_SRAI = 0x40005013,
    OPC_SUB = 0x40000033,
    /* ... */

#if TCG_TARGET_REG_BITS == 64
    OPC_DIVUW = 0x200503b,
    OPC_DIVW = 0x200403b,
    OPC_MULW = 0x200003b,
    OPC_REMUW = 0x200703b,
    OPC_REMW = 0x200603b,
    OPC_SRAIW = 0x4000501b,
    OPC_SRAW = 0x4000503b,
    OPC_SUBW = 0x4000003b,
    /* ... */
#else
    /* Simplify code throughout by defining aliases for RV32. */
    OPC_ADDIW = OPC_ADDI,
    OPC_DIVUW = OPC_DIVU,
    OPC_REMUW = OPC_REMU,
    OPC_SLLIW = OPC_SLLI,
    OPC_SRAIW = OPC_SRAI,
    OPC_SRLIW = OPC_SRLI,
    /* ... */
#endif

    OPC_FENCE = 0x0000000f,
} RISCVInsn;
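
/*
 * Worked example of the opcode constants above: OPC_SUB is 0x40000033,
 * i.e. funct7 = 0x20 (bits 31:25), funct3 = 0 (bits 14:12) and the OP
 * major opcode 0x33 (bits 6:0); the register fields are still zero and
 * are filled in later by encode_r().
 */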

/*
 * RISC-V immediate and instruction encoders (excludes 16-bit RVC)
 */

/* Type-R */
static int32_t encode_r(RISCVInsn opc, TCGReg rd, TCGReg rs1, TCGReg rs2)
{
    return opc | (rd & 0x1f) << 7 | (rs1 & 0x1f) << 15 | (rs2 & 0x1f) << 20;
}

/* Type-I */
static int32_t encode_imm12(uint32_t imm)
{
    return (imm & 0xfff) << 20;
}

static int32_t encode_i(RISCVInsn opc, TCGReg rd, TCGReg rs1, uint32_t imm)
{
    return opc | (rd & 0x1f) << 7 | (rs1 & 0x1f) << 15 | encode_imm12(imm);
}

/* Type-S */
static int32_t encode_simm12(uint32_t imm)
{
    int32_t ret = 0;

    ret |= (imm & 0xFE0) << 20;
    ret |= (imm & 0x1F) << 7;

    return ret;
}

static int32_t encode_s(RISCVInsn opc, TCGReg rs1, TCGReg rs2, uint32_t imm)
{
    return opc | (rs1 & 0x1f) << 15 | (rs2 & 0x1f) << 20 | encode_simm12(imm);
}

/* Type-SB */
static int32_t encode_sbimm12(uint32_t imm)
{
    int32_t ret = 0;

    ret |= (imm & 0x1000) << 19;
    ret |= (imm & 0x7e0) << 20;
    ret |= (imm & 0x1e) << 7;
    ret |= (imm & 0x800) >> 4;

    return ret;
}

static int32_t encode_sb(RISCVInsn opc, TCGReg rs1, TCGReg rs2, uint32_t imm)
{
    return opc | (rs1 & 0x1f) << 15 | (rs2 & 0x1f) << 20 | encode_sbimm12(imm);
}

/* Type-U */
static int32_t encode_uimm20(uint32_t imm)
{
    return imm & 0xfffff000;
}

static int32_t encode_u(RISCVInsn opc, TCGReg rd, uint32_t imm)
{
    return opc | (rd & 0x1f) << 7 | encode_uimm20(imm);
}

/* Type-UJ */
static int32_t encode_ujimm20(uint32_t imm)
{
    int32_t ret = 0;

    ret |= (imm & 0x0007fe) << (21 - 1);
    ret |= (imm & 0x000800) << (20 - 11);
    ret |= (imm & 0x0ff000) << (12 - 12);
    ret |= (imm & 0x100000) << (31 - 20);

    return ret;
}

static int32_t encode_uj(RISCVInsn opc, TCGReg rd, uint32_t imm)
{
    return opc | (rd & 0x1f) << 7 | encode_ujimm20(imm);
}
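
/*
 * The branch/jump immediates above are scattered to match the ISA layout.
 * For a B-type offset, encode_sbimm12() places imm[12] in bit 31,
 * imm[10:5] in bits 30:25, imm[4:1] in bits 11:8 and imm[11] in bit 7;
 * e.g. an offset of +8 encodes as 0x00000400.  encode_ujimm20() performs
 * the equivalent shuffle for the 21-bit J-type offset.
 */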

/*
 * RISC-V instruction emitters
 */

static void tcg_out_opc_reg(TCGContext *s, RISCVInsn opc,
                            TCGReg rd, TCGReg rs1, TCGReg rs2)
{
    tcg_out32(s, encode_r(opc, rd, rs1, rs2));
}

static void tcg_out_opc_imm(TCGContext *s, RISCVInsn opc,
                            TCGReg rd, TCGReg rs1, TCGArg imm)
{
    tcg_out32(s, encode_i(opc, rd, rs1, imm));
}

static void tcg_out_opc_store(TCGContext *s, RISCVInsn opc,
                              TCGReg rs1, TCGReg rs2, uint32_t imm)
{
    tcg_out32(s, encode_s(opc, rs1, rs2, imm));
}

static void tcg_out_opc_branch(TCGContext *s, RISCVInsn opc,
                               TCGReg rs1, TCGReg rs2, uint32_t imm)
{
    tcg_out32(s, encode_sb(opc, rs1, rs2, imm));
}

static void tcg_out_opc_upper(TCGContext *s, RISCVInsn opc,
                              TCGReg rd, uint32_t imm)
{
    tcg_out32(s, encode_u(opc, rd, imm));
}

static void tcg_out_opc_jump(TCGContext *s, RISCVInsn opc,
                             TCGReg rd, uint32_t imm)
{
    tcg_out32(s, encode_uj(opc, rd, imm));
}

static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    int i;

    for (i = 0; i < count; ++i) {
        p[i] = encode_i(OPC_ADDI, TCG_REG_ZERO, TCG_REG_ZERO, 0);
    }
}
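
/*
 * Note that the canonical RISC-V NOP used for padding above is
 * "addi zero, zero, 0", which encodes as 0x00000013.
 */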

static bool reloc_sbimm12(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
{
    intptr_t offset = (intptr_t)target - (intptr_t)code_ptr;

    if (offset == sextreg(offset, 1, 12) << 1) {
        code_ptr[0] |= encode_sbimm12(offset);
        return true;
    }

    return false;
}

static bool reloc_jimm20(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
{
    intptr_t offset = (intptr_t)target - (intptr_t)code_ptr;

    if (offset == sextreg(offset, 1, 20) << 1) {
        code_ptr[0] |= encode_ujimm20(offset);
        return true;
    }

    return false;
}

static bool reloc_call(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
{
    intptr_t offset = (intptr_t)target - (intptr_t)code_ptr;
    int32_t lo = sextreg(offset, 0, 12);
    int32_t hi = offset - lo;

    if (offset == hi + lo) {
        code_ptr[0] |= encode_uimm20(hi);
        code_ptr[1] |= encode_imm12(lo);
        return true;
    }

    return false;
}
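
/*
 * reloc_call() patches an AUIPC + ADDI (or JALR/LD) pair.  Because the low
 * 12 bits are sign-extended by the second instruction, the high part must
 * compensate: for offset 0x1800, lo = -0x800 and hi = 0x2000, so AUIPC
 * adds 0x2000 and the following instruction then subtracts 0x800.
 */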

static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    uint32_t insn = *code_ptr;
    intptr_t diff;
    bool short_jmp;

    tcg_debug_assert(addend == 0);

    switch (type) {
    case R_RISCV_BRANCH:
        diff = value - (uintptr_t)code_ptr;
        short_jmp = diff == sextreg(diff, 0, 12);
        if (short_jmp) {
            return reloc_sbimm12(code_ptr, (tcg_insn_unit *)value);
        } else {
            /* Invert the condition */
            insn = insn ^ (1 << 12);
            /* Clear the offset */
            insn &= 0x01fff07f;
            /* Set the offset to the PC + 8 */
            insn |= encode_sbimm12(8);
            code_ptr[0] = insn;

            /* Overwrite the NOP with jal x0,value */
            diff = value - (uintptr_t)(code_ptr + 1);
            insn = encode_uj(OPC_JAL, TCG_REG_ZERO, diff);
            code_ptr[1] = insn;
        }
        break;
    case R_RISCV_JAL:
        return reloc_jimm20(code_ptr, (tcg_insn_unit *)value);
    case R_RISCV_CALL:
        return reloc_call(code_ptr, (tcg_insn_unit *)value);
    default:
        tcg_abort();
    }

    return true;
}

static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    switch (type) {
    case TCG_TYPE_I32:
    case TCG_TYPE_I64:
        tcg_out_opc_imm(s, OPC_ADDI, ret, arg, 0);
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}

static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
                         tcg_target_long val)
{
    tcg_target_long lo, hi, tmp;
    int shift;
    bool ret;

    if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    lo = sextreg(val, 0, 12);
    if (val == lo) {
        tcg_out_opc_imm(s, OPC_ADDI, rd, TCG_REG_ZERO, lo);
        return;
    }

    hi = val - lo;
    if (TCG_TARGET_REG_BITS == 32 || val == (int32_t)val) {
        tcg_out_opc_upper(s, OPC_LUI, rd, hi);
        if (lo != 0) {
            tcg_out_opc_imm(s, OPC_ADDIW, rd, rd, lo);
        }
        return;
    }

    /* We can only be here if TCG_TARGET_REG_BITS != 32 */
    tmp = tcg_pcrel_diff(s, (void *)val);
    if (tmp == (int32_t)tmp) {
        tcg_out_opc_upper(s, OPC_AUIPC, rd, 0);
        tcg_out_opc_imm(s, OPC_ADDI, rd, rd, 0);
        ret = reloc_call(s->code_ptr - 2, (tcg_insn_unit *)val);
        tcg_debug_assert(ret == true);
        return;
    }

    /* Look for a single 20-bit section.  */
    shift = ctz64(val);
    tmp = val >> shift;
    if (tmp == sextreg(tmp, 0, 20)) {
        tcg_out_opc_upper(s, OPC_LUI, rd, tmp << 12);
        if (shift > 12) {
            tcg_out_opc_imm(s, OPC_SLLI, rd, rd, shift - 12);
        } else {
            tcg_out_opc_imm(s, OPC_SRAI, rd, rd, 12 - shift);
        }
        return;
    }

    /* Look for a few high zero bits, with lots of bits set in the middle. */
    shift = clz64(val);
    tmp = val << shift;
    if (tmp == sextreg(tmp, 12, 20) << 12) {
        tcg_out_opc_upper(s, OPC_LUI, rd, tmp);
        tcg_out_opc_imm(s, OPC_SRLI, rd, rd, shift);
        return;
    } else if (tmp == sextreg(tmp, 0, 12)) {
        tcg_out_opc_imm(s, OPC_ADDI, rd, TCG_REG_ZERO, tmp);
        tcg_out_opc_imm(s, OPC_SRLI, rd, rd, shift);
        return;
    }

    /* Drop into the constant pool. */
    new_pool_label(s, val, R_RISCV_CALL, s->code_ptr, 0);
    tcg_out_opc_upper(s, OPC_AUIPC, rd, 0);
    tcg_out_opc_imm(s, OPC_LD, rd, rd, 0);
}
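
/*
 * Example of the LUI+ADDIW path above: for val = 0x12345678, lo = 0x678
 * and hi = 0x12345000; LUI materialises the upper bits and ADDIW adds the
 * low 12 bits.  Values that fit none of the patterns fall through to the
 * constant pool and are loaded PC-relative via AUIPC+LD.
 */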

static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_ANDI, ret, arg, 0xff);
}

static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 16);
    tcg_out_opc_imm(s, OPC_SRLIW, ret, ret, 16);
}

static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_SLLI, ret, arg, 32);
    tcg_out_opc_imm(s, OPC_SRLI, ret, ret, 32);
}

static void tcg_out_ext8s(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 24);
    tcg_out_opc_imm(s, OPC_SRAIW, ret, ret, 24);
}

static void tcg_out_ext16s(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 16);
    tcg_out_opc_imm(s, OPC_SRAIW, ret, ret, 16);
}

static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_ADDIW, ret, arg, 0);
}

static void tcg_out_ldst(TCGContext *s, RISCVInsn opc, TCGReg data,
                         TCGReg addr, intptr_t offset)
{
    intptr_t imm12 = sextreg(offset, 0, 12);

    if (offset != imm12) {
        intptr_t diff = offset - (uintptr_t)s->code_ptr;

        if (addr == TCG_REG_ZERO && diff == (int32_t)diff) {
            imm12 = sextreg(diff, 0, 12);
            tcg_out_opc_upper(s, OPC_AUIPC, TCG_REG_TMP2, diff - imm12);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP2, offset - imm12);
            if (addr != TCG_REG_ZERO) {
                tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, addr);
            }
        }
        addr = TCG_REG_TMP2;
    }

    switch (opc) {
    case OPC_SB:
    case OPC_SH:
    case OPC_SW:
    case OPC_SD:
        tcg_out_opc_store(s, opc, addr, data, imm12);
        break;
    case OPC_LB:
    case OPC_LBU:
    case OPC_LH:
    case OPC_LHU:
    case OPC_LW:
    case OPC_LWU:
    case OPC_LD:
        tcg_out_opc_imm(s, opc, data, addr, imm12);
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    bool is32bit = (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32);
    tcg_out_ldst(s, is32bit ? OPC_LW : OPC_LD, arg, arg1, arg2);
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    bool is32bit = (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32);
    tcg_out_ldst(s, is32bit ? OPC_SW : OPC_SD, arg, arg1, arg2);
}

static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs)
{
    if (val == 0) {
        tcg_out_st(s, type, TCG_REG_ZERO, base, ofs);
        return true;
    }
    return false;
}

static void tcg_out_addsub2(TCGContext *s,
                            TCGReg rl, TCGReg rh,
                            TCGReg al, TCGReg ah,
                            TCGArg bl, TCGArg bh,
                            bool cbl, bool cbh, bool is_sub, bool is32bit)
{
    const RISCVInsn opc_add = is32bit ? OPC_ADDW : OPC_ADD;
    const RISCVInsn opc_addi = is32bit ? OPC_ADDIW : OPC_ADDI;
    const RISCVInsn opc_sub = is32bit ? OPC_SUBW : OPC_SUB;
    TCGReg th = TCG_REG_TMP1;

    /* If we have a negative constant such that negating it would
       make the high part zero, we can (usually) eliminate one insn. */
    if (cbl && cbh && bh == -1 && bl != 0) {
        bl = -bl;
        bh = 0;
        is_sub = !is_sub;
    }

    /* By operating on the high part first, we get to use the final
       carry operation to move back from the temporary. */
    if (!cbh) {
        tcg_out_opc_reg(s, (is_sub ? opc_sub : opc_add), th, ah, bh);
    } else if (bh != 0 || ah == rl) {
        tcg_out_opc_imm(s, opc_addi, th, ah, (is_sub ? -bh : bh));
    } else {
        th = ah;
    }

    /* Note that tcg optimization should eliminate the bl == 0 case. */
    if (is_sub) {
        if (cbl) {
            tcg_out_opc_imm(s, OPC_SLTIU, TCG_REG_TMP0, al, bl);
            tcg_out_opc_imm(s, opc_addi, rl, al, -bl);
        } else {
            tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_TMP0, al, bl);
            tcg_out_opc_reg(s, opc_sub, rl, al, bl);
        }
        tcg_out_opc_reg(s, opc_sub, rh, th, TCG_REG_TMP0);
    } else {
        if (cbl) {
            tcg_out_opc_imm(s, opc_addi, rl, al, bl);
            tcg_out_opc_imm(s, OPC_SLTIU, TCG_REG_TMP0, rl, bl);
        } else if (rl == al && rl == bl) {
            tcg_out_opc_imm(s, OPC_SLTI, TCG_REG_TMP0, al, 0);
            tcg_out_opc_reg(s, opc_add, rl, al, bl);
        } else {
            tcg_out_opc_reg(s, opc_add, rl, al, bl);
            tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_TMP0,
                            rl, (rl == bl ? al : bl));
        }
        tcg_out_opc_reg(s, opc_add, rh, th, TCG_REG_TMP0);
    }
}
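
/*
 * The double-word arithmetic above computes carry/borrow without a flags
 * register: for an addition, carry = (result < operand) unsigned, hence
 * the SLTU/SLTIU against one of the inputs; for a subtraction the borrow
 * is (minuend < subtrahend) unsigned, computed before the SUB clobbers
 * the inputs.
 */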

static const struct {
    RISCVInsn op;
    bool swap;
} tcg_brcond_to_riscv[] = {
    [TCG_COND_EQ]  = { OPC_BEQ,  false },
    [TCG_COND_NE]  = { OPC_BNE,  false },
    [TCG_COND_LT]  = { OPC_BLT,  false },
    [TCG_COND_GE]  = { OPC_BGE,  false },
    [TCG_COND_LE]  = { OPC_BGE,  true  },
    [TCG_COND_GT]  = { OPC_BLT,  true  },
    [TCG_COND_LTU] = { OPC_BLTU, false },
    [TCG_COND_GEU] = { OPC_BGEU, false },
    [TCG_COND_LEU] = { OPC_BGEU, true  },
    [TCG_COND_GTU] = { OPC_BLTU, true  }
};

static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
                           TCGReg arg2, TCGLabel *l)
{
    RISCVInsn op = tcg_brcond_to_riscv[cond].op;

    tcg_debug_assert(op != 0);

    if (tcg_brcond_to_riscv[cond].swap) {
        TCGReg t = arg1;
        arg1 = arg2;
        arg2 = t;
    }

    if (l->has_value) {
        intptr_t diff = tcg_pcrel_diff(s, l->u.value_ptr);
        if (diff == sextreg(diff, 0, 12)) {
            tcg_out_opc_branch(s, op, arg1, arg2, diff);
        } else {
            /* Invert the conditional branch. */
            tcg_out_opc_branch(s, op ^ (1 << 12), arg1, arg2, 8);
            tcg_out_opc_jump(s, OPC_JAL, TCG_REG_ZERO, diff - 4);
        }
    } else {
        tcg_out_reloc(s, s->code_ptr, R_RISCV_BRANCH, l, 0);
        tcg_out_opc_branch(s, op, arg1, arg2, 0);
        /* NOP to allow patching later */
        tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_ZERO, TCG_REG_ZERO, 0);
    }
}
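
/*
 * When a conditional branch target is out of B-type range, the code above
 * (and patch_reloc) fall back to an inverted branch that skips over an
 * unconditional jump: "b<!cond> +8; jal zero, target", extending the
 * reach to the +/-1MB JAL range.
 */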

static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
                            TCGReg arg1, TCGReg arg2)
{
    switch (cond) {
    case TCG_COND_EQ:
        tcg_out_opc_reg(s, OPC_SUB, ret, arg1, arg2);
        tcg_out_opc_imm(s, OPC_SLTIU, ret, ret, 1);
        break;
    case TCG_COND_NE:
        tcg_out_opc_reg(s, OPC_SUB, ret, arg1, arg2);
        tcg_out_opc_reg(s, OPC_SLTU, ret, TCG_REG_ZERO, ret);
        break;
    case TCG_COND_LT:
        tcg_out_opc_reg(s, OPC_SLT, ret, arg1, arg2);
        break;
    case TCG_COND_GE:
        tcg_out_opc_reg(s, OPC_SLT, ret, arg1, arg2);
        tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
        break;
    case TCG_COND_LE:
        tcg_out_opc_reg(s, OPC_SLT, ret, arg2, arg1);
        tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
        break;
    case TCG_COND_GT:
        tcg_out_opc_reg(s, OPC_SLT, ret, arg2, arg1);
        break;
    case TCG_COND_LTU:
        tcg_out_opc_reg(s, OPC_SLTU, ret, arg1, arg2);
        break;
    case TCG_COND_GEU:
        tcg_out_opc_reg(s, OPC_SLTU, ret, arg1, arg2);
        tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
        break;
    case TCG_COND_LEU:
        tcg_out_opc_reg(s, OPC_SLTU, ret, arg2, arg1);
        tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
        break;
    case TCG_COND_GTU:
        tcg_out_opc_reg(s, OPC_SLTU, ret, arg2, arg1);
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_brcond2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah,
                            TCGReg bl, TCGReg bh, TCGLabel *l)
{
    g_assert_not_reached();
}

static void tcg_out_setcond2(TCGContext *s, TCGCond cond, TCGReg ret,
                             TCGReg al, TCGReg ah, TCGReg bl, TCGReg bh)
{
    g_assert_not_reached();
}

static inline void tcg_out_goto(TCGContext *s, tcg_insn_unit *target)
{
    ptrdiff_t offset = tcg_pcrel_diff(s, target);
    tcg_debug_assert(offset == sextreg(offset, 1, 20) << 1);
    tcg_out_opc_jump(s, OPC_JAL, TCG_REG_ZERO, offset);
}

static void tcg_out_call_int(TCGContext *s, tcg_insn_unit *arg, bool tail)
{
    TCGReg link = tail ? TCG_REG_ZERO : TCG_REG_RA;
    ptrdiff_t offset = tcg_pcrel_diff(s, arg);
    bool ret;

    if (offset == sextreg(offset, 1, 20) << 1) {
        /* short jump: -2097150 to 2097152 */
        tcg_out_opc_jump(s, OPC_JAL, link, offset);
    } else if (TCG_TARGET_REG_BITS == 32 ||
               offset == sextreg(offset, 1, 31) << 1) {
        /* long jump: -2147483646 to 2147483648 */
        tcg_out_opc_upper(s, OPC_AUIPC, TCG_REG_TMP0, 0);
        tcg_out_opc_imm(s, OPC_JALR, link, TCG_REG_TMP0, 0);
        ret = reloc_call(s->code_ptr - 2, arg);
        tcg_debug_assert(ret == true);
    } else if (TCG_TARGET_REG_BITS == 64) {
        /* far jump: 64-bit */
        tcg_target_long imm = sextreg((tcg_target_long)arg, 0, 12);
        tcg_target_long base = (tcg_target_long)arg - imm;
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, base);
        tcg_out_opc_imm(s, OPC_JALR, link, TCG_REG_TMP0, imm);
    } else {
        g_assert_not_reached();
    }
}

static void tcg_out_call(TCGContext *s, tcg_insn_unit *arg)
{
    tcg_out_call_int(s, arg, false);
}

static void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    tcg_insn_unit insn = OPC_FENCE;

    if (a0 & TCG_MO_LD_LD) {
        insn |= 0x02200000;
    }
    if (a0 & TCG_MO_ST_LD) {
        insn |= 0x01200000;
    }
    if (a0 & TCG_MO_LD_ST) {
        insn |= 0x02100000;
    }
    if (a0 & TCG_MO_ST_ST) {
        insn |= 0x01100000;
    }
    tcg_out32(s, insn);
}
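
/*
 * The FENCE instruction encodes its predecessor set in bits 27:24 and its
 * successor set in bits 23:20 (I/O/R/W = 8/4/2/1), which is where the
 * masks or'ed into 'insn' above come from.
 */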

#if defined(CONFIG_SOFTMMU)
#include "../tcg-ldst.inc.c"

/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
 *                                     TCGMemOpIdx oi, uintptr_t ra)
 */
static void * const qemu_ld_helpers[16] = {
    [MO_UB]   = helper_ret_ldub_mmu,
    [MO_SB]   = helper_ret_ldsb_mmu,
    [MO_LEUW] = helper_le_lduw_mmu,
    [MO_LESW] = helper_le_ldsw_mmu,
    [MO_LEUL] = helper_le_ldul_mmu,
#if TCG_TARGET_REG_BITS == 64
    [MO_LESL] = helper_le_ldsl_mmu,
#endif
    [MO_LEQ]  = helper_le_ldq_mmu,
    [MO_BEUW] = helper_be_lduw_mmu,
    [MO_BESW] = helper_be_ldsw_mmu,
    [MO_BEUL] = helper_be_ldul_mmu,
#if TCG_TARGET_REG_BITS == 64
    [MO_BESL] = helper_be_ldsl_mmu,
#endif
    [MO_BEQ]  = helper_be_ldq_mmu,
};

/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
 *                                     uintxx_t val, TCGMemOpIdx oi,
 *                                     uintptr_t ra)
 */
static void * const qemu_st_helpers[16] = {
    [MO_UB]   = helper_ret_stb_mmu,
    [MO_LEUW] = helper_le_stw_mmu,
    [MO_LEUL] = helper_le_stl_mmu,
    [MO_LEQ]  = helper_le_stq_mmu,
    [MO_BEUW] = helper_be_stw_mmu,
    [MO_BEUL] = helper_be_stl_mmu,
    [MO_BEQ]  = helper_be_stq_mmu,
};

/* We don't support oversize guests */
QEMU_BUILD_BUG_ON(TCG_TARGET_REG_BITS < TARGET_LONG_BITS);

/* We expect to use a 12-bit negative offset from ENV. */
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 11));

static void tcg_out_tlb_load(TCGContext *s, TCGReg addrl,
                             TCGReg addrh, TCGMemOpIdx oi,
                             tcg_insn_unit **label_ptr, bool is_load)
{
    MemOp opc = get_memop(oi);
    unsigned s_bits = opc & MO_SIZE;
    unsigned a_bits = get_alignment_bits(opc);
    tcg_target_long compare_mask;
    int mem_index = get_mmuidx(oi);
    int fast_ofs = TLB_MASK_TABLE_OFS(mem_index);
    int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask);
    int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table);
    TCGReg mask_base = TCG_AREG0, table_base = TCG_AREG0;

    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, mask_base, mask_ofs);
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, table_base, table_ofs);

    tcg_out_opc_imm(s, OPC_SRLI, TCG_REG_TMP2, addrl,
                    TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
    tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
    tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);

    /* Load the tlb comparator and the addend. */
    tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP0, TCG_REG_TMP2,
               is_load ? offsetof(CPUTLBEntry, addr_read)
                       : offsetof(CPUTLBEntry, addr_write));
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2,
               offsetof(CPUTLBEntry, addend));

    /* We don't support unaligned accesses. */
    if (a_bits < s_bits) {
        a_bits = s_bits;
    }
    /* Clear the non-page, non-alignment bits from the address. */
    compare_mask = (tcg_target_long)TARGET_PAGE_MASK | ((1 << a_bits) - 1);
    if (compare_mask == sextreg(compare_mask, 0, 12)) {
        tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addrl, compare_mask);
    } else {
        tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_TMP1, compare_mask);
        tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP1, TCG_REG_TMP1, addrl);
    }

    /* Compare masked address with the TLB entry. */
    label_ptr[0] = s->code_ptr;
    tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP0, TCG_REG_TMP1, 0);
    /* NOP to allow patching later */
    tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_ZERO, TCG_REG_ZERO, 0);

    /* TLB Hit - translate address using addend. */
    if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
        tcg_out_ext32u(s, TCG_REG_TMP0, addrl);
        addrl = TCG_REG_TMP0;
    }
    tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_REG_TMP2, addrl);
}
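
/*
 * Summary of the fast path emitted above: the guest address is shifted and
 * masked with CPUTLBDescFast.mask to index the TLB table, the tag
 * (addr_read or addr_write) is compared against the page-aligned address,
 * and on a hit the entry's addend is added to form the host address in
 * TCG_REG_TMP0.  The BNE + NOP pair is later patched to reach the slow path.
 */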

static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOpIdx oi,
                                TCGType ext,
                                TCGReg datalo, TCGReg datahi,
                                TCGReg addrlo, TCGReg addrhi,
                                void *raddr, tcg_insn_unit **label_ptr)
{
    TCGLabelQemuLdst *label = new_ldst_label(s);

    label->is_ld = is_ld;
    label->oi = oi;
    label->type = ext;
    label->datalo_reg = datalo;
    label->datahi_reg = datahi;
    label->addrlo_reg = addrlo;
    label->addrhi_reg = addrhi;
    label->raddr = raddr;
    label->label_ptr[0] = label_ptr[0];
}

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    TCGMemOpIdx oi = l->oi;
    MemOp opc = get_memop(oi);
    TCGReg a0 = tcg_target_call_iarg_regs[0];
    TCGReg a1 = tcg_target_call_iarg_regs[1];
    TCGReg a2 = tcg_target_call_iarg_regs[2];
    TCGReg a3 = tcg_target_call_iarg_regs[3];

    /* We don't support oversize guests */
    if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
        g_assert_not_reached();
    }

    /* resolve label address */
    if (!patch_reloc(l->label_ptr[0], R_RISCV_BRANCH,
                     (intptr_t) s->code_ptr, 0)) {
        return false;
    }

    /* call load helper */
    tcg_out_mov(s, TCG_TYPE_PTR, a0, TCG_AREG0);
    tcg_out_mov(s, TCG_TYPE_PTR, a1, l->addrlo_reg);
    tcg_out_movi(s, TCG_TYPE_PTR, a2, oi);
    tcg_out_movi(s, TCG_TYPE_PTR, a3, (tcg_target_long)l->raddr);

    tcg_out_call(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SSIZE)]);
    tcg_out_mov(s, (opc & MO_SIZE) == MO_64, l->datalo_reg, a0);

    tcg_out_goto(s, l->raddr);
    return true;
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    TCGMemOpIdx oi = l->oi;
    MemOp opc = get_memop(oi);
    MemOp s_bits = opc & MO_SIZE;
    TCGReg a0 = tcg_target_call_iarg_regs[0];
    TCGReg a1 = tcg_target_call_iarg_regs[1];
    TCGReg a2 = tcg_target_call_iarg_regs[2];
    TCGReg a3 = tcg_target_call_iarg_regs[3];
    TCGReg a4 = tcg_target_call_iarg_regs[4];

    /* We don't support oversize guests */
    if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
        g_assert_not_reached();
    }

    /* resolve label address */
    if (!patch_reloc(l->label_ptr[0], R_RISCV_BRANCH,
                     (intptr_t) s->code_ptr, 0)) {
        return false;
    }

    /* call store helper */
    tcg_out_mov(s, TCG_TYPE_PTR, a0, TCG_AREG0);
    tcg_out_mov(s, TCG_TYPE_PTR, a1, l->addrlo_reg);
    tcg_out_mov(s, TCG_TYPE_PTR, a2, l->datalo_reg);
    switch (s_bits) {
    case MO_8:
        tcg_out_ext8u(s, a2, a2);
        break;
    case MO_16:
        tcg_out_ext16u(s, a2, a2);
        break;
    default:
        break;
    }
    tcg_out_movi(s, TCG_TYPE_PTR, a3, oi);
    tcg_out_movi(s, TCG_TYPE_PTR, a4, (tcg_target_long)l->raddr);

    tcg_out_call(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SSIZE)]);

    tcg_out_goto(s, l->raddr);
    return true;
}
#endif /* CONFIG_SOFTMMU */

static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi,
                                   TCGReg base, MemOp opc, bool is_64)
{
    const MemOp bswap = opc & MO_BSWAP;

    /* We don't yet handle byteswapping, assert */
    g_assert(!bswap);

    switch (opc & (MO_SSIZE)) {
    case MO_UB:
        tcg_out_opc_imm(s, OPC_LBU, lo, base, 0);
        break;
    case MO_SB:
        tcg_out_opc_imm(s, OPC_LB, lo, base, 0);
        break;
    case MO_UW:
        tcg_out_opc_imm(s, OPC_LHU, lo, base, 0);
        break;
    case MO_SW:
        tcg_out_opc_imm(s, OPC_LH, lo, base, 0);
        break;
    case MO_UL:
        if (TCG_TARGET_REG_BITS == 64 && is_64) {
            tcg_out_opc_imm(s, OPC_LWU, lo, base, 0);
            break;
        }
        /* FALLTHRU */
    case MO_SL:
        tcg_out_opc_imm(s, OPC_LW, lo, base, 0);
        break;
    case MO_Q:
        /* Prefer to load from offset 0 first, but allow for overlap. */
        if (TCG_TARGET_REG_BITS == 64) {
            tcg_out_opc_imm(s, OPC_LD, lo, base, 0);
        } else if (lo != base) {
            tcg_out_opc_imm(s, OPC_LW, lo, base, 0);
            tcg_out_opc_imm(s, OPC_LW, hi, base, 4);
        } else {
            tcg_out_opc_imm(s, OPC_LW, hi, base, 4);
            tcg_out_opc_imm(s, OPC_LW, lo, base, 0);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
{
    TCGReg addr_regl, addr_regh __attribute__((unused));
    TCGReg data_regl, data_regh;
    TCGMemOpIdx oi;
    MemOp opc;
#if defined(CONFIG_SOFTMMU)
    tcg_insn_unit *label_ptr[1];
#endif
    TCGReg base = TCG_REG_TMP0;

    data_regl = *args++;
    data_regh = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
    addr_regl = *args++;
    addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
    oi = *args++;
    opc = get_memop(oi);

#if defined(CONFIG_SOFTMMU)
    tcg_out_tlb_load(s, addr_regl, addr_regh, oi, label_ptr, 1);
    tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64);
    add_qemu_ldst_label(s, 1, oi,
                        (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
                        data_regl, data_regh, addr_regl, addr_regh,
                        s->code_ptr, label_ptr);
#else
    if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
        tcg_out_ext32u(s, base, addr_regl);
        addr_regl = base;
    }

    if (guest_base == 0) {
        tcg_out_opc_reg(s, OPC_ADD, base, addr_regl, TCG_REG_ZERO);
    } else {
        tcg_out_opc_reg(s, OPC_ADD, base, TCG_GUEST_BASE_REG, addr_regl);
    }
    tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64);
#endif
}

static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi,
                                   TCGReg base, MemOp opc)
{
    const MemOp bswap = opc & MO_BSWAP;

    /* We don't yet handle byteswapping, assert */
    g_assert(!bswap);

    switch (opc & (MO_SSIZE)) {
    case MO_8:
        tcg_out_opc_store(s, OPC_SB, base, lo, 0);
        break;
    case MO_16:
        tcg_out_opc_store(s, OPC_SH, base, lo, 0);
        break;
    case MO_32:
        tcg_out_opc_store(s, OPC_SW, base, lo, 0);
        break;
    case MO_64:
        if (TCG_TARGET_REG_BITS == 64) {
            tcg_out_opc_store(s, OPC_SD, base, lo, 0);
        } else {
            tcg_out_opc_store(s, OPC_SW, base, lo, 0);
            tcg_out_opc_store(s, OPC_SW, base, hi, 4);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
{
    TCGReg addr_regl, addr_regh __attribute__((unused));
    TCGReg data_regl, data_regh;
    TCGMemOpIdx oi;
    MemOp opc;
#if defined(CONFIG_SOFTMMU)
    tcg_insn_unit *label_ptr[1];
#endif
    TCGReg base = TCG_REG_TMP0;

    data_regl = *args++;
    data_regh = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
    addr_regl = *args++;
    addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
    oi = *args++;
    opc = get_memop(oi);

#if defined(CONFIG_SOFTMMU)
    tcg_out_tlb_load(s, addr_regl, addr_regh, oi, label_ptr, 0);
    tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
    add_qemu_ldst_label(s, 0, oi,
                        (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
                        data_regl, data_regh, addr_regl, addr_regh,
                        s->code_ptr, label_ptr);
#else
    if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
        tcg_out_ext32u(s, base, addr_regl);
        addr_regl = base;
    }

    if (guest_base == 0) {
        tcg_out_opc_reg(s, OPC_ADD, base, addr_regl, TCG_REG_ZERO);
    } else {
        tcg_out_opc_reg(s, OPC_ADD, base, TCG_GUEST_BASE_REG, addr_regl);
    }
    tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
#endif
}

static tcg_insn_unit *tb_ret_addr;

static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg *args, const int *const_args)
{
    TCGArg a0 = args[0];
    TCGArg a1 = args[1];
    TCGArg a2 = args[2];
    int c2 = const_args[2];

    switch (opc) {
    case INDEX_op_exit_tb:
        /* Reuse the zeroing that exists for goto_ptr. */
        if (a0 == 0) {
            tcg_out_call_int(s, s->code_gen_epilogue, true);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A0, a0);
            tcg_out_call_int(s, tb_ret_addr, true);
        }
        break;

    case INDEX_op_goto_tb:
        assert(s->tb_jmp_insn_offset == 0);
        /* indirect jump method */
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_REG_ZERO,
                   (uintptr_t)(s->tb_jmp_target_addr + a0));
        tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, TCG_REG_TMP0, 0);
        set_jmp_reset_offset(s, a0);
        break;

    case INDEX_op_goto_ptr:
        tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, a0, 0);
        break;

    case INDEX_op_br:
        tcg_out_reloc(s, s->code_ptr, R_RISCV_JAL, arg_label(a0), 0);
        tcg_out_opc_jump(s, OPC_JAL, TCG_REG_ZERO, 0);
        break;

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
        tcg_out_ldst(s, OPC_LBU, a0, a1, a2);
        break;
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
        tcg_out_ldst(s, OPC_LB, a0, a1, a2);
        break;
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
        tcg_out_ldst(s, OPC_LHU, a0, a1, a2);
        break;
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
        tcg_out_ldst(s, OPC_LH, a0, a1, a2);
        break;
    case INDEX_op_ld32u_i64:
        tcg_out_ldst(s, OPC_LWU, a0, a1, a2);
        break;
    case INDEX_op_ld_i32:
    case INDEX_op_ld32s_i64:
        tcg_out_ldst(s, OPC_LW, a0, a1, a2);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ldst(s, OPC_LD, a0, a1, a2);
        break;

    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
        tcg_out_ldst(s, OPC_SB, a0, a1, a2);
        break;
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
        tcg_out_ldst(s, OPC_SH, a0, a1, a2);
        break;
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
        tcg_out_ldst(s, OPC_SW, a0, a1, a2);
        break;
    case INDEX_op_st_i64:
        tcg_out_ldst(s, OPC_SD, a0, a1, a2);
        break;

    case INDEX_op_add_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ADDIW, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_ADDW, a0, a1, a2);
        }
        break;
    case INDEX_op_add_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ADDI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_ADD, a0, a1, a2);
        }
        break;

    case INDEX_op_sub_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ADDIW, a0, a1, -a2);
        } else {
            tcg_out_opc_reg(s, OPC_SUBW, a0, a1, a2);
        }
        break;
    case INDEX_op_sub_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ADDI, a0, a1, -a2);
        } else {
            tcg_out_opc_reg(s, OPC_SUB, a0, a1, a2);
        }
        break;

    case INDEX_op_and_i32:
    case INDEX_op_and_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ANDI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_AND, a0, a1, a2);
        }
        break;

    case INDEX_op_or_i32:
    case INDEX_op_or_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ORI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_OR, a0, a1, a2);
        }
        break;

    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_XORI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_XOR, a0, a1, a2);
        }
        break;

    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
        tcg_out_opc_imm(s, OPC_XORI, a0, a1, -1);
        break;

    case INDEX_op_neg_i32:
        tcg_out_opc_reg(s, OPC_SUBW, a0, TCG_REG_ZERO, a1);
        break;
    case INDEX_op_neg_i64:
        tcg_out_opc_reg(s, OPC_SUB, a0, TCG_REG_ZERO, a1);
        break;

    case INDEX_op_mul_i32:
        tcg_out_opc_reg(s, OPC_MULW, a0, a1, a2);
        break;
    case INDEX_op_mul_i64:
        tcg_out_opc_reg(s, OPC_MUL, a0, a1, a2);
        break;

    case INDEX_op_div_i32:
        tcg_out_opc_reg(s, OPC_DIVW, a0, a1, a2);
        break;
    case INDEX_op_div_i64:
        tcg_out_opc_reg(s, OPC_DIV, a0, a1, a2);
        break;

    case INDEX_op_divu_i32:
        tcg_out_opc_reg(s, OPC_DIVUW, a0, a1, a2);
        break;
    case INDEX_op_divu_i64:
        tcg_out_opc_reg(s, OPC_DIVU, a0, a1, a2);
        break;

    case INDEX_op_rem_i32:
        tcg_out_opc_reg(s, OPC_REMW, a0, a1, a2);
        break;
    case INDEX_op_rem_i64:
        tcg_out_opc_reg(s, OPC_REM, a0, a1, a2);
        break;

    case INDEX_op_remu_i32:
        tcg_out_opc_reg(s, OPC_REMUW, a0, a1, a2);
        break;
    case INDEX_op_remu_i64:
        tcg_out_opc_reg(s, OPC_REMU, a0, a1, a2);
        break;

    case INDEX_op_shl_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SLLIW, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_SLLW, a0, a1, a2);
        }
        break;
    case INDEX_op_shl_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SLLI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_SLL, a0, a1, a2);
        }
        break;

    case INDEX_op_shr_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SRLIW, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_SRLW, a0, a1, a2);
        }
        break;
    case INDEX_op_shr_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SRLI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_SRL, a0, a1, a2);
        }
        break;

    case INDEX_op_sar_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SRAIW, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_SRAW, a0, a1, a2);
        }
        break;
    case INDEX_op_sar_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SRAI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_SRA, a0, a1, a2);
        }
        break;

    case INDEX_op_add2_i32:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                        const_args[4], const_args[5], false, true);
        break;
    case INDEX_op_add2_i64:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                        const_args[4], const_args[5], false, false);
        break;
    case INDEX_op_sub2_i32:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                        const_args[4], const_args[5], true, true);
        break;
    case INDEX_op_sub2_i64:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                        const_args[4], const_args[5], true, false);
        break;

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        tcg_out_brcond(s, a2, a0, a1, arg_label(args[3]));
        break;
    case INDEX_op_brcond2_i32:
        tcg_out_brcond2(s, args[4], a0, a1, a2, args[3], arg_label(args[5]));
        break;

    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
        tcg_out_setcond(s, args[3], a0, a1, a2);
        break;
    case INDEX_op_setcond2_i32:
        tcg_out_setcond2(s, args[5], a0, a1, a2, args[3], args[4]);
        break;

    case INDEX_op_qemu_ld_i32:
        tcg_out_qemu_ld(s, args, false);
        break;
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, args, true);
        break;
    case INDEX_op_qemu_st_i32:
        tcg_out_qemu_st(s, args, false);
        break;
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, args, true);
        break;

    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
        tcg_out_ext8u(s, a0, a1);
        break;

    case INDEX_op_ext16u_i32:
    case INDEX_op_ext16u_i64:
        tcg_out_ext16u(s, a0, a1);
        break;

    case INDEX_op_ext32u_i64:
    case INDEX_op_extu_i32_i64:
        tcg_out_ext32u(s, a0, a1);
        break;

    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
        tcg_out_ext8s(s, a0, a1);
        break;

    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
        tcg_out_ext16s(s, a0, a1);
        break;

    case INDEX_op_ext32s_i64:
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_ext_i32_i64:
        tcg_out_ext32s(s, a0, a1);
        break;

    case INDEX_op_extrh_i64_i32:
        tcg_out_opc_imm(s, OPC_SRAI, a0, a1, 32);
        break;

    case INDEX_op_mulsh_i32:
    case INDEX_op_mulsh_i64:
        tcg_out_opc_reg(s, OPC_MULH, a0, a1, a2);
        break;

    case INDEX_op_muluh_i32:
    case INDEX_op_muluh_i64:
        tcg_out_opc_reg(s, OPC_MULHU, a0, a1, a2);
        break;

    case INDEX_op_mb:
        tcg_out_mb(s, a0);
        break;

    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov. */
    case INDEX_op_mov_i64:
    case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */
    case INDEX_op_movi_i64:
    case INDEX_op_call:     /* Always emitted via tcg_out_call. */
    default:
        g_assert_not_reached();
    }
}

static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
{
    static const TCGTargetOpDef r
        = { .args_ct_str = { "r" } };
    static const TCGTargetOpDef r_r
        = { .args_ct_str = { "r", "r" } };
    static const TCGTargetOpDef rZ_r
        = { .args_ct_str = { "rZ", "r" } };
    static const TCGTargetOpDef rZ_rZ
        = { .args_ct_str = { "rZ", "rZ" } };
    static const TCGTargetOpDef rZ_rZ_rZ_rZ
        = { .args_ct_str = { "rZ", "rZ", "rZ", "rZ" } };
    static const TCGTargetOpDef r_r_ri
        = { .args_ct_str = { "r", "r", "ri" } };
    static const TCGTargetOpDef r_r_rI
        = { .args_ct_str = { "r", "r", "rI" } };
    static const TCGTargetOpDef r_rZ_rN
        = { .args_ct_str = { "r", "rZ", "rN" } };
    static const TCGTargetOpDef r_rZ_rZ
        = { .args_ct_str = { "r", "rZ", "rZ" } };
    static const TCGTargetOpDef r_rZ_rZ_rZ_rZ
        = { .args_ct_str = { "r", "rZ", "rZ", "rZ", "rZ" } };
    static const TCGTargetOpDef r_L
        = { .args_ct_str = { "r", "L" } };
    static const TCGTargetOpDef r_r_L
        = { .args_ct_str = { "r", "r", "L" } };
    static const TCGTargetOpDef r_L_L
        = { .args_ct_str = { "r", "L", "L" } };
    static const TCGTargetOpDef r_r_L_L
        = { .args_ct_str = { "r", "r", "L", "L" } };
    static const TCGTargetOpDef LZ_L
        = { .args_ct_str = { "LZ", "L" } };
    static const TCGTargetOpDef LZ_L_L
        = { .args_ct_str = { "LZ", "L", "L" } };
    static const TCGTargetOpDef LZ_LZ_L
        = { .args_ct_str = { "LZ", "LZ", "L" } };
    static const TCGTargetOpDef LZ_LZ_L_L
        = { .args_ct_str = { "LZ", "LZ", "L", "L" } };
    static const TCGTargetOpDef r_r_rZ_rZ_rM_rM
        = { .args_ct_str = { "r", "r", "rZ", "rZ", "rM", "rM" } };

    switch (op) {
    case INDEX_op_goto_ptr:
        return &r;

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_not_i32:
    case INDEX_op_neg_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_not_i64:
    case INDEX_op_neg_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext16u_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_extrh_i64_i32:
    case INDEX_op_ext_i32_i64:
        return &r_r;

    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
        return &rZ_r;

    case INDEX_op_add_i32:
    case INDEX_op_and_i32:
    case INDEX_op_or_i32:
    case INDEX_op_xor_i32:
    case INDEX_op_add_i64:
    case INDEX_op_and_i64:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i64:
        return &r_r_rI;

    case INDEX_op_sub_i32:
    case INDEX_op_sub_i64:
        return &r_rZ_rN;

    case INDEX_op_mul_i32:
    case INDEX_op_mulsh_i32:
    case INDEX_op_muluh_i32:
    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
    case INDEX_op_rem_i32:
    case INDEX_op_remu_i32:
    case INDEX_op_setcond_i32:
    case INDEX_op_mul_i64:
    case INDEX_op_mulsh_i64:
    case INDEX_op_muluh_i64:
    case INDEX_op_div_i64:
    case INDEX_op_divu_i64:
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i64:
    case INDEX_op_setcond_i64:
        return &r_rZ_rZ;

    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
        return &r_r_ri;

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        return &rZ_rZ;

    case INDEX_op_add2_i32:
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i32:
    case INDEX_op_sub2_i64:
        return &r_r_rZ_rZ_rM_rM;

    case INDEX_op_brcond2_i32:
        return &rZ_rZ_rZ_rZ;

    case INDEX_op_setcond2_i32:
        return &r_rZ_rZ_rZ_rZ;

    case INDEX_op_qemu_ld_i32:
        return TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &r_L : &r_L_L;
    case INDEX_op_qemu_st_i32:
        return TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &LZ_L : &LZ_L_L;
    case INDEX_op_qemu_ld_i64:
        return TCG_TARGET_REG_BITS == 64 ? &r_L
               : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &r_r_L
               : &r_r_L_L;
    case INDEX_op_qemu_st_i64:
        return TCG_TARGET_REG_BITS == 64 ? &LZ_L
               : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &LZ_LZ_L
               : &LZ_LZ_L_L;

    default:
        return NULL;
    }
}

static const int tcg_target_callee_save_regs[] = {
    TCG_REG_S0,       /* used for the global env (TCG_AREG0) */
    TCG_REG_S1,  TCG_REG_S2,  TCG_REG_S3,  TCG_REG_S4,
    TCG_REG_S5,  TCG_REG_S6,  TCG_REG_S7,  TCG_REG_S8,
    TCG_REG_S9,  TCG_REG_S10, TCG_REG_S11,
    TCG_REG_RA,       /* should be last for ABI compliance */
};

/* Stack frame parameters. */
#define REG_SIZE   (TCG_TARGET_REG_BITS / 8)
#define SAVE_SIZE  ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * REG_SIZE)
#define TEMP_SIZE  (CPU_TEMP_BUF_NLONGS * (int)sizeof(long))
#define FRAME_SIZE ((TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE + SAVE_SIZE \
                     + TCG_TARGET_STACK_ALIGN - 1) \
                    & -TCG_TARGET_STACK_ALIGN)
#define SAVE_OFS   (TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE)

/* We're expecting to be able to use an immediate for frame allocation. */
QEMU_BUILD_BUG_ON(FRAME_SIZE > 0x7ff);
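
/*
 * For a 64-bit host with the usual values (TCG_STATIC_CALL_ARGS_SIZE = 128
 * and CPU_TEMP_BUF_NLONGS = 128, both assumptions about the surrounding
 * QEMU headers rather than definitions in this file), SAVE_SIZE is
 * 13 * 8 = 104 and FRAME_SIZE rounds 128 + 1024 + 104 up to 1264, which is
 * comfortably below the 0x7ff limit asserted above.
 */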

/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int i;

    tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE, TEMP_SIZE);

    /* TB prologue */
    tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_SP, TCG_REG_SP, -FRAME_SIZE);
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
    }

#if !defined(CONFIG_SOFTMMU)
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
    tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
#endif

    /* Call generated code */
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
    tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, tcg_target_call_iarg_regs[1], 0);

    /* Return path for goto_ptr. Set return value to 0 */
    s->code_gen_epilogue = s->code_ptr;
    tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_A0, TCG_REG_ZERO);

    /* TB epilogue */
    tb_ret_addr = s->code_ptr;
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
    }

    tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE);
    tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, TCG_REG_RA, 0);
}

static void tcg_target_init(TCGContext *s)
{
    tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff;
    if (TCG_TARGET_REG_BITS == 64) {
        tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff;
    }

    tcg_target_call_clobber_regs = -1u;
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S0);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S1);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S2);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S3);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S4);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S5);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S6);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S7);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S8);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S9);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S10);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S11);

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP0);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_GP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TP);
}

typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2];
} DebugFrame;

#define ELF_HOST_MACHINE EM_RISCV

static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = -(TCG_TARGET_REG_BITS / 8) & 0x7f, /* sleb128 */
    .h.cie.return_column = TCG_REG_RA,

    /* Total FDE size does not include the "len" member. */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
        12, TCG_REG_SP,                 /* DW_CFA_def_cfa sp, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        FRAME_SIZE >> 7
    },
    .fde_reg_ofs = {
        0x80 + 9,  12,                  /* DW_CFA_offset, s1,  -96 */
        0x80 + 18, 11,                  /* DW_CFA_offset, s2,  -88 */
        0x80 + 19, 10,                  /* DW_CFA_offset, s3,  -80 */
        0x80 + 20, 9,                   /* DW_CFA_offset, s4,  -72 */
        0x80 + 21, 8,                   /* DW_CFA_offset, s5,  -64 */
        0x80 + 22, 7,                   /* DW_CFA_offset, s6,  -56 */
        0x80 + 23, 6,                   /* DW_CFA_offset, s7,  -48 */
        0x80 + 24, 5,                   /* DW_CFA_offset, s8,  -40 */
        0x80 + 25, 4,                   /* DW_CFA_offset, s9,  -32 */
        0x80 + 26, 3,                   /* DW_CFA_offset, s10, -24 */
        0x80 + 27, 2,                   /* DW_CFA_offset, s11, -16 */
        0x80 + 1,  1,                   /* DW_CFA_offset, ra,  -8 */
    }
};

void tcg_register_jit(void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}