/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2018 SiFive, Inc
 * Copyright (c) 2008-2009 Arnaud Patard <arnaud.patard@rtp-net.org>
 * Copyright (c) 2009 Aurelien Jarno <aurelien@aurel32.net>
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Based on i386/tcg-target.c and mips/tcg-target.c
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "../tcg-ldst.c.inc"
#include "../tcg-pool.c.inc"

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "zero", "ra", "sp", "gp", "tp", "t0", "t1", "t2",
    "s0", "s1", "a0", "a1", "a2", "a3", "a4", "a5",
    "a6", "a7", "s2", "s3", "s4", "s5", "s6", "s7",
    "s8", "s9", "s10", "s11", "t3", "t4", "t5", "t6"
};
#endif

static const int tcg_target_reg_alloc_order[] = {
    /* Call saved registers */
    /* TCG_REG_S0 reserved for TCG_AREG0 */
    TCG_REG_S1,  TCG_REG_S2,  TCG_REG_S3, TCG_REG_S4,
    TCG_REG_S5,  TCG_REG_S6,  TCG_REG_S7, TCG_REG_S8,
    TCG_REG_S9,  TCG_REG_S10, TCG_REG_S11,

    /* Call clobbered registers */
    TCG_REG_T6, TCG_REG_T5, TCG_REG_T4, TCG_REG_T3,
    TCG_REG_T2, TCG_REG_T1, TCG_REG_T0,

    /* Argument registers */
    TCG_REG_A7, TCG_REG_A6, TCG_REG_A5, TCG_REG_A4,
    TCG_REG_A3, TCG_REG_A2, TCG_REG_A1, TCG_REG_A0,
};

static const int tcg_target_call_iarg_regs[] = {
    TCG_REG_A0, TCG_REG_A1, TCG_REG_A2, TCG_REG_A3,
    TCG_REG_A4, TCG_REG_A5, TCG_REG_A6, TCG_REG_A7,
};

static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
{
    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
    tcg_debug_assert(slot >= 0 && slot <= 1);
    return TCG_REG_A0 + slot;
}

#define TCG_CT_CONST_ZERO  0x100
#define TCG_CT_CONST_S12   0x200
#define TCG_CT_CONST_N12   0x400
#define TCG_CT_CONST_M12   0x800
#define TCG_CT_CONST_J12   0x1000

#define ALL_GENERAL_REGS   MAKE_64BIT_MASK(0, 32)

#define sextreg  sextract64

/* test if a constant matches the constraint */
static bool tcg_target_const_match(int64_t val, int ct,
                                   TCGType type, TCGCond cond, int vece)
{
    if (ct & TCG_CT_CONST) {
        return true;
    }
    if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return true;
    }
    /*
     * Sign extended from 12 bits: [-0x800, 0x7ff].
     * Used for most arithmetic, as this is the ISA's immediate field.
     */
    if ((ct & TCG_CT_CONST_S12) && val >= -0x800 && val <= 0x7ff) {
        return true;
    }
    /*
     * Sign extended from 12 bits, negated: [-0x7ff, 0x800].
     * Used for subtraction, where the constant must be handled by ADDI.
     */
    if ((ct & TCG_CT_CONST_N12) && val >= -0x7ff && val <= 0x800) {
        return true;
    }
    /*
     * Sign extended from 12 bits, +/- matching: [-0x7ff, 0x7ff].
     * Used by addsub2 and movcond, which may need the negative value,
     * and require the modified constant to be representable.
     */
    if ((ct & TCG_CT_CONST_M12) && val >= -0x7ff && val <= 0x7ff) {
        return true;
    }
    /*
     * Inverse of sign extended from 12 bits: ~[-0x800, 0x7ff].
     * Used to map ANDN back to ANDI, etc.
     */
    if ((ct & TCG_CT_CONST_J12) && ~val >= -0x800 && ~val <= 0x7ff) {
        return true;
    }
    return false;
}

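/*
 * Illustrative examples of the constraints above (added commentary, not
 * from the original source): 0x7ff matches S12 and can feed ADDI/ANDI/ORI
 * directly; 0x800 fails S12 but matches N12, so "sub rd, rs, 0x800" can be
 * emitted as "addi rd, rs, -0x800"; 0x7ff also matches J12 because
 * ~0x7ff == -0x800, letting "andc rd, rs, 0x7ff" fold to
 * "andi rd, rs, -0x800" even without Zbb's ANDN.
 */
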
/*
 * RISC-V Base ISA opcodes (IM)
 */

typedef enum {
    /*
     * The RV32I/RV64I base opcodes (OPC_ADD, OPC_ADDI, OPC_LUI, OPC_AUIPC,
     * loads, stores, branches, shifts, ...) and the remaining M-extension
     * entries are elided from this excerpt.
     */
    OPC_DIVU = 0x2005033,
    OPC_MULH = 0x2001033,
    OPC_MULHSU = 0x2002033,
    OPC_MULHU = 0x2003033,
    OPC_REMU = 0x2007033,
    OPC_SRA = 0x40005033,
    OPC_SRAI = 0x40005013,
    OPC_SUB = 0x40000033,

    OPC_DIVUW = 0x200503b,
    OPC_DIVW = 0x200403b,
    OPC_MULW = 0x200003b,
    OPC_REMUW = 0x200703b,
    OPC_REMW = 0x200603b,
    OPC_SRAIW = 0x4000501b,
    OPC_SRAW = 0x4000503b,
    OPC_SUBW = 0x4000003b,

    OPC_FENCE = 0x0000000f,
    OPC_NOP = OPC_ADDI, /* nop = addi x0, x0, 0 */

    /* Zba: Bit manipulation extension, address generation */
    OPC_ADD_UW = 0x0800003b,

    /* Zbb: Bit manipulation extension, basic bit manipulation */
    OPC_ANDN = 0x40007033,
    OPC_CLZ = 0x60001013,
    OPC_CLZW = 0x6000101b,
    OPC_CPOP = 0x60201013,
    OPC_CPOPW = 0x6020101b,
    OPC_CTZ = 0x60101013,
    OPC_CTZW = 0x6010101b,
    OPC_ORN = 0x40006033,
    OPC_REV8 = 0x6b805013,
    OPC_ROL = 0x60001033,
    OPC_ROLW = 0x6000103b,
    OPC_ROR = 0x60005033,
    OPC_RORW = 0x6000503b,
    OPC_RORI = 0x60005013,
    OPC_RORIW = 0x6000501b,
    OPC_SEXT_B = 0x60401013,
    OPC_SEXT_H = 0x60501013,
    OPC_XNOR = 0x40004033,
    OPC_ZEXT_H = 0x0800403b,

    /* Zicond: integer conditional operations */
    OPC_CZERO_EQZ = 0x0e005033,
    OPC_CZERO_NEZ = 0x0e007033,
} RISCVInsn;

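/*
 * Worked example (added commentary): each OPC_* constant above is a full
 * instruction word with the register fields left as zero.  OPC_SUB ==
 * 0x40000033 decomposes as opcode[6:0] = 0x33 (OP), funct3[14:12] = 0 and
 * funct7[31:25] = 0x20 (0x20 << 25 == 0x40000000); encode_r() below then
 * ORs rd, rs1 and rs2 into bits [11:7], [19:15] and [24:20].
 */
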
/*
 * RISC-V immediate and instruction encoders (excludes 16-bit RVC)
 */

/* Type-R */

static int32_t encode_r(RISCVInsn opc, TCGReg rd, TCGReg rs1, TCGReg rs2)
{
    return opc | (rd & 0x1f) << 7 | (rs1 & 0x1f) << 15 | (rs2 & 0x1f) << 20;
}

/* Type-I */

static int32_t encode_imm12(uint32_t imm)
{
    return (imm & 0xfff) << 20;
}

static int32_t encode_i(RISCVInsn opc, TCGReg rd, TCGReg rs1, uint32_t imm)
{
    return opc | (rd & 0x1f) << 7 | (rs1 & 0x1f) << 15 | encode_imm12(imm);
}

/* Type-S */

static int32_t encode_simm12(uint32_t imm)
{
    int32_t ret = 0;

    ret |= (imm & 0xFE0) << 20;
    ret |= (imm & 0x1F) << 7;

    return ret;
}

static int32_t encode_s(RISCVInsn opc, TCGReg rs1, TCGReg rs2, uint32_t imm)
{
    return opc | (rs1 & 0x1f) << 15 | (rs2 & 0x1f) << 20 | encode_simm12(imm);
}

/* Type-SB */

static int32_t encode_sbimm12(uint32_t imm)
{
    int32_t ret = 0;

    ret |= (imm & 0x1000) << 19;
    ret |= (imm & 0x7e0) << 20;
    ret |= (imm & 0x1e) << 7;
    ret |= (imm & 0x800) >> 4;

    return ret;
}

static int32_t encode_sb(RISCVInsn opc, TCGReg rs1, TCGReg rs2, uint32_t imm)
{
    return opc | (rs1 & 0x1f) << 15 | (rs2 & 0x1f) << 20 | encode_sbimm12(imm);
}

/* Type-U */

static int32_t encode_uimm20(uint32_t imm)
{
    return imm & 0xfffff000;
}

static int32_t encode_u(RISCVInsn opc, TCGReg rd, uint32_t imm)
{
    return opc | (rd & 0x1f) << 7 | encode_uimm20(imm);
}

/* Type-UJ */

static int32_t encode_ujimm20(uint32_t imm)
{
    int32_t ret = 0;

    ret |= (imm & 0x0007fe) << (21 - 1);
    ret |= (imm & 0x000800) << (20 - 11);
    ret |= (imm & 0x0ff000) << (12 - 12);
    ret |= (imm & 0x100000) << (31 - 20);

    return ret;
}

static int32_t encode_uj(RISCVInsn opc, TCGReg rd, uint32_t imm)
{
    return opc | (rd & 0x1f) << 7 | encode_ujimm20(imm);
}

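/*
 * Worked example of the immediate scattering (added commentary): a +8 byte
 * branch displacement has only imm[3] set, and encode_sbimm12() routes
 * imm[4:1] into bits [11:8], i.e. (8 & 0x1e) << 7 == 0x400.  For J-type, a
 * +0x1000 displacement has only imm[12] set, which (imm & 0x0ff000) keeps
 * in place at bit 12, while imm[20] would supply the sign at bit 31.
 */
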
/*
 * RISC-V instruction emitters
 */

static void tcg_out_opc_reg(TCGContext *s, RISCVInsn opc,
                            TCGReg rd, TCGReg rs1, TCGReg rs2)
{
    tcg_out32(s, encode_r(opc, rd, rs1, rs2));
}

static void tcg_out_opc_imm(TCGContext *s, RISCVInsn opc,
                            TCGReg rd, TCGReg rs1, TCGArg imm)
{
    tcg_out32(s, encode_i(opc, rd, rs1, imm));
}

static void tcg_out_opc_store(TCGContext *s, RISCVInsn opc,
                              TCGReg rs1, TCGReg rs2, uint32_t imm)
{
    tcg_out32(s, encode_s(opc, rs1, rs2, imm));
}

static void tcg_out_opc_branch(TCGContext *s, RISCVInsn opc,
                               TCGReg rs1, TCGReg rs2, uint32_t imm)
{
    tcg_out32(s, encode_sb(opc, rs1, rs2, imm));
}

static void tcg_out_opc_upper(TCGContext *s, RISCVInsn opc,
                              TCGReg rd, uint32_t imm)
{
    tcg_out32(s, encode_u(opc, rd, imm));
}

static void tcg_out_opc_jump(TCGContext *s, RISCVInsn opc,
                             TCGReg rd, uint32_t imm)
{
    tcg_out32(s, encode_uj(opc, rd, imm));
}

static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    int i;

    for (i = 0; i < count; ++i) {
        p[i] = OPC_NOP;
    }
}

static bool reloc_sbimm12(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;

    tcg_debug_assert((offset & 1) == 0);
    if (offset == sextreg(offset, 0, 12)) {
        *src_rw |= encode_sbimm12(offset);
        return true;
    }
    return false;
}

static bool reloc_jimm20(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;

    tcg_debug_assert((offset & 1) == 0);
    if (offset == sextreg(offset, 0, 20)) {
        *src_rw |= encode_ujimm20(offset);
        return true;
    }
    return false;
}

static bool reloc_call(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;
    int32_t lo = sextreg(offset, 0, 12);
    int32_t hi = offset - lo;

    if (offset == hi + lo) {
        src_rw[0] |= encode_uimm20(hi);
        src_rw[1] |= encode_imm12(lo);
        return true;
    }
    return false;
}

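/*
 * Worked example (added commentary): for offset 0x12345abc the low part is
 * lo = sextreg(offset, 0, 12) = -0x544 and hi = offset - lo = 0x12346000.
 * AUIPC contributes hi relative to pc and the following ADDI (or JALR/LD)
 * adds lo back, so hi + lo reproduces the offset exactly; the guard only
 * fails when the +-2 GiB reach of AUIPC is exceeded.
 */
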
static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    tcg_debug_assert(addend == 0);

    switch (type) {
    case R_RISCV_BRANCH:
        return reloc_sbimm12(code_ptr, (tcg_insn_unit *)value);
    case R_RISCV_JAL:
        return reloc_jimm20(code_ptr, (tcg_insn_unit *)value);
    case R_RISCV_CALL:
        return reloc_call(code_ptr, (tcg_insn_unit *)value);
    default:
        g_assert_not_reached();
    }
}

static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    if (ret == arg) {
        return true;
    }
    switch (type) {
    case TCG_TYPE_I32:
    case TCG_TYPE_I64:
        tcg_out_opc_imm(s, OPC_ADDI, ret, arg, 0);
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}

static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
                         tcg_target_long val)
{
    tcg_target_long lo, hi, tmp;
    int shift, ret;

    if (type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    lo = sextreg(val, 0, 12);
    if (val == lo) {
        tcg_out_opc_imm(s, OPC_ADDI, rd, TCG_REG_ZERO, lo);
        return;
    }

    hi = val - lo;
    if (val == (int32_t)val) {
        tcg_out_opc_upper(s, OPC_LUI, rd, hi);
        if (lo != 0) {
            tcg_out_opc_imm(s, OPC_ADDIW, rd, rd, lo);
        }
        return;
    }

    tmp = tcg_pcrel_diff(s, (void *)val);
    if (tmp == (int32_t)tmp) {
        tcg_out_opc_upper(s, OPC_AUIPC, rd, 0);
        tcg_out_opc_imm(s, OPC_ADDI, rd, rd, 0);
        ret = reloc_call(s->code_ptr - 2, (const tcg_insn_unit *)val);
        tcg_debug_assert(ret == true);
        return;
    }

    /* Look for a single 20-bit section. */
    shift = ctz64(val);
    tmp = val >> shift;
    if (tmp == sextreg(tmp, 0, 20)) {
        tcg_out_opc_upper(s, OPC_LUI, rd, tmp << 12);
        if (shift > 12) {
            tcg_out_opc_imm(s, OPC_SLLI, rd, rd, shift - 12);
        } else {
            tcg_out_opc_imm(s, OPC_SRAI, rd, rd, 12 - shift);
        }
        return;
    }

    /* Look for a few high zero bits, with lots of bits set in the middle. */
    shift = clz64(val);
    tmp = val << shift;
    if (tmp == sextreg(tmp, 12, 20) << 12) {
        tcg_out_opc_upper(s, OPC_LUI, rd, tmp);
        tcg_out_opc_imm(s, OPC_SRLI, rd, rd, shift);
        return;
    } else if (tmp == sextreg(tmp, 0, 12)) {
        tcg_out_opc_imm(s, OPC_ADDI, rd, TCG_REG_ZERO, tmp);
        tcg_out_opc_imm(s, OPC_SRLI, rd, rd, shift);
        return;
    }

    /* Drop into the constant pool. */
    new_pool_label(s, val, R_RISCV_CALL, s->code_ptr, 0);
    tcg_out_opc_upper(s, OPC_AUIPC, rd, 0);
    tcg_out_opc_imm(s, OPC_LD, rd, rd, 0);
}

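/*
 * Examples of the sequences selected above (added commentary):
 *   0x7ff        -> addi rd, zero, 0x7ff
 *   0x12345abc   -> lui rd with 0x12346000 ; addiw rd, rd, -0x544
 *   0x7ff8000000 -> lui rd with 0x00fff000 ; slli rd, rd, 15
 *                   (the single 20-bit section test, shift = ctz64 = 27)
 * Constants fitting none of the patterns fall through to auipc + ld
 * from the constant pool.
 */
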
static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
{
    return false;
}

static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                             tcg_target_long imm)
{
    /* This function is only used for passing structs by reference. */
    g_assert_not_reached();
}

static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_ANDI, ret, arg, 0xff);
}

static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    if (cpuinfo & CPUINFO_ZBB) {
        tcg_out_opc_reg(s, OPC_ZEXT_H, ret, arg, TCG_REG_ZERO);
    } else {
        tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 16);
        tcg_out_opc_imm(s, OPC_SRLIW, ret, ret, 16);
    }
}

static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    if (cpuinfo & CPUINFO_ZBA) {
        tcg_out_opc_reg(s, OPC_ADD_UW, ret, arg, TCG_REG_ZERO);
    } else {
        tcg_out_opc_imm(s, OPC_SLLI, ret, arg, 32);
        tcg_out_opc_imm(s, OPC_SRLI, ret, ret, 32);
    }
}

static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    if (cpuinfo & CPUINFO_ZBB) {
        tcg_out_opc_imm(s, OPC_SEXT_B, ret, arg, 0);
    } else {
        tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 24);
        tcg_out_opc_imm(s, OPC_SRAIW, ret, ret, 24);
    }
}

static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    if (cpuinfo & CPUINFO_ZBB) {
        tcg_out_opc_imm(s, OPC_SEXT_H, ret, arg, 0);
    } else {
        tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 16);
        tcg_out_opc_imm(s, OPC_SRAIW, ret, ret, 16);
    }
}

static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_ADDIW, ret, arg, 0);
}

static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_ext32s(s, ret, arg);
}

static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_ext32u(s, ret, arg);
}

static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_ext32s(s, ret, arg);
}

static void tcg_out_ldst(TCGContext *s, RISCVInsn opc, TCGReg data,
                         TCGReg addr, intptr_t offset)
{
    intptr_t imm12 = sextreg(offset, 0, 12);

    if (offset != imm12) {
        intptr_t diff = tcg_pcrel_diff(s, (void *)offset);

        if (addr == TCG_REG_ZERO && diff == (int32_t)diff) {
            imm12 = sextreg(diff, 0, 12);
            tcg_out_opc_upper(s, OPC_AUIPC, TCG_REG_TMP2, diff - imm12);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP2, offset - imm12);
            if (addr != TCG_REG_ZERO) {
                tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, addr);
            }
        }
        addr = TCG_REG_TMP2;
    }

    switch (opc) {
    case OPC_SB: case OPC_SH: case OPC_SW: case OPC_SD:
        tcg_out_opc_store(s, opc, addr, data, imm12);
        break;
    case OPC_LB: case OPC_LBU: case OPC_LH: case OPC_LHU:
    case OPC_LW: case OPC_LWU: case OPC_LD:
        tcg_out_opc_imm(s, opc, data, addr, imm12);
        break;
    default:
        g_assert_not_reached();
    }
}

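/*
 * Worked example (added commentary): for "ld rd, 0x12340(rs)" the offset
 * does not fit the signed 12-bit field (imm12 = 0x340 != offset), so the
 * code above materialises 0x12000 into TMP2, adds rs, and issues
 * "ld rd, 0x340(TMP2)".
 */
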
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_LW : OPC_LD;
    tcg_out_ldst(s, insn, arg, arg1, arg2);
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_SW : OPC_SD;
    tcg_out_ldst(s, insn, arg, arg1, arg2);
}

static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs)
{
    if (val == 0) {
        tcg_out_st(s, type, TCG_REG_ZERO, base, ofs);
        return true;
    }
    return false;
}

static void tcg_out_addsub2(TCGContext *s,
                            TCGReg rl, TCGReg rh,
                            TCGReg al, TCGReg ah,
                            TCGArg bl, TCGArg bh,
                            bool cbl, bool cbh, bool is_sub, bool is32bit)
{
    const RISCVInsn opc_add = is32bit ? OPC_ADDW : OPC_ADD;
    const RISCVInsn opc_addi = is32bit ? OPC_ADDIW : OPC_ADDI;
    const RISCVInsn opc_sub = is32bit ? OPC_SUBW : OPC_SUB;
    TCGReg th = TCG_REG_TMP1;

    /* If we have a negative constant such that negating it would
       make the high part zero, we can (usually) eliminate one insn. */
    if (cbl && cbh && bh == -1 && bl != 0) {
        bl = -bl;
        bh = 0;
        is_sub = !is_sub;
    }

    /* By operating on the high part first, we get to use the final
       carry operation to move back from the temporary. */
    if (!cbh) {
        tcg_out_opc_reg(s, (is_sub ? opc_sub : opc_add), th, ah, bh);
    } else if (bh != 0 || ah == rl) {
        tcg_out_opc_imm(s, opc_addi, th, ah, (is_sub ? -bh : bh));
    } else {
        th = ah;
    }

    /* Note that tcg optimization should eliminate the bl == 0 case. */
    if (is_sub) {
        if (cbl) {
            tcg_out_opc_imm(s, OPC_SLTIU, TCG_REG_TMP0, al, bl);
            tcg_out_opc_imm(s, opc_addi, rl, al, -bl);
        } else {
            tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_TMP0, al, bl);
            tcg_out_opc_reg(s, opc_sub, rl, al, bl);
        }
        tcg_out_opc_reg(s, opc_sub, rh, th, TCG_REG_TMP0);
    } else {
        if (cbl) {
            tcg_out_opc_imm(s, opc_addi, rl, al, bl);
            tcg_out_opc_imm(s, OPC_SLTIU, TCG_REG_TMP0, rl, bl);
        } else if (al == bl) {
            /*
             * If the input regs overlap, this is a simple doubling
             * and carry-out is the input msb.  This special case is
             * required when the output reg overlaps the input,
             * but we might as well use it always.
             */
            tcg_out_opc_imm(s, OPC_SLTI, TCG_REG_TMP0, al, 0);
            tcg_out_opc_reg(s, opc_add, rl, al, al);
        } else {
            tcg_out_opc_reg(s, opc_add, rl, al, bl);
            tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_TMP0,
                            rl, (rl == bl ? al : bl));
        }
        tcg_out_opc_reg(s, opc_add, rh, th, TCG_REG_TMP0);
    }
}

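/*
 * Note on the carry recovery above (added commentary): for an addition the
 * carry-out of the low half is (rl < bl) unsigned once rl = al + bl has
 * been formed, while for a subtraction the borrow is (al < bl) unsigned
 * before rl is written.  E.g. add2_i32 with al = 0xffffffff and bl = 1
 * yields rl = 0, so SLTU sets TMP0 = 1, which the final add folds into
 * the high half.
 */
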
static const struct {
    RISCVInsn op;
    bool swap;
} tcg_brcond_to_riscv[] = {
    [TCG_COND_EQ] =  { OPC_BEQ,  false },
    [TCG_COND_NE] =  { OPC_BNE,  false },
    [TCG_COND_LT] =  { OPC_BLT,  false },
    [TCG_COND_GE] =  { OPC_BGE,  false },
    [TCG_COND_LE] =  { OPC_BGE,  true  },
    [TCG_COND_GT] =  { OPC_BLT,  true  },
    [TCG_COND_LTU] = { OPC_BLTU, false },
    [TCG_COND_GEU] = { OPC_BGEU, false },
    [TCG_COND_LEU] = { OPC_BGEU, true  },
    [TCG_COND_GTU] = { OPC_BLTU, true  }
};

static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
                           TCGReg arg2, TCGLabel *l)
{
    RISCVInsn op = tcg_brcond_to_riscv[cond].op;

    tcg_debug_assert(op != 0);

    if (tcg_brcond_to_riscv[cond].swap) {
        TCGReg t = arg1;
        arg1 = arg2;
        arg2 = t;
    }

    tcg_out_reloc(s, s->code_ptr, R_RISCV_BRANCH, l, 0);
    tcg_out_opc_branch(s, op, arg1, arg2, 0);
}

#define SETCOND_INV    TCG_TARGET_NB_REGS
#define SETCOND_NEZ    (SETCOND_INV << 1)
#define SETCOND_FLAGS  (SETCOND_INV | SETCOND_NEZ)

static int tcg_out_setcond_int(TCGContext *s, TCGCond cond, TCGReg ret,
                               TCGReg arg1, tcg_target_long arg2, bool c2)
{
    int flags = 0;

    switch (cond) {
    case TCG_COND_EQ:    /* -> NE  */
    case TCG_COND_GE:    /* -> LT  */
    case TCG_COND_GEU:   /* -> LTU */
    case TCG_COND_GT:    /* -> LE  */
    case TCG_COND_GTU:   /* -> LEU */
        cond = tcg_invert_cond(cond);
        flags ^= SETCOND_INV;
        break;
    default:
        break;
    }

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_LEU:
        /*
         * If we have a constant input, the most efficient way to implement
         * LE is by adding 1 and using LT.  Watch out for wraparound for LEU.
         * We don't need to care about this for LE because the constant input
         * is constrained to signed 12-bit, and 0x800 is representable in the
         * temporary register.
         */
        if (c2) {
            if (cond == TCG_COND_LEU) {
                /* unsigned <= -1 is true */
                if (arg2 == -1) {
                    tcg_out_movi(s, TCG_TYPE_REG, ret, !(flags & SETCOND_INV));
                    return ret | flags;
                }
                cond = TCG_COND_LTU;
            } else {
                cond = TCG_COND_LT;
            }
            tcg_debug_assert(arg2 <= 0x7ff);
            if (++arg2 == 0x800) {
                tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_TMP0, arg2);
                arg2 = TCG_REG_TMP0;
                c2 = false;
            }
        } else {
            TCGReg tmp = arg2;
            arg2 = arg1;
            arg1 = tmp;
            cond = tcg_swap_cond(cond);    /* LE -> GE */
            cond = tcg_invert_cond(cond);  /* GE -> LT */
            flags ^= SETCOND_INV;
        }
        break;
    default:
        break;
    }

    switch (cond) {
    case TCG_COND_NE:
        flags |= SETCOND_NEZ;
        if (!c2) {
            tcg_out_opc_reg(s, OPC_XOR, ret, arg1, arg2);
        } else if (arg2 == 0) {
            ret = arg1;
        } else {
            tcg_out_opc_imm(s, OPC_XORI, ret, arg1, arg2);
        }
        break;
    case TCG_COND_LT:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SLTI, ret, arg1, arg2);
        } else {
            tcg_out_opc_reg(s, OPC_SLT, ret, arg1, arg2);
        }
        break;
    case TCG_COND_LTU:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SLTIU, ret, arg1, arg2);
        } else {
            tcg_out_opc_reg(s, OPC_SLTU, ret, arg1, arg2);
        }
        break;
    default:
        g_assert_not_reached();
    }

    return ret | flags;
}

static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
                            TCGReg arg1, tcg_target_long arg2, bool c2)
{
    int tmpflags = tcg_out_setcond_int(s, cond, ret, arg1, arg2, c2);

    if (tmpflags != ret) {
        TCGReg tmp = tmpflags & ~SETCOND_FLAGS;

        switch (tmpflags & SETCOND_FLAGS) {
        case SETCOND_INV:
            /* Intermediate result is boolean: simply invert. */
            tcg_out_opc_imm(s, OPC_XORI, ret, tmp, 1);
            break;
        case SETCOND_NEZ:
            /* Intermediate result is zero/non-zero: test != 0. */
            tcg_out_opc_reg(s, OPC_SLTU, ret, TCG_REG_ZERO, tmp);
            break;
        case SETCOND_NEZ | SETCOND_INV:
            /* Intermediate result is zero/non-zero: test == 0. */
            tcg_out_opc_imm(s, OPC_SLTIU, ret, tmp, 1);
            break;
        default:
            g_assert_not_reached();
        }
    }
}

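/*
 * Example of the flag encoding (added commentary): for "setcond eq, ret,
 * x, y", tcg_out_setcond_int() inverts EQ to NE, emits "xor ret, x, y"
 * and returns ret | SETCOND_NEZ | SETCOND_INV; the switch above then
 * finishes with "sltiu ret, ret, 1", mapping a zero xor result to 1.
 */
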
static void tcg_out_negsetcond(TCGContext *s, TCGCond cond, TCGReg ret,
                               TCGReg arg1, tcg_target_long arg2, bool c2)
{
    int tmpflags;
    TCGReg tmp;

    /* For LT/GE comparison against 0, replicate the sign bit. */
    if (c2 && arg2 == 0) {
        switch (cond) {
        case TCG_COND_GE:
            tcg_out_opc_imm(s, OPC_XORI, ret, arg1, -1);
            arg1 = ret;
            /* fall through */
        case TCG_COND_LT:
            tcg_out_opc_imm(s, OPC_SRAI, ret, arg1, TCG_TARGET_REG_BITS - 1);
            return;
        default:
            break;
        }
    }

    tmpflags = tcg_out_setcond_int(s, cond, ret, arg1, arg2, c2);
    tmp = tmpflags & ~SETCOND_FLAGS;

    /* If intermediate result is zero/non-zero: test != 0. */
    if (tmpflags & SETCOND_NEZ) {
        tcg_out_opc_reg(s, OPC_SLTU, ret, TCG_REG_ZERO, tmp);
        tmp = ret;
    }

    /* Produce the 0/-1 result. */
    if (tmpflags & SETCOND_INV) {
        tcg_out_opc_imm(s, OPC_ADDI, ret, tmp, -1);
    } else {
        tcg_out_opc_reg(s, OPC_SUB, ret, TCG_REG_ZERO, tmp);
    }
}

static void tcg_out_movcond_zicond(TCGContext *s, TCGReg ret, TCGReg test_ne,
                                   int val1, bool c_val1,
                                   int val2, bool c_val2)
{
    if (val1 == 0) {
        if (c_val2) {
            tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_TMP1, val2);
            val2 = TCG_REG_TMP1;
        }
        tcg_out_opc_reg(s, OPC_CZERO_NEZ, ret, val2, test_ne);
        return;
    }

    if (val2 == 0) {
        if (c_val1) {
            tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_TMP1, val1);
            val1 = TCG_REG_TMP1;
        }
        tcg_out_opc_reg(s, OPC_CZERO_EQZ, ret, val1, test_ne);
        return;
    }

    if (c_val2) {
        if (c_val1) {
            tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_TMP1, val1 - val2);
        } else {
            tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_TMP1, val1, -val2);
        }
        tcg_out_opc_reg(s, OPC_CZERO_EQZ, ret, TCG_REG_TMP1, test_ne);
        tcg_out_opc_imm(s, OPC_ADDI, ret, ret, val2);
        return;
    }

    if (c_val1) {
        tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_TMP1, val2, -val1);
        tcg_out_opc_reg(s, OPC_CZERO_NEZ, ret, TCG_REG_TMP1, test_ne);
        tcg_out_opc_imm(s, OPC_ADDI, ret, ret, val1);
        return;
    }

    tcg_out_opc_reg(s, OPC_CZERO_NEZ, TCG_REG_TMP1, val2, test_ne);
    tcg_out_opc_reg(s, OPC_CZERO_EQZ, TCG_REG_TMP0, val1, test_ne);
    tcg_out_opc_reg(s, OPC_OR, ret, TCG_REG_TMP0, TCG_REG_TMP1);
}

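/*
 * Note (added commentary): Zicond defines czero.eqz rd, rs, rc as
 * (rc != 0 ? rs : 0) and czero.nez rd, rs, rc as (rc != 0 ? 0 : rs).
 * The register/register tail above therefore computes
 *     ret = (test_ne ? val1 : 0) | (test_ne ? 0 : val2)
 * which is a branchless select in three instructions.
 */
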
static void tcg_out_movcond_br1(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg cmp1, TCGReg cmp2,
                                int val, bool c_val)
{
    RISCVInsn op;
    int disp = 8;

    tcg_debug_assert((unsigned)cond < ARRAY_SIZE(tcg_brcond_to_riscv));
    op = tcg_brcond_to_riscv[cond].op;
    tcg_debug_assert(op != 0);

    if (tcg_brcond_to_riscv[cond].swap) {
        tcg_out_opc_branch(s, op, cmp2, cmp1, disp);
    } else {
        tcg_out_opc_branch(s, op, cmp1, cmp2, disp);
    }
    if (c_val) {
        tcg_out_opc_imm(s, OPC_ADDI, ret, TCG_REG_ZERO, val);
    } else {
        tcg_out_opc_imm(s, OPC_ADDI, ret, val, 0);
    }
}

static void tcg_out_movcond_br2(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg cmp1, TCGReg cmp2,
                                int val1, bool c_val1,
                                int val2, bool c_val2)
{
    TCGReg tmp;

    /* TCG optimizer reorders to prefer ret matching val2. */
    if (!c_val2 && ret == val2) {
        cond = tcg_invert_cond(cond);
        tcg_out_movcond_br1(s, cond, ret, cmp1, cmp2, val1, c_val1);
        return;
    }

    if (!c_val1 && ret == val1) {
        tcg_out_movcond_br1(s, cond, ret, cmp1, cmp2, val2, c_val2);
        return;
    }

    tmp = (ret == cmp1 || ret == cmp2 ? TCG_REG_TMP1 : ret);
    if (c_val1) {
        tcg_out_movi(s, TCG_TYPE_REG, tmp, val1);
    } else {
        tcg_out_mov(s, TCG_TYPE_REG, tmp, val1);
    }
    tcg_out_movcond_br1(s, cond, tmp, cmp1, cmp2, val2, c_val2);
    tcg_out_mov(s, TCG_TYPE_REG, ret, tmp);
}

static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret,
                            TCGReg cmp1, int cmp2, bool c_cmp2,
                            TCGReg val1, bool c_val1,
                            TCGReg val2, bool c_val2)
{
    int tmpflags;
    TCGReg t;

    if (!(cpuinfo & CPUINFO_ZICOND) && (!c_cmp2 || cmp2 == 0)) {
        tcg_out_movcond_br2(s, cond, ret, cmp1, cmp2,
                            val1, c_val1, val2, c_val2);
        return;
    }

    tmpflags = tcg_out_setcond_int(s, cond, TCG_REG_TMP0, cmp1, cmp2, c_cmp2);
    t = tmpflags & ~SETCOND_FLAGS;

    if (cpuinfo & CPUINFO_ZICOND) {
        if (tmpflags & SETCOND_INV) {
            tcg_out_movcond_zicond(s, ret, t, val2, c_val2, val1, c_val1);
        } else {
            tcg_out_movcond_zicond(s, ret, t, val1, c_val1, val2, c_val2);
        }
    } else {
        cond = tmpflags & SETCOND_INV ? TCG_COND_EQ : TCG_COND_NE;
        tcg_out_movcond_br2(s, cond, ret, t, TCG_REG_ZERO,
                            val1, c_val1, val2, c_val2);
    }
}

static void tcg_out_cltz(TCGContext *s, TCGType type, RISCVInsn insn,
                         TCGReg ret, TCGReg src1, int src2, bool c_src2)
{
    tcg_out_opc_imm(s, insn, ret, src1, 0);

    if (!c_src2 || src2 != (type == TCG_TYPE_I32 ? 32 : 64)) {
        /*
         * The requested zero result does not match the insn, so adjust.
         * Note that constraints put 'ret' in a new register, so the
         * computation above did not clobber either 'src1' or 'src2'.
         */
        tcg_out_movcond(s, TCG_COND_EQ, ret, src1, 0, true,
                        src2, c_src2, ret, false);
    }
}

static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail)
{
    TCGReg link = tail ? TCG_REG_ZERO : TCG_REG_RA;
    ptrdiff_t offset = tcg_pcrel_diff(s, arg);
    int ret;

    tcg_debug_assert((offset & 1) == 0);
    if (offset == sextreg(offset, 0, 20)) {
        /* short jump: offset fits the 20-bit signed check (+-512 KiB) */
        tcg_out_opc_jump(s, OPC_JAL, link, offset);
    } else if (offset == (int32_t)offset) {
        /* long jump: offset fits a 32-bit pc-relative displacement */
        tcg_out_opc_upper(s, OPC_AUIPC, TCG_REG_TMP0, 0);
        tcg_out_opc_imm(s, OPC_JALR, link, TCG_REG_TMP0, 0);
        ret = reloc_call(s->code_ptr - 2, arg);
        tcg_debug_assert(ret == true);
    } else {
        /* far jump: materialize the full 64-bit address */
        tcg_target_long imm = sextreg((tcg_target_long)arg, 0, 12);
        tcg_target_long base = (tcg_target_long)arg - imm;
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, base);
        tcg_out_opc_imm(s, OPC_JALR, link, TCG_REG_TMP0, imm);
    }
}

static void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg,
                         const TCGHelperInfo *info)
{
    tcg_out_call_int(s, arg, false);
}

static void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    tcg_insn_unit insn = OPC_FENCE;

    if (a0 & TCG_MO_LD_LD) {
        insn |= 0x02200000;
    }
    if (a0 & TCG_MO_ST_LD) {
        insn |= 0x01200000;
    }
    if (a0 & TCG_MO_LD_ST) {
        insn |= 0x02100000;
    }
    if (a0 & TCG_MO_ST_ST) {
        insn |= 0x01100000;
    }
    tcg_out32(s, insn);
}

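/*
 * Note (added commentary): in the FENCE encoding the predecessor R/W bits
 * are insn[25:24] and the successor R/W bits are insn[21:20], so
 * TCG_MO_LD_LD sets PR|SR == 0x02200000 and TCG_MO_ST_ST sets
 * PW|SW == 0x01100000; a full TCG_MO_ALL barrier degenerates to
 * "fence rw,rw".
 */
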
/*
 * Load/store and TLB
 */

static void tcg_out_goto(TCGContext *s, const tcg_insn_unit *target)
{
    tcg_out_opc_jump(s, OPC_JAL, TCG_REG_ZERO, 0);
    bool ok = reloc_jimm20(s->code_ptr - 1, target);
    tcg_debug_assert(ok);
}

bool tcg_target_has_memory_bswap(MemOp memop)
{
    return false;
}

/* We have three temps, we might as well expose them. */
static const TCGLdstHelperParam ldst_helper_param = {
    .ntmp = 3, .tmp = { TCG_REG_TMP0, TCG_REG_TMP1, TCG_REG_TMP2 }
};

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    MemOp opc = get_memop(l->oi);

    /* resolve label address */
    if (!reloc_sbimm12(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    /* call load helper */
    tcg_out_ld_helper_args(s, l, &ldst_helper_param);
    tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SSIZE], false);
    tcg_out_ld_helper_ret(s, l, true, &ldst_helper_param);

    tcg_out_goto(s, l->raddr);
    return true;
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    MemOp opc = get_memop(l->oi);

    /* resolve label address */
    if (!reloc_sbimm12(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    /* call store helper */
    tcg_out_st_helper_args(s, l, &ldst_helper_param);
    tcg_out_call_int(s, qemu_st_helpers[opc & MO_SIZE], false);

    tcg_out_goto(s, l->raddr);
    return true;
}

/* We expect to use a 12-bit negative offset from ENV. */
#define MIN_TLB_MASK_TABLE_OFS  -(1 << 11)

/*
 * For system-mode, perform the TLB load and compare.
 * For user-mode, perform any required alignment tests.
 * In both cases, return a TCGLabelQemuLdst structure if the slow path
 * is required and fill in @pbase with the host address for the fast path.
 */
static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, TCGReg *pbase,
                                           TCGReg addr_reg, MemOpIdx oi,
                                           bool is_ld)
{
    TCGType addr_type = s->addr_type;
    TCGLabelQemuLdst *ldst = NULL;
    MemOp opc = get_memop(oi);
    TCGAtomAlign aa;
    unsigned a_mask;

    aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false);
    a_mask = (1u << aa.align) - 1;

    if (tcg_use_softmmu) {
        unsigned s_bits = opc & MO_SIZE;
        unsigned s_mask = (1u << s_bits) - 1;
        int mem_index = get_mmuidx(oi);
        int fast_ofs = tlb_mask_table_ofs(s, mem_index);
        int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask);
        int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table);
        int compare_mask;
        TCGReg addr_adj;

        ldst = new_ldst_label(s);
        ldst->is_ld = is_ld;
        ldst->oi = oi;
        ldst->addrlo_reg = addr_reg;

        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs);
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs);

        tcg_out_opc_imm(s, OPC_SRLI, TCG_REG_TMP2, addr_reg,
                        s->page_bits - CPU_TLB_ENTRY_BITS);
        tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
        tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);

        /*
         * For aligned accesses, we check the first byte and include the
         * alignment bits within the address.  For unaligned access, we
         * check that we don't cross pages using the address of the last
         * byte of the access.
         */
        addr_adj = addr_reg;
        if (a_mask < s_mask) {
            addr_adj = TCG_REG_TMP0;
            tcg_out_opc_imm(s, addr_type == TCG_TYPE_I32 ? OPC_ADDIW : OPC_ADDI,
                            addr_adj, addr_reg, s_mask - a_mask);
        }
        compare_mask = s->page_mask | a_mask;
        if (compare_mask == sextreg(compare_mask, 0, 12)) {
            tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_adj, compare_mask);
        } else {
            tcg_out_movi(s, addr_type, TCG_REG_TMP1, compare_mask);
            tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP1, TCG_REG_TMP1, addr_adj);
        }

        /* Load the tlb comparator and the addend. */
        QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN);
        tcg_out_ld(s, addr_type, TCG_REG_TMP0, TCG_REG_TMP2,
                   is_ld ? offsetof(CPUTLBEntry, addr_read)
                         : offsetof(CPUTLBEntry, addr_write));
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2,
                   offsetof(CPUTLBEntry, addend));

        /* Compare masked address with the TLB entry. */
        ldst->label_ptr[0] = s->code_ptr;
        tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP0, TCG_REG_TMP1, 0);

        /* TLB Hit - translate address using addend. */
        if (addr_type != TCG_TYPE_I32) {
            tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, addr_reg, TCG_REG_TMP2);
        } else if (cpuinfo & CPUINFO_ZBA) {
            tcg_out_opc_reg(s, OPC_ADD_UW, TCG_REG_TMP0,
                            addr_reg, TCG_REG_TMP2);
        } else {
            tcg_out_ext32u(s, TCG_REG_TMP0, addr_reg);
            tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0,
                            TCG_REG_TMP0, TCG_REG_TMP2);
        }
        *pbase = TCG_REG_TMP0;
    } else {
        TCGReg base;

        if (a_mask) {
            ldst = new_ldst_label(s);
            ldst->is_ld = is_ld;
            ldst->oi = oi;
            ldst->addrlo_reg = addr_reg;

            /* We are expecting alignment max 7, so we can always use andi. */
            tcg_debug_assert(a_mask == sextreg(a_mask, 0, 12));
            tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_reg, a_mask);

            ldst->label_ptr[0] = s->code_ptr;
            tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP1, TCG_REG_ZERO, 0);
        }

        if (guest_base != 0) {
            base = TCG_REG_TMP0;
            if (addr_type != TCG_TYPE_I32) {
                tcg_out_opc_reg(s, OPC_ADD, base, addr_reg,
                                TCG_GUEST_BASE_REG);
            } else if (cpuinfo & CPUINFO_ZBA) {
                tcg_out_opc_reg(s, OPC_ADD_UW, base, addr_reg,
                                TCG_GUEST_BASE_REG);
            } else {
                tcg_out_ext32u(s, base, addr_reg);
                tcg_out_opc_reg(s, OPC_ADD, base, base, TCG_GUEST_BASE_REG);
            }
        } else if (addr_type != TCG_TYPE_I32) {
            base = addr_reg;
        } else {
            base = TCG_REG_TMP0;
            tcg_out_ext32u(s, base, addr_reg);
        }
        *pbase = base;
    }

    return ldst;
}

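/*
 * Sketch of the softmmu fast path emitted above, for a 64-bit guest
 * address in 'addr' (added commentary):
 *     ld    tmp0, mask_ofs(env)
 *     ld    tmp1, table_ofs(env)
 *     srli  tmp2, addr, page_bits - CPU_TLB_ENTRY_BITS
 *     and   tmp2, tmp2, tmp0
 *     add   tmp2, tmp2, tmp1          # &tlb entry
 *     andi  tmp1, addr, page_mask|a_mask
 *     ld    tmp0, addr_read(tmp2)     # or addr_write
 *     ld    tmp2, addend(tmp2)
 *     bne   tmp0, tmp1, slow_path
 *     add   tmp0, addr, tmp2          # host address
 */
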
static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg val,
                                   TCGReg base, MemOp opc, TCGType type)
{
    /* Byte swapping is left to middle-end expansion. */
    tcg_debug_assert((opc & MO_BSWAP) == 0);

    switch (opc & (MO_SSIZE)) {
    case MO_UB:
        tcg_out_opc_imm(s, OPC_LBU, val, base, 0);
        break;
    case MO_SB:
        tcg_out_opc_imm(s, OPC_LB, val, base, 0);
        break;
    case MO_UW:
        tcg_out_opc_imm(s, OPC_LHU, val, base, 0);
        break;
    case MO_SW:
        tcg_out_opc_imm(s, OPC_LH, val, base, 0);
        break;
    case MO_UL:
        if (type == TCG_TYPE_I64) {
            tcg_out_opc_imm(s, OPC_LWU, val, base, 0);
            break;
        }
        /* fall through */
    case MO_SL:
        tcg_out_opc_imm(s, OPC_LW, val, base, 0);
        break;
    case MO_UQ:
        tcg_out_opc_imm(s, OPC_LD, val, base, 0);
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
                            MemOpIdx oi, TCGType data_type)
{
    TCGLabelQemuLdst *ldst;
    TCGReg base;

    ldst = prepare_host_addr(s, &base, addr_reg, oi, true);
    tcg_out_qemu_ld_direct(s, data_reg, base, get_memop(oi), data_type);

    if (ldst) {
        ldst->type = data_type;
        ldst->datalo_reg = data_reg;
        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
    }
}

static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg val,
                                   TCGReg base, MemOp opc)
{
    /* Byte swapping is left to middle-end expansion. */
    tcg_debug_assert((opc & MO_BSWAP) == 0);

    switch (opc & (MO_SSIZE)) {
    case MO_8:
        tcg_out_opc_store(s, OPC_SB, base, val, 0);
        break;
    case MO_16:
        tcg_out_opc_store(s, OPC_SH, base, val, 0);
        break;
    case MO_32:
        tcg_out_opc_store(s, OPC_SW, base, val, 0);
        break;
    case MO_64:
        tcg_out_opc_store(s, OPC_SD, base, val, 0);
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
                            MemOpIdx oi, TCGType data_type)
{
    TCGLabelQemuLdst *ldst;
    TCGReg base;

    ldst = prepare_host_addr(s, &base, addr_reg, oi, false);
    tcg_out_qemu_st_direct(s, data_reg, base, get_memop(oi));

    if (ldst) {
        ldst->type = data_type;
        ldst->datalo_reg = data_reg;
        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
    }
}

static const tcg_insn_unit *tb_ret_addr;

static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
{
    /* Reuse the zeroing that exists for goto_ptr. */
    if (a0 == 0) {
        tcg_out_call_int(s, tcg_code_gen_epilogue, true);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A0, a0);
        tcg_out_call_int(s, tb_ret_addr, true);
    }
}

static void tcg_out_goto_tb(TCGContext *s, int which)
{
    /* Direct branch will be patched by tb_target_set_jmp_target. */
    set_jmp_insn_offset(s, which);
    tcg_out32(s, OPC_JAL);

    /* When branch is out of range, fall through to indirect. */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_REG_ZERO,
               get_jmp_target_addr(s, which));
    tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, TCG_REG_TMP0, 0);
    set_jmp_reset_offset(s, which);
}

void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                              uintptr_t jmp_rx, uintptr_t jmp_rw)
{
    uintptr_t addr = tb->jmp_target_addr[n];
    ptrdiff_t offset = addr - jmp_rx;
    tcg_insn_unit insn;

    /* Either directly branch, or fall through to indirect branch. */
    if (offset == sextreg(offset, 0, 20)) {
        insn = encode_uj(OPC_JAL, TCG_REG_ZERO, offset);
    } else {
        insn = OPC_NOP;
    }
    qatomic_set((uint32_t *)jmp_rw, insn);
    flush_idcache_range(jmp_rx, jmp_rw, 4);
}

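/*
 * Note (added commentary): tcg_out_goto_tb() reserves exactly one word for
 * the direct branch, so patching atomically swaps that word between
 * "jal zero, disp" (when the target lies inside the +-512 KiB window the
 * sextreg() guard accepts) and "nop", in which case execution falls
 * through to the indirect ld/jalr pair.
 */
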
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    TCGArg a0 = args[0];
    TCGArg a1 = args[1];
    TCGArg a2 = args[2];
    int c2 = const_args[2];

    switch (opc) {
    case INDEX_op_goto_ptr:
        tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, a0, 0);
        break;

    case INDEX_op_br:
        tcg_out_reloc(s, s->code_ptr, R_RISCV_JAL, arg_label(a0), 0);
        tcg_out_opc_jump(s, OPC_JAL, TCG_REG_ZERO, 0);
        break;

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
        tcg_out_ldst(s, OPC_LBU, a0, a1, a2);
        break;
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
        tcg_out_ldst(s, OPC_LB, a0, a1, a2);
        break;
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
        tcg_out_ldst(s, OPC_LHU, a0, a1, a2);
        break;
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
        tcg_out_ldst(s, OPC_LH, a0, a1, a2);
        break;
    case INDEX_op_ld32u_i64:
        tcg_out_ldst(s, OPC_LWU, a0, a1, a2);
        break;
    case INDEX_op_ld_i32:
    case INDEX_op_ld32s_i64:
        tcg_out_ldst(s, OPC_LW, a0, a1, a2);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ldst(s, OPC_LD, a0, a1, a2);
        break;

    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
        tcg_out_ldst(s, OPC_SB, a0, a1, a2);
        break;
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
        tcg_out_ldst(s, OPC_SH, a0, a1, a2);
        break;
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
        tcg_out_ldst(s, OPC_SW, a0, a1, a2);
        break;
    case INDEX_op_st_i64:
        tcg_out_ldst(s, OPC_SD, a0, a1, a2);
        break;
    case INDEX_op_add_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ADDIW, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_ADDW, a0, a1, a2);
        }
        break;
    case INDEX_op_add_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ADDI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_ADD, a0, a1, a2);
        }
        break;

    case INDEX_op_sub_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ADDIW, a0, a1, -a2);
        } else {
            tcg_out_opc_reg(s, OPC_SUBW, a0, a1, a2);
        }
        break;
    case INDEX_op_sub_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ADDI, a0, a1, -a2);
        } else {
            tcg_out_opc_reg(s, OPC_SUB, a0, a1, a2);
        }
        break;

    case INDEX_op_and_i32:
    case INDEX_op_and_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ANDI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_AND, a0, a1, a2);
        }
        break;

    case INDEX_op_or_i32:
    case INDEX_op_or_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ORI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_OR, a0, a1, a2);
        }
        break;

    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_XORI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_XOR, a0, a1, a2);
        }
        break;

    case INDEX_op_andc_i32:
    case INDEX_op_andc_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ANDI, a0, a1, ~a2);
        } else {
            tcg_out_opc_reg(s, OPC_ANDN, a0, a1, a2);
        }
        break;
    case INDEX_op_orc_i32:
    case INDEX_op_orc_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ORI, a0, a1, ~a2);
        } else {
            tcg_out_opc_reg(s, OPC_ORN, a0, a1, a2);
        }
        break;
    case INDEX_op_eqv_i32:
    case INDEX_op_eqv_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_XORI, a0, a1, ~a2);
        } else {
            tcg_out_opc_reg(s, OPC_XNOR, a0, a1, a2);
        }
        break;

    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
        tcg_out_opc_imm(s, OPC_XORI, a0, a1, -1);
        break;

    case INDEX_op_neg_i32:
        tcg_out_opc_reg(s, OPC_SUBW, a0, TCG_REG_ZERO, a1);
        break;
    case INDEX_op_neg_i64:
        tcg_out_opc_reg(s, OPC_SUB, a0, TCG_REG_ZERO, a1);
        break;
    case INDEX_op_mul_i32:
        tcg_out_opc_reg(s, OPC_MULW, a0, a1, a2);
        break;
    case INDEX_op_mul_i64:
        tcg_out_opc_reg(s, OPC_MUL, a0, a1, a2);
        break;

    case INDEX_op_div_i32:
        tcg_out_opc_reg(s, OPC_DIVW, a0, a1, a2);
        break;
    case INDEX_op_div_i64:
        tcg_out_opc_reg(s, OPC_DIV, a0, a1, a2);
        break;

    case INDEX_op_divu_i32:
        tcg_out_opc_reg(s, OPC_DIVUW, a0, a1, a2);
        break;
    case INDEX_op_divu_i64:
        tcg_out_opc_reg(s, OPC_DIVU, a0, a1, a2);
        break;

    case INDEX_op_rem_i32:
        tcg_out_opc_reg(s, OPC_REMW, a0, a1, a2);
        break;
    case INDEX_op_rem_i64:
        tcg_out_opc_reg(s, OPC_REM, a0, a1, a2);
        break;

    case INDEX_op_remu_i32:
        tcg_out_opc_reg(s, OPC_REMUW, a0, a1, a2);
        break;
    case INDEX_op_remu_i64:
        tcg_out_opc_reg(s, OPC_REMU, a0, a1, a2);
        break;
    case INDEX_op_shl_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SLLIW, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_reg(s, OPC_SLLW, a0, a1, a2);
        }
        break;
    case INDEX_op_shl_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SLLI, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_reg(s, OPC_SLL, a0, a1, a2);
        }
        break;

    case INDEX_op_shr_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SRLIW, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_reg(s, OPC_SRLW, a0, a1, a2);
        }
        break;
    case INDEX_op_shr_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SRLI, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_reg(s, OPC_SRL, a0, a1, a2);
        }
        break;

    case INDEX_op_sar_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SRAIW, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_reg(s, OPC_SRAW, a0, a1, a2);
        }
        break;
    case INDEX_op_sar_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SRAI, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_reg(s, OPC_SRA, a0, a1, a2);
        }
        break;

    case INDEX_op_rotl_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_RORIW, a0, a1, -a2 & 0x1f);
        } else {
            tcg_out_opc_reg(s, OPC_ROLW, a0, a1, a2);
        }
        break;
    case INDEX_op_rotl_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_RORI, a0, a1, -a2 & 0x3f);
        } else {
            tcg_out_opc_reg(s, OPC_ROL, a0, a1, a2);
        }
        break;

    case INDEX_op_rotr_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_RORIW, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_reg(s, OPC_RORW, a0, a1, a2);
        }
        break;
    case INDEX_op_rotr_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_RORI, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_reg(s, OPC_ROR, a0, a1, a2);
        }
        break;
    case INDEX_op_bswap64_i64:
        tcg_out_opc_imm(s, OPC_REV8, a0, a1, 0);
        break;
    case INDEX_op_bswap32_i32:
        a2 = 0;
        /* fall through */
    case INDEX_op_bswap32_i64:
        tcg_out_opc_imm(s, OPC_REV8, a0, a1, 0);
        if (a2 & TCG_BSWAP_OZ) {
            tcg_out_opc_imm(s, OPC_SRLI, a0, a0, 32);
        } else {
            tcg_out_opc_imm(s, OPC_SRAI, a0, a0, 32);
        }
        break;
    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap16_i32:
        tcg_out_opc_imm(s, OPC_REV8, a0, a1, 0);
        if (a2 & TCG_BSWAP_OZ) {
            tcg_out_opc_imm(s, OPC_SRLI, a0, a0, 48);
        } else {
            tcg_out_opc_imm(s, OPC_SRAI, a0, a0, 48);
        }
        break;

    case INDEX_op_ctpop_i32:
        tcg_out_opc_imm(s, OPC_CPOPW, a0, a1, 0);
        break;
    case INDEX_op_ctpop_i64:
        tcg_out_opc_imm(s, OPC_CPOP, a0, a1, 0);
        break;

    case INDEX_op_clz_i32:
        tcg_out_cltz(s, TCG_TYPE_I32, OPC_CLZW, a0, a1, a2, c2);
        break;
    case INDEX_op_clz_i64:
        tcg_out_cltz(s, TCG_TYPE_I64, OPC_CLZ, a0, a1, a2, c2);
        break;
    case INDEX_op_ctz_i32:
        tcg_out_cltz(s, TCG_TYPE_I32, OPC_CTZW, a0, a1, a2, c2);
        break;
    case INDEX_op_ctz_i64:
        tcg_out_cltz(s, TCG_TYPE_I64, OPC_CTZ, a0, a1, a2, c2);
        break;
    case INDEX_op_add2_i32:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                        const_args[4], const_args[5], false, true);
        break;
    case INDEX_op_add2_i64:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                        const_args[4], const_args[5], false, false);
        break;
    case INDEX_op_sub2_i32:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                        const_args[4], const_args[5], true, true);
        break;
    case INDEX_op_sub2_i64:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                        const_args[4], const_args[5], true, false);
        break;

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        tcg_out_brcond(s, a2, a0, a1, arg_label(args[3]));
        break;

    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
        tcg_out_setcond(s, args[3], a0, a1, a2, c2);
        break;

    case INDEX_op_negsetcond_i32:
    case INDEX_op_negsetcond_i64:
        tcg_out_negsetcond(s, args[3], a0, a1, a2, c2);
        break;

    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
        tcg_out_movcond(s, args[5], a0, a1, a2, c2,
                        args[3], const_args[3], args[4], const_args[4]);
        break;
    case INDEX_op_qemu_ld_a32_i32:
    case INDEX_op_qemu_ld_a64_i32:
        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32);
        break;
    case INDEX_op_qemu_ld_a32_i64:
    case INDEX_op_qemu_ld_a64_i64:
        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64);
        break;
    case INDEX_op_qemu_st_a32_i32:
    case INDEX_op_qemu_st_a64_i32:
        tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32);
        break;
    case INDEX_op_qemu_st_a32_i64:
    case INDEX_op_qemu_st_a64_i64:
        tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64);
        break;

    case INDEX_op_extrh_i64_i32:
        tcg_out_opc_imm(s, OPC_SRAI, a0, a1, 32);
        break;

    case INDEX_op_mulsh_i32:
    case INDEX_op_mulsh_i64:
        tcg_out_opc_reg(s, OPC_MULH, a0, a1, a2);
        break;

    case INDEX_op_muluh_i32:
    case INDEX_op_muluh_i64:
        tcg_out_opc_reg(s, OPC_MULHU, a0, a1, a2);
        break;

    case INDEX_op_mb:
        tcg_out_mb(s, a0);
        break;
    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov. */
    case INDEX_op_mov_i64:
    case INDEX_op_call:     /* Always emitted via tcg_out_call. */
    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb. */
    case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb. */
    case INDEX_op_ext8s_i32:  /* Always emitted via tcg_reg_alloc_op. */
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext16u_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    default:
        g_assert_not_reached();
    }
}

static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
{
    switch (op) {
    case INDEX_op_goto_ptr:
        return C_O0_I1(r);

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_not_i32:
    case INDEX_op_neg_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_not_i64:
    case INDEX_op_neg_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext16u_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_extrh_i64_i32:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
    case INDEX_op_ctpop_i32:
    case INDEX_op_ctpop_i64:
        return C_O1_I1(r, r);

    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
        return C_O0_I2(rZ, r);
    case INDEX_op_add_i32:
    case INDEX_op_and_i32:
    case INDEX_op_or_i32:
    case INDEX_op_xor_i32:
    case INDEX_op_add_i64:
    case INDEX_op_and_i64:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i64:
    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
    case INDEX_op_negsetcond_i32:
    case INDEX_op_negsetcond_i64:
        return C_O1_I2(r, r, rI);

    case INDEX_op_andc_i32:
    case INDEX_op_andc_i64:
    case INDEX_op_orc_i32:
    case INDEX_op_orc_i64:
    case INDEX_op_eqv_i32:
    case INDEX_op_eqv_i64:
        return C_O1_I2(r, r, rJ);

    case INDEX_op_sub_i32:
    case INDEX_op_sub_i64:
        return C_O1_I2(r, rZ, rN);

    case INDEX_op_mul_i32:
    case INDEX_op_mulsh_i32:
    case INDEX_op_muluh_i32:
    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
    case INDEX_op_rem_i32:
    case INDEX_op_remu_i32:
    case INDEX_op_mul_i64:
    case INDEX_op_mulsh_i64:
    case INDEX_op_muluh_i64:
    case INDEX_op_div_i64:
    case INDEX_op_divu_i64:
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i64:
        return C_O1_I2(r, rZ, rZ);
    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
    case INDEX_op_rotl_i32:
    case INDEX_op_rotr_i32:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i64:
        return C_O1_I2(r, r, ri);

    case INDEX_op_clz_i32:
    case INDEX_op_clz_i64:
    case INDEX_op_ctz_i32:
    case INDEX_op_ctz_i64:
        return C_N1_I2(r, r, rM);

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        return C_O0_I2(rZ, rZ);

    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
        return C_O1_I4(r, r, rI, rM, rM);

    case INDEX_op_add2_i32:
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i32:
    case INDEX_op_sub2_i64:
        return C_O2_I4(r, r, rZ, rZ, rM, rM);

    case INDEX_op_qemu_ld_a32_i32:
    case INDEX_op_qemu_ld_a64_i32:
    case INDEX_op_qemu_ld_a32_i64:
    case INDEX_op_qemu_ld_a64_i64:
        return C_O1_I1(r, r);
    case INDEX_op_qemu_st_a32_i32:
    case INDEX_op_qemu_st_a64_i32:
    case INDEX_op_qemu_st_a32_i64:
    case INDEX_op_qemu_st_a64_i64:
        return C_O0_I2(rZ, r);

    default:
        g_assert_not_reached();
    }
}

static const int tcg_target_callee_save_regs[] = {
    TCG_REG_S0,       /* used for the global env (TCG_AREG0) */
    TCG_REG_S1,  TCG_REG_S2,  TCG_REG_S3,  TCG_REG_S4,
    TCG_REG_S5,  TCG_REG_S6,  TCG_REG_S7,  TCG_REG_S8,
    TCG_REG_S9,  TCG_REG_S10, TCG_REG_S11,
    TCG_REG_RA,       /* should be last for ABI compliance */
};

/* Stack frame parameters. */
#define REG_SIZE   (TCG_TARGET_REG_BITS / 8)
#define SAVE_SIZE  ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * REG_SIZE)
#define TEMP_SIZE  (CPU_TEMP_BUF_NLONGS * (int)sizeof(long))
#define FRAME_SIZE ((TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE + SAVE_SIZE \
                     + TCG_TARGET_STACK_ALIGN - 1) \
                    & -TCG_TARGET_STACK_ALIGN)
#define SAVE_OFS   (TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE)

/* We're expecting to be able to use an immediate for frame allocation. */
QEMU_BUILD_BUG_ON(FRAME_SIZE > 0x7ff);

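/*
 * Worked example (added commentary, assuming QEMU's usual values of
 * TCG_STATIC_CALL_ARGS_SIZE = 128 and CPU_TEMP_BUF_NLONGS = 128):
 * TEMP_SIZE = 1024 and SAVE_SIZE = 13 * 8 = 104, so FRAME_SIZE rounds
 * 128 + 1024 + 104 = 1256 up to 1264 under 16-byte stack alignment,
 * comfortably below the 0x7ff ADDI limit checked above.
 */
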
/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int i;

    tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE, TEMP_SIZE);

    /* TB prologue */
    tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_SP, TCG_REG_SP, -FRAME_SIZE);
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
    }

    if (!tcg_use_softmmu && guest_base) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }

    /* Call generated code */
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
    tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, tcg_target_call_iarg_regs[1], 0);

    /* Return path for goto_ptr. Set return value to 0 */
    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
    tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_A0, TCG_REG_ZERO);

    /* TB epilogue */
    tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr);
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
    }

    tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE);
    tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, TCG_REG_RA, 0);
}

static void tcg_out_tb_start(TCGContext *s)
{
    /* nothing to do */
}

static void tcg_target_init(TCGContext *s)
{
    tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff;
    tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff;

    tcg_target_call_clobber_regs = -1u;
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S0);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S1);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S2);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S3);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S4);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S5);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S6);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S7);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S8);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S9);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S10);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S11);

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP0);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_GP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TP);
}

typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2];
} DebugFrame;

#define ELF_HOST_MACHINE EM_RISCV

static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = -(TCG_TARGET_REG_BITS / 8) & 0x7f, /* sleb128 */
    .h.cie.return_column = TCG_REG_RA,

    /* Total FDE size does not include the "len" member. */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
        12, TCG_REG_SP,                 /* DW_CFA_def_cfa sp, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde_reg_ofs = {
        0x80 + 9,  12,                  /* DW_CFA_offset, s1,  -96 */
        0x80 + 18, 11,                  /* DW_CFA_offset, s2,  -88 */
        0x80 + 19, 10,                  /* DW_CFA_offset, s3,  -80 */
        0x80 + 20, 9,                   /* DW_CFA_offset, s4,  -72 */
        0x80 + 21, 8,                   /* DW_CFA_offset, s5,  -64 */
        0x80 + 22, 7,                   /* DW_CFA_offset, s6,  -56 */
        0x80 + 23, 6,                   /* DW_CFA_offset, s7,  -48 */
        0x80 + 24, 5,                   /* DW_CFA_offset, s8,  -40 */
        0x80 + 25, 4,                   /* DW_CFA_offset, s9,  -32 */
        0x80 + 26, 3,                   /* DW_CFA_offset, s10, -24 */
        0x80 + 27, 2,                   /* DW_CFA_offset, s11, -16 */
        0x80 + 1,  1,                   /* DW_CFA_offset, ra,  -8 */
    }
};

void tcg_register_jit(const void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}