2 * Tiny Code Generator for QEMU
4 * Copyright (c) 2021 WANG Xuerui <git@xen0n.name>
6 * Based on tcg/riscv/tcg-target.c.inc
8 * Copyright (c) 2018 SiFive, Inc
9 * Copyright (c) 2008-2009 Arnaud Patard <arnaud.patard@rtp-net.org>
10 * Copyright (c) 2009 Aurelien Jarno <aurelien@aurel32.net>
11 * Copyright (c) 2008 Fabrice Bellard
13 * Permission is hereby granted, free of charge, to any person obtaining a copy
14 * of this software and associated documentation files (the "Software"), to deal
15 * in the Software without restriction, including without limitation the rights
16 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
17 * copies of the Software, and to permit persons to whom the Software is
18 * furnished to do so, subject to the following conditions:
20 * The above copyright notice and this permission notice shall be included in
21 * all copies or substantial portions of the Software.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
26 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
28 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
32 #include "../tcg-ldst.c.inc"
33 #include <asm/hwcap.h>
35 #ifdef CONFIG_DEBUG_TCG
36 static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
58 "r21", /* reserved in the LP64* ABI, hence no ABI name */
104 static const int tcg_target_reg_alloc_order[] = {
105 /* Registers preserved across calls */
106 /* TCG_REG_S0 reserved for TCG_AREG0 */
117 /* Registers (potentially) clobbered across calls */
128 /* Argument registers, opposite order of allocation. */
138 /* Vector registers */
139 TCG_REG_V0, TCG_REG_V1, TCG_REG_V2, TCG_REG_V3,
140 TCG_REG_V4, TCG_REG_V5, TCG_REG_V6, TCG_REG_V7,
141 TCG_REG_V8, TCG_REG_V9, TCG_REG_V10, TCG_REG_V11,
142 TCG_REG_V12, TCG_REG_V13, TCG_REG_V14, TCG_REG_V15,
143 TCG_REG_V16, TCG_REG_V17, TCG_REG_V18, TCG_REG_V19,
144 TCG_REG_V20, TCG_REG_V21, TCG_REG_V22, TCG_REG_V23,
145 /* V24 - V31 are caller-saved, and skipped. */
148 static const int tcg_target_call_iarg_regs[] = {
159 static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
161 tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
162 tcg_debug_assert(slot >= 0 && slot <= 1);
163 return TCG_REG_A0 + slot;
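/* Call results come back in A0/A1, per the LP64 calling convention. */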
166 #define TCG_GUEST_BASE_REG TCG_REG_S1
168 #define TCG_CT_CONST_ZERO 0x100
169 #define TCG_CT_CONST_S12 0x200
170 #define TCG_CT_CONST_S32 0x400
171 #define TCG_CT_CONST_U12 0x800
172 #define TCG_CT_CONST_C12 0x1000
173 #define TCG_CT_CONST_WSZ 0x2000
174 #define TCG_CT_CONST_VCMP 0x4000
175 #define TCG_CT_CONST_VADD 0x8000
177 #define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 32)
178 #define ALL_VECTOR_REGS MAKE_64BIT_MASK(32, 32)
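/*
 * In the register-set bitmaps, indices 0..31 are the general-purpose
 * registers and indices 32..63 are the LSX/LASX vector registers.
 */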
180 static inline tcg_target_long sextreg(tcg_target_long val, int pos, int len)
182 return sextract64(val, pos, len);
185 /* test if a constant matches the constraint */
186 static bool tcg_target_const_match(int64_t val, int ct,
187 TCGType type, TCGCond cond, int vece)
189 if (ct & TCG_CT_CONST) {
192 if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
195 if ((ct & TCG_CT_CONST_S12) && val == sextreg(val, 0, 12)) {
198 if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
201 if ((ct & TCG_CT_CONST_U12) && val >= 0 && val <= 0xfff) {
204 if ((ct & TCG_CT_CONST_C12) && ~val >= 0 && ~val <= 0xfff) {
207 if ((ct & TCG_CT_CONST_WSZ) && val == (type == TCG_TYPE_I32 ? 32 : 64)) {
210 int64_t vec_val = sextract64(val, 0, 8 << vece);
211 if ((ct & TCG_CT_CONST_VCMP) && -0x10 <= vec_val && vec_val <= 0x1f) {
214 if ((ct & TCG_CT_CONST_VADD) && -0x1f <= vec_val && vec_val <= 0x1f) {
* The relocation records defined in the LoongArch ELF psABI v1.00 are way
* too complicated: a whopping stack machine is needed to stuff the fields,
* requiring at the very least one SOP_PUSH and one SOP_POP (of the correct
* format).
*
* Hence, define our own simpler relocation types. The numbers are chosen so
* as not to collide with potential future additions to the real ELF
* relocation type enum.
235 /* Field Sk16, shifted right by 2; suitable for conditional jumps */
236 #define R_LOONGARCH_BR_SK16 256
237 /* Field Sd10k16, shifted right by 2; suitable for B and BL */
238 #define R_LOONGARCH_BR_SD10K16 257
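/*
 * For reference: in the branch encodings targeted here, the 16-bit offset
 * field sits in insn bits 25:10, and B/BL additionally place the upper 10
 * bits of their 26-bit offset in bits 9:0.  Offsets are counted in 4-byte
 * instruction units, hence "shifted right by 2" above.
 */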
240 static bool reloc_br_sk16(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
242 const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
243 intptr_t offset = (intptr_t)target - (intptr_t)src_rx;
245 tcg_debug_assert((offset & 3) == 0);
247 if (offset == sextreg(offset, 0, 16)) {
248 *src_rw = deposit64(*src_rw, 10, 16, offset);
255 static bool reloc_br_sd10k16(tcg_insn_unit *src_rw,
256 const tcg_insn_unit *target)
258 const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
259 intptr_t offset = (intptr_t)target - (intptr_t)src_rx;
261 tcg_debug_assert((offset & 3) == 0);
263 if (offset == sextreg(offset, 0, 26)) {
264 *src_rw = deposit64(*src_rw, 0, 10, offset >> 16); /* slot d10 */
265 *src_rw = deposit64(*src_rw, 10, 16, offset); /* slot k16 */
272 static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
273 intptr_t value, intptr_t addend)
275 tcg_debug_assert(addend == 0);
277 case R_LOONGARCH_BR_SK16:
278 return reloc_br_sk16(code_ptr, (tcg_insn_unit *)value);
279 case R_LOONGARCH_BR_SD10K16:
280 return reloc_br_sd10k16(code_ptr, (tcg_insn_unit *)value);
282 g_assert_not_reached();
286 #include "tcg-insn-defs.c.inc"
292 static void tcg_out_mb(TCGContext *s, TCGArg a0)
294 /* Baseline LoongArch only has the full barrier, unfortunately. */
295 tcg_out_opc_dbar(s, 0);
298 static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
306 if (ret < TCG_REG_V0) {
307 if (arg < TCG_REG_V0) {
309 * Conventional register-register move used in LoongArch is
310 * `or dst, src, zero`.
312 tcg_out_opc_or(s, ret, arg, TCG_REG_ZERO);
314 tcg_out_opc_movfr2gr_d(s, ret, arg);
317 if (arg < TCG_REG_V0) {
318 tcg_out_opc_movgr2fr_d(s, ret, arg);
320 tcg_out_opc_fmov_d(s, ret, arg);
326 tcg_out_opc_vori_b(s, ret, arg, 0);
329 tcg_out_opc_xvori_b(s, ret, arg, 0);
332 g_assert_not_reached();
337 /* Loads a 32-bit immediate into rd, sign-extended. */
338 static void tcg_out_movi_i32(TCGContext *s, TCGReg rd, int32_t val)
340 tcg_target_long lo = sextreg(val, 0, 12);
341 tcg_target_long hi12 = sextreg(val, 12, 20);
343 /* Single-instruction cases. */
345 /* val fits in uimm12: ori rd, zero, val */
346 tcg_out_opc_ori(s, rd, TCG_REG_ZERO, val);
349 if (hi12 == sextreg(lo, 12, 20)) {
350 /* val fits in simm12: addi.w rd, zero, val */
351 tcg_out_opc_addi_w(s, rd, TCG_REG_ZERO, val);
355 /* High bits must be set; load with lu12i.w + optional ori. */
356 tcg_out_opc_lu12i_w(s, rd, hi12);
358 tcg_out_opc_ori(s, rd, rd, lo & 0xfff);
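/*
 * For example (illustrative), val = 0x12345678 is emitted as:
 *   lu12i.w  rd, 0x12345      # rd = 0x12345000
 *   ori      rd, rd, 0x678    # rd = 0x12345678
 */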
362 static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
366 * LoongArch conventionally loads 64-bit immediates in at most 4 steps,
367 * with dedicated instructions for filling the respective bitfields
*        6                   5                   4               3
*  3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2
* +-----------------------+---------------------------------------+...
* |          hi52         |                  hi32                 |
* +-----------------------+---------------------------------------+...
*
*           3                   2                   1
*  1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
* ...+-------------------------------------+-------------------------+
* ...|                 hi12                |            lo           |
* ...+-------------------------------------+-------------------------+
*
* Check whether val belongs to one of the several fast cases before
* falling back to the slow path.
385 intptr_t src_rx, pc_offset;
386 tcg_target_long hi12, hi32, hi52;
388 /* Value fits in signed i32. */
389 if (type == TCG_TYPE_I32 || val == (int32_t)val) {
390 tcg_out_movi_i32(s, rd, val);
394 /* PC-relative cases. */
395 src_rx = (intptr_t)tcg_splitwx_to_rx(s->code_ptr);
396 if ((val & 3) == 0) {
397 pc_offset = val - src_rx;
398 if (pc_offset == sextreg(pc_offset, 0, 22)) {
399 /* Single pcaddu2i. */
400 tcg_out_opc_pcaddu2i(s, rd, pc_offset >> 2);
405 pc_offset = (val >> 12) - (src_rx >> 12);
406 if (pc_offset == sextreg(pc_offset, 0, 20)) {
407 /* Load with pcalau12i + ori. */
408 tcg_target_long val_lo = val & 0xfff;
409 tcg_out_opc_pcalau12i(s, rd, pc_offset);
411 tcg_out_opc_ori(s, rd, rd, val_lo);
416 hi12 = sextreg(val, 12, 20);
417 hi32 = sextreg(val, 32, 20);
418 hi52 = sextreg(val, 52, 12);
420 /* Single cu52i.d case. */
421 if ((hi52 != 0) && (ctz64(val) >= 52)) {
422 tcg_out_opc_cu52i_d(s, rd, TCG_REG_ZERO, hi52);
426 /* Slow path. Initialize the low 32 bits, then concat high bits. */
427 tcg_out_movi_i32(s, rd, val);
429 /* Load hi32 and hi52 explicitly when they are unexpected values. */
430 if (hi32 != sextreg(hi12, 20, 20)) {
431 tcg_out_opc_cu32i_d(s, rd, hi32);
434 if (hi52 != sextreg(hi32, 20, 12)) {
435 tcg_out_opc_cu52i_d(s, rd, rd, hi52);
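/*
 * Worst case, an arbitrary 64-bit constant thus takes the full
 * lu12i.w + ori + cu32i.d + cu52i.d sequence, i.e. the "at most
 * 4 steps" mentioned above.
 */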
439 static void tcg_out_addi(TCGContext *s, TCGType type, TCGReg rd,
440 TCGReg rs, tcg_target_long imm)
442 tcg_target_long lo12 = sextreg(imm, 0, 12);
443 tcg_target_long hi16 = sextreg(imm - lo12, 16, 16);
446 * Note that there's a hole in between hi16 and lo12:
*       3                   2                   1                   0
*     1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
* ...+-------------------------------+-------+-----------------------+
*    |             hi16              | hole  |          lo12         |
* ...+-------------------------------+-------+-----------------------+
454 * For bits within that hole, it's more efficient to use LU12I and ADD.
456 if (imm == (hi16 << 16) + lo12) {
458 tcg_out_opc_addu16i_d(s, rd, rs, hi16);
461 if (type == TCG_TYPE_I32) {
462 tcg_out_opc_addi_w(s, rd, rs, lo12);
464 tcg_out_opc_addi_d(s, rd, rs, lo12);
466 tcg_out_mov(s, type, rd, rs);
469 tcg_out_movi(s, type, TCG_REG_TMP0, imm);
470 if (type == TCG_TYPE_I32) {
471 tcg_out_opc_add_w(s, rd, rs, TCG_REG_TMP0);
473 tcg_out_opc_add_d(s, rd, rs, TCG_REG_TMP0);
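/*
 * Illustrative examples: imm = 0x2340678 splits cleanly into
 * hi16 = 0x234 and lo12 = 0x678, giving addu16i.d + addi; whereas
 * imm = 0x2345678 has bits set in the 15:12 hole and therefore
 * takes the movi + add slow path above.
 */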
478 static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
483 static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
486 /* This function is only used for passing structs by reference. */
487 g_assert_not_reached();
490 static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg)
492 tcg_out_opc_andi(s, ret, arg, 0xff);
495 static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg)
497 tcg_out_opc_bstrpick_w(s, ret, arg, 0, 15);
500 static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg)
502 tcg_out_opc_bstrpick_d(s, ret, arg, 0, 31);
505 static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
507 tcg_out_opc_sext_b(s, ret, arg);
510 static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
512 tcg_out_opc_sext_h(s, ret, arg);
515 static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg)
517 tcg_out_opc_addi_w(s, ret, arg, 0);
520 static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg)
523 tcg_out_ext32s(s, ret, arg);
527 static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg)
529 tcg_out_ext32u(s, ret, arg);
532 static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg ret, TCGReg arg)
534 tcg_out_ext32s(s, ret, arg);
537 static void tcg_out_clzctz(TCGContext *s, LoongArchInsn opc,
538 TCGReg a0, TCGReg a1, TCGReg a2,
539 bool c2, bool is_32bit)
* Fast path: the semantics are already satisfied by the constraint and
* the insn behavior, so a single instruction is enough.
546 tcg_debug_assert(a2 == (is_32bit ? 32 : 64));
547 /* all clz/ctz insns belong to DJ-format */
548 tcg_out32(s, encode_dj_insn(opc, a0, a1));
552 tcg_out32(s, encode_dj_insn(opc, TCG_REG_TMP0, a1));
553 /* a0 = a1 ? REG_TMP0 : a2 */
554 tcg_out_opc_maskeqz(s, TCG_REG_TMP0, TCG_REG_TMP0, a1);
555 tcg_out_opc_masknez(s, a0, a2, a1);
556 tcg_out_opc_or(s, a0, TCG_REG_TMP0, a0);
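/*
 * maskeqz d, j, k gives d = (k == 0) ? 0 : j, and masknez the opposite,
 * so the three instructions above form a branchless select on a1.
 */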
559 #define SETCOND_INV TCG_TARGET_NB_REGS
560 #define SETCOND_NEZ (SETCOND_INV << 1)
561 #define SETCOND_FLAGS (SETCOND_INV | SETCOND_NEZ)
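/*
 * tcg_out_setcond_int returns the register holding the intermediate
 * result OR'ed with these flags: SETCOND_NEZ means the value still
 * needs a "!= 0" test, SETCOND_INV means the sense of the result is
 * inverted.
 */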
563 static int tcg_out_setcond_int(TCGContext *s, TCGCond cond, TCGReg ret,
564 TCGReg arg1, tcg_target_long arg2, bool c2)
569 case TCG_COND_EQ: /* -> NE */
570 case TCG_COND_GE: /* -> LT */
571 case TCG_COND_GEU: /* -> LTU */
572 case TCG_COND_GT: /* -> LE */
573 case TCG_COND_GTU: /* -> LEU */
574 cond = tcg_invert_cond(cond);
575 flags ^= SETCOND_INV;
585 * If we have a constant input, the most efficient way to implement
586 * LE is by adding 1 and using LT. Watch out for wrap around for LEU.
* We don't need to worry about this for LE because the constant input
588 * is still constrained to int32_t, and INT32_MAX+1 is representable
589 * in the 64-bit temporary register.
592 if (cond == TCG_COND_LEU) {
593 /* unsigned <= -1 is true */
595 tcg_out_movi(s, TCG_TYPE_REG, ret, !(flags & SETCOND_INV));
607 cond = tcg_swap_cond(cond); /* LE -> GE */
608 cond = tcg_invert_cond(cond); /* GE -> LT */
609 flags ^= SETCOND_INV;
618 flags |= SETCOND_NEZ;
620 tcg_out_opc_xor(s, ret, arg1, arg2);
621 } else if (arg2 == 0) {
623 } else if (arg2 >= 0 && arg2 <= 0xfff) {
624 tcg_out_opc_xori(s, ret, arg1, arg2);
626 tcg_out_addi(s, TCG_TYPE_REG, ret, arg1, -arg2);
633 if (arg2 >= -0x800 && arg2 <= 0x7ff) {
634 if (cond == TCG_COND_LT) {
635 tcg_out_opc_slti(s, ret, arg1, arg2);
637 tcg_out_opc_sltui(s, ret, arg1, arg2);
641 tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_TMP0, arg2);
644 if (cond == TCG_COND_LT) {
645 tcg_out_opc_slt(s, ret, arg1, arg2);
647 tcg_out_opc_sltu(s, ret, arg1, arg2);
652 g_assert_not_reached();
658 static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
659 TCGReg arg1, tcg_target_long arg2, bool c2)
661 int tmpflags = tcg_out_setcond_int(s, cond, ret, arg1, arg2, c2);
663 if (tmpflags != ret) {
664 TCGReg tmp = tmpflags & ~SETCOND_FLAGS;
666 switch (tmpflags & SETCOND_FLAGS) {
668 /* Intermediate result is boolean: simply invert. */
669 tcg_out_opc_xori(s, ret, tmp, 1);
672 /* Intermediate result is zero/non-zero: test != 0. */
673 tcg_out_opc_sltu(s, ret, TCG_REG_ZERO, tmp);
675 case SETCOND_NEZ | SETCOND_INV:
676 /* Intermediate result is zero/non-zero: test == 0. */
677 tcg_out_opc_sltui(s, ret, tmp, 1);
680 g_assert_not_reached();
685 static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret,
686 TCGReg c1, tcg_target_long c2, bool const2,
687 TCGReg v1, TCGReg v2)
689 int tmpflags = tcg_out_setcond_int(s, cond, TCG_REG_TMP0, c1, c2, const2);
692 /* Standardize the test below to t != 0. */
693 if (tmpflags & SETCOND_INV) {
694 t = v1, v1 = v2, v2 = t;
697 t = tmpflags & ~SETCOND_FLAGS;
698 if (v1 == TCG_REG_ZERO) {
699 tcg_out_opc_masknez(s, ret, v2, t);
700 } else if (v2 == TCG_REG_ZERO) {
701 tcg_out_opc_maskeqz(s, ret, v1, t);
703 tcg_out_opc_masknez(s, TCG_REG_TMP2, v2, t); /* t ? 0 : v2 */
704 tcg_out_opc_maskeqz(s, TCG_REG_TMP1, v1, t); /* t ? v1 : 0 */
705 tcg_out_opc_or(s, ret, TCG_REG_TMP1, TCG_REG_TMP2);
713 static const struct {
716 } tcg_brcond_to_loongarch[] = {
717 [TCG_COND_EQ] = { OPC_BEQ, false },
718 [TCG_COND_NE] = { OPC_BNE, false },
719 [TCG_COND_LT] = { OPC_BGT, true },
720 [TCG_COND_GE] = { OPC_BLE, true },
721 [TCG_COND_LE] = { OPC_BLE, false },
722 [TCG_COND_GT] = { OPC_BGT, false },
723 [TCG_COND_LTU] = { OPC_BGTU, true },
724 [TCG_COND_GEU] = { OPC_BLEU, true },
725 [TCG_COND_LEU] = { OPC_BLEU, false },
726 [TCG_COND_GTU] = { OPC_BGTU, false }
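/*
 * LT/GE/LTU/GEU are obtained from the GT/LE forms by swapping the two
 * operands, which is what the "swap" flag requests below.
 */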
729 static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
730 TCGReg arg2, TCGLabel *l)
732 LoongArchInsn op = tcg_brcond_to_loongarch[cond].op;
734 tcg_debug_assert(op != 0);
736 if (tcg_brcond_to_loongarch[cond].swap) {
742 /* all conditional branch insns belong to DJSk16-format */
743 tcg_out_reloc(s, s->code_ptr, R_LOONGARCH_BR_SK16, l, 0);
744 tcg_out32(s, encode_djsk16_insn(op, arg1, arg2, 0));
747 static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail)
749 TCGReg link = tail ? TCG_REG_ZERO : TCG_REG_RA;
750 ptrdiff_t offset = tcg_pcrel_diff(s, arg);
752 tcg_debug_assert((offset & 3) == 0);
753 if (offset == sextreg(offset, 0, 28)) {
/* short jump: 256 MiB range (signed 28-bit byte offset) */
756 tcg_out_opc_b(s, offset >> 2);
758 tcg_out_opc_bl(s, offset >> 2);
760 } else if (offset == sextreg(offset, 0, 38)) {
/* long jump: 256 GiB range (signed 38-bit byte offset) */
762 tcg_target_long lo = sextreg(offset, 0, 18);
763 tcg_target_long hi = offset - lo;
764 tcg_out_opc_pcaddu18i(s, TCG_REG_TMP0, hi >> 18);
765 tcg_out_opc_jirl(s, link, TCG_REG_TMP0, lo >> 2);
767 /* far jump: 64-bit */
768 tcg_target_long lo = sextreg((tcg_target_long)arg, 0, 18);
769 tcg_target_long hi = (tcg_target_long)arg - lo;
770 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, hi);
771 tcg_out_opc_jirl(s, link, TCG_REG_TMP0, lo >> 2);
775 static void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg,
776 const TCGHelperInfo *info)
778 tcg_out_call_int(s, arg, false);
785 static void tcg_out_ldst(TCGContext *s, LoongArchInsn opc, TCGReg data,
786 TCGReg addr, intptr_t offset)
788 intptr_t imm12 = sextreg(offset, 0, 12);
790 if (offset != imm12) {
791 intptr_t diff = tcg_pcrel_diff(s, (void *)offset);
793 if (addr == TCG_REG_ZERO && diff == (int32_t)diff) {
794 imm12 = sextreg(diff, 0, 12);
795 tcg_out_opc_pcaddu12i(s, TCG_REG_TMP2, (diff - imm12) >> 12);
797 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP2, offset - imm12);
798 if (addr != TCG_REG_ZERO) {
799 tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, addr);
817 tcg_out32(s, encode_djsk12_insn(opc, data, addr, imm12));
823 tcg_out32(s, encode_fdjsk12_insn(opc, data, addr, imm12));
826 g_assert_not_reached();
830 static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg dest,
831 TCGReg base, intptr_t offset)
835 if (dest < TCG_REG_V0) {
836 tcg_out_ldst(s, OPC_LD_W, dest, base, offset);
838 tcg_out_ldst(s, OPC_FLD_S, dest, base, offset);
843 if (dest < TCG_REG_V0) {
844 tcg_out_ldst(s, OPC_LD_D, dest, base, offset);
846 tcg_out_ldst(s, OPC_FLD_D, dest, base, offset);
850 if (-0x800 <= offset && offset <= 0x7ff) {
851 tcg_out_opc_vld(s, dest, base, offset);
853 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, offset);
854 tcg_out_opc_vldx(s, dest, base, TCG_REG_TMP0);
858 if (-0x800 <= offset && offset <= 0x7ff) {
859 tcg_out_opc_xvld(s, dest, base, offset);
861 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, offset);
862 tcg_out_opc_xvldx(s, dest, base, TCG_REG_TMP0);
866 g_assert_not_reached();
870 static void tcg_out_st(TCGContext *s, TCGType type, TCGReg src,
871 TCGReg base, intptr_t offset)
875 if (src < TCG_REG_V0) {
876 tcg_out_ldst(s, OPC_ST_W, src, base, offset);
878 tcg_out_ldst(s, OPC_FST_S, src, base, offset);
883 if (src < TCG_REG_V0) {
884 tcg_out_ldst(s, OPC_ST_D, src, base, offset);
886 tcg_out_ldst(s, OPC_FST_D, src, base, offset);
890 if (-0x800 <= offset && offset <= 0x7ff) {
891 tcg_out_opc_vst(s, src, base, offset);
893 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, offset);
894 tcg_out_opc_vstx(s, src, base, TCG_REG_TMP0);
898 if (-0x800 <= offset && offset <= 0x7ff) {
899 tcg_out_opc_xvst(s, src, base, offset);
901 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, offset);
902 tcg_out_opc_xvstx(s, src, base, TCG_REG_TMP0);
906 g_assert_not_reached();
910 static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
911 TCGReg base, intptr_t ofs)
914 tcg_out_st(s, type, TCG_REG_ZERO, base, ofs);
921 * Load/store helpers for SoftMMU, and qemu_ld/st implementations
924 static bool tcg_out_goto(TCGContext *s, const tcg_insn_unit *target)
927 return reloc_br_sd10k16(s->code_ptr - 1, target);
930 static const TCGLdstHelperParam ldst_helper_param = {
931 .ntmp = 1, .tmp = { TCG_REG_TMP0 }
934 static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
936 MemOp opc = get_memop(l->oi);
938 /* resolve label address */
939 if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
943 tcg_out_ld_helper_args(s, l, &ldst_helper_param);
944 tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SIZE], false);
945 tcg_out_ld_helper_ret(s, l, false, &ldst_helper_param);
946 return tcg_out_goto(s, l->raddr);
949 static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
951 MemOp opc = get_memop(l->oi);
953 /* resolve label address */
954 if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
958 tcg_out_st_helper_args(s, l, &ldst_helper_param);
959 tcg_out_call_int(s, qemu_st_helpers[opc & MO_SIZE], false);
960 return tcg_out_goto(s, l->raddr);
969 bool tcg_target_has_memory_bswap(MemOp memop)
974 /* We expect to use a 12-bit negative offset from ENV. */
975 #define MIN_TLB_MASK_TABLE_OFS -(1 << 11)
978 * For system-mode, perform the TLB load and compare.
979 * For user-mode, perform any required alignment tests.
980 * In both cases, return a TCGLabelQemuLdst structure if the slow path
981 * is required and fill in @h with the host address for the fast path.
983 static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
984 TCGReg addr_reg, MemOpIdx oi,
987 TCGType addr_type = s->addr_type;
988 TCGLabelQemuLdst *ldst = NULL;
989 MemOp opc = get_memop(oi);
992 h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false);
993 a_bits = h->aa.align;
995 if (tcg_use_softmmu) {
996 unsigned s_bits = opc & MO_SIZE;
997 int mem_index = get_mmuidx(oi);
998 int fast_ofs = tlb_mask_table_ofs(s, mem_index);
999 int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask);
1000 int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table);
1002 ldst = new_ldst_label(s);
1003 ldst->is_ld = is_ld;
1005 ldst->addrlo_reg = addr_reg;
1007 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs);
1008 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs);
1010 tcg_out_opc_srli_d(s, TCG_REG_TMP2, addr_reg,
1011 s->page_bits - CPU_TLB_ENTRY_BITS);
1012 tcg_out_opc_and(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
1013 tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);
1015 /* Load the tlb comparator and the addend. */
1016 QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN);
1017 tcg_out_ld(s, addr_type, TCG_REG_TMP0, TCG_REG_TMP2,
1018 is_ld ? offsetof(CPUTLBEntry, addr_read)
1019 : offsetof(CPUTLBEntry, addr_write));
1020 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2,
1021 offsetof(CPUTLBEntry, addend));
1024 * For aligned accesses, we check the first byte and include the
1025 * alignment bits within the address. For unaligned access, we
1026 * check that we don't cross pages using the address of the last
1027 * byte of the access.
1029 if (a_bits < s_bits) {
1030 unsigned a_mask = (1u << a_bits) - 1;
1031 unsigned s_mask = (1u << s_bits) - 1;
1032 tcg_out_addi(s, addr_type, TCG_REG_TMP1, addr_reg, s_mask - a_mask);
1034 tcg_out_mov(s, addr_type, TCG_REG_TMP1, addr_reg);
1036 tcg_out_opc_bstrins_d(s, TCG_REG_TMP1, TCG_REG_ZERO,
1037 a_bits, s->page_bits - 1);
1039 /* Compare masked address with the TLB entry. */
1040 ldst->label_ptr[0] = s->code_ptr;
1041 tcg_out_opc_bne(s, TCG_REG_TMP0, TCG_REG_TMP1, 0);
1043 h->index = TCG_REG_TMP2;
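/*
 * On a TLB hit, TMP2 now holds the addend for this page; the guest
 * address is combined with it later via the indexed ldx/stx forms.
 * On a miss, the bne above (recorded in label_ptr[0]) goes to the
 * slow path.
 */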
1046 ldst = new_ldst_label(s);
1048 ldst->is_ld = is_ld;
1050 ldst->addrlo_reg = addr_reg;
* Without micro-architecture details, we don't know which of
* bstrpick or andi is faster, so use bstrpick as it is not
* constrained by the immediate field width. Not that alignments
* >= 2^12 are going to happen any time soon anyway.
1058 tcg_out_opc_bstrpick_d(s, TCG_REG_TMP1, addr_reg, 0, a_bits - 1);
1060 ldst->label_ptr[0] = s->code_ptr;
1061 tcg_out_opc_bne(s, TCG_REG_TMP1, TCG_REG_ZERO, 0);
1064 h->index = guest_base ? TCG_GUEST_BASE_REG : TCG_REG_ZERO;
1067 if (addr_type == TCG_TYPE_I32) {
1068 h->base = TCG_REG_TMP0;
1069 tcg_out_ext32u(s, h->base, addr_reg);
1077 static void tcg_out_qemu_ld_indexed(TCGContext *s, MemOp opc, TCGType type,
1078 TCGReg rd, HostAddress h)
1080 /* Byte swapping is left to middle-end expansion. */
1081 tcg_debug_assert((opc & MO_BSWAP) == 0);
1083 switch (opc & MO_SSIZE) {
1085 tcg_out_opc_ldx_bu(s, rd, h.base, h.index);
1088 tcg_out_opc_ldx_b(s, rd, h.base, h.index);
1091 tcg_out_opc_ldx_hu(s, rd, h.base, h.index);
1094 tcg_out_opc_ldx_h(s, rd, h.base, h.index);
1097 if (type == TCG_TYPE_I64) {
1098 tcg_out_opc_ldx_wu(s, rd, h.base, h.index);
1103 tcg_out_opc_ldx_w(s, rd, h.base, h.index);
1106 tcg_out_opc_ldx_d(s, rd, h.base, h.index);
1109 g_assert_not_reached();
1113 static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
1114 MemOpIdx oi, TCGType data_type)
1116 TCGLabelQemuLdst *ldst;
1119 ldst = prepare_host_addr(s, &h, addr_reg, oi, true);
1120 tcg_out_qemu_ld_indexed(s, get_memop(oi), data_type, data_reg, h);
1123 ldst->type = data_type;
1124 ldst->datalo_reg = data_reg;
1125 ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
1129 static void tcg_out_qemu_st_indexed(TCGContext *s, MemOp opc,
1130 TCGReg rd, HostAddress h)
1132 /* Byte swapping is left to middle-end expansion. */
1133 tcg_debug_assert((opc & MO_BSWAP) == 0);
1135 switch (opc & MO_SIZE) {
1137 tcg_out_opc_stx_b(s, rd, h.base, h.index);
1140 tcg_out_opc_stx_h(s, rd, h.base, h.index);
1143 tcg_out_opc_stx_w(s, rd, h.base, h.index);
1146 tcg_out_opc_stx_d(s, rd, h.base, h.index);
1149 g_assert_not_reached();
1153 static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
1154 MemOpIdx oi, TCGType data_type)
1156 TCGLabelQemuLdst *ldst;
1159 ldst = prepare_host_addr(s, &h, addr_reg, oi, false);
1160 tcg_out_qemu_st_indexed(s, get_memop(oi), data_reg, h);
1163 ldst->type = data_type;
1164 ldst->datalo_reg = data_reg;
1165 ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
1169 static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg data_lo, TCGReg data_hi,
1170 TCGReg addr_reg, MemOpIdx oi, bool is_ld)
1172 TCGLabelQemuLdst *ldst;
1175 ldst = prepare_host_addr(s, &h, addr_reg, oi, is_ld);
1177 if (h.aa.atom == MO_128) {
1179 * Use VLDX/VSTX when 128-bit atomicity is required.
* If the address is aligned to 16 bytes, the 128-bit load/store is atomic.
1183 tcg_out_opc_vldx(s, TCG_VEC_TMP0, h.base, h.index);
1184 tcg_out_opc_vpickve2gr_d(s, data_lo, TCG_VEC_TMP0, 0);
1185 tcg_out_opc_vpickve2gr_d(s, data_hi, TCG_VEC_TMP0, 1);
1187 tcg_out_opc_vinsgr2vr_d(s, TCG_VEC_TMP0, data_lo, 0);
1188 tcg_out_opc_vinsgr2vr_d(s, TCG_VEC_TMP0, data_hi, 1);
1189 tcg_out_opc_vstx(s, TCG_VEC_TMP0, h.base, h.index);
1192 /* Otherwise use a pair of LD/ST. */
1193 TCGReg base = h.base;
1194 if (h.index != TCG_REG_ZERO) {
1195 base = TCG_REG_TMP0;
1196 tcg_out_opc_add_d(s, base, h.base, h.index);
1199 tcg_debug_assert(base != data_lo);
1200 tcg_out_opc_ld_d(s, data_lo, base, 0);
1201 tcg_out_opc_ld_d(s, data_hi, base, 8);
1203 tcg_out_opc_st_d(s, data_lo, base, 0);
1204 tcg_out_opc_st_d(s, data_hi, base, 8);
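/*
 * In both paths the low 64 bits of the value travel in data_lo
 * (vector lane 0 / byte offset 0) and the high 64 bits in data_hi
 * (lane 1 / offset 8), matching this little-endian host.
 */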
1209 ldst->type = TCG_TYPE_I128;
1210 ldst->datalo_reg = data_lo;
1211 ldst->datahi_reg = data_hi;
1212 ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
1220 static const tcg_insn_unit *tb_ret_addr;
1222 static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
1224 /* Reuse the zeroing that exists for goto_ptr. */
1226 tcg_out_call_int(s, tcg_code_gen_epilogue, true);
1228 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A0, a0);
1229 tcg_out_call_int(s, tb_ret_addr, true);
1233 static void tcg_out_goto_tb(TCGContext *s, int which)
1236 * Direct branch, or load indirect address, to be patched
1237 * by tb_target_set_jmp_target. Check indirect load offset
1238 * in range early, regardless of direct branch distance,
1239 * via assert within tcg_out_opc_pcaddu2i.
1241 uintptr_t i_addr = get_jmp_target_addr(s, which);
1242 intptr_t i_disp = tcg_pcrel_diff(s, (void *)i_addr);
1244 set_jmp_insn_offset(s, which);
1245 tcg_out_opc_pcaddu2i(s, TCG_REG_TMP0, i_disp >> 2);
1247 /* Finish the load and indirect branch. */
1248 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_REG_TMP0, 0);
1249 tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_TMP0, 0);
1250 set_jmp_reset_offset(s, which);
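/*
 * The pcaddu2i above is the patch target: when the destination TB is
 * close enough, tb_target_set_jmp_target rewrites it into a direct B,
 * which skips the ld.d + jirl pair entirely; otherwise it is left (or
 * restored) as a pcaddu2i addressing jmp_target_addr[], and the ld.d +
 * jirl perform the indirect branch.
 */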
1253 void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
1254 uintptr_t jmp_rx, uintptr_t jmp_rw)
1256 uintptr_t d_addr = tb->jmp_target_addr[n];
1257 ptrdiff_t d_disp = (ptrdiff_t)(d_addr - jmp_rx) >> 2;
1260 /* Either directly branch, or load slot address for indirect branch. */
1261 if (d_disp == sextreg(d_disp, 0, 26)) {
1262 insn = encode_sd10k16_insn(OPC_B, d_disp);
1264 uintptr_t i_addr = (uintptr_t)&tb->jmp_target_addr[n];
1265 intptr_t i_disp = i_addr - jmp_rx;
1266 insn = encode_dsj20_insn(OPC_PCADDU2I, TCG_REG_TMP0, i_disp >> 2);
1269 qatomic_set((tcg_insn_unit *)jmp_rw, insn);
1270 flush_idcache_range(jmp_rx, jmp_rw, 4);
1273 static void tcg_out_op(TCGContext *s, TCGOpcode opc,
1274 const TCGArg args[TCG_MAX_OP_ARGS],
1275 const int const_args[TCG_MAX_OP_ARGS])
1277 TCGArg a0 = args[0];
1278 TCGArg a1 = args[1];
1279 TCGArg a2 = args[2];
1280 TCGArg a3 = args[3];
1281 int c2 = const_args[2];
1288 case INDEX_op_goto_ptr:
1289 tcg_out_opc_jirl(s, TCG_REG_ZERO, a0, 0);
1293 tcg_out_reloc(s, s->code_ptr, R_LOONGARCH_BR_SD10K16, arg_label(a0),
1295 tcg_out_opc_b(s, 0);
1298 case INDEX_op_brcond_i32:
1299 case INDEX_op_brcond_i64:
1300 tcg_out_brcond(s, a2, a0, a1, arg_label(args[3]));
1303 case INDEX_op_extrh_i64_i32:
1304 tcg_out_opc_srai_d(s, a0, a1, 32);
1307 case INDEX_op_not_i32:
1308 case INDEX_op_not_i64:
1309 tcg_out_opc_nor(s, a0, a1, TCG_REG_ZERO);
1312 case INDEX_op_nor_i32:
1313 case INDEX_op_nor_i64:
1315 tcg_out_opc_ori(s, a0, a1, a2);
1316 tcg_out_opc_nor(s, a0, a0, TCG_REG_ZERO);
1318 tcg_out_opc_nor(s, a0, a1, a2);
1322 case INDEX_op_andc_i32:
1323 case INDEX_op_andc_i64:
1325 /* guaranteed to fit due to constraint */
1326 tcg_out_opc_andi(s, a0, a1, ~a2);
1328 tcg_out_opc_andn(s, a0, a1, a2);
1332 case INDEX_op_orc_i32:
1333 case INDEX_op_orc_i64:
1335 /* guaranteed to fit due to constraint */
1336 tcg_out_opc_ori(s, a0, a1, ~a2);
1338 tcg_out_opc_orn(s, a0, a1, a2);
1342 case INDEX_op_and_i32:
1343 case INDEX_op_and_i64:
1345 tcg_out_opc_andi(s, a0, a1, a2);
1347 tcg_out_opc_and(s, a0, a1, a2);
1351 case INDEX_op_or_i32:
1352 case INDEX_op_or_i64:
1354 tcg_out_opc_ori(s, a0, a1, a2);
1356 tcg_out_opc_or(s, a0, a1, a2);
1360 case INDEX_op_xor_i32:
1361 case INDEX_op_xor_i64:
1363 tcg_out_opc_xori(s, a0, a1, a2);
1365 tcg_out_opc_xor(s, a0, a1, a2);
1369 case INDEX_op_extract_i32:
1370 tcg_out_opc_bstrpick_w(s, a0, a1, a2, a2 + args[3] - 1);
1372 case INDEX_op_extract_i64:
1373 tcg_out_opc_bstrpick_d(s, a0, a1, a2, a2 + args[3] - 1);
1376 case INDEX_op_deposit_i32:
1377 tcg_out_opc_bstrins_w(s, a0, a2, args[3], args[3] + args[4] - 1);
1379 case INDEX_op_deposit_i64:
1380 tcg_out_opc_bstrins_d(s, a0, a2, args[3], args[3] + args[4] - 1);
1383 case INDEX_op_bswap16_i32:
1384 case INDEX_op_bswap16_i64:
1385 tcg_out_opc_revb_2h(s, a0, a1);
1386 if (a2 & TCG_BSWAP_OS) {
1387 tcg_out_ext16s(s, TCG_TYPE_REG, a0, a0);
1388 } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
1389 tcg_out_ext16u(s, a0, a0);
1393 case INDEX_op_bswap32_i32:
1394 /* All 32-bit values are computed sign-extended in the register. */
1397 case INDEX_op_bswap32_i64:
1398 tcg_out_opc_revb_2w(s, a0, a1);
1399 if (a2 & TCG_BSWAP_OS) {
1400 tcg_out_ext32s(s, a0, a0);
1401 } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
1402 tcg_out_ext32u(s, a0, a0);
1406 case INDEX_op_bswap64_i64:
1407 tcg_out_opc_revb_d(s, a0, a1);
1410 case INDEX_op_clz_i32:
1411 tcg_out_clzctz(s, OPC_CLZ_W, a0, a1, a2, c2, true);
1413 case INDEX_op_clz_i64:
1414 tcg_out_clzctz(s, OPC_CLZ_D, a0, a1, a2, c2, false);
1417 case INDEX_op_ctz_i32:
1418 tcg_out_clzctz(s, OPC_CTZ_W, a0, a1, a2, c2, true);
1420 case INDEX_op_ctz_i64:
1421 tcg_out_clzctz(s, OPC_CTZ_D, a0, a1, a2, c2, false);
1424 case INDEX_op_shl_i32:
1426 tcg_out_opc_slli_w(s, a0, a1, a2 & 0x1f);
1428 tcg_out_opc_sll_w(s, a0, a1, a2);
1431 case INDEX_op_shl_i64:
1433 tcg_out_opc_slli_d(s, a0, a1, a2 & 0x3f);
1435 tcg_out_opc_sll_d(s, a0, a1, a2);
1439 case INDEX_op_shr_i32:
1441 tcg_out_opc_srli_w(s, a0, a1, a2 & 0x1f);
1443 tcg_out_opc_srl_w(s, a0, a1, a2);
1446 case INDEX_op_shr_i64:
1448 tcg_out_opc_srli_d(s, a0, a1, a2 & 0x3f);
1450 tcg_out_opc_srl_d(s, a0, a1, a2);
1454 case INDEX_op_sar_i32:
1456 tcg_out_opc_srai_w(s, a0, a1, a2 & 0x1f);
1458 tcg_out_opc_sra_w(s, a0, a1, a2);
1461 case INDEX_op_sar_i64:
1463 tcg_out_opc_srai_d(s, a0, a1, a2 & 0x3f);
1465 tcg_out_opc_sra_d(s, a0, a1, a2);
1469 case INDEX_op_rotl_i32:
1470 /* transform into equivalent rotr/rotri */
1472 tcg_out_opc_rotri_w(s, a0, a1, (32 - a2) & 0x1f);
1474 tcg_out_opc_sub_w(s, TCG_REG_TMP0, TCG_REG_ZERO, a2);
1475 tcg_out_opc_rotr_w(s, a0, a1, TCG_REG_TMP0);
1478 case INDEX_op_rotl_i64:
1479 /* transform into equivalent rotr/rotri */
1481 tcg_out_opc_rotri_d(s, a0, a1, (64 - a2) & 0x3f);
1483 tcg_out_opc_sub_w(s, TCG_REG_TMP0, TCG_REG_ZERO, a2);
1484 tcg_out_opc_rotr_d(s, a0, a1, TCG_REG_TMP0);
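/*
 * Negating the count with sub.w is fine even for the 64-bit rotate:
 * rotr.d only consumes the low 6 bits of the count register.
 */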
1488 case INDEX_op_rotr_i32:
1490 tcg_out_opc_rotri_w(s, a0, a1, a2 & 0x1f);
1492 tcg_out_opc_rotr_w(s, a0, a1, a2);
1495 case INDEX_op_rotr_i64:
1497 tcg_out_opc_rotri_d(s, a0, a1, a2 & 0x3f);
1499 tcg_out_opc_rotr_d(s, a0, a1, a2);
1503 case INDEX_op_add_i32:
1505 tcg_out_addi(s, TCG_TYPE_I32, a0, a1, a2);
1507 tcg_out_opc_add_w(s, a0, a1, a2);
1510 case INDEX_op_add_i64:
1512 tcg_out_addi(s, TCG_TYPE_I64, a0, a1, a2);
1514 tcg_out_opc_add_d(s, a0, a1, a2);
1518 case INDEX_op_sub_i32:
1520 tcg_out_addi(s, TCG_TYPE_I32, a0, a1, -a2);
1522 tcg_out_opc_sub_w(s, a0, a1, a2);
1525 case INDEX_op_sub_i64:
1527 tcg_out_addi(s, TCG_TYPE_I64, a0, a1, -a2);
1529 tcg_out_opc_sub_d(s, a0, a1, a2);
1533 case INDEX_op_neg_i32:
1534 tcg_out_opc_sub_w(s, a0, TCG_REG_ZERO, a1);
1536 case INDEX_op_neg_i64:
1537 tcg_out_opc_sub_d(s, a0, TCG_REG_ZERO, a1);
1540 case INDEX_op_mul_i32:
1541 tcg_out_opc_mul_w(s, a0, a1, a2);
1543 case INDEX_op_mul_i64:
1544 tcg_out_opc_mul_d(s, a0, a1, a2);
1547 case INDEX_op_mulsh_i32:
1548 tcg_out_opc_mulh_w(s, a0, a1, a2);
1550 case INDEX_op_mulsh_i64:
1551 tcg_out_opc_mulh_d(s, a0, a1, a2);
1554 case INDEX_op_muluh_i32:
1555 tcg_out_opc_mulh_wu(s, a0, a1, a2);
1557 case INDEX_op_muluh_i64:
1558 tcg_out_opc_mulh_du(s, a0, a1, a2);
1561 case INDEX_op_div_i32:
1562 tcg_out_opc_div_w(s, a0, a1, a2);
1564 case INDEX_op_div_i64:
1565 tcg_out_opc_div_d(s, a0, a1, a2);
1568 case INDEX_op_divu_i32:
1569 tcg_out_opc_div_wu(s, a0, a1, a2);
1571 case INDEX_op_divu_i64:
1572 tcg_out_opc_div_du(s, a0, a1, a2);
1575 case INDEX_op_rem_i32:
1576 tcg_out_opc_mod_w(s, a0, a1, a2);
1578 case INDEX_op_rem_i64:
1579 tcg_out_opc_mod_d(s, a0, a1, a2);
1582 case INDEX_op_remu_i32:
1583 tcg_out_opc_mod_wu(s, a0, a1, a2);
1585 case INDEX_op_remu_i64:
1586 tcg_out_opc_mod_du(s, a0, a1, a2);
1589 case INDEX_op_setcond_i32:
1590 case INDEX_op_setcond_i64:
1591 tcg_out_setcond(s, args[3], a0, a1, a2, c2);
1594 case INDEX_op_movcond_i32:
1595 case INDEX_op_movcond_i64:
1596 tcg_out_movcond(s, args[5], a0, a1, a2, c2, args[3], args[4]);
1599 case INDEX_op_ld8s_i32:
1600 case INDEX_op_ld8s_i64:
1601 tcg_out_ldst(s, OPC_LD_B, a0, a1, a2);
1603 case INDEX_op_ld8u_i32:
1604 case INDEX_op_ld8u_i64:
1605 tcg_out_ldst(s, OPC_LD_BU, a0, a1, a2);
1607 case INDEX_op_ld16s_i32:
1608 case INDEX_op_ld16s_i64:
1609 tcg_out_ldst(s, OPC_LD_H, a0, a1, a2);
1611 case INDEX_op_ld16u_i32:
1612 case INDEX_op_ld16u_i64:
1613 tcg_out_ldst(s, OPC_LD_HU, a0, a1, a2);
1615 case INDEX_op_ld_i32:
1616 case INDEX_op_ld32s_i64:
1617 tcg_out_ldst(s, OPC_LD_W, a0, a1, a2);
1619 case INDEX_op_ld32u_i64:
1620 tcg_out_ldst(s, OPC_LD_WU, a0, a1, a2);
1622 case INDEX_op_ld_i64:
1623 tcg_out_ldst(s, OPC_LD_D, a0, a1, a2);
1626 case INDEX_op_st8_i32:
1627 case INDEX_op_st8_i64:
1628 tcg_out_ldst(s, OPC_ST_B, a0, a1, a2);
1630 case INDEX_op_st16_i32:
1631 case INDEX_op_st16_i64:
1632 tcg_out_ldst(s, OPC_ST_H, a0, a1, a2);
1634 case INDEX_op_st_i32:
1635 case INDEX_op_st32_i64:
1636 tcg_out_ldst(s, OPC_ST_W, a0, a1, a2);
1638 case INDEX_op_st_i64:
1639 tcg_out_ldst(s, OPC_ST_D, a0, a1, a2);
1642 case INDEX_op_qemu_ld_a32_i32:
1643 case INDEX_op_qemu_ld_a64_i32:
1644 tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32);
1646 case INDEX_op_qemu_ld_a32_i64:
1647 case INDEX_op_qemu_ld_a64_i64:
1648 tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64);
1650 case INDEX_op_qemu_ld_a32_i128:
1651 case INDEX_op_qemu_ld_a64_i128:
1652 tcg_out_qemu_ldst_i128(s, a0, a1, a2, a3, true);
1654 case INDEX_op_qemu_st_a32_i32:
1655 case INDEX_op_qemu_st_a64_i32:
1656 tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32);
1658 case INDEX_op_qemu_st_a32_i64:
1659 case INDEX_op_qemu_st_a64_i64:
1660 tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64);
1662 case INDEX_op_qemu_st_a32_i128:
1663 case INDEX_op_qemu_st_a64_i128:
1664 tcg_out_qemu_ldst_i128(s, a0, a1, a2, a3, false);
1667 case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
1668 case INDEX_op_mov_i64:
1669 case INDEX_op_call: /* Always emitted via tcg_out_call. */
1670 case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
1671 case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
1672 case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */
1673 case INDEX_op_ext8s_i64:
1674 case INDEX_op_ext8u_i32:
1675 case INDEX_op_ext8u_i64:
1676 case INDEX_op_ext16s_i32:
1677 case INDEX_op_ext16s_i64:
1678 case INDEX_op_ext16u_i32:
1679 case INDEX_op_ext16u_i64:
1680 case INDEX_op_ext32s_i64:
1681 case INDEX_op_ext32u_i64:
1682 case INDEX_op_ext_i32_i64:
1683 case INDEX_op_extu_i32_i64:
1684 case INDEX_op_extrl_i64_i32:
1686 g_assert_not_reached();
1690 static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
1691 TCGReg rd, TCGReg rs)
1693 static const LoongArchInsn repl_insn[2][4] = {
1694 { OPC_VREPLGR2VR_B, OPC_VREPLGR2VR_H,
1695 OPC_VREPLGR2VR_W, OPC_VREPLGR2VR_D },
1696 { OPC_XVREPLGR2VR_B, OPC_XVREPLGR2VR_H,
1697 OPC_XVREPLGR2VR_W, OPC_XVREPLGR2VR_D },
1699 bool lasx = type == TCG_TYPE_V256;
1701 tcg_debug_assert(vece <= MO_64);
1702 tcg_out32(s, encode_vdj_insn(repl_insn[lasx][vece], rd, rs));
1706 static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
1707 TCGReg r, TCGReg base, intptr_t offset)
1709 bool lasx = type == TCG_TYPE_V256;
/* Handle imm overflow and scaling (e.g. vldrepl.d's imm is divided by 8). */
1712 if (offset < -0x800 || offset > 0x7ff ||
1713 (offset & ((1 << vece) - 1)) != 0) {
1714 tcg_out_addi(s, TCG_TYPE_I64, TCG_REG_TMP0, base, offset);
1715 base = TCG_REG_TMP0;
1723 tcg_out_opc_xvldrepl_b(s, r, base, offset);
1725 tcg_out_opc_vldrepl_b(s, r, base, offset);
1730 tcg_out_opc_xvldrepl_h(s, r, base, offset);
1732 tcg_out_opc_vldrepl_h(s, r, base, offset);
1737 tcg_out_opc_xvldrepl_w(s, r, base, offset);
1739 tcg_out_opc_vldrepl_w(s, r, base, offset);
1744 tcg_out_opc_xvldrepl_d(s, r, base, offset);
1746 tcg_out_opc_vldrepl_d(s, r, base, offset);
1750 g_assert_not_reached();
1755 static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
1756 TCGReg rd, int64_t v64)
1758 /* Try vldi if imm can fit */
1759 int64_t value = sextract64(v64, 0, 8 << vece);
1760 if (-0x200 <= value && value <= 0x1FF) {
1761 uint32_t imm = (vece << 10) | ((uint32_t)v64 & 0x3FF);
1763 if (type == TCG_TYPE_V256) {
1764 tcg_out_opc_xvldi(s, rd, imm);
1766 tcg_out_opc_vldi(s, rd, imm);
1771 /* TODO: vldi patterns when imm 12 is set */
1773 tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP0, value);
1774 tcg_out_dup_vec(s, type, vece, rd, TCG_REG_TMP0);
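/*
 * The vldi immediate built above assumes the simple broadcast forms:
 * with bit 12 clear, bits 11:10 select the element size (B/H/W/D) and
 * bits 9:0 are a signed 10-bit value replicated to every element,
 * hence the -0x200..0x1ff check.
 */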
1777 static void tcg_out_addsub_vec(TCGContext *s, bool lasx, unsigned vece,
1778 TCGArg a0, TCGArg a1, TCGArg a2,
1779 bool a2_is_const, bool is_add)
1781 static const LoongArchInsn add_vec_insn[2][4] = {
1782 { OPC_VADD_B, OPC_VADD_H, OPC_VADD_W, OPC_VADD_D },
1783 { OPC_XVADD_B, OPC_XVADD_H, OPC_XVADD_W, OPC_XVADD_D },
1785 static const LoongArchInsn add_vec_imm_insn[2][4] = {
1786 { OPC_VADDI_BU, OPC_VADDI_HU, OPC_VADDI_WU, OPC_VADDI_DU },
1787 { OPC_XVADDI_BU, OPC_XVADDI_HU, OPC_XVADDI_WU, OPC_XVADDI_DU },
1789 static const LoongArchInsn sub_vec_insn[2][4] = {
1790 { OPC_VSUB_B, OPC_VSUB_H, OPC_VSUB_W, OPC_VSUB_D },
1791 { OPC_XVSUB_B, OPC_XVSUB_H, OPC_XVSUB_W, OPC_XVSUB_D },
1793 static const LoongArchInsn sub_vec_imm_insn[2][4] = {
1794 { OPC_VSUBI_BU, OPC_VSUBI_HU, OPC_VSUBI_WU, OPC_VSUBI_DU },
1795 { OPC_XVSUBI_BU, OPC_XVSUBI_HU, OPC_XVSUBI_WU, OPC_XVSUBI_DU },
1800 int64_t value = sextract64(a2, 0, 8 << vece);
1806 insn = sub_vec_imm_insn[lasx][vece];
1809 insn = add_vec_imm_insn[lasx][vece];
1812 /* Constraint TCG_CT_CONST_VADD ensures validity. */
1813 tcg_debug_assert(0 <= value && value <= 0x1f);
1815 tcg_out32(s, encode_vdvjuk5_insn(insn, a0, a1, value));
1820 insn = add_vec_insn[lasx][vece];
1822 insn = sub_vec_insn[lasx][vece];
1824 tcg_out32(s, encode_vdvjvk_insn(insn, a0, a1, a2));
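/*
 * vaddi/vsubi only take an unsigned 5-bit immediate, so the constant
 * is first negated for sub, and a still-negative value then selects
 * the vsubi form with its absolute value; the TCG_CT_CONST_VADD range
 * of +/-0x1f guarantees the final immediate fits.
 */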
1827 static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
1828 unsigned vecl, unsigned vece,
1829 const TCGArg args[TCG_MAX_OP_ARGS],
1830 const int const_args[TCG_MAX_OP_ARGS])
1832 TCGType type = vecl + TCG_TYPE_V64;
1833 bool lasx = type == TCG_TYPE_V256;
1834 TCGArg a0, a1, a2, a3;
1837 static const LoongArchInsn cmp_vec_insn[16][2][4] = {
1839 { OPC_VSEQ_B, OPC_VSEQ_H, OPC_VSEQ_W, OPC_VSEQ_D },
1840 { OPC_XVSEQ_B, OPC_XVSEQ_H, OPC_XVSEQ_W, OPC_XVSEQ_D },
1843 { OPC_VSLE_B, OPC_VSLE_H, OPC_VSLE_W, OPC_VSLE_D },
1844 { OPC_XVSLE_B, OPC_XVSLE_H, OPC_XVSLE_W, OPC_XVSLE_D },
1847 { OPC_VSLE_BU, OPC_VSLE_HU, OPC_VSLE_WU, OPC_VSLE_DU },
1848 { OPC_XVSLE_BU, OPC_XVSLE_HU, OPC_XVSLE_WU, OPC_XVSLE_DU },
1851 { OPC_VSLT_B, OPC_VSLT_H, OPC_VSLT_W, OPC_VSLT_D },
1852 { OPC_XVSLT_B, OPC_XVSLT_H, OPC_XVSLT_W, OPC_XVSLT_D },
1855 { OPC_VSLT_BU, OPC_VSLT_HU, OPC_VSLT_WU, OPC_VSLT_DU },
1856 { OPC_XVSLT_BU, OPC_XVSLT_HU, OPC_XVSLT_WU, OPC_XVSLT_DU },
1859 static const LoongArchInsn cmp_vec_imm_insn[16][2][4] = {
1861 { OPC_VSEQI_B, OPC_VSEQI_H, OPC_VSEQI_W, OPC_VSEQI_D },
1862 { OPC_XVSEQI_B, OPC_XVSEQI_H, OPC_XVSEQI_W, OPC_XVSEQI_D },
1865 { OPC_VSLEI_B, OPC_VSLEI_H, OPC_VSLEI_W, OPC_VSLEI_D },
1866 { OPC_XVSLEI_B, OPC_XVSLEI_H, OPC_XVSLEI_W, OPC_XVSLEI_D },
1869 { OPC_VSLEI_BU, OPC_VSLEI_HU, OPC_VSLEI_WU, OPC_VSLEI_DU },
1870 { OPC_XVSLEI_BU, OPC_XVSLEI_HU, OPC_XVSLEI_WU, OPC_XVSLEI_DU },
1873 { OPC_VSLTI_B, OPC_VSLTI_H, OPC_VSLTI_W, OPC_VSLTI_D },
1874 { OPC_XVSLTI_B, OPC_XVSLTI_H, OPC_XVSLTI_W, OPC_XVSLTI_D },
1877 { OPC_VSLTI_BU, OPC_VSLTI_HU, OPC_VSLTI_WU, OPC_VSLTI_DU },
1878 { OPC_XVSLTI_BU, OPC_XVSLTI_HU, OPC_XVSLTI_WU, OPC_XVSLTI_DU },
1881 static const LoongArchInsn neg_vec_insn[2][4] = {
1882 { OPC_VNEG_B, OPC_VNEG_H, OPC_VNEG_W, OPC_VNEG_D },
1883 { OPC_XVNEG_B, OPC_XVNEG_H, OPC_XVNEG_W, OPC_XVNEG_D },
1885 static const LoongArchInsn mul_vec_insn[2][4] = {
1886 { OPC_VMUL_B, OPC_VMUL_H, OPC_VMUL_W, OPC_VMUL_D },
1887 { OPC_XVMUL_B, OPC_XVMUL_H, OPC_XVMUL_W, OPC_XVMUL_D },
1889 static const LoongArchInsn smin_vec_insn[2][4] = {
1890 { OPC_VMIN_B, OPC_VMIN_H, OPC_VMIN_W, OPC_VMIN_D },
1891 { OPC_XVMIN_B, OPC_XVMIN_H, OPC_XVMIN_W, OPC_XVMIN_D },
1893 static const LoongArchInsn umin_vec_insn[2][4] = {
1894 { OPC_VMIN_BU, OPC_VMIN_HU, OPC_VMIN_WU, OPC_VMIN_DU },
1895 { OPC_XVMIN_BU, OPC_XVMIN_HU, OPC_XVMIN_WU, OPC_XVMIN_DU },
1897 static const LoongArchInsn smax_vec_insn[2][4] = {
1898 { OPC_VMAX_B, OPC_VMAX_H, OPC_VMAX_W, OPC_VMAX_D },
1899 { OPC_XVMAX_B, OPC_XVMAX_H, OPC_XVMAX_W, OPC_XVMAX_D },
1901 static const LoongArchInsn umax_vec_insn[2][4] = {
1902 { OPC_VMAX_BU, OPC_VMAX_HU, OPC_VMAX_WU, OPC_VMAX_DU },
1903 { OPC_XVMAX_BU, OPC_XVMAX_HU, OPC_XVMAX_WU, OPC_XVMAX_DU },
1905 static const LoongArchInsn ssadd_vec_insn[2][4] = {
1906 { OPC_VSADD_B, OPC_VSADD_H, OPC_VSADD_W, OPC_VSADD_D },
1907 { OPC_XVSADD_B, OPC_XVSADD_H, OPC_XVSADD_W, OPC_XVSADD_D },
1909 static const LoongArchInsn usadd_vec_insn[2][4] = {
1910 { OPC_VSADD_BU, OPC_VSADD_HU, OPC_VSADD_WU, OPC_VSADD_DU },
1911 { OPC_XVSADD_BU, OPC_XVSADD_HU, OPC_XVSADD_WU, OPC_XVSADD_DU },
1913 static const LoongArchInsn sssub_vec_insn[2][4] = {
1914 { OPC_VSSUB_B, OPC_VSSUB_H, OPC_VSSUB_W, OPC_VSSUB_D },
1915 { OPC_XVSSUB_B, OPC_XVSSUB_H, OPC_XVSSUB_W, OPC_XVSSUB_D },
1917 static const LoongArchInsn ussub_vec_insn[2][4] = {
1918 { OPC_VSSUB_BU, OPC_VSSUB_HU, OPC_VSSUB_WU, OPC_VSSUB_DU },
1919 { OPC_XVSSUB_BU, OPC_XVSSUB_HU, OPC_XVSSUB_WU, OPC_XVSSUB_DU },
1921 static const LoongArchInsn shlv_vec_insn[2][4] = {
1922 { OPC_VSLL_B, OPC_VSLL_H, OPC_VSLL_W, OPC_VSLL_D },
1923 { OPC_XVSLL_B, OPC_XVSLL_H, OPC_XVSLL_W, OPC_XVSLL_D },
1925 static const LoongArchInsn shrv_vec_insn[2][4] = {
1926 { OPC_VSRL_B, OPC_VSRL_H, OPC_VSRL_W, OPC_VSRL_D },
1927 { OPC_XVSRL_B, OPC_XVSRL_H, OPC_XVSRL_W, OPC_XVSRL_D },
1929 static const LoongArchInsn sarv_vec_insn[2][4] = {
1930 { OPC_VSRA_B, OPC_VSRA_H, OPC_VSRA_W, OPC_VSRA_D },
1931 { OPC_XVSRA_B, OPC_XVSRA_H, OPC_XVSRA_W, OPC_XVSRA_D },
1933 static const LoongArchInsn shli_vec_insn[2][4] = {
1934 { OPC_VSLLI_B, OPC_VSLLI_H, OPC_VSLLI_W, OPC_VSLLI_D },
1935 { OPC_XVSLLI_B, OPC_XVSLLI_H, OPC_XVSLLI_W, OPC_XVSLLI_D },
1937 static const LoongArchInsn shri_vec_insn[2][4] = {
1938 { OPC_VSRLI_B, OPC_VSRLI_H, OPC_VSRLI_W, OPC_VSRLI_D },
1939 { OPC_XVSRLI_B, OPC_XVSRLI_H, OPC_XVSRLI_W, OPC_XVSRLI_D },
1941 static const LoongArchInsn sari_vec_insn[2][4] = {
1942 { OPC_VSRAI_B, OPC_VSRAI_H, OPC_VSRAI_W, OPC_VSRAI_D },
1943 { OPC_XVSRAI_B, OPC_XVSRAI_H, OPC_XVSRAI_W, OPC_XVSRAI_D },
1945 static const LoongArchInsn rotrv_vec_insn[2][4] = {
1946 { OPC_VROTR_B, OPC_VROTR_H, OPC_VROTR_W, OPC_VROTR_D },
1947 { OPC_XVROTR_B, OPC_XVROTR_H, OPC_XVROTR_W, OPC_XVROTR_D },
1949 static const LoongArchInsn rotri_vec_insn[2][4] = {
1950 { OPC_VROTRI_B, OPC_VROTRI_H, OPC_VROTRI_W, OPC_VROTRI_D },
1951 { OPC_XVROTRI_B, OPC_XVROTRI_H, OPC_XVROTRI_W, OPC_XVROTRI_D },
1960 case INDEX_op_st_vec:
1961 tcg_out_st(s, type, a0, a1, a2);
1963 case INDEX_op_ld_vec:
1964 tcg_out_ld(s, type, a0, a1, a2);
1966 case INDEX_op_and_vec:
1967 insn = lasx ? OPC_XVAND_V : OPC_VAND_V;
1969 case INDEX_op_andc_vec:
1971 * vandn vd, vj, vk: vd = vk & ~vj
1972 * andc_vec vd, vj, vk: vd = vj & ~vk
* hence vj and vk must be swapped when emitting vandn
1977 insn = lasx ? OPC_XVANDN_V : OPC_VANDN_V;
1979 case INDEX_op_or_vec:
1980 insn = lasx ? OPC_XVOR_V : OPC_VOR_V;
1982 case INDEX_op_orc_vec:
1983 insn = lasx ? OPC_XVORN_V : OPC_VORN_V;
1985 case INDEX_op_xor_vec:
1986 insn = lasx ? OPC_XVXOR_V : OPC_VXOR_V;
1988 case INDEX_op_not_vec:
1991 case INDEX_op_nor_vec:
1992 insn = lasx ? OPC_XVNOR_V : OPC_VNOR_V;
1994 case INDEX_op_cmp_vec:
1996 TCGCond cond = args[3];
1998 if (const_args[2]) {
2000 * cmp_vec dest, src, value
2001 * Try vseqi/vslei/vslti
2003 int64_t value = sextract64(a2, 0, 8 << vece);
2004 if ((cond == TCG_COND_EQ ||
2005 cond == TCG_COND_LE ||
2006 cond == TCG_COND_LT) &&
2007 (-0x10 <= value && value <= 0x0f)) {
2008 insn = cmp_vec_imm_insn[cond][lasx][vece];
2009 tcg_out32(s, encode_vdvjsk5_insn(insn, a0, a1, value));
2011 } else if ((cond == TCG_COND_LEU ||
2012 cond == TCG_COND_LTU) &&
2013 (0x00 <= value && value <= 0x1f)) {
2014 insn = cmp_vec_imm_insn[cond][lasx][vece];
2015 tcg_out32(s, encode_vdvjuk5_insn(insn, a0, a1, value));
2022 * cmp_vec a0, a1, temp, cond
2024 tcg_out_dupi_vec(s, type, vece, TCG_VEC_TMP0, a2);
2028 insn = cmp_vec_insn[cond][lasx][vece];
2031 t = a1, a1 = a2, a2 = t;
2032 cond = tcg_swap_cond(cond);
2033 insn = cmp_vec_insn[cond][lasx][vece];
2034 tcg_debug_assert(insn != 0);
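/*
 * Only EQ/LT(U)/LE(U) have direct vector compare insns; the remaining
 * orderings are obtained by swapping the operands and the condition
 * first.
 */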
2038 case INDEX_op_add_vec:
2039 tcg_out_addsub_vec(s, lasx, vece, a0, a1, a2, const_args[2], true);
2041 case INDEX_op_sub_vec:
2042 tcg_out_addsub_vec(s, lasx, vece, a0, a1, a2, const_args[2], false);
2044 case INDEX_op_neg_vec:
2045 tcg_out32(s, encode_vdvj_insn(neg_vec_insn[lasx][vece], a0, a1));
2047 case INDEX_op_mul_vec:
2048 insn = mul_vec_insn[lasx][vece];
2050 case INDEX_op_smin_vec:
2051 insn = smin_vec_insn[lasx][vece];
2053 case INDEX_op_smax_vec:
2054 insn = smax_vec_insn[lasx][vece];
2056 case INDEX_op_umin_vec:
2057 insn = umin_vec_insn[lasx][vece];
2059 case INDEX_op_umax_vec:
2060 insn = umax_vec_insn[lasx][vece];
2062 case INDEX_op_ssadd_vec:
2063 insn = ssadd_vec_insn[lasx][vece];
2065 case INDEX_op_usadd_vec:
2066 insn = usadd_vec_insn[lasx][vece];
2068 case INDEX_op_sssub_vec:
2069 insn = sssub_vec_insn[lasx][vece];
2071 case INDEX_op_ussub_vec:
2072 insn = ussub_vec_insn[lasx][vece];
2074 case INDEX_op_shlv_vec:
2075 insn = shlv_vec_insn[lasx][vece];
2077 case INDEX_op_shrv_vec:
2078 insn = shrv_vec_insn[lasx][vece];
2080 case INDEX_op_sarv_vec:
2081 insn = sarv_vec_insn[lasx][vece];
2083 case INDEX_op_rotlv_vec:
2084 /* rotlv_vec a1, a2 = rotrv_vec a1, -a2 */
2085 tcg_out32(s, encode_vdvj_insn(neg_vec_insn[lasx][vece],
2089 case INDEX_op_rotrv_vec:
2090 insn = rotrv_vec_insn[lasx][vece];
2092 case INDEX_op_shli_vec:
2093 insn = shli_vec_insn[lasx][vece];
2095 case INDEX_op_shri_vec:
2096 insn = shri_vec_insn[lasx][vece];
2098 case INDEX_op_sari_vec:
2099 insn = sari_vec_insn[lasx][vece];
2101 case INDEX_op_rotli_vec:
2102 /* rotli_vec a1, a2 = rotri_vec a1, -a2 */
2103 a2 = extract32(-a2, 0, 3 + vece);
2104 insn = rotri_vec_insn[lasx][vece];
2106 case INDEX_op_bitsel_vec:
2107 /* vbitsel vd, vj, vk, va = bitsel_vec vd, va, vk, vj */
2109 tcg_out_opc_xvbitsel_v(s, a0, a3, a2, a1);
2111 tcg_out_opc_vbitsel_v(s, a0, a3, a2, a1);
2114 case INDEX_op_dupm_vec:
2115 tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
2118 g_assert_not_reached();
2120 tcg_out32(s, encode_vdvjvk_insn(insn, a0, a1, a2));
2125 tcg_out32(s, encode_vdvjuk3_insn(insn, a0, a1, a2));
2128 tcg_out32(s, encode_vdvjuk4_insn(insn, a0, a1, a2));
2131 tcg_out32(s, encode_vdvjuk5_insn(insn, a0, a1, a2));
2134 tcg_out32(s, encode_vdvjuk6_insn(insn, a0, a1, a2));
2137 g_assert_not_reached();
2143 int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
2146 case INDEX_op_ld_vec:
2147 case INDEX_op_st_vec:
2148 case INDEX_op_dup_vec:
2149 case INDEX_op_dupm_vec:
2150 case INDEX_op_cmp_vec:
2151 case INDEX_op_add_vec:
2152 case INDEX_op_sub_vec:
2153 case INDEX_op_and_vec:
2154 case INDEX_op_andc_vec:
2155 case INDEX_op_or_vec:
2156 case INDEX_op_orc_vec:
2157 case INDEX_op_xor_vec:
2158 case INDEX_op_nor_vec:
2159 case INDEX_op_not_vec:
2160 case INDEX_op_neg_vec:
2161 case INDEX_op_mul_vec:
2162 case INDEX_op_smin_vec:
2163 case INDEX_op_smax_vec:
2164 case INDEX_op_umin_vec:
2165 case INDEX_op_umax_vec:
2166 case INDEX_op_ssadd_vec:
2167 case INDEX_op_usadd_vec:
2168 case INDEX_op_sssub_vec:
2169 case INDEX_op_ussub_vec:
2170 case INDEX_op_shlv_vec:
2171 case INDEX_op_shrv_vec:
2172 case INDEX_op_sarv_vec:
2173 case INDEX_op_bitsel_vec:
2180 void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
2183 g_assert_not_reached();
2186 static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
2189 case INDEX_op_goto_ptr:
2192 case INDEX_op_st8_i32:
2193 case INDEX_op_st8_i64:
2194 case INDEX_op_st16_i32:
2195 case INDEX_op_st16_i64:
2196 case INDEX_op_st32_i64:
2197 case INDEX_op_st_i32:
2198 case INDEX_op_st_i64:
2199 case INDEX_op_qemu_st_a32_i32:
2200 case INDEX_op_qemu_st_a64_i32:
2201 case INDEX_op_qemu_st_a32_i64:
2202 case INDEX_op_qemu_st_a64_i64:
2203 return C_O0_I2(rZ, r);
2205 case INDEX_op_qemu_ld_a32_i128:
2206 case INDEX_op_qemu_ld_a64_i128:
2207 return C_N2_I1(r, r, r);
2209 case INDEX_op_qemu_st_a32_i128:
2210 case INDEX_op_qemu_st_a64_i128:
2211 return C_O0_I3(r, r, r);
2213 case INDEX_op_brcond_i32:
2214 case INDEX_op_brcond_i64:
2215 return C_O0_I2(rZ, rZ);
2217 case INDEX_op_ext8s_i32:
2218 case INDEX_op_ext8s_i64:
2219 case INDEX_op_ext8u_i32:
2220 case INDEX_op_ext8u_i64:
2221 case INDEX_op_ext16s_i32:
2222 case INDEX_op_ext16s_i64:
2223 case INDEX_op_ext16u_i32:
2224 case INDEX_op_ext16u_i64:
2225 case INDEX_op_ext32s_i64:
2226 case INDEX_op_ext32u_i64:
2227 case INDEX_op_extu_i32_i64:
2228 case INDEX_op_extrl_i64_i32:
2229 case INDEX_op_extrh_i64_i32:
2230 case INDEX_op_ext_i32_i64:
2231 case INDEX_op_neg_i32:
2232 case INDEX_op_neg_i64:
2233 case INDEX_op_not_i32:
2234 case INDEX_op_not_i64:
2235 case INDEX_op_extract_i32:
2236 case INDEX_op_extract_i64:
2237 case INDEX_op_bswap16_i32:
2238 case INDEX_op_bswap16_i64:
2239 case INDEX_op_bswap32_i32:
2240 case INDEX_op_bswap32_i64:
2241 case INDEX_op_bswap64_i64:
2242 case INDEX_op_ld8s_i32:
2243 case INDEX_op_ld8s_i64:
2244 case INDEX_op_ld8u_i32:
2245 case INDEX_op_ld8u_i64:
2246 case INDEX_op_ld16s_i32:
2247 case INDEX_op_ld16s_i64:
2248 case INDEX_op_ld16u_i32:
2249 case INDEX_op_ld16u_i64:
2250 case INDEX_op_ld32s_i64:
2251 case INDEX_op_ld32u_i64:
2252 case INDEX_op_ld_i32:
2253 case INDEX_op_ld_i64:
2254 case INDEX_op_qemu_ld_a32_i32:
2255 case INDEX_op_qemu_ld_a64_i32:
2256 case INDEX_op_qemu_ld_a32_i64:
2257 case INDEX_op_qemu_ld_a64_i64:
2258 return C_O1_I1(r, r);
2260 case INDEX_op_andc_i32:
2261 case INDEX_op_andc_i64:
2262 case INDEX_op_orc_i32:
2263 case INDEX_op_orc_i64:
* LoongArch insns for these ops don't have reg-imm forms, but we
* can express them using andi/ori if ~constant fits in an unsigned
* 12-bit immediate (constraint "C").
2269 return C_O1_I2(r, r, rC);
2271 case INDEX_op_shl_i32:
2272 case INDEX_op_shl_i64:
2273 case INDEX_op_shr_i32:
2274 case INDEX_op_shr_i64:
2275 case INDEX_op_sar_i32:
2276 case INDEX_op_sar_i64:
2277 case INDEX_op_rotl_i32:
2278 case INDEX_op_rotl_i64:
2279 case INDEX_op_rotr_i32:
2280 case INDEX_op_rotr_i64:
2281 return C_O1_I2(r, r, ri);
2283 case INDEX_op_add_i32:
2284 return C_O1_I2(r, r, ri);
2285 case INDEX_op_add_i64:
2286 return C_O1_I2(r, r, rJ);
2288 case INDEX_op_and_i32:
2289 case INDEX_op_and_i64:
2290 case INDEX_op_nor_i32:
2291 case INDEX_op_nor_i64:
2292 case INDEX_op_or_i32:
2293 case INDEX_op_or_i64:
2294 case INDEX_op_xor_i32:
2295 case INDEX_op_xor_i64:
2296 /* LoongArch reg-imm bitops have their imms ZERO-extended */
2297 return C_O1_I2(r, r, rU);
2299 case INDEX_op_clz_i32:
2300 case INDEX_op_clz_i64:
2301 case INDEX_op_ctz_i32:
2302 case INDEX_op_ctz_i64:
2303 return C_O1_I2(r, r, rW);
2305 case INDEX_op_deposit_i32:
2306 case INDEX_op_deposit_i64:
2307 /* Must deposit into the same register as input */
2308 return C_O1_I2(r, 0, rZ);
2310 case INDEX_op_sub_i32:
2311 case INDEX_op_setcond_i32:
2312 return C_O1_I2(r, rZ, ri);
2313 case INDEX_op_sub_i64:
2314 case INDEX_op_setcond_i64:
2315 return C_O1_I2(r, rZ, rJ);
2317 case INDEX_op_mul_i32:
2318 case INDEX_op_mul_i64:
2319 case INDEX_op_mulsh_i32:
2320 case INDEX_op_mulsh_i64:
2321 case INDEX_op_muluh_i32:
2322 case INDEX_op_muluh_i64:
2323 case INDEX_op_div_i32:
2324 case INDEX_op_div_i64:
2325 case INDEX_op_divu_i32:
2326 case INDEX_op_divu_i64:
2327 case INDEX_op_rem_i32:
2328 case INDEX_op_rem_i64:
2329 case INDEX_op_remu_i32:
2330 case INDEX_op_remu_i64:
2331 return C_O1_I2(r, rZ, rZ);
2333 case INDEX_op_movcond_i32:
2334 case INDEX_op_movcond_i64:
2335 return C_O1_I4(r, rZ, rJ, rZ, rZ);
2337 case INDEX_op_ld_vec:
2338 case INDEX_op_dupm_vec:
2339 case INDEX_op_dup_vec:
2340 return C_O1_I1(w, r);
2342 case INDEX_op_st_vec:
2343 return C_O0_I2(w, r);
2345 case INDEX_op_cmp_vec:
2346 return C_O1_I2(w, w, wM);
2348 case INDEX_op_add_vec:
2349 case INDEX_op_sub_vec:
2350 return C_O1_I2(w, w, wA);
2352 case INDEX_op_and_vec:
2353 case INDEX_op_andc_vec:
2354 case INDEX_op_or_vec:
2355 case INDEX_op_orc_vec:
2356 case INDEX_op_xor_vec:
2357 case INDEX_op_nor_vec:
2358 case INDEX_op_mul_vec:
2359 case INDEX_op_smin_vec:
2360 case INDEX_op_smax_vec:
2361 case INDEX_op_umin_vec:
2362 case INDEX_op_umax_vec:
2363 case INDEX_op_ssadd_vec:
2364 case INDEX_op_usadd_vec:
2365 case INDEX_op_sssub_vec:
2366 case INDEX_op_ussub_vec:
2367 case INDEX_op_shlv_vec:
2368 case INDEX_op_shrv_vec:
2369 case INDEX_op_sarv_vec:
2370 case INDEX_op_rotrv_vec:
2371 case INDEX_op_rotlv_vec:
2372 return C_O1_I2(w, w, w);
2374 case INDEX_op_not_vec:
2375 case INDEX_op_neg_vec:
2376 case INDEX_op_shli_vec:
2377 case INDEX_op_shri_vec:
2378 case INDEX_op_sari_vec:
2379 case INDEX_op_rotli_vec:
2380 return C_O1_I1(w, w);
2382 case INDEX_op_bitsel_vec:
2383 return C_O1_I3(w, w, w, w);
2386 g_assert_not_reached();
2390 static const int tcg_target_callee_save_regs[] = {
2391 TCG_REG_S0, /* used for the global env (TCG_AREG0) */
2401 TCG_REG_RA, /* should be last for ABI compliance */
2404 /* Stack frame parameters. */
2405 #define REG_SIZE (TCG_TARGET_REG_BITS / 8)
2406 #define SAVE_SIZE ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * REG_SIZE)
2407 #define TEMP_SIZE (CPU_TEMP_BUF_NLONGS * (int)sizeof(long))
2408 #define FRAME_SIZE ((TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE + SAVE_SIZE \
2409 + TCG_TARGET_STACK_ALIGN - 1) \
2410 & -TCG_TARGET_STACK_ALIGN)
2411 #define SAVE_OFS (TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE)
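/*
 * Frame layout, from the stack pointer upwards: TCG_STATIC_CALL_ARGS_SIZE
 * bytes of outgoing call arguments, TEMP_SIZE bytes of TCG spill area
 * (registered via tcg_set_frame below), then the callee-saved register
 * save area at SAVE_OFS.
 */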
2413 /* We're expecting to be able to use an immediate for frame allocation. */
2414 QEMU_BUILD_BUG_ON(FRAME_SIZE > 0x7ff);
2416 /* Generate global QEMU prologue and epilogue code */
2417 static void tcg_target_qemu_prologue(TCGContext *s)
2421 tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE, TEMP_SIZE);
2424 tcg_out_opc_addi_d(s, TCG_REG_SP, TCG_REG_SP, -FRAME_SIZE);
2425 for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
2426 tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
2427 TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
2430 if (!tcg_use_softmmu && guest_base) {
2431 tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
2432 tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
2435 /* Call generated code */
2436 tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
2437 tcg_out_opc_jirl(s, TCG_REG_ZERO, tcg_target_call_iarg_regs[1], 0);
2439 /* Return path for goto_ptr. Set return value to 0 */
2440 tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
2441 tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_A0, TCG_REG_ZERO);
2444 tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr);
2445 for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
2446 tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
2447 TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
2450 tcg_out_opc_addi_d(s, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE);
2451 tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_RA, 0);
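/* jirl zero, ra, 0 is LoongArch's canonical return ("ret") instruction. */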
2454 static void tcg_out_tb_start(TCGContext *s)
2459 static void tcg_target_init(TCGContext *s)
2461 unsigned long hwcap = qemu_getauxval(AT_HWCAP);
2463 /* Server and desktop class cpus have UAL; embedded cpus do not. */
2464 if (!(hwcap & HWCAP_LOONGARCH_UAL)) {
2465 error_report("TCG: unaligned access support required; exiting");
2469 tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
2470 tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS;
2472 tcg_target_call_clobber_regs = ALL_GENERAL_REGS | ALL_VECTOR_REGS;
2473 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S0);
2474 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S1);
2475 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S2);
2476 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S3);
2477 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S4);
2478 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S5);
2479 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S6);
2480 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S7);
2481 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S8);
2482 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S9);
2484 if (cpuinfo & CPUINFO_LSX) {
2485 tcg_target_available_regs[TCG_TYPE_V64] = ALL_VECTOR_REGS;
2486 tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS;
2487 if (cpuinfo & CPUINFO_LASX) {
2488 tcg_target_available_regs[TCG_TYPE_V256] = ALL_VECTOR_REGS;
2490 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V24);
2491 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V25);
2492 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V26);
2493 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V27);
2494 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V28);
2495 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V29);
2496 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V30);
2497 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V31);
2500 s->reserved_regs = 0;
2501 tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO);
2502 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP0);
2503 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1);
2504 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2);
2505 tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP);
2506 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TP);
2507 tcg_regset_set_reg(s->reserved_regs, TCG_REG_RESERVED);
2508 tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP0);
2513 uint8_t fde_def_cfa[4];
2514 uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2];
2517 #define ELF_HOST_MACHINE EM_LOONGARCH
2519 static const DebugFrame debug_frame = {
2520 .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */
2523 .h.cie.code_align = 1,
2524 .h.cie.data_align = -(TCG_TARGET_REG_BITS / 8) & 0x7f, /* sleb128 */
2525 .h.cie.return_column = TCG_REG_RA,
2527 /* Total FDE size does not include the "len" member. */
2528 .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
2531 12, TCG_REG_SP, /* DW_CFA_def_cfa sp, ... */
2532 (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */
2536 0x80 + 23, 11, /* DW_CFA_offset, s0, -88 */
2537 0x80 + 24, 10, /* DW_CFA_offset, s1, -80 */
2538 0x80 + 25, 9, /* DW_CFA_offset, s2, -72 */
2539 0x80 + 26, 8, /* DW_CFA_offset, s3, -64 */
2540 0x80 + 27, 7, /* DW_CFA_offset, s4, -56 */
2541 0x80 + 28, 6, /* DW_CFA_offset, s5, -48 */
2542 0x80 + 29, 5, /* DW_CFA_offset, s6, -40 */
2543 0x80 + 30, 4, /* DW_CFA_offset, s7, -32 */
2544 0x80 + 31, 3, /* DW_CFA_offset, s8, -24 */
2545 0x80 + 22, 2, /* DW_CFA_offset, s9, -16 */
2546 0x80 + 1 , 1, /* DW_CFA_offset, ra, -8 */
2550 void tcg_register_jit(const void *buf, size_t buf_size)
2552 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));