/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Andrzej Zaborowski
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 */

#include "../tcg-ldst.c.inc"
#include "../tcg-pool.c.inc"

int arm_arch = __ARM_ARCH;

#ifndef use_idiv_instructions
bool use_idiv_instructions;

#ifndef use_neon_instructions
bool use_neon_instructions;

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
    "%r8", "%r9", "%r10", "%r11", "%r12", "%sp", "%r14", "%pc",
    "%q0", "%q1", "%q2", "%q3", "%q4", "%q5", "%q6", "%q7",
    "%q8", "%q9", "%q10", "%q11", "%q12", "%q13", "%q14", "%q15",

static const int tcg_target_reg_alloc_order[] = {
    /* Q4 - Q7 are call-saved, and skipped. */

static const int tcg_target_call_iarg_regs[4] = {
    TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3

static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
    tcg_debug_assert(slot >= 0 && slot <= 3);
    return TCG_REG_R0 + slot;

#define TCG_REG_TMP TCG_REG_R12
#define TCG_VEC_TMP TCG_REG_Q15
#define TCG_REG_GUEST_BASE TCG_REG_R11

    COND_CS = 0x2, /* Unsigned greater or equal */
    COND_CC = 0x3, /* Unsigned less than */
    COND_MI = 0x4, /* Negative */
    COND_PL = 0x5, /* Zero or greater */
    COND_VS = 0x6, /* Overflow */
    COND_VC = 0x7, /* No overflow */
    COND_HI = 0x8, /* Unsigned greater than */
    COND_LS = 0x9, /* Unsigned less or equal */

#define TO_CPSR (1 << 20)

#define SHIFT_IMM_LSL(im) (((im) << 7) | 0x00)
#define SHIFT_IMM_LSR(im) (((im) << 7) | 0x20)
#define SHIFT_IMM_ASR(im) (((im) << 7) | 0x40)
#define SHIFT_IMM_ROR(im) (((im) << 7) | 0x60)
#define SHIFT_REG_LSL(rs) (((rs) << 8) | 0x10)
#define SHIFT_REG_LSR(rs) (((rs) << 8) | 0x30)
#define SHIFT_REG_ASR(rs) (((rs) << 8) | 0x50)
#define SHIFT_REG_ROR(rs) (((rs) << 8) | 0x70)

    ARITH_AND = 0x0 << 21,
    ARITH_EOR = 0x1 << 21,
    ARITH_SUB = 0x2 << 21,
    ARITH_RSB = 0x3 << 21,
    ARITH_ADD = 0x4 << 21,
    ARITH_ADC = 0x5 << 21,
    ARITH_SBC = 0x6 << 21,
    ARITH_RSC = 0x7 << 21,
    ARITH_TST = 0x8 << 21 | TO_CPSR,
    ARITH_CMP = 0xa << 21 | TO_CPSR,
    ARITH_CMN = 0xb << 21 | TO_CPSR,
    ARITH_ORR = 0xc << 21,
    ARITH_MOV = 0xd << 21,
    ARITH_BIC = 0xe << 21,
    ARITH_MVN = 0xf << 21,

    INSN_CLZ = 0x016f0f10,
    INSN_RBIT = 0x06ff0f30,

    INSN_LDMIA = 0x08b00000,
    INSN_STMDB = 0x09200000,

    INSN_LDR_IMM = 0x04100000,
    INSN_LDR_REG = 0x06100000,
    INSN_STR_IMM = 0x04000000,
    INSN_STR_REG = 0x06000000,

    INSN_LDRH_IMM = 0x005000b0,
    INSN_LDRH_REG = 0x001000b0,
    INSN_LDRSH_IMM = 0x005000f0,
    INSN_LDRSH_REG = 0x001000f0,
    INSN_STRH_IMM = 0x004000b0,
    INSN_STRH_REG = 0x000000b0,

    INSN_LDRB_IMM = 0x04500000,
    INSN_LDRB_REG = 0x06500000,
    INSN_LDRSB_IMM = 0x005000d0,
    INSN_LDRSB_REG = 0x001000d0,
    INSN_STRB_IMM = 0x04400000,
    INSN_STRB_REG = 0x06400000,

    INSN_LDRD_IMM = 0x004000d0,
    INSN_LDRD_REG = 0x000000d0,
    INSN_STRD_IMM = 0x004000f0,
    INSN_STRD_REG = 0x000000f0,

    INSN_DMB_ISH = 0xf57ff05b,
    INSN_DMB_MCR = 0xee070fba,

    /* Architected nop introduced in v6k. */
    /* ??? This is an MSR (imm) 0,0,0 insn. Anyone know if this
       also Just So Happened to do nothing on pre-v6k so that we
       don't need to conditionalize it? */
    INSN_NOP_v6k = 0xe320f000,
    /* Otherwise the assembler uses mov r0,r0 */
    INSN_NOP_v4 = (COND_AL << 28) | ARITH_MOV,

    INSN_VADD = 0xf2000800,
    INSN_VAND = 0xf2000110,
    INSN_VBIC = 0xf2100110,
    INSN_VEOR = 0xf3000110,
    INSN_VORN = 0xf2300110,
    INSN_VORR = 0xf2200110,
    INSN_VSUB = 0xf3000800,
    INSN_VMUL = 0xf2000910,
    INSN_VQADD = 0xf2000010,
    INSN_VQADD_U = 0xf3000010,
    INSN_VQSUB = 0xf2000210,
    INSN_VQSUB_U = 0xf3000210,
    INSN_VMAX = 0xf2000600,
    INSN_VMAX_U = 0xf3000600,
    INSN_VMIN = 0xf2000610,
    INSN_VMIN_U = 0xf3000610,

    INSN_VABS = 0xf3b10300,
    INSN_VMVN = 0xf3b00580,
    INSN_VNEG = 0xf3b10380,

    INSN_VCEQ0 = 0xf3b10100,
    INSN_VCGT0 = 0xf3b10000,
    INSN_VCGE0 = 0xf3b10080,
    INSN_VCLE0 = 0xf3b10180,
    INSN_VCLT0 = 0xf3b10200,

    INSN_VCEQ = 0xf3000810,
    INSN_VCGE = 0xf2000310,
    INSN_VCGT = 0xf2000300,
    INSN_VCGE_U = 0xf3000310,
    INSN_VCGT_U = 0xf3000300,

    INSN_VSHLI = 0xf2800510, /* VSHL (immediate) */
    INSN_VSARI = 0xf2800010, /* VSHR.S */
    INSN_VSHRI = 0xf3800010, /* VSHR.U */
    INSN_VSLI = 0xf3800510,
    INSN_VSHL_S = 0xf2000400, /* VSHL.S (register) */
    INSN_VSHL_U = 0xf3000400, /* VSHL.U (register) */

    INSN_VBSL = 0xf3100110,
    INSN_VBIT = 0xf3200110,
    INSN_VBIF = 0xf3300110,

    INSN_VTST = 0xf2000810,

    INSN_VDUP_G = 0xee800b10, /* VDUP (ARM core register) */
    INSN_VDUP_S = 0xf3b00c00, /* VDUP (scalar) */
    INSN_VLDR_D = 0xed100b00, /* VLDR.64 */
    INSN_VLD1 = 0xf4200000, /* VLD1 (multiple single elements) */
    INSN_VLD1R = 0xf4a00c00, /* VLD1 (single element to all lanes) */
    INSN_VST1 = 0xf4000000, /* VST1 (multiple single elements) */
    INSN_VMOVI = 0xf2800010, /* VMOV (immediate) */

#define INSN_NOP (use_armv7_instructions ? INSN_NOP_v6k : INSN_NOP_v4)

static const uint8_t tcg_cond_to_arm_cond[] = {
    [TCG_COND_EQ] = COND_EQ,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_LT,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_GT,
    [TCG_COND_LTU] = COND_CC,
    [TCG_COND_GEU] = COND_CS,
    [TCG_COND_LEU] = COND_LS,
    [TCG_COND_GTU] = COND_HI,

static int encode_imm(uint32_t imm);

/* TCG private relocation type: add with pc+imm8 */

/* TCG private relocation type: vldr with imm8 << 2 */
#define R_ARM_PC11 12

static bool reloc_pc24(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = (tcg_ptr_byte_diff(target, src_rx) - 8) >> 2;

    if (offset == sextract32(offset, 0, 24)) {
        *src_rw = deposit32(*src_rw, 0, 24, offset);
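    /*
     * Illustrative example (not used by the code): for a B/BL insn at
     * rx address 0x1000 targeting 0x2000, hardware reads PC as the insn
     * address + 8, so the encoded word offset is
     * (0x2000 - 0x1000 - 8) >> 2 == 0x3fe, well within the signed
     * 24-bit field checked above.
     */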
static bool reloc_pc13(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = tcg_ptr_byte_diff(target, src_rx) - 8;

    if (offset >= -0xfff && offset <= 0xfff) {
        tcg_insn_unit insn = *src_rw;
        bool u = (offset >= 0);
        insn = deposit32(insn, 23, 1, u);
        insn = deposit32(insn, 0, 12, offset);

static bool reloc_pc11(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = (tcg_ptr_byte_diff(target, src_rx) - 8) / 4;

    if (offset >= -0xff && offset <= 0xff) {
        tcg_insn_unit insn = *src_rw;
        bool u = (offset >= 0);
        insn = deposit32(insn, 23, 1, u);
        insn = deposit32(insn, 0, 8, offset);

static bool reloc_pc8(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = tcg_ptr_byte_diff(target, src_rx) - 8;
    int imm12 = encode_imm(offset);

        *src_rw = deposit32(*src_rw, 0, 12, imm12);

static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
    tcg_debug_assert(addend == 0);
        return reloc_pc24(code_ptr, (const tcg_insn_unit *)value);
        return reloc_pc13(code_ptr, (const tcg_insn_unit *)value);
        return reloc_pc11(code_ptr, (const tcg_insn_unit *)value);
        return reloc_pc8(code_ptr, (const tcg_insn_unit *)value);
        g_assert_not_reached();

#define TCG_CT_CONST_ARM  0x100
#define TCG_CT_CONST_INV  0x200
#define TCG_CT_CONST_NEG  0x400
#define TCG_CT_CONST_ZERO 0x800
#define TCG_CT_CONST_ORRI 0x1000
#define TCG_CT_CONST_ANDI 0x2000

#define ALL_GENERAL_REGS 0xffffu
#define ALL_VECTOR_REGS  0xffff0000u

/*
 * r0-r3 will be overwritten when reading the tlb entry (system-mode only);
 * r14 will be overwritten by the BLNE branching to the slow path.
 */
#define ALL_QLDST_REGS \
    (ALL_GENERAL_REGS & ~((tcg_use_softmmu ? 0xf : 0) | (1 << TCG_REG_R14)))
/*
 * ARM immediates for ALU instructions are made of an unsigned 8-bit
 * value, right-rotated by an even amount between 0 and 30.
 *
 * Return < 0 if @imm cannot be encoded, else the entire imm12 field.
 */
static int encode_imm(uint32_t imm)
    /* Simple case, no rotation required. */
    if ((imm & ~0xff) == 0) {

    /* Next, try a simple even shift. */
    rot = ctz32(imm) & ~1;
    if ((imm8 & ~0xff) == 0) {

    /*
     * Finally, try harder with rotations.
     * The ctz test above will have taken care of rotates >= 8.
     */
    for (rot = 2; rot < 8; rot += 2) {
        imm8 = rol32(imm, rot);
        if ((imm8 & ~0xff) == 0) {

    /* Fail: imm cannot be encoded. */

    /* Note that rot is even, and we discard bit 0 by shifting by 7. */
    return rot << 7 | imm8;
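/*
 * Worked example (illustrative only): imm = 0xff000000 has ctz32() == 24,
 * so the residue is 0xff with a right-rotation of 8.  The returned imm12
 * is (8 >> 1) << 8 | 0xff == 0x4ff; the CPU decodes the top nibble as
 * rotation/2, giving ror32(0xff, 8) == 0xff000000 again.
 */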
static int encode_imm_nofail(uint32_t imm)
    int ret = encode_imm(imm);
    tcg_debug_assert(ret >= 0);

static bool check_fit_imm(uint32_t imm)
    return encode_imm(imm) >= 0;

/* Return true if v16 is a valid 16-bit shifted immediate. */
static bool is_shimm16(uint16_t v16, int *cmode, int *imm8)
    if (v16 == (v16 & 0xff)) {
    } else if (v16 == (v16 & 0xff00)) {

/* Return true if v32 is a valid 32-bit shifted immediate. */
static bool is_shimm32(uint32_t v32, int *cmode, int *imm8)
    if (v32 == (v32 & 0xff)) {
    } else if (v32 == (v32 & 0xff00)) {
        *imm8 = (v32 >> 8) & 0xff;
    } else if (v32 == (v32 & 0xff0000)) {
        *imm8 = (v32 >> 16) & 0xff;
    } else if (v32 == (v32 & 0xff000000)) {

/* Return true if v32 is a valid 32-bit shifting ones immediate. */
static bool is_soimm32(uint32_t v32, int *cmode, int *imm8)
    if ((v32 & 0xffff00ff) == 0xff) {
        *imm8 = (v32 >> 8) & 0xff;
    } else if ((v32 & 0xff00ffff) == 0xffff) {
        *imm8 = (v32 >> 16) & 0xff;

/*
 * Return non-zero if v32 can be formed by MOVI+ORR.
 * Place the parameters for MOVI in (cmode, imm8).
 * Return the cmode for ORR; the imm8 can be had via extraction from v32.
 */
static int is_shimm32_pair(uint32_t v32, int *cmode, int *imm8)
    for (i = 6; i > 0; i -= 2) {
        /* Mask out one byte we can add with ORR. */
        uint32_t tmp = v32 & ~(0xffu << (i * 4));
        if (is_shimm32(tmp, cmode, imm8) ||
            is_soimm32(tmp, cmode, imm8)) {

/* Return true if V is a valid 16-bit or 32-bit shifted immediate. */
static bool is_shimm1632(uint32_t v32, int *cmode, int *imm8)
    if (v32 == deposit32(v32, 16, 16, v32)) {
        return is_shimm16(v32, cmode, imm8);
        return is_shimm32(v32, cmode, imm8);
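/*
 * Worked example (illustrative only): v32 = 0x00ab0000 matches the
 * third is_shimm32 case, yielding cmode 0x4 and imm8 0xab, i.e. the
 * AdvSIMD modified-immediate "imm8 << 16" form.  A value such as
 * 0x0000abff instead matches is_soimm32 with cmode 0xc ("imm8:ones").
 */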
/* Test if a constant matches the constraint.
 * TODO: define constraints for:
 * ldr/str offset:   between -0xfff and 0xfff
 * ldrh/strh offset: between -0xff and 0xff
 * mov operand2:     values represented with x << (2 * y), x < 0x100
 * add, sub, eor...: ditto
 */
static bool tcg_target_const_match(int64_t val, int ct,
                                   TCGType type, TCGCond cond, int vece)
    if (ct & TCG_CT_CONST) {
    } else if ((ct & TCG_CT_CONST_ARM) && check_fit_imm(val)) {
    } else if ((ct & TCG_CT_CONST_INV) && check_fit_imm(~val)) {
    } else if ((ct & TCG_CT_CONST_NEG) && check_fit_imm(-val)) {
    } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {

    switch (ct & (TCG_CT_CONST_ORRI | TCG_CT_CONST_ANDI)) {
    case TCG_CT_CONST_ANDI:
    case TCG_CT_CONST_ORRI:
        if (val == deposit64(val, 32, 32, val)) {
            return is_shimm1632(val, &cmode, &imm8);
        /* Both bits should not be set for the same insn. */
        g_assert_not_reached();

static void tcg_out_b_imm(TCGContext *s, ARMCond cond, int32_t offset)
    tcg_out32(s, (cond << 28) | INSN_B |
                 (((offset - 8) >> 2) & 0x00ffffff));

static void tcg_out_bl_imm(TCGContext *s, ARMCond cond, int32_t offset)
    tcg_out32(s, (cond << 28) | 0x0b000000 |
                 (((offset - 8) >> 2) & 0x00ffffff));

static void tcg_out_blx_reg(TCGContext *s, ARMCond cond, TCGReg rn)
    tcg_out32(s, (cond << 28) | 0x012fff30 | rn);

static void tcg_out_blx_imm(TCGContext *s, int32_t offset)
    tcg_out32(s, 0xfa000000 | ((offset & 2) << 23) |
                 (((offset - 8) >> 2) & 0x00ffffff));

static void tcg_out_dat_reg(TCGContext *s, ARMCond cond, ARMInsn opc,
                            TCGReg rd, TCGReg rn, TCGReg rm, int shift)
    tcg_out32(s, (cond << 28) | (0 << 25) | opc |
                 (rn << 16) | (rd << 12) | shift | rm);

static void tcg_out_mov_reg(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rm)
    /* Simple reg-reg move, optimising out the 'do nothing' case */
        tcg_out_dat_reg(s, cond, ARITH_MOV, rd, 0, rm, SHIFT_IMM_LSL(0));

static void tcg_out_bx_reg(TCGContext *s, ARMCond cond, TCGReg rn)
    tcg_out32(s, (cond << 28) | 0x012fff10 | rn);

static void tcg_out_b_reg(TCGContext *s, ARMCond cond, TCGReg rn)
    /*
     * Unless the C portion of QEMU is compiled as thumb, we don't need
     * true BX semantics; merely a branch to an address held in a register.
     */
    tcg_out_bx_reg(s, cond, rn);

static void tcg_out_dat_imm(TCGContext *s, ARMCond cond, ARMInsn opc,
                            TCGReg rd, TCGReg rn, int im)
    tcg_out32(s, (cond << 28) | (1 << 25) | opc |
                 (rn << 16) | (rd << 12) | im);

static void tcg_out_ldstm(TCGContext *s, ARMCond cond, ARMInsn opc,
                          TCGReg rn, uint16_t mask)
    tcg_out32(s, (cond << 28) | opc | (rn << 16) | mask);

/* Note that this routine is used for both LDR and LDRH formats, so we do
   not wish to include an immediate shift at this point. */
static void tcg_out_memop_r(TCGContext *s, ARMCond cond, ARMInsn opc, TCGReg rt,
                            TCGReg rn, TCGReg rm, bool u, bool p, bool w)
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24)
                 | (w << 21) | (rn << 16) | (rt << 12) | rm);

static void tcg_out_memop_8(TCGContext *s, ARMCond cond, ARMInsn opc, TCGReg rt,
                            TCGReg rn, int imm8, bool p, bool w)
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) |
                 (rn << 16) | (rt << 12) | ((imm8 & 0xf0) << 4) | (imm8 & 0xf));

static void tcg_out_memop_12(TCGContext *s, ARMCond cond, ARMInsn opc,
                             TCGReg rt, TCGReg rn, int imm12, bool p, bool w)
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) |
                 (rn << 16) | (rt << 12) | imm12);

static void tcg_out_ld32_12(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, int imm12)
    tcg_out_memop_12(s, cond, INSN_LDR_IMM, rt, rn, imm12, 1, 0);

static void tcg_out_st32_12(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, int imm12)
    tcg_out_memop_12(s, cond, INSN_STR_IMM, rt, rn, imm12, 1, 0);

static void tcg_out_ld32_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
    tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 0);

static void tcg_out_st32_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
    tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 0);

static void tcg_out_ldrd_8(TCGContext *s, ARMCond cond, TCGReg rt,
    tcg_out_memop_8(s, cond, INSN_LDRD_IMM, rt, rn, imm8, 1, 0);

static void tcg_out_ldrd_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
    tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 0);

static void __attribute__((unused))
tcg_out_ldrd_rwb(TCGContext *s, ARMCond cond, TCGReg rt, TCGReg rn, TCGReg rm)
    tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 1);

static void __attribute__((unused))
tcg_out_strd_8(TCGContext *s, ARMCond cond, TCGReg rt, TCGReg rn, int imm8)
    tcg_out_memop_8(s, cond, INSN_STRD_IMM, rt, rn, imm8, 1, 0);

static void tcg_out_strd_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
    tcg_out_memop_r(s, cond, INSN_STRD_REG, rt, rn, rm, 1, 1, 0);

/* Register pre-increment with base writeback. */
static void tcg_out_ld32_rwb(TCGContext *s, ARMCond cond, TCGReg rt,
                             TCGReg rn, TCGReg rm)
    tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 1);

static void tcg_out_st32_rwb(TCGContext *s, ARMCond cond, TCGReg rt,
                             TCGReg rn, TCGReg rm)
    tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 1);

static void tcg_out_ld16u_8(TCGContext *s, ARMCond cond, TCGReg rt,
    tcg_out_memop_8(s, cond, INSN_LDRH_IMM, rt, rn, imm8, 1, 0);

static void tcg_out_st16_8(TCGContext *s, ARMCond cond, TCGReg rt,
    tcg_out_memop_8(s, cond, INSN_STRH_IMM, rt, rn, imm8, 1, 0);

static void tcg_out_ld16u_r(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, TCGReg rm)
    tcg_out_memop_r(s, cond, INSN_LDRH_REG, rt, rn, rm, 1, 1, 0);

static void tcg_out_st16_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
    tcg_out_memop_r(s, cond, INSN_STRH_REG, rt, rn, rm, 1, 1, 0);

static void tcg_out_ld16s_8(TCGContext *s, ARMCond cond, TCGReg rt,
    tcg_out_memop_8(s, cond, INSN_LDRSH_IMM, rt, rn, imm8, 1, 0);

static void tcg_out_ld16s_r(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, TCGReg rm)
    tcg_out_memop_r(s, cond, INSN_LDRSH_REG, rt, rn, rm, 1, 1, 0);

static void tcg_out_ld8_12(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, int imm12)
    tcg_out_memop_12(s, cond, INSN_LDRB_IMM, rt, rn, imm12, 1, 0);

static void tcg_out_st8_12(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, int imm12)
    tcg_out_memop_12(s, cond, INSN_STRB_IMM, rt, rn, imm12, 1, 0);

static void tcg_out_ld8_r(TCGContext *s, ARMCond cond, TCGReg rt,
                          TCGReg rn, TCGReg rm)
    tcg_out_memop_r(s, cond, INSN_LDRB_REG, rt, rn, rm, 1, 1, 0);

static void tcg_out_st8_r(TCGContext *s, ARMCond cond, TCGReg rt,
                          TCGReg rn, TCGReg rm)
    tcg_out_memop_r(s, cond, INSN_STRB_REG, rt, rn, rm, 1, 1, 0);

static void tcg_out_ld8s_8(TCGContext *s, ARMCond cond, TCGReg rt,
    tcg_out_memop_8(s, cond, INSN_LDRSB_IMM, rt, rn, imm8, 1, 0);

static void tcg_out_ld8s_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
    tcg_out_memop_r(s, cond, INSN_LDRSB_REG, rt, rn, rm, 1, 1, 0);

static void tcg_out_movi_pool(TCGContext *s, ARMCond cond,
                              TCGReg rd, uint32_t arg)
    new_pool_label(s, arg, R_ARM_PC13, s->code_ptr, 0);
    tcg_out_ld32_12(s, cond, rd, TCG_REG_PC, 0);

static void tcg_out_movi32(TCGContext *s, ARMCond cond,
                           TCGReg rd, uint32_t arg)
    int imm12, diff, opc, sh1, sh2;
    uint32_t tt0, tt1, tt2;

    /* Check a single MOV/MVN before anything else. */
    imm12 = encode_imm(arg);
        tcg_out_dat_imm(s, cond, ARITH_MOV, rd, 0, imm12);
    imm12 = encode_imm(~arg);
        tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0, imm12);

    /* Check for a pc-relative address. This will usually be the TB,
       or within the TB, which is immediately before the code block. */
    diff = tcg_pcrel_diff(s, (void *)arg) - 8;
        imm12 = encode_imm(diff);
            tcg_out_dat_imm(s, cond, ARITH_ADD, rd, TCG_REG_PC, imm12);
        imm12 = encode_imm(-diff);
            tcg_out_dat_imm(s, cond, ARITH_SUB, rd, TCG_REG_PC, imm12);

    /* Use movw + movt. */
    if (use_armv7_instructions) {
        tcg_out32(s, (cond << 28) | 0x03000000 | (rd << 12)
                     | ((arg << 4) & 0x000f0000) | (arg & 0xfff));
        if (arg & 0xffff0000) {
            tcg_out32(s, (cond << 28) | 0x03400000 | (rd << 12)
                         | ((arg >> 12) & 0x000f0000) | ((arg >> 16) & 0xfff));

    /* Look for sequences of two insns. If we have lots of 1's, we can
       shorten the sequence by beginning with mvn and then clearing
       higher bits with eor. */
    if (ctpop32(arg) > 16) {
    sh1 = ctz32(tt0) & ~1;
    tt1 = tt0 & ~(0xff << sh1);
    sh2 = ctz32(tt1) & ~1;
    tt2 = tt1 & ~(0xff << sh2);
        rot = ((32 - sh1) << 7) & 0xf00;
        tcg_out_dat_imm(s, cond, opc, rd, 0, ((tt0 >> sh1) & 0xff) | rot);
        rot = ((32 - sh2) << 7) & 0xf00;
        tcg_out_dat_imm(s, cond, ARITH_EOR, rd, rd,
                        ((tt0 >> sh2) & 0xff) | rot);

    /* Otherwise, drop it into the constant pool. */
    tcg_out_movi_pool(s, cond, rd, arg);
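/*
 * Illustrative example of the two-insn path: arg = 0x00120034 reduces
 * to zero after clearing the bytes at shifts 2 and 16, so it is emitted
 * as "mov rd, #0x34" followed by "eor rd, rd, #0x120000", avoiding a
 * constant-pool load.
 */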
/*
 * Emit either the reg,imm or reg,reg form of a data-processing insn.
 * rhs must satisfy the "rI" constraint.
 */
static void tcg_out_dat_rI(TCGContext *s, ARMCond cond, ARMInsn opc,
                           TCGReg dst, TCGReg lhs, TCGArg rhs, int rhs_is_const)
        tcg_out_dat_imm(s, cond, opc, dst, lhs, encode_imm_nofail(rhs));
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));

/*
 * Emit either the reg,imm or reg,reg form of a data-processing insn.
 * rhs must satisfy the "rIK" constraint.
 */
static void tcg_out_dat_rIK(TCGContext *s, ARMCond cond, ARMInsn opc,
                            ARMInsn opinv, TCGReg dst, TCGReg lhs, TCGArg rhs,
        int imm12 = encode_imm(rhs);
            imm12 = encode_imm_nofail(~rhs);
        tcg_out_dat_imm(s, cond, opc, dst, lhs, imm12);
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));

static void tcg_out_dat_rIN(TCGContext *s, ARMCond cond, ARMInsn opc,
                            ARMInsn opneg, TCGReg dst, TCGReg lhs, TCGArg rhs,
    /* Emit either the reg,imm or reg,reg form of a data-processing insn.
     * rhs must satisfy the "rIN" constraint.
     */
        int imm12 = encode_imm(rhs);
            imm12 = encode_imm_nofail(-rhs);
        tcg_out_dat_imm(s, cond, opc, dst, lhs, imm12);
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));

static void tcg_out_mul32(TCGContext *s, ARMCond cond, TCGReg rd,
                          TCGReg rn, TCGReg rm)
    tcg_out32(s, (cond << 28) | 0x90 | (rd << 16) | (rm << 8) | rn);

static void tcg_out_umull32(TCGContext *s, ARMCond cond, TCGReg rd0,
                            TCGReg rd1, TCGReg rn, TCGReg rm)
    tcg_out32(s, (cond << 28) | 0x00800090 |
                 (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);

static void tcg_out_smull32(TCGContext *s, ARMCond cond, TCGReg rd0,
                            TCGReg rd1, TCGReg rn, TCGReg rm)
    tcg_out32(s, (cond << 28) | 0x00c00090 |
                 (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);

static void tcg_out_sdiv(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, TCGReg rm)
    tcg_out32(s, 0x0710f010 | (cond << 28) | (rd << 16) | rn | (rm << 8));

static void tcg_out_udiv(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, TCGReg rm)
    tcg_out32(s, 0x0730f010 | (cond << 28) | (rd << 16) | rn | (rm << 8));

static void tcg_out_ext8s(TCGContext *s, TCGType t, TCGReg rd, TCGReg rn)
    tcg_out32(s, 0x06af0070 | (COND_AL << 28) | (rd << 12) | rn);

static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rn)
    tcg_out_dat_imm(s, COND_AL, ARITH_AND, rd, rn, 0xff);

static void tcg_out_ext16s(TCGContext *s, TCGType t, TCGReg rd, TCGReg rn)
    tcg_out32(s, 0x06bf0070 | (COND_AL << 28) | (rd << 12) | rn);

static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rn)
    tcg_out32(s, 0x06ff0070 | (COND_AL << 28) | (rd << 12) | rn);

static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rn)
    g_assert_not_reached();

static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rn)
    g_assert_not_reached();

static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rn)
    g_assert_not_reached();

static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rn)
    g_assert_not_reached();

static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rn)
    g_assert_not_reached();
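/*
 * The raw encodings in tcg_out_bswap16 below are, assuming the standard
 * ARMv6 forms: revsh (byte-swap the low halfword and sign-extend),
 * rev16 (byte-swap each halfword) and uxth (zero-extend the low
 * halfword), selected according to the TCG_BSWAP flags.
 */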
static void tcg_out_bswap16(TCGContext *s, ARMCond cond,
                            TCGReg rd, TCGReg rn, int flags)
    if (flags & TCG_BSWAP_OS) {
        tcg_out32(s, 0x06ff0fb0 | (cond << 28) | (rd << 12) | rn);

    tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn);
    if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
        tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rd);

static void tcg_out_bswap32(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn)
    tcg_out32(s, 0x06bf0f30 | (cond << 28) | (rd << 12) | rn);

static void tcg_out_deposit(TCGContext *s, ARMCond cond, TCGReg rd,
                            TCGArg a1, int ofs, int len, bool const_a1)
    /* bfi becomes bfc with rn == 15. */
    tcg_out32(s, 0x07c00010 | (cond << 28) | (rd << 12) | a1
                 | (ofs << 7) | ((ofs + len - 1) << 16));

static void tcg_out_extract(TCGContext *s, ARMCond cond, TCGReg rd,
                            TCGReg rn, int ofs, int len)
    tcg_out32(s, 0x07e00050 | (cond << 28) | (rd << 12) | rn
                 | (ofs << 7) | ((len - 1) << 16));

static void tcg_out_sextract(TCGContext *s, ARMCond cond, TCGReg rd,
                             TCGReg rn, int ofs, int len)
    tcg_out32(s, 0x07a00050 | (cond << 28) | (rd << 12) | rn
                 | (ofs << 7) | ((len - 1) << 16));

static void tcg_out_ld32u(TCGContext *s, ARMCond cond,
                          TCGReg rd, TCGReg rn, int32_t offset)
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld32_r(s, cond, rd, rn, TCG_REG_TMP);
        tcg_out_ld32_12(s, cond, rd, rn, offset);

static void tcg_out_st32(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, int32_t offset)
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st32_r(s, cond, rd, rn, TCG_REG_TMP);
        tcg_out_st32_12(s, cond, rd, rn, offset);

static void tcg_out_ld16u(TCGContext *s, ARMCond cond,
                          TCGReg rd, TCGReg rn, int32_t offset)
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld16u_r(s, cond, rd, rn, TCG_REG_TMP);
        tcg_out_ld16u_8(s, cond, rd, rn, offset);

static void tcg_out_ld16s(TCGContext *s, ARMCond cond,
                          TCGReg rd, TCGReg rn, int32_t offset)
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld16s_r(s, cond, rd, rn, TCG_REG_TMP);
        tcg_out_ld16s_8(s, cond, rd, rn, offset);

static void tcg_out_st16(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, int32_t offset)
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st16_r(s, cond, rd, rn, TCG_REG_TMP);
        tcg_out_st16_8(s, cond, rd, rn, offset);

static void tcg_out_ld8u(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, int32_t offset)
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld8_r(s, cond, rd, rn, TCG_REG_TMP);
        tcg_out_ld8_12(s, cond, rd, rn, offset);

static void tcg_out_ld8s(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, int32_t offset)
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld8s_r(s, cond, rd, rn, TCG_REG_TMP);
        tcg_out_ld8s_8(s, cond, rd, rn, offset);

static void tcg_out_st8(TCGContext *s, ARMCond cond,
                        TCGReg rd, TCGReg rn, int32_t offset)
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st8_r(s, cond, rd, rn, TCG_REG_TMP);
        tcg_out_st8_12(s, cond, rd, rn, offset);

/*
 * The _goto case is normally between TBs within the same code buffer, and
 * with the code buffer limited to 16MB we wouldn't need the long case.
 * But we also use it for the tail-call to the qemu_ld/st helpers, which does.
 */
static void tcg_out_goto(TCGContext *s, ARMCond cond, const tcg_insn_unit *addr)
    intptr_t addri = (intptr_t)addr;
    ptrdiff_t disp = tcg_pcrel_diff(s, addr);
    bool arm_mode = !(addri & 1);

    if (arm_mode && disp - 8 < 0x01fffffd && disp - 8 > -0x01fffffd) {
        tcg_out_b_imm(s, cond, disp);

    /* LDR is interworking from v5t. */
    tcg_out_movi_pool(s, cond, TCG_REG_PC, addri);

/*
 * The call case is mostly used for helpers - so it's not unreasonable
 * for them to be beyond branch range.
 */
static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *addr)
    intptr_t addri = (intptr_t)addr;
    ptrdiff_t disp = tcg_pcrel_diff(s, addr);
    bool arm_mode = !(addri & 1);

    if (disp - 8 < 0x02000000 && disp - 8 >= -0x02000000) {
            tcg_out_bl_imm(s, COND_AL, disp);
            tcg_out_blx_imm(s, disp);
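    /*
     * Fallback: BLX (register) selects ARM vs Thumb from bit 0 of the
     * target address, so it handles either mode at any displacement.
     */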
    tcg_out_movi32(s, COND_AL, TCG_REG_TMP, addri);
    tcg_out_blx_reg(s, COND_AL, TCG_REG_TMP);

static void tcg_out_call(TCGContext *s, const tcg_insn_unit *addr,
                         const TCGHelperInfo *info)
    tcg_out_call_int(s, addr);

static void tcg_out_goto_label(TCGContext *s, ARMCond cond, TCGLabel *l)
        tcg_out_goto(s, cond, l->u.value_ptr);
        tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, l, 0);
        tcg_out_b_imm(s, cond, 0);

static void tcg_out_mb(TCGContext *s, TCGArg a0)
    if (use_armv7_instructions) {
        tcg_out32(s, INSN_DMB_ISH);
        tcg_out32(s, INSN_DMB_MCR);

static TCGCond tcg_out_cmp(TCGContext *s, TCGCond cond, TCGReg a,
                           TCGArg b, int b_const)
    if (!is_tst_cond(cond)) {
        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0, a, b, b_const);

    cond = tcg_tst_eqne_cond(cond);
        int imm12 = encode_imm(b);

        /*
         * The compare constraints allow rIN, but TST does not support N.
         * Be prepared to load the constant into a scratch register.
         */
            tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, a, imm12);
        tcg_out_movi32(s, COND_AL, TCG_REG_TMP, b);
    tcg_out_dat_reg(s, COND_AL, ARITH_TST, 0, a, b, SHIFT_IMM_LSL(0));

static TCGCond tcg_out_cmp2(TCGContext *s, const TCGArg *args,
                            const int *const_args)
    TCGReg al = args[0];
    TCGReg ah = args[1];
    TCGArg bl = args[2];
    TCGArg bh = args[3];
    TCGCond cond = args[4];
    int const_bl = const_args[2];
    int const_bh = const_args[3];

        /*
         * We perform a conditional comparison.  If the high half is
         * equal, then overwrite the flags with the comparison of the
         * low half.  The resulting flags cover the whole.
         */
        tcg_out_dat_rI(s, COND_AL, ARITH_CMP, 0, ah, bh, const_bh);
        tcg_out_dat_rI(s, COND_EQ, ARITH_CMP, 0, al, bl, const_bl);

    case TCG_COND_TSTEQ:
    case TCG_COND_TSTNE:
        /* Similar, but with TST instead of CMP. */
        tcg_out_dat_rI(s, COND_AL, ARITH_TST, 0, ah, bh, const_bh);
        tcg_out_dat_rI(s, COND_EQ, ARITH_TST, 0, al, bl, const_bl);
        return tcg_tst_eqne_cond(cond);

        /* We perform a double-word subtraction and examine the result.
           We do not actually need the result of the subtract, so the
           low part "subtract" is a compare.  For the high half we have
           no choice but to compute into a temporary. */
        tcg_out_dat_rI(s, COND_AL, ARITH_CMP, 0, al, bl, const_bl);
        tcg_out_dat_rI(s, COND_AL, ARITH_SBC | TO_CPSR,
                       TCG_REG_TMP, ah, bh, const_bh);

        /* Similar, but with swapped arguments, via reversed subtract. */
        tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR,
                       TCG_REG_TMP, al, bl, const_bl);
        tcg_out_dat_rI(s, COND_AL, ARITH_RSC | TO_CPSR,
                       TCG_REG_TMP, ah, bh, const_bh);
        return tcg_swap_cond(cond);

        g_assert_not_reached();
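/*
 * Illustrative example: a 64-bit EQ on this 32-bit host becomes
 * "cmp ah, bh" followed by "cmpeq al, bl".  If the high halves differ,
 * the second compare is skipped with Z already clear; otherwise Z ends
 * up reflecting the low-half comparison, so a single COND_EQ test
 * covers the full 64-bit equality.
 */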
/*
 * Note that TCGReg references Q-registers.
 * Q-regno = 2 * D-regno, so shift left by 1 while inserting.
 */
static uint32_t encode_vd(TCGReg rd)
    tcg_debug_assert(rd >= TCG_REG_Q0);
    return (extract32(rd, 3, 1) << 22) | (extract32(rd, 0, 3) << 13);
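/*
 * Example (illustrative): TCG_REG_Q5 is D10; bits 2:0 of the Q-number
 * (5) land in insn bits 15:13, leaving bit 12 clear for the doubled
 * LSB, and bit 3 of the Q-number supplies the "D" bit at insn bit 22.
 * The resulting D:Vd field is 0b01010, i.e. register D10.
 */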
static uint32_t encode_vn(TCGReg rn)
    tcg_debug_assert(rn >= TCG_REG_Q0);
    return (extract32(rn, 3, 1) << 7) | (extract32(rn, 0, 3) << 17);

static uint32_t encode_vm(TCGReg rm)
    tcg_debug_assert(rm >= TCG_REG_Q0);
    return (extract32(rm, 3, 1) << 5) | (extract32(rm, 0, 3) << 1);

static void tcg_out_vreg2(TCGContext *s, ARMInsn insn, int q, int vece,
    tcg_out32(s, insn | (vece << 18) | (q << 6) |
                 encode_vd(d) | encode_vm(m));

static void tcg_out_vreg3(TCGContext *s, ARMInsn insn, int q, int vece,
                          TCGReg d, TCGReg n, TCGReg m)
    tcg_out32(s, insn | (vece << 20) | (q << 6) |
                 encode_vd(d) | encode_vn(n) | encode_vm(m));

static void tcg_out_vmovi(TCGContext *s, TCGReg rd,
                          int q, int op, int cmode, uint8_t imm8)
    tcg_out32(s, INSN_VMOVI | encode_vd(rd) | (q << 6) | (op << 5)
                 | (cmode << 8) | extract32(imm8, 0, 4)
                 | (extract32(imm8, 4, 3) << 16)
                 | (extract32(imm8, 7, 1) << 24));

static void tcg_out_vshifti(TCGContext *s, ARMInsn insn, int q,
                            TCGReg rd, TCGReg rm, int l_imm6)
    tcg_out32(s, insn | (q << 6) | encode_vd(rd) | encode_vm(rm) |
                 (extract32(l_imm6, 6, 1) << 7) |
                 (extract32(l_imm6, 0, 6) << 16));

static void tcg_out_vldst(TCGContext *s, ARMInsn insn,
                          TCGReg rd, TCGReg rn, int offset)
        if (check_fit_imm(offset) || check_fit_imm(-offset)) {
            tcg_out_dat_rIN(s, COND_AL, ARITH_ADD, ARITH_SUB,
                            TCG_REG_TMP, rn, offset, true);
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, offset);
            tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
                            TCG_REG_TMP, TCG_REG_TMP, rn, 0);

    tcg_out32(s, insn | (rn << 16) | encode_vd(rd) | 0xf);

bool tcg_target_has_memory_bswap(MemOp memop)

static TCGReg ldst_ra_gen(TCGContext *s, const TCGLabelQemuLdst *l, int arg)
    /* We arrive at the slow path via "BLNE", so R14 contains l->raddr. */

static const TCGLdstHelperParam ldst_helper_param = {
    .ra_gen = ldst_ra_gen,
    .tmp = { TCG_REG_TMP },

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
    MemOp opc = get_memop(lb->oi);

    if (!reloc_pc24(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {

    tcg_out_ld_helper_args(s, lb, &ldst_helper_param);
    tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SIZE]);
    tcg_out_ld_helper_ret(s, lb, false, &ldst_helper_param);

    tcg_out_goto(s, COND_AL, lb->raddr);

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
    MemOp opc = get_memop(lb->oi);

    if (!reloc_pc24(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {

    tcg_out_st_helper_args(s, lb, &ldst_helper_param);

    /* Tail-call to the helper, which will return to the fast path. */
    tcg_out_goto(s, COND_AL, qemu_st_helpers[opc & MO_SIZE]);
/* We expect to use a 9-bit sign-magnitude negative offset from ENV. */
#define MIN_TLB_MASK_TABLE_OFS -256

static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
                                           TCGReg addrlo, TCGReg addrhi,
                                           MemOpIdx oi, bool is_ld)
    TCGLabelQemuLdst *ldst = NULL;
    MemOp opc = get_memop(oi);

    if (tcg_use_softmmu) {
            .index = TCG_REG_R1,
            .index_scratch = true,
            .index = guest_base ? TCG_REG_GUEST_BASE : -1,
            .index_scratch = false,

    h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false);
    a_mask = (1 << h->aa.align) - 1;

    if (tcg_use_softmmu) {
        int mem_index = get_mmuidx(oi);
        int cmp_off = is_ld ? offsetof(CPUTLBEntry, addr_read)
                            : offsetof(CPUTLBEntry, addr_write);
        int fast_off = tlb_mask_table_ofs(s, mem_index);
        unsigned s_mask = (1 << (opc & MO_SIZE)) - 1;

        ldst = new_ldst_label(s);
        ldst->is_ld = is_ld;
        ldst->addrlo_reg = addrlo;
        ldst->addrhi_reg = addrhi;

        /* Load cpu->neg.tlb.f[mmu_idx].{mask,table} into {r0,r1}. */
        QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0);
        QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 4);
        tcg_out_ldrd_8(s, COND_AL, TCG_REG_R0, TCG_AREG0, fast_off);

        /* Extract the tlb index from the address into R0. */
        tcg_out_dat_reg(s, COND_AL, ARITH_AND, TCG_REG_R0, TCG_REG_R0, addrlo,
                        SHIFT_IMM_LSR(s->page_bits - CPU_TLB_ENTRY_BITS));
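        /*
         * Note (an assumption about CPUTLBDescFast, not stated here):
         * the mask loaded into R0 already includes the entry-size
         * scaling, so this single AND yields the byte offset of the
         * TLB entry within the table rather than a bare index.
         */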
        /*
         * Add the tlb_table pointer, creating the CPUTLBEntry address in R1.
         * Load the tlb comparator into R2/R3 and the fast path addend into R1.
         */
        QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN);
        if (s->addr_type == TCG_TYPE_I32) {
            tcg_out_ld32_rwb(s, COND_AL, TCG_REG_R2,
                             TCG_REG_R1, TCG_REG_R0);
            tcg_out_ldrd_rwb(s, COND_AL, TCG_REG_R2,
                             TCG_REG_R1, TCG_REG_R0);

        tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
                        TCG_REG_R1, TCG_REG_R1, TCG_REG_R0, 0);
        if (s->addr_type == TCG_TYPE_I32) {
            tcg_out_ld32_12(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off);
            tcg_out_ldrd_8(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off);

        /* Load the tlb addend. */
        tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R1,
                        offsetof(CPUTLBEntry, addend));

        /*
         * Check alignment, check comparators.
         * Do this in 2-4 insns.  Use MOVW for v7, if possible,
         * to reduce the number of sequential conditional instructions.
         * Almost all guests have at least 4k pages, which means that we need
         * to clear at least 9 bits even for an 8-byte memory, which means it
         * isn't worth checking for an immediate operand for BIC.
         *
         * For unaligned accesses, test the page of the last unit of alignment.
         * This leaves the least significant alignment bits unchanged, and of
         * course must be zero.
         */
        if (a_mask < s_mask) {
            t_addr = TCG_REG_R0;
            tcg_out_dat_imm(s, COND_AL, ARITH_ADD, t_addr,
                            addrlo, s_mask - a_mask);

        if (use_armv7_instructions && s->page_bits <= 16) {
            tcg_out_movi32(s, COND_AL, TCG_REG_TMP, ~(s->page_mask | a_mask));
            tcg_out_dat_reg(s, COND_AL, ARITH_BIC, TCG_REG_TMP,
                            t_addr, TCG_REG_TMP, 0);
            tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
                            TCG_REG_R2, TCG_REG_TMP, 0);
                tcg_debug_assert(a_mask <= 0xff);
                tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, a_mask);
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0, t_addr,
                            SHIFT_IMM_LSR(s->page_bits));
            tcg_out_dat_reg(s, (a_mask ? COND_EQ : COND_AL), ARITH_CMP,
                            0, TCG_REG_R2, TCG_REG_TMP,
                            SHIFT_IMM_LSL(s->page_bits));

        if (s->addr_type != TCG_TYPE_I32) {
            tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0, TCG_REG_R3, addrhi, 0);
    } else if (a_mask) {
        ldst = new_ldst_label(s);
        ldst->is_ld = is_ld;
        ldst->addrlo_reg = addrlo;
        ldst->addrhi_reg = addrhi;

        /* We are expecting alignment to max out at 7 */
        tcg_debug_assert(a_mask <= 0xff);
        /* tst addr, #mask */
        tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, a_mask);
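        /*
         * In this user-only path the sole slow-path trigger is
         * misalignment: TST sets Z when the address is aligned, and the
         * conditional BLNE emitted at the qemu_ld/st site only fires
         * when Z is clear.
         */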
static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg datalo,
                                   TCGReg datahi, HostAddress h)
    /* Byte swapping is left to middle-end expansion. */
    tcg_debug_assert((opc & MO_BSWAP) == 0);

    switch (opc & MO_SSIZE) {
            tcg_out_ld8_12(s, h.cond, datalo, h.base, 0);
            tcg_out_ld8_r(s, h.cond, datalo, h.base, h.index);
            tcg_out_ld8s_8(s, h.cond, datalo, h.base, 0);
            tcg_out_ld8s_r(s, h.cond, datalo, h.base, h.index);
            tcg_out_ld16u_8(s, h.cond, datalo, h.base, 0);
            tcg_out_ld16u_r(s, h.cond, datalo, h.base, h.index);
            tcg_out_ld16s_8(s, h.cond, datalo, h.base, 0);
            tcg_out_ld16s_r(s, h.cond, datalo, h.base, h.index);
            tcg_out_ld32_12(s, h.cond, datalo, h.base, 0);
            tcg_out_ld32_r(s, h.cond, datalo, h.base, h.index);
        /* We used pair allocation for datalo, so it should already be aligned. */
        tcg_debug_assert((datalo & 1) == 0);
        tcg_debug_assert(datahi == datalo + 1);
        /* LDRD requires alignment; double-check that. */
        if (memop_alignment_bits(opc) >= MO_64) {
            tcg_out_ldrd_8(s, h.cond, datalo, h.base, 0);

            /*
             * Rm (the second address op) must not overlap Rt or Rt + 1.
             * Since datalo is aligned, we can simplify the test via alignment.
             * Flip the two address arguments if that works.
             */
            if ((h.index & ~1) != datalo) {
                tcg_out_ldrd_r(s, h.cond, datalo, h.base, h.index);
            if ((h.base & ~1) != datalo) {
                tcg_out_ldrd_r(s, h.cond, datalo, h.index, h.base);

        if (datalo == h.base) {
            tcg_out_mov_reg(s, h.cond, TCG_REG_TMP, base);
        } else if (h.index_scratch) {
            tcg_out_ld32_rwb(s, h.cond, datalo, h.index, h.base);
            tcg_out_ld32_12(s, h.cond, datahi, h.index, 4);
            tcg_out_dat_reg(s, h.cond, ARITH_ADD, TCG_REG_TMP,
                            h.base, h.index, SHIFT_IMM_LSL(0));
        tcg_out_ld32_12(s, h.cond, datalo, base, 0);
        tcg_out_ld32_12(s, h.cond, datahi, base, 4);

        g_assert_not_reached();

static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
                            TCGReg addrlo, TCGReg addrhi,
                            MemOpIdx oi, TCGType data_type)
    MemOp opc = get_memop(oi);
    TCGLabelQemuLdst *ldst;

    ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, true);
        ldst->type = data_type;
        ldst->datalo_reg = datalo;
        ldst->datahi_reg = datahi;
        /*
         * This is a conditional BL only to load a pointer within this
         * opcode into LR for the slow path.  We will not be using
         * the value for a tail call.
         */
        ldst->label_ptr[0] = s->code_ptr;
        tcg_out_bl_imm(s, COND_NE, 0);

        tcg_out_qemu_ld_direct(s, opc, datalo, datahi, h);
        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
        tcg_out_qemu_ld_direct(s, opc, datalo, datahi, h);

static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo,
                                   TCGReg datahi, HostAddress h)
    /* Byte swapping is left to middle-end expansion. */
    tcg_debug_assert((opc & MO_BSWAP) == 0);

    switch (opc & MO_SIZE) {
            tcg_out_st8_12(s, h.cond, datalo, h.base, 0);
            tcg_out_st8_r(s, h.cond, datalo, h.base, h.index);
            tcg_out_st16_8(s, h.cond, datalo, h.base, 0);
            tcg_out_st16_r(s, h.cond, datalo, h.base, h.index);
            tcg_out_st32_12(s, h.cond, datalo, h.base, 0);
            tcg_out_st32_r(s, h.cond, datalo, h.base, h.index);
        /* We used pair allocation for datalo, so it should already be aligned. */
        tcg_debug_assert((datalo & 1) == 0);
        tcg_debug_assert(datahi == datalo + 1);
        /* STRD requires alignment; double-check that. */
        if (memop_alignment_bits(opc) >= MO_64) {
            tcg_out_strd_8(s, h.cond, datalo, h.base, 0);
            tcg_out_strd_r(s, h.cond, datalo, h.base, h.index);
        } else if (h.index < 0) {
            tcg_out_st32_12(s, h.cond, datalo, h.base, 0);
            tcg_out_st32_12(s, h.cond, datahi, h.base, 4);
        } else if (h.index_scratch) {
            tcg_out_st32_rwb(s, h.cond, datalo, h.index, h.base);
            tcg_out_st32_12(s, h.cond, datahi, h.index, 4);
            tcg_out_dat_reg(s, h.cond, ARITH_ADD, TCG_REG_TMP,
                            h.base, h.index, SHIFT_IMM_LSL(0));
            tcg_out_st32_12(s, h.cond, datalo, TCG_REG_TMP, 0);
            tcg_out_st32_12(s, h.cond, datahi, TCG_REG_TMP, 4);

        g_assert_not_reached();

static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi,
                            TCGReg addrlo, TCGReg addrhi,
                            MemOpIdx oi, TCGType data_type)
    MemOp opc = get_memop(oi);
    TCGLabelQemuLdst *ldst;

    ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, false);
        ldst->type = data_type;
        ldst->datalo_reg = datalo;
        ldst->datahi_reg = datahi;

        tcg_out_qemu_st_direct(s, opc, datalo, datahi, h);

        /* The conditional call is last, as we're going to return here. */
        ldst->label_ptr[0] = s->code_ptr;
        tcg_out_bl_imm(s, COND_NE, 0);
        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
        tcg_out_qemu_st_direct(s, opc, datalo, datahi, h);

static void tcg_out_epilogue(TCGContext *s);

static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, arg);
    tcg_out_epilogue(s);

static void tcg_out_goto_tb(TCGContext *s, int which)
    /* Direct branch will be patched by tb_target_set_jmp_target. */
    set_jmp_insn_offset(s, which);
    tcg_out32(s, INSN_NOP);

    /* When branch is out of range, fall through to indirect. */
    i_addr = get_jmp_target_addr(s, which);
    i_disp = tcg_pcrel_diff(s, (void *)i_addr) - 8;
    tcg_debug_assert(i_disp < 0);
    if (i_disp >= -0xfff) {
        tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, i_disp);
        /*
         * The TB is close, but outside the 12 bits addressable by
         * the load.  We can extend this to 20 bits with a sub of a
         * shifted immediate from pc.
         */
        int l = -(h & 0xfff);

        h = encode_imm_nofail(h + l);
        tcg_out_dat_imm(s, COND_AL, ARITH_SUB, TCG_REG_R0, TCG_REG_PC, h);
        tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, l);
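        /*
         * Illustrative example, assuming h was initialised to -i_disp:
         * for i_disp = -0x12345, l = -0x345 and h + l = 0x12000, which
         * encodes as a rotated immediate, so the pair becomes
         * "sub r0, pc, #0x12000; ldr pc, [r0, #-0x345]".
         */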
    set_jmp_reset_offset(s, which);

void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                              uintptr_t jmp_rx, uintptr_t jmp_rw)
    uintptr_t addr = tb->jmp_target_addr[n];
    ptrdiff_t offset = addr - (jmp_rx + 8);

    /* Either directly branch, or fall through to indirect branch. */
    if (offset == sextract64(offset, 0, 26)) {
        insn = deposit32((COND_AL << 28) | INSN_B, 0, 24, offset >> 2);

    qatomic_set((uint32_t *)jmp_rw, insn);
    flush_idcache_range(jmp_rx, jmp_rw, 4);

static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
    TCGArg a0, a1, a2, a3, a4, a5;

    case INDEX_op_goto_ptr:
        tcg_out_b_reg(s, COND_AL, args[0]);
        tcg_out_goto_label(s, COND_AL, arg_label(args[0]));

    case INDEX_op_ld8u_i32:
        tcg_out_ld8u(s, COND_AL, args[0], args[1], args[2]);
    case INDEX_op_ld8s_i32:
        tcg_out_ld8s(s, COND_AL, args[0], args[1], args[2]);
    case INDEX_op_ld16u_i32:
        tcg_out_ld16u(s, COND_AL, args[0], args[1], args[2]);
    case INDEX_op_ld16s_i32:
        tcg_out_ld16s(s, COND_AL, args[0], args[1], args[2]);
    case INDEX_op_ld_i32:
        tcg_out_ld32u(s, COND_AL, args[0], args[1], args[2]);
    case INDEX_op_st8_i32:
        tcg_out_st8(s, COND_AL, args[0], args[1], args[2]);
    case INDEX_op_st16_i32:
        tcg_out_st16(s, COND_AL, args[0], args[1], args[2]);
    case INDEX_op_st_i32:
        tcg_out_st32(s, COND_AL, args[0], args[1], args[2]);

    case INDEX_op_movcond_i32:
        /* Constraints mean that v2 is always in the same register as dest,
         * so we only need to do "if condition passed, move v1 to dest".
         */
        c = tcg_out_cmp(s, args[5], args[1], args[2], const_args[2]);
        tcg_out_dat_rIK(s, tcg_cond_to_arm_cond[c], ARITH_MOV,
                        ARITH_MVN, args[0], 0, args[3], const_args[3]);
    case INDEX_op_add_i32:
        tcg_out_dat_rIN(s, COND_AL, ARITH_ADD, ARITH_SUB,
                        args[0], args[1], args[2], const_args[2]);
    case INDEX_op_sub_i32:
        if (const_args[1]) {
            if (const_args[2]) {
                tcg_out_movi32(s, COND_AL, args[0], args[1] - args[2]);
                tcg_out_dat_rI(s, COND_AL, ARITH_RSB,
                               args[0], args[2], args[1], 1);
            tcg_out_dat_rIN(s, COND_AL, ARITH_SUB, ARITH_ADD,
                            args[0], args[1], args[2], const_args[2]);
    case INDEX_op_and_i32:
        tcg_out_dat_rIK(s, COND_AL, ARITH_AND, ARITH_BIC,
                        args[0], args[1], args[2], const_args[2]);
    case INDEX_op_andc_i32:
        tcg_out_dat_rIK(s, COND_AL, ARITH_BIC, ARITH_AND,
                        args[0], args[1], args[2], const_args[2]);
    case INDEX_op_or_i32:
    case INDEX_op_xor_i32:
        tcg_out_dat_rI(s, COND_AL, c, args[0], args[1], args[2], const_args[2]);
    case INDEX_op_add2_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        a3 = args[3], a4 = args[4], a5 = args[5];
        if (a0 == a3 || (a0 == a5 && !const_args[5])) {
        tcg_out_dat_rIN(s, COND_AL, ARITH_ADD | TO_CPSR, ARITH_SUB | TO_CPSR,
                        a0, a2, a4, const_args[4]);
        tcg_out_dat_rIK(s, COND_AL, ARITH_ADC, ARITH_SBC,
                        a1, a3, a5, const_args[5]);
        tcg_out_mov_reg(s, COND_AL, args[0], a0);
    case INDEX_op_sub2_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        a3 = args[3], a4 = args[4], a5 = args[5];
        if ((a0 == a3 && !const_args[3]) || (a0 == a5 && !const_args[5])) {
        if (const_args[2]) {
            if (const_args[4]) {
                tcg_out_movi32(s, COND_AL, a0, a4);
            tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR, a0, a4, a2, 1);
            tcg_out_dat_rIN(s, COND_AL, ARITH_SUB | TO_CPSR,
                            ARITH_ADD | TO_CPSR, a0, a2, a4, const_args[4]);
        if (const_args[3]) {
            if (const_args[5]) {
                tcg_out_movi32(s, COND_AL, a1, a5);
            tcg_out_dat_rI(s, COND_AL, ARITH_RSC, a1, a5, a3, 1);
            tcg_out_dat_rIK(s, COND_AL, ARITH_SBC, ARITH_ADC,
                            a1, a3, a5, const_args[5]);
        tcg_out_mov_reg(s, COND_AL, args[0], a0);
    case INDEX_op_neg_i32:
        tcg_out_dat_imm(s, COND_AL, ARITH_RSB, args[0], args[1], 0);
    case INDEX_op_not_i32:
        tcg_out_dat_reg(s, COND_AL,
                        ARITH_MVN, args[0], 0, args[1], SHIFT_IMM_LSL(0));
    case INDEX_op_mul_i32:
        tcg_out_mul32(s, COND_AL, args[0], args[1], args[2]);
    case INDEX_op_mulu2_i32:
        tcg_out_umull32(s, COND_AL, args[0], args[1], args[2], args[3]);
    case INDEX_op_muls2_i32:
        tcg_out_smull32(s, COND_AL, args[0], args[1], args[2], args[3]);
    /* XXX: Perhaps args[2] & 0x1f is wrong */
    case INDEX_op_shl_i32:
            SHIFT_IMM_LSL(args[2] & 0x1f) : SHIFT_REG_LSL(args[2]);
    case INDEX_op_shr_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_LSR(args[2] & 0x1f) :
            SHIFT_IMM_LSL(0) : SHIFT_REG_LSR(args[2]);
    case INDEX_op_sar_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ASR(args[2] & 0x1f) :
            SHIFT_IMM_LSL(0) : SHIFT_REG_ASR(args[2]);
    case INDEX_op_rotr_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ROR(args[2] & 0x1f) :
            SHIFT_IMM_LSL(0) : SHIFT_REG_ROR(args[2]);
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], c);

    case INDEX_op_rotl_i32:
        if (const_args[2]) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
                            ((0x20 - args[2]) & 0x1f) ?
                            SHIFT_IMM_ROR((0x20 - args[2]) & 0x1f) :
            tcg_out_dat_imm(s, COND_AL, ARITH_RSB, TCG_REG_TMP, args[2], 0x20);
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
                            SHIFT_REG_ROR(TCG_REG_TMP));
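            /* rotl(x, n) == rotr(x, 32 - n), hence the RSB from 0x20. */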
    case INDEX_op_ctz_i32:
        tcg_out_dat_reg(s, COND_AL, INSN_RBIT, TCG_REG_TMP, 0, args[1], 0);
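        /* ctz(x) is computed as clz(rbit(x)); the clz code below finishes it. */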
    case INDEX_op_clz_i32:
        if (c && a2 == 32) {
            tcg_out_dat_reg(s, COND_AL, INSN_CLZ, a0, 0, a1, 0);
        tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0, a1, 0);
        tcg_out_dat_reg(s, COND_NE, INSN_CLZ, a0, 0, a1, 0);
        if (c || a0 != a2) {
            tcg_out_dat_rIK(s, COND_EQ, ARITH_MOV, ARITH_MVN, a0, 0, a2, c);

    case INDEX_op_brcond_i32:
        c = tcg_out_cmp(s, args[2], args[0], args[1], const_args[1]);
        tcg_out_goto_label(s, tcg_cond_to_arm_cond[c], arg_label(args[3]));
    case INDEX_op_setcond_i32:
        c = tcg_out_cmp(s, args[3], args[1], args[2], const_args[2]);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[c],
                        ARITH_MOV, args[0], 0, 1);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(c)],
                        ARITH_MOV, args[0], 0, 0);
    case INDEX_op_negsetcond_i32:
        c = tcg_out_cmp(s, args[3], args[1], args[2], const_args[2]);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[c],
                        ARITH_MVN, args[0], 0, 0);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(c)],
                        ARITH_MOV, args[0], 0, 0);

    case INDEX_op_brcond2_i32:
        c = tcg_out_cmp2(s, args, const_args);
        tcg_out_goto_label(s, tcg_cond_to_arm_cond[c], arg_label(args[5]));
    case INDEX_op_setcond2_i32:
        c = tcg_out_cmp2(s, args + 1, const_args + 1);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[c], ARITH_MOV, args[0], 0, 1);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(c)],
                        ARITH_MOV, args[0], 0, 0);

    case INDEX_op_qemu_ld_a32_i32:
        tcg_out_qemu_ld(s, args[0], -1, args[1], -1, args[2], TCG_TYPE_I32);
    case INDEX_op_qemu_ld_a64_i32:
        tcg_out_qemu_ld(s, args[0], -1, args[1], args[2],
                        args[3], TCG_TYPE_I32);
    case INDEX_op_qemu_ld_a32_i64:
        tcg_out_qemu_ld(s, args[0], args[1], args[2], -1,
                        args[3], TCG_TYPE_I64);
    case INDEX_op_qemu_ld_a64_i64:
        tcg_out_qemu_ld(s, args[0], args[1], args[2], args[3],
                        args[4], TCG_TYPE_I64);

    case INDEX_op_qemu_st_a32_i32:
        tcg_out_qemu_st(s, args[0], -1, args[1], -1, args[2], TCG_TYPE_I32);
    case INDEX_op_qemu_st_a64_i32:
        tcg_out_qemu_st(s, args[0], -1, args[1], args[2],
                        args[3], TCG_TYPE_I32);
    case INDEX_op_qemu_st_a32_i64:
        tcg_out_qemu_st(s, args[0], args[1], args[2], -1,
                        args[3], TCG_TYPE_I64);
    case INDEX_op_qemu_st_a64_i64:
        tcg_out_qemu_st(s, args[0], args[1], args[2], args[3],
                        args[4], TCG_TYPE_I64);

    case INDEX_op_bswap16_i32:
        tcg_out_bswap16(s, COND_AL, args[0], args[1], args[2]);
    case INDEX_op_bswap32_i32:
        tcg_out_bswap32(s, COND_AL, args[0], args[1]);

    case INDEX_op_deposit_i32:
        tcg_out_deposit(s, COND_AL, args[0], args[2],
                        args[3], args[4], const_args[2]);
    case INDEX_op_extract_i32:
        tcg_out_extract(s, COND_AL, args[0], args[1], args[2], args[3]);
    case INDEX_op_sextract_i32:
        tcg_out_sextract(s, COND_AL, args[0], args[1], args[2], args[3]);
    case INDEX_op_extract2_i32:
        /* ??? These optimizations vs zero should be generic. */
        /* ??? But we can't substitute 2 for 1 in the opcode stream yet. */
        if (const_args[1]) {
            if (const_args[2]) {
                tcg_out_movi(s, TCG_TYPE_REG, args[0], 0);
                tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0,
                                args[2], SHIFT_IMM_LSL(32 - args[3]));
        } else if (const_args[2]) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0,
                            args[1], SHIFT_IMM_LSR(args[3]));
            /* We can do extract2 in 2 insns, vs the 3 required otherwise. */
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0,
                            args[2], SHIFT_IMM_LSL(32 - args[3]));
            tcg_out_dat_reg(s, COND_AL, ARITH_ORR, args[0], TCG_REG_TMP,
                            args[1], SHIFT_IMM_LSR(args[3]));
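            /*
             * This computes (args[2]:args[1]) >> args[3] as
             * (args[2] << (32 - ofs)) | (args[1] >> ofs), going via
             * TCG_REG_TMP so that args[0] may alias either input.
             */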
2097 case INDEX_op_div_i32:
2098 tcg_out_sdiv(s, COND_AL, args[0], args[1], args[2]);
2100 case INDEX_op_divu_i32:
2101 tcg_out_udiv(s, COND_AL, args[0], args[1], args[2]);
2105 tcg_out_mb(s, args[0]);
2108 case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
2109 case INDEX_op_call: /* Always emitted via tcg_out_call. */
2110 case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
2111 case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
2112 case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */
2113 case INDEX_op_ext8u_i32:
2114 case INDEX_op_ext16s_i32:
2115 case INDEX_op_ext16u_i32:
2117 g_assert_not_reached();
2121 static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
2124 case INDEX_op_goto_ptr:
2127 case INDEX_op_ld8u_i32:
2128 case INDEX_op_ld8s_i32:
2129 case INDEX_op_ld16u_i32:
2130 case INDEX_op_ld16s_i32:
2131 case INDEX_op_ld_i32:
2132 case INDEX_op_neg_i32:
2133 case INDEX_op_not_i32:
2134 case INDEX_op_bswap16_i32:
2135 case INDEX_op_bswap32_i32:
2136 case INDEX_op_ext8s_i32:
2137 case INDEX_op_ext16s_i32:
2138 case INDEX_op_ext16u_i32:
2139 case INDEX_op_extract_i32:
2140 case INDEX_op_sextract_i32:
2141 return C_O1_I1(r, r);
2143 case INDEX_op_st8_i32:
2144 case INDEX_op_st16_i32:
2145 case INDEX_op_st_i32:
2146 return C_O0_I2(r, r);
2148 case INDEX_op_add_i32:
2149 case INDEX_op_sub_i32:
2150 case INDEX_op_setcond_i32:
2151 case INDEX_op_negsetcond_i32:
2152 return C_O1_I2(r, r, rIN);
2154 case INDEX_op_and_i32:
2155 case INDEX_op_andc_i32:
2156 case INDEX_op_clz_i32:
2157 case INDEX_op_ctz_i32:
2158 return C_O1_I2(r, r, rIK);
2160 case INDEX_op_mul_i32:
2161 case INDEX_op_div_i32:
2162 case INDEX_op_divu_i32:
2163 return C_O1_I2(r, r, r);
2165 case INDEX_op_mulu2_i32:
2166 case INDEX_op_muls2_i32:
2167 return C_O2_I2(r, r, r, r);
2169 case INDEX_op_or_i32:
2170 case INDEX_op_xor_i32:
2171 return C_O1_I2(r, r, rI);
2173 case INDEX_op_shl_i32:
2174 case INDEX_op_shr_i32:
2175 case INDEX_op_sar_i32:
2176 case INDEX_op_rotl_i32:
2177 case INDEX_op_rotr_i32:
2178 return C_O1_I2(r, r, ri);
2180 case INDEX_op_brcond_i32:
2181 return C_O0_I2(r, rIN);
2182 case INDEX_op_deposit_i32:
2183 return C_O1_I2(r, 0, rZ);
2184 case INDEX_op_extract2_i32:
2185 return C_O1_I2(r, rZ, rZ);
2186 case INDEX_op_movcond_i32:
2187 return C_O1_I4(r, r, rIN, rIK, 0);
2188 case INDEX_op_add2_i32:
2189 return C_O2_I4(r, r, r, r, rIN, rIK);
2190 case INDEX_op_sub2_i32:
2191 return C_O2_I4(r, r, rI, rI, rIN, rIK);
2192 case INDEX_op_brcond2_i32:
2193 return C_O0_I4(r, r, rI, rI);
2194 case INDEX_op_setcond2_i32:
2195 return C_O1_I4(r, r, r, rI, rI);
2197 case INDEX_op_qemu_ld_a32_i32:
2198 return C_O1_I1(r, q);
2199 case INDEX_op_qemu_ld_a64_i32:
2200 return C_O1_I2(r, q, q);
2201 case INDEX_op_qemu_ld_a32_i64:
2202 return C_O2_I1(e, p, q);
2203 case INDEX_op_qemu_ld_a64_i64:
2204 return C_O2_I2(e, p, q, q);
2205 case INDEX_op_qemu_st_a32_i32:
2206 return C_O0_I2(q, q);
2207 case INDEX_op_qemu_st_a64_i32:
2208 return C_O0_I3(q, q, q);
2209 case INDEX_op_qemu_st_a32_i64:
2210 return C_O0_I3(Q, p, q);
2211 case INDEX_op_qemu_st_a64_i64:
2212 return C_O0_I4(Q, p, q, q);

    case INDEX_op_st_vec:
        return C_O0_I2(w, r);
    case INDEX_op_ld_vec:
    case INDEX_op_dupm_vec:
        return C_O1_I1(w, r);
    case INDEX_op_dup_vec:
        return C_O1_I1(w, wr);
    case INDEX_op_abs_vec:
    case INDEX_op_neg_vec:
    case INDEX_op_not_vec:
    case INDEX_op_shli_vec:
    case INDEX_op_shri_vec:
    case INDEX_op_sari_vec:
        return C_O1_I1(w, w);
    case INDEX_op_dup2_vec:
    case INDEX_op_add_vec:
    case INDEX_op_mul_vec:
    case INDEX_op_smax_vec:
    case INDEX_op_smin_vec:
    case INDEX_op_ssadd_vec:
    case INDEX_op_sssub_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_umax_vec:
    case INDEX_op_umin_vec:
    case INDEX_op_usadd_vec:
    case INDEX_op_ussub_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_arm_sshl_vec:
    case INDEX_op_arm_ushl_vec:
        return C_O1_I2(w, w, w);
    case INDEX_op_arm_sli_vec:
        return C_O1_I2(w, 0, w);
    case INDEX_op_or_vec:
    case INDEX_op_andc_vec:
        return C_O1_I2(w, w, wO);
    case INDEX_op_and_vec:
    case INDEX_op_orc_vec:
        return C_O1_I2(w, w, wV);
    case INDEX_op_cmp_vec:
        return C_O1_I2(w, w, wZ);
    case INDEX_op_bitsel_vec:
        return C_O1_I3(w, w, w, w);

    default:
        g_assert_not_reached();
    }
}
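
/*
 * When __ARM_ARCH does not already guarantee idiv/NEON at compile time
 * (in which case use_*_instructions are constant-folded macros), probe
 * the kernel's HWCAP bits from the auxiliary vector at startup.
 */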
static void tcg_target_init(TCGContext *s)
{
    /*
     * Only probe for the platform and capabilities if we haven't already
     * determined maximum values at compile time.
     */
#if !defined(use_idiv_instructions) || !defined(use_neon_instructions)
    {
        unsigned long hwcap = qemu_getauxval(AT_HWCAP);
#ifndef use_idiv_instructions
        use_idiv_instructions = (hwcap & HWCAP_ARM_IDIVA) != 0;
#endif
#ifndef use_neon_instructions
        use_neon_instructions = (hwcap & HWCAP_ARM_NEON) != 0;
#endif
    }
#endif

    if (__ARM_ARCH < 7) {
        const char *pl = (const char *)qemu_getauxval(AT_PLATFORM);
        if (pl != NULL && pl[0] == 'v' && pl[1] >= '4' && pl[1] <= '9') {
            arm_arch = pl[1] - '0';
        }

        if (arm_arch < 6) {
            error_report("TCG: ARMv%d is unsupported; exiting", arm_arch);
            exit(EXIT_FAILURE);
        }
    }

    tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;

    tcg_target_call_clobber_regs = 0;
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R12);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14);

    if (use_neon_instructions) {
        tcg_target_available_regs[TCG_TYPE_V64] = ALL_VECTOR_REGS;
        tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS;

        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q0);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q1);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q2);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q3);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q8);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q9);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q10);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q11);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q12);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q13);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q14);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q15);
    }

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_PC);
    tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP);
}
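
/*
 * The 0x7d0/0xad0 constants below pack the NEON VLD1/VST1 "regs/size/
 * align" fields: one or two D registers, 64-bit elements, 64-bit
 * alignment (per the "regs N; size 8; align 8" comments at each use).
 */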
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    switch (type) {
    case TCG_TYPE_I32:
        tcg_out_ld32u(s, COND_AL, arg, arg1, arg2);
        break;
    case TCG_TYPE_V64:
        /* regs 1; size 8; align 8 */
        tcg_out_vldst(s, INSN_VLD1 | 0x7d0, arg, arg1, arg2);
        break;
    case TCG_TYPE_V128:
        /*
         * We have only 8-byte alignment for the stack per the ABI.
         * Rather than dynamically re-align the stack, it's easier
         * to simply not request alignment beyond that. So:
         * regs 2; size 8; align 8
         */
        tcg_out_vldst(s, INSN_VLD1 | 0xad0, arg, arg1, arg2);
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    switch (type) {
    case TCG_TYPE_I32:
        tcg_out_st32(s, COND_AL, arg, arg1, arg2);
        break;
    case TCG_TYPE_V64:
        /* regs 1; size 8; align 8 */
        tcg_out_vldst(s, INSN_VST1 | 0x7d0, arg, arg1, arg2);
        break;
    case TCG_TYPE_V128:
        /* See tcg_out_ld re alignment: regs 2; size 8; align 8 */
        tcg_out_vldst(s, INSN_VST1 | 0xad0, arg, arg1, arg2);
        break;
    default:
        g_assert_not_reached();
    }
}
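
/*
 * Returning false tells the common code there is no direct
 * store-of-immediate: the constant is first materialized into a
 * register and then stored with tcg_out_st.
 */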
static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs)
{
    return false;
}

static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    if (ret == arg) {
        return true;
    }
    switch (type) {
    case TCG_TYPE_I32:
        if (ret < TCG_REG_Q0 && arg < TCG_REG_Q0) {
            tcg_out_mov_reg(s, COND_AL, ret, arg);
            return true;
        }
        return false;

    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
        /* "VMOV D,N" is an alias for "VORR D,N,N". */
        tcg_out_vreg3(s, INSN_VORR, type - TCG_TYPE_V64, 0, ret, arg, arg);
        return true;

    default:
        g_assert_not_reached();
    }
}

static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg)
{
    tcg_debug_assert(type == TCG_TYPE_I32);
    tcg_debug_assert(ret < TCG_REG_Q0);
    tcg_out_movi32(s, COND_AL, ret, arg);
}
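
/* No host exchange insn; returning false lets the common code break the
   register cycle with a scratch instead. */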
static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
{
    return false;
}

static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                             tcg_target_long imm)
{
    int enc, opc = ARITH_ADD;

    /* All of the easiest immediates to encode are positive. */
    if (imm < 0) {
        imm = -imm;
        opc = ARITH_SUB;
    }
    enc = encode_imm(imm);
    if (enc >= 0) {
        tcg_out_dat_imm(s, COND_AL, opc, rd, rs, enc);
    } else {
        tcg_out_movi32(s, COND_AL, TCG_REG_TMP, imm);
        tcg_out_dat_reg(s, COND_AL, opc, rd, rs,
                        TCG_REG_TMP, SHIFT_IMM_LSL(0));
    }
}
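
/*
 * For example: imm = 0x104 (0x41 rotated) passes encode_imm and emits a
 * single ADD; imm = -0x104 is flipped to SUB with the same encoding;
 * imm = 0x12345 spans more than 8 significant bits, so it takes the
 * movi32 + ADD-register path.
 */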
/* Type is always V128, with I64 elements. */
static void tcg_out_dup2_vec(TCGContext *s, TCGReg rd, TCGReg rl, TCGReg rh)
{
    /* Move high element into place first. */
    /* VMOV Dd+1, Ds */
    tcg_out_vreg3(s, INSN_VORR | (1 << 12), 0, 0, rd, rh, rh);
    /* Move low element into place; tcg_out_mov will check for nop. */
    tcg_out_mov(s, TCG_TYPE_V64, rd, rl);
}

static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                            TCGReg rd, TCGReg rs)
{
    int q = type - TCG_TYPE_V64;

    if (vece == MO_64) {
        if (type == TCG_TYPE_V128) {
            tcg_out_dup2_vec(s, rd, rs, rs);
        } else {
            tcg_out_mov(s, TCG_TYPE_V64, rd, rs);
        }
    } else if (rs < TCG_REG_Q0) {
        int b = (vece == MO_8);
        int e = (vece == MO_16);
        tcg_out32(s, INSN_VDUP_G | (b << 22) | (q << 21) | (e << 5) |
                  encode_vn(rd) | (rs << 12));
    } else {
        int imm4 = 1 << vece;
        tcg_out32(s, INSN_VDUP_S | (imm4 << 16) | (q << 6) |
                  encode_vd(rd) | encode_vm(rs));
    }
    return true;
}

static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg rd, TCGReg base, intptr_t offset)
{
    if (vece == MO_64) {
        tcg_out_ld(s, TCG_TYPE_V64, rd, base, offset);
        if (type == TCG_TYPE_V128) {
            tcg_out_dup2_vec(s, rd, rd, rd);
        }
    } else {
        int q = type - TCG_TYPE_V64;
        tcg_out_vldst(s, INSN_VLD1R | (vece << 6) | (q << 5),
                      rd, base, offset);
    }
    return true;
}

static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg rd, int64_t v64)
{
    int q = type - TCG_TYPE_V64;
    int cmode, imm8, i;

    /* Test all bytes equal first. */
    if (vece == MO_8) {
        tcg_out_vmovi(s, rd, q, 0, 0xe, v64);
        return;
    }

    /*
     * Test all bytes 0x00 or 0xff second. This can match cases that
     * might otherwise take 2 or 3 insns for MO_16 or MO_32 below.
     */
    for (i = imm8 = 0; i < 8; i++) {
        uint8_t byte = v64 >> (i * 8);
        if (byte == 0xff) {
            imm8 |= 1 << i;
        } else if (byte != 0) {
            goto fail_bytes;
        }
    }
    tcg_out_vmovi(s, rd, q, 1, 0xe, imm8);
    return;
 fail_bytes:

    /*
     * Tests for various replications. For each element width, if we
     * cannot find an expansion there's no point checking a larger
     * width because we already know by replication it cannot match.
     */
    if (vece == MO_16) {
        uint16_t v16 = v64;

        if (is_shimm16(v16, &cmode, &imm8)) {
            tcg_out_vmovi(s, rd, q, 0, cmode, imm8);
            return;
        }
        if (is_shimm16(~v16, &cmode, &imm8)) {
            tcg_out_vmovi(s, rd, q, 1, cmode, imm8);
            return;
        }

        /*
         * Otherwise, all remaining constants can be loaded in two insns:
         * rd = v16 & 0xff, rd |= v16 & 0xff00.
         */
        tcg_out_vmovi(s, rd, q, 0, 0x8, v16 & 0xff);
        tcg_out_vmovi(s, rd, q, 0, 0xb, v16 >> 8);   /* VORRI */
        return;
    }

    if (vece == MO_32) {
        uint32_t v32 = v64;

        if (is_shimm32(v32, &cmode, &imm8) ||
            is_soimm32(v32, &cmode, &imm8)) {
            tcg_out_vmovi(s, rd, q, 0, cmode, imm8);
            return;
        }
        if (is_shimm32(~v32, &cmode, &imm8) ||
            is_soimm32(~v32, &cmode, &imm8)) {
            tcg_out_vmovi(s, rd, q, 1, cmode, imm8);
            return;
        }

        /*
         * Restrict the set of constants to those we can load with
         * two instructions. Others we load from the pool.
         */
        i = is_shimm32_pair(v32, &cmode, &imm8);
        if (i) {
            tcg_out_vmovi(s, rd, q, 0, cmode, imm8);
            tcg_out_vmovi(s, rd, q, 0, i | 1, extract32(v32, i * 4, 8));
            return;
        }
        i = is_shimm32_pair(~v32, &cmode, &imm8);
        if (i) {
            tcg_out_vmovi(s, rd, q, 1, cmode, imm8);
            tcg_out_vmovi(s, rd, q, 1, i | 1, extract32(~v32, i * 4, 8));
            return;
        }
    }

    /*
     * As a last resort, load from the constant pool.
     */
    if (!q || vece == MO_64) {
        new_pool_l2(s, R_ARM_PC11, s->code_ptr, 0, v64, v64 >> 32);
        /* VLDR Dd, [pc + offset] */
        tcg_out32(s, INSN_VLDR_D | encode_vd(rd) | (0xf << 16));
        if (q) {
            tcg_out_dup2_vec(s, rd, rd, rd);
        }
    } else {
        new_pool_label(s, (uint32_t)v64, R_ARM_PC8, s->code_ptr, 0);
        /* add tmp, pc, offset */
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_TMP, TCG_REG_PC, 0);
        tcg_out_dupm_vec(s, type, MO_32, rd, TCG_REG_TMP, 0);
    }
}
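
/*
 * Table entries not listed are 0, meaning no direct NEON encoding for
 * that condition; the cmp_vec expansion below then swaps the operands
 * and retries with the reversed condition (e.g. LT via GT).
 */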
static const ARMInsn vec_cmp_insn[16] = {
    [TCG_COND_EQ] = INSN_VCEQ,
    [TCG_COND_GT] = INSN_VCGT,
    [TCG_COND_GE] = INSN_VCGE,
    [TCG_COND_GTU] = INSN_VCGT_U,
    [TCG_COND_GEU] = INSN_VCGE_U,
};

static const ARMInsn vec_cmp0_insn[16] = {
    [TCG_COND_EQ] = INSN_VCEQ0,
    [TCG_COND_GT] = INSN_VCGT0,
    [TCG_COND_GE] = INSN_VCGE0,
    [TCG_COND_LT] = INSN_VCLT0,
    [TCG_COND_LE] = INSN_VCLE0,
};

static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
                           unsigned vecl, unsigned vece,
                           const TCGArg args[TCG_MAX_OP_ARGS],
                           const int const_args[TCG_MAX_OP_ARGS])
{
    TCGType type = vecl + TCG_TYPE_V64;
    unsigned q = vecl;
    TCGArg a0, a1, a2, a3;
    int cmode, imm8;

    a0 = args[0];
    a1 = args[1];
    a2 = args[2];

    switch (opc) {
    case INDEX_op_ld_vec:
        tcg_out_ld(s, type, a0, a1, a2);
        break;
    case INDEX_op_st_vec:
        tcg_out_st(s, type, a0, a1, a2);
        break;
    case INDEX_op_dupm_vec:
        tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
        break;
    case INDEX_op_dup2_vec:
        tcg_out_dup2_vec(s, a0, a1, a2);
        break;
    case INDEX_op_abs_vec:
        tcg_out_vreg2(s, INSN_VABS, q, vece, a0, a1);
        break;
    case INDEX_op_neg_vec:
        tcg_out_vreg2(s, INSN_VNEG, q, vece, a0, a1);
        break;
    case INDEX_op_not_vec:
        tcg_out_vreg2(s, INSN_VMVN, q, 0, a0, a1);
        break;
    case INDEX_op_add_vec:
        tcg_out_vreg3(s, INSN_VADD, q, vece, a0, a1, a2);
        break;
    case INDEX_op_mul_vec:
        tcg_out_vreg3(s, INSN_VMUL, q, vece, a0, a1, a2);
        break;
    case INDEX_op_smax_vec:
        tcg_out_vreg3(s, INSN_VMAX, q, vece, a0, a1, a2);
        break;
    case INDEX_op_smin_vec:
        tcg_out_vreg3(s, INSN_VMIN, q, vece, a0, a1, a2);
        break;
    case INDEX_op_sub_vec:
        tcg_out_vreg3(s, INSN_VSUB, q, vece, a0, a1, a2);
        break;
    case INDEX_op_ssadd_vec:
        tcg_out_vreg3(s, INSN_VQADD, q, vece, a0, a1, a2);
        break;
    case INDEX_op_sssub_vec:
        tcg_out_vreg3(s, INSN_VQSUB, q, vece, a0, a1, a2);
        break;
    case INDEX_op_umax_vec:
        tcg_out_vreg3(s, INSN_VMAX_U, q, vece, a0, a1, a2);
        break;
    case INDEX_op_umin_vec:
        tcg_out_vreg3(s, INSN_VMIN_U, q, vece, a0, a1, a2);
        break;
    case INDEX_op_usadd_vec:
        tcg_out_vreg3(s, INSN_VQADD_U, q, vece, a0, a1, a2);
        break;
    case INDEX_op_ussub_vec:
        tcg_out_vreg3(s, INSN_VQSUB_U, q, vece, a0, a1, a2);
        break;
    case INDEX_op_xor_vec:
        tcg_out_vreg3(s, INSN_VEOR, q, 0, a0, a1, a2);
        break;
    case INDEX_op_arm_sshl_vec:
        /*
         * Note that Vm is the data and Vn is the shift count,
         * therefore the arguments appear reversed.
         */
        tcg_out_vreg3(s, INSN_VSHL_S, q, vece, a0, a2, a1);
        break;
    case INDEX_op_arm_ushl_vec:
        /* See above. */
        tcg_out_vreg3(s, INSN_VSHL_U, q, vece, a0, a2, a1);
        break;
    case INDEX_op_shli_vec:
        tcg_out_vshifti(s, INSN_VSHLI, q, a0, a1, a2 + (8 << vece));
        break;
    case INDEX_op_shri_vec:
        tcg_out_vshifti(s, INSN_VSHRI, q, a0, a1, (16 << vece) - a2);
        break;
    case INDEX_op_sari_vec:
        tcg_out_vshifti(s, INSN_VSARI, q, a0, a1, (16 << vece) - a2);
        break;
    case INDEX_op_arm_sli_vec:
        tcg_out_vshifti(s, INSN_VSLI, q, a0, a2, args[3] + (8 << vece));
        break;

    case INDEX_op_andc_vec:
        if (!const_args[2]) {
            tcg_out_vreg3(s, INSN_VBIC, q, 0, a0, a1, a2);
            break;
        }
        a2 = ~a2;
        /* fall through */
    case INDEX_op_and_vec:
        if (const_args[2]) {
            is_shimm1632(~a2, &cmode, &imm8);
            if (a0 == a1) {
                tcg_out_vmovi(s, a0, q, 1, cmode | 1, imm8); /* VBICI */
                return;
            }
            tcg_out_vmovi(s, a0, q, 1, cmode, imm8); /* VMVNI */
            a1 = a0;
        }
        tcg_out_vreg3(s, INSN_VAND, q, 0, a0, a1, a2);
        break;

    case INDEX_op_orc_vec:
        if (!const_args[2]) {
            tcg_out_vreg3(s, INSN_VORN, q, 0, a0, a1, a2);
            break;
        }
        a2 = ~a2;
        /* fall through */
    case INDEX_op_or_vec:
        if (const_args[2]) {
            is_shimm1632(a2, &cmode, &imm8);
            if (a0 == a1) {
                tcg_out_vmovi(s, a0, q, 0, cmode | 1, imm8); /* VORRI */
                return;
            }
            tcg_out_vmovi(s, a0, q, 0, cmode, imm8); /* VMOVI */
            a1 = a0;
        }
        tcg_out_vreg3(s, INSN_VORR, q, 0, a0, a1, a2);
        break;

    case INDEX_op_cmp_vec:
        {
            TCGCond cond = args[3];
            ARMInsn insn;

            switch (cond) {
            case TCG_COND_NE:
                if (const_args[2]) {
                    tcg_out_vreg3(s, INSN_VTST, q, vece, a0, a1, a1);
                } else {
                    tcg_out_vreg3(s, INSN_VCEQ, q, vece, a0, a1, a2);
                    tcg_out_vreg2(s, INSN_VMVN, q, 0, a0, a0);
                }
                break;

            case TCG_COND_TSTNE:
            case TCG_COND_TSTEQ:
                if (const_args[2]) {
                    /* With a constant zero mask, the result is constant. */
                    tcg_out_dupi_vec(s, type, MO_8, a0,
                                     -(cond == TCG_COND_TSTEQ));
                    break;
                }
                tcg_out_vreg3(s, INSN_VTST, q, vece, a0, a1, a2);
                if (cond == TCG_COND_TSTEQ) {
                    tcg_out_vreg2(s, INSN_VMVN, q, 0, a0, a0);
                }
                break;

            default:
                if (const_args[2]) {
                    insn = vec_cmp0_insn[cond];
                    if (insn) {
                        tcg_out_vreg2(s, insn, q, vece, a0, a1);
                        break;
                    }
                    tcg_out_dupi_vec(s, type, MO_8, TCG_VEC_TMP, 0);
                    a2 = TCG_VEC_TMP;
                }
                insn = vec_cmp_insn[cond];
                if (insn == 0) {
                    TCGArg t;
                    t = a1, a1 = a2, a2 = t;
                    cond = tcg_swap_cond(cond);
                    insn = vec_cmp_insn[cond];
                    tcg_debug_assert(insn != 0);
                }
                tcg_out_vreg3(s, insn, q, vece, a0, a1, a2);
                break;
            }
        }
        break;

    case INDEX_op_bitsel_vec:
        a3 = args[3];
        if (a0 == a3) {
            tcg_out_vreg3(s, INSN_VBIT, q, 0, a0, a2, a1);
        } else if (a0 == a2) {
            tcg_out_vreg3(s, INSN_VBIF, q, 0, a0, a3, a1);
        } else {
            /* VBSL wants the selector in the destination; copy it first. */
            tcg_out_mov(s, type, a0, a1);
            tcg_out_vreg3(s, INSN_VBSL, q, 0, a0, a2, a3);
        }
        break;

    case INDEX_op_mov_vec: /* Always emitted via tcg_out_mov. */
    case INDEX_op_dup_vec: /* Always emitted via tcg_out_dup_vec. */
    default:
        g_assert_not_reached();
    }
}
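
/*
 * Report vector op support to the common code: 1 = directly supported,
 * 0 = unsupported, -1 = supported via the expansion in
 * tcg_expand_vec_op below.
 */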
int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
{
    switch (opc) {
    case INDEX_op_add_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_and_vec:
    case INDEX_op_andc_vec:
    case INDEX_op_or_vec:
    case INDEX_op_orc_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_not_vec:
    case INDEX_op_shli_vec:
    case INDEX_op_shri_vec:
    case INDEX_op_sari_vec:
    case INDEX_op_ssadd_vec:
    case INDEX_op_sssub_vec:
    case INDEX_op_usadd_vec:
    case INDEX_op_ussub_vec:
    case INDEX_op_bitsel_vec:
        return 1;
    case INDEX_op_abs_vec:
    case INDEX_op_cmp_vec:
    case INDEX_op_mul_vec:
    case INDEX_op_neg_vec:
    case INDEX_op_smax_vec:
    case INDEX_op_smin_vec:
    case INDEX_op_umax_vec:
    case INDEX_op_umin_vec:
        return vece < MO_64;
    case INDEX_op_shlv_vec:
    case INDEX_op_shrv_vec:
    case INDEX_op_sarv_vec:
    case INDEX_op_rotli_vec:
    case INDEX_op_rotlv_vec:
    case INDEX_op_rotrv_vec:
        return -1;
    default:
        return 0;
    }
}

void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
                       TCGArg a0, ...)
{
    va_list va;
    TCGv_vec v0, v1, v2, t1, t2, c1;
    TCGArg a2;

    va_start(va, a0);
    v0 = temp_tcgv_vec(arg_temp(a0));
    v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
    a2 = va_arg(va, TCGArg);
    va_end(va);

    switch (opc) {
    case INDEX_op_shlv_vec:
        /*
         * Merely propagate shlv_vec to arm_ushl_vec.
         * In this way we don't set TCG_TARGET_HAS_shv_vec
         * because everything is done via expansion.
         */
        v2 = temp_tcgv_vec(arg_temp(a2));
        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(v0),
                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
        break;

    case INDEX_op_shrv_vec:
    case INDEX_op_sarv_vec:
        /* Right shifts are negative left shifts for NEON. */
        v2 = temp_tcgv_vec(arg_temp(a2));
        t1 = tcg_temp_new_vec(type);
        tcg_gen_neg_vec(vece, t1, v2);
        if (opc == INDEX_op_shrv_vec) {
            opc = INDEX_op_arm_ushl_vec;
        } else {
            opc = INDEX_op_arm_sshl_vec;
        }
        vec_gen_3(opc, type, vece, tcgv_vec_arg(v0),
                  tcgv_vec_arg(v1), tcgv_vec_arg(t1));
        tcg_temp_free_vec(t1);
        break;

    case INDEX_op_rotli_vec:
        t1 = tcg_temp_new_vec(type);
        tcg_gen_shri_vec(vece, t1, v1, -a2 & ((8 << vece) - 1));
        vec_gen_4(INDEX_op_arm_sli_vec, type, vece,
                  tcgv_vec_arg(v0), tcgv_vec_arg(t1), tcgv_vec_arg(v1), a2);
        tcg_temp_free_vec(t1);
        break;

    case INDEX_op_rotlv_vec:
        v2 = temp_tcgv_vec(arg_temp(a2));
        t1 = tcg_temp_new_vec(type);
        c1 = tcg_constant_vec(type, vece, 8 << vece);
        tcg_gen_sub_vec(vece, t1, v2, c1);
        /* Right shifts are negative left shifts for NEON. */
        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t1),
                  tcgv_vec_arg(v1), tcgv_vec_arg(t1));
        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(v0),
                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
        tcg_gen_or_vec(vece, v0, v0, t1);
        tcg_temp_free_vec(t1);
        break;
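
    /*
     * rotr(x, n) == shr(x, n) | shl(x, w - n).  Both halves below use
     * VSHL with adjusted counts, since NEON right shifts are negative
     * left shifts.
     */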
    case INDEX_op_rotrv_vec:
        v2 = temp_tcgv_vec(arg_temp(a2));
        t1 = tcg_temp_new_vec(type);
        t2 = tcg_temp_new_vec(type);
        c1 = tcg_constant_vec(type, vece, 8 << vece);
        tcg_gen_neg_vec(vece, t1, v2);
        tcg_gen_sub_vec(vece, t2, c1, v2);
        /* Right shifts are negative left shifts for NEON. */
        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t1),
                  tcgv_vec_arg(v1), tcgv_vec_arg(t1));
        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t2),
                  tcgv_vec_arg(v1), tcgv_vec_arg(t2));
        tcg_gen_or_vec(vece, v0, t1, t2);
        tcg_temp_free_vec(t1);
        tcg_temp_free_vec(t2);
        break;

    default:
        g_assert_not_reached();
    }
}

static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    int i;
    for (i = 0; i < count; ++i) {
        p[i] = INSN_NOP;
    }
}

/* Compute frame size via macros, to share between tcg_target_qemu_prologue
   and tcg_register_jit. */

#define PUSH_SIZE ((11 - 4 + 1 + 1) * sizeof(tcg_target_long))
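/* That is: r4-r11 (8 registers) plus lr = 9 words pushed. */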

#define FRAME_SIZE \
    ((PUSH_SIZE \
      + TCG_STATIC_CALL_ARGS_SIZE \
      + CPU_TEMP_BUF_NLONGS * sizeof(long) \
      + TCG_TARGET_STACK_ALIGN - 1) \
     & -TCG_TARGET_STACK_ALIGN)

#define STACK_ADDEND (FRAME_SIZE - PUSH_SIZE)

static void tcg_target_qemu_prologue(TCGContext *s)
{
    /* Calling convention requires us to save r4-r11 and lr. */
    /* stmdb sp!, { r4 - r11, lr } */
    tcg_out_ldstm(s, COND_AL, INSN_STMDB, TCG_REG_CALL_STACK,
                  (1 << TCG_REG_R4) | (1 << TCG_REG_R5) | (1 << TCG_REG_R6) |
                  (1 << TCG_REG_R7) | (1 << TCG_REG_R8) | (1 << TCG_REG_R9) |
                  (1 << TCG_REG_R10) | (1 << TCG_REG_R11) | (1 << TCG_REG_R14));

    /* Reserve callee argument and tcg temp space. */
    tcg_out_dat_rI(s, COND_AL, ARITH_SUB, TCG_REG_CALL_STACK,
                   TCG_REG_CALL_STACK, STACK_ADDEND, 1);
    tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
                  CPU_TEMP_BUF_NLONGS * sizeof(long));

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);

    if (!tcg_use_softmmu && guest_base) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_GUEST_BASE, guest_base);
        tcg_regset_set_reg(s->reserved_regs, TCG_REG_GUEST_BASE);
    }

    tcg_out_b_reg(s, COND_AL, tcg_target_call_iarg_regs[1]);

    /*
     * Return path for goto_ptr. Set return value to 0, a-la exit_tb,
     * and fall through to the rest of the epilogue.
     */
    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, 0);
    tcg_out_epilogue(s);
}

static void tcg_out_epilogue(TCGContext *s)
{
    /* Release local stack frame. */
    tcg_out_dat_rI(s, COND_AL, ARITH_ADD, TCG_REG_CALL_STACK,
                   TCG_REG_CALL_STACK, STACK_ADDEND, 1);

    /* ldmia sp!, { r4 - r11, pc } */
    tcg_out_ldstm(s, COND_AL, INSN_LDMIA, TCG_REG_CALL_STACK,
                  (1 << TCG_REG_R4) | (1 << TCG_REG_R5) | (1 << TCG_REG_R6) |
                  (1 << TCG_REG_R7) | (1 << TCG_REG_R8) | (1 << TCG_REG_R9) |
                  (1 << TCG_REG_R10) | (1 << TCG_REG_R11) | (1 << TCG_REG_PC));
}
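
/*
 * The epilogue's ldmia must mirror the prologue's stmdb exactly, with
 * pc taking lr's slot so that the final load is also the return.
 */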

static void tcg_out_tb_start(TCGContext *s)
{
    /* nothing to do */
}

typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[18];
} DebugFrame;

#define ELF_HOST_MACHINE EM_ARM

/* We're expecting a 2 byte uleb128 encoded value. */
QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));
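/* (Two uleb128 bytes hold 14 bits, hence the bound above.) */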

static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = 0x7c,             /* sleb128 -4 */
    .h.cie.return_column = 14,

    /* Total FDE size does not include the "len" member. */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
        12, 13,                         /* DW_CFA_def_cfa sp, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        FRAME_SIZE >> 7
    },
    .fde_reg_ofs = {
        /* The following must match the stmdb in the prologue. */
        0x8e, 1,                        /* DW_CFA_offset, lr, -4 */
        0x8b, 2,                        /* DW_CFA_offset, r11, -8 */
        0x8a, 3,                        /* DW_CFA_offset, r10, -12 */
        0x89, 4,                        /* DW_CFA_offset, r9, -16 */
        0x88, 5,                        /* DW_CFA_offset, r8, -20 */
        0x87, 6,                        /* DW_CFA_offset, r7, -24 */
        0x86, 7,                        /* DW_CFA_offset, r6, -28 */
        0x85, 8,                        /* DW_CFA_offset, r5, -32 */
        0x84, 9,                        /* DW_CFA_offset, r4, -36 */
    }
};

void tcg_register_jit(const void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}