/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008-2009 Arnaud Patard <arnaud.patard@rtp-net.org>
 * Copyright (c) 2009 Aurelien Jarno <aurelien@aurel32.net>
 * Based on i386/tcg-target.c - Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "tcg-be-ldst.h"

#ifdef HOST_WORDS_BIGENDIAN
# define MIPS_BE  1
#else
# define MIPS_BE  0
#endif

#if TCG_TARGET_REG_BITS == 32
# define LO_OFF  (MIPS_BE * 4)
# define HI_OFF  (4 - LO_OFF)
#else
/* To assert at compile-time that these values are never used
   for TCG_TARGET_REG_BITS == 64.  */
/* extern */ int link_error(void);
# define LO_OFF  link_error()
# define HI_OFF  link_error()
#endif

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "zero", "at",   "v0",   "v1",   "a0",   "a1",   "a2",   "a3",
    "t0",   "t1",   "t2",   "t3",   "t4",   "t5",   "t6",   "t7",
    "s0",   "s1",   "s2",   "s3",   "s4",   "s5",   "s6",   "s7",
    "t8",   "t9",   "k0",   "k1",   "gp",   "sp",   "s8",   "ra",
};
#endif

#define TCG_TMP0  TCG_REG_AT
#define TCG_TMP1  TCG_REG_T9
#define TCG_TMP2  TCG_REG_T8
#define TCG_TMP3  TCG_REG_T7

#ifndef CONFIG_SOFTMMU
#define TCG_GUEST_BASE_REG TCG_REG_S1
#endif

/* check if we really need so many registers :P */
static const int tcg_target_reg_alloc_order[] = {
    /* Call saved registers.  */
    /* Call clobbered registers.  */
    /* Argument registers, opposite order of allocation.  */
};

static const TCGReg tcg_target_call_iarg_regs[] = {
    TCG_REG_A0, TCG_REG_A1, TCG_REG_A2, TCG_REG_A3,
#if _MIPS_SIM == _ABIN32 || _MIPS_SIM == _ABI64
    TCG_REG_T0, TCG_REG_T1, TCG_REG_T2, TCG_REG_T3,
#endif
};

static const TCGReg tcg_target_call_oarg_regs[2] = {
    TCG_REG_V0, TCG_REG_V1
};

static tcg_insn_unit *tb_ret_addr;
static tcg_insn_unit *bswap32_addr;
static tcg_insn_unit *bswap32u_addr;
static tcg_insn_unit *bswap64_addr;

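/* Example of the relocation arithmetic below: for a branch instruction
   at PC whose target is three instructions ahead, disp = target - (pc + 1)
   = 2, since MIPS branch offsets are counted in instruction words relative
   to the delay-slot PC; the CPU shifts the 16-bit field left by 2 and adds
   it to PC + 4. */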
static inline uint32_t reloc_pc16_val(tcg_insn_unit *pc, tcg_insn_unit *target)
{
    /* Let the compiler perform the right-shift as part of the arithmetic.  */
    ptrdiff_t disp = target - (pc + 1);
    tcg_debug_assert(disp == (int16_t)disp);
    return disp & 0xffff;
}

static inline void reloc_pc16(tcg_insn_unit *pc, tcg_insn_unit *target)
{
    *pc = deposit32(*pc, 0, 16, reloc_pc16_val(pc, target));
}

static inline uint32_t reloc_26_val(tcg_insn_unit *pc, tcg_insn_unit *target)
{
    tcg_debug_assert((((uintptr_t)pc ^ (uintptr_t)target) & 0xf0000000) == 0);
    return ((uintptr_t)target >> 2) & 0x3ffffff;
}

static inline void reloc_26(tcg_insn_unit *pc, tcg_insn_unit *target)
{
    *pc = deposit32(*pc, 0, 26, reloc_26_val(pc, target));
}

static void patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    tcg_debug_assert(type == R_MIPS_PC16);
    tcg_debug_assert(addend == 0);
    reloc_pc16(code_ptr, (tcg_insn_unit *)value);
}

#define TCG_CT_CONST_ZERO 0x100
#define TCG_CT_CONST_U16  0x200    /* Unsigned 16-bit: 0 - 0xffff.  */
#define TCG_CT_CONST_S16  0x400    /* Signed 16-bit: -32768 - 32767 */
#define TCG_CT_CONST_P2M1 0x800    /* Power of 2 minus 1.  */
#define TCG_CT_CONST_N16  0x1000   /* "Negatable" 16-bit: -32767 - 32767 */
#define TCG_CT_CONST_WSZ  0x2000   /* word size */

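/* Note on TCG_CT_CONST_P2M1: is_p2m1() below accepts exactly the masks
   that are a contiguous run of low-order one bits, e.g. 0xff or
   0x7fffffff, since (val + 1) & val == 0 iff val == 2^n - 1.  Such AND
   masks become a single EXT insn on mips32r2 (see tcg_out_op). */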
static inline bool is_p2m1(tcg_target_long val)
{
    return val && ((val + 1) & val) == 0;
}

/* parse target specific constraints */
static const char *target_parse_constraint(TCGArgConstraint *ct,
                                           const char *ct_str, TCGType type)
{
    switch (*ct_str++) {
    case 'r':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set(ct->u.regs, 0xffffffff);
        break;
    case 'L': /* qemu_ld input arg constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set(ct->u.regs, 0xffffffff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_A0);
#if defined(CONFIG_SOFTMMU)
        if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
            tcg_regset_reset_reg(ct->u.regs, TCG_REG_A2);
        }
#endif
        break;
    case 'S': /* qemu_st constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set(ct->u.regs, 0xffffffff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_A0);
#if defined(CONFIG_SOFTMMU)
        if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
            tcg_regset_reset_reg(ct->u.regs, TCG_REG_A2);
            tcg_regset_reset_reg(ct->u.regs, TCG_REG_A3);
        } else {
            tcg_regset_reset_reg(ct->u.regs, TCG_REG_A1);
        }
#endif
        break;
    case 'I':
        ct->ct |= TCG_CT_CONST_U16;
        break;
    case 'J':
        ct->ct |= TCG_CT_CONST_S16;
        break;
    case 'K':
        ct->ct |= TCG_CT_CONST_P2M1;
        break;
    case 'N':
        ct->ct |= TCG_CT_CONST_N16;
        break;
    case 'W':
        ct->ct |= TCG_CT_CONST_WSZ;
        break;
    case 'Z':
        /* We are cheating a bit here, using the fact that the register
           ZERO is also the register number 0. Hence there is no need
           to check for const_args in each instruction. */
        ct->ct |= TCG_CT_CONST_ZERO;
        break;
    default:
        return NULL;
    }
    return ct_str;
}

/* test if a constant matches the constraint */
static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
                                         const TCGArgConstraint *arg_ct)
{
    int ct;
    ct = arg_ct->ct;
    if (ct & TCG_CT_CONST) {
        return 1;
    } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    } else if ((ct & TCG_CT_CONST_U16) && val == (uint16_t)val) {
        return 1;
    } else if ((ct & TCG_CT_CONST_S16) && val == (int16_t)val) {
        return 1;
    } else if ((ct & TCG_CT_CONST_N16) && val >= -32767 && val <= 32767) {
        return 1;
    } else if ((ct & TCG_CT_CONST_P2M1)
               && use_mips32r2_instructions && is_p2m1(val)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_WSZ)
               && val == (type == TCG_TYPE_I32 ? 32 : 64)) {
        return 1;
    }
    return 0;
}

/* instruction opcodes */
typedef enum {
    OPC_J        = 002 << 26,
    OPC_JAL      = 003 << 26,
    OPC_BEQ      = 004 << 26,
    OPC_BNE      = 005 << 26,
    OPC_BLEZ     = 006 << 26,
    OPC_BGTZ     = 007 << 26,
    OPC_ADDIU    = 011 << 26,
    OPC_SLTI     = 012 << 26,
    OPC_SLTIU    = 013 << 26,
    OPC_ANDI     = 014 << 26,
    OPC_ORI      = 015 << 26,
    OPC_XORI     = 016 << 26,
    OPC_LUI      = 017 << 26,
    OPC_DADDIU   = 031 << 26,
    OPC_LB       = 040 << 26,
    OPC_LH       = 041 << 26,
    OPC_LW       = 043 << 26,
    OPC_LBU      = 044 << 26,
    OPC_LHU      = 045 << 26,
    OPC_LWU      = 047 << 26,
    OPC_SB       = 050 << 26,
    OPC_SH       = 051 << 26,
    OPC_SW       = 053 << 26,
    OPC_LD       = 067 << 26,
    OPC_SD       = 077 << 26,

    OPC_SPECIAL  = 000 << 26,
    OPC_SLL      = OPC_SPECIAL | 000,
    OPC_SRL      = OPC_SPECIAL | 002,
    OPC_ROTR     = OPC_SPECIAL | 002 | (1 << 21),
    OPC_SRA      = OPC_SPECIAL | 003,
    OPC_SLLV     = OPC_SPECIAL | 004,
    OPC_SRLV     = OPC_SPECIAL | 006,
    OPC_ROTRV    = OPC_SPECIAL | 006 | 0100,
    OPC_SRAV     = OPC_SPECIAL | 007,
    OPC_JR_R5    = OPC_SPECIAL | 010,
    OPC_JALR     = OPC_SPECIAL | 011,
    OPC_MOVZ     = OPC_SPECIAL | 012,
    OPC_MOVN     = OPC_SPECIAL | 013,
    OPC_SYNC     = OPC_SPECIAL | 017,
    OPC_MFHI     = OPC_SPECIAL | 020,
    OPC_MFLO     = OPC_SPECIAL | 022,
    OPC_DSLLV    = OPC_SPECIAL | 024,
    OPC_DSRLV    = OPC_SPECIAL | 026,
    OPC_DROTRV   = OPC_SPECIAL | 026 | 0100,
    OPC_DSRAV    = OPC_SPECIAL | 027,
    OPC_MULT     = OPC_SPECIAL | 030,
    OPC_MUL_R6   = OPC_SPECIAL | 030 | 0200,
    OPC_MUH      = OPC_SPECIAL | 030 | 0300,
    OPC_MULTU    = OPC_SPECIAL | 031,
    OPC_MULU     = OPC_SPECIAL | 031 | 0200,
    OPC_MUHU     = OPC_SPECIAL | 031 | 0300,
    OPC_DIV      = OPC_SPECIAL | 032,
    OPC_DIV_R6   = OPC_SPECIAL | 032 | 0200,
    OPC_MOD      = OPC_SPECIAL | 032 | 0300,
    OPC_DIVU     = OPC_SPECIAL | 033,
    OPC_DIVU_R6  = OPC_SPECIAL | 033 | 0200,
    OPC_MODU     = OPC_SPECIAL | 033 | 0300,
    OPC_DMULT    = OPC_SPECIAL | 034,
    OPC_DMUL     = OPC_SPECIAL | 034 | 0200,
    OPC_DMUH     = OPC_SPECIAL | 034 | 0300,
    OPC_DMULTU   = OPC_SPECIAL | 035,
    OPC_DMULU    = OPC_SPECIAL | 035 | 0200,
    OPC_DMUHU    = OPC_SPECIAL | 035 | 0300,
    OPC_DDIV     = OPC_SPECIAL | 036,
    OPC_DDIV_R6  = OPC_SPECIAL | 036 | 0200,
    OPC_DMOD     = OPC_SPECIAL | 036 | 0300,
    OPC_DDIVU    = OPC_SPECIAL | 037,
    OPC_DDIVU_R6 = OPC_SPECIAL | 037 | 0200,
    OPC_DMODU    = OPC_SPECIAL | 037 | 0300,
    OPC_ADDU     = OPC_SPECIAL | 041,
    OPC_SUBU     = OPC_SPECIAL | 043,
    OPC_AND      = OPC_SPECIAL | 044,
    OPC_OR       = OPC_SPECIAL | 045,
    OPC_XOR      = OPC_SPECIAL | 046,
    OPC_NOR      = OPC_SPECIAL | 047,
    OPC_SLT      = OPC_SPECIAL | 052,
    OPC_SLTU     = OPC_SPECIAL | 053,
    OPC_DADDU    = OPC_SPECIAL | 055,
    OPC_DSUBU    = OPC_SPECIAL | 057,
    OPC_SELEQZ   = OPC_SPECIAL | 065,
    OPC_SELNEZ   = OPC_SPECIAL | 067,
    OPC_DSLL     = OPC_SPECIAL | 070,
    OPC_DSRL     = OPC_SPECIAL | 072,
    OPC_DROTR    = OPC_SPECIAL | 072 | (1 << 21),
    OPC_DSRA     = OPC_SPECIAL | 073,
    OPC_DSLL32   = OPC_SPECIAL | 074,
    OPC_DSRL32   = OPC_SPECIAL | 076,
    OPC_DROTR32  = OPC_SPECIAL | 076 | (1 << 21),
    OPC_DSRA32   = OPC_SPECIAL | 077,
    OPC_CLZ_R6   = OPC_SPECIAL | 0120,
    OPC_DCLZ_R6  = OPC_SPECIAL | 0122,

    OPC_REGIMM   = 001 << 26,
    OPC_BLTZ     = OPC_REGIMM | (000 << 16),
    OPC_BGEZ     = OPC_REGIMM | (001 << 16),

    OPC_SPECIAL2 = 034 << 26,
    OPC_MUL_R5   = OPC_SPECIAL2 | 002,
    OPC_CLZ      = OPC_SPECIAL2 | 040,
    OPC_DCLZ     = OPC_SPECIAL2 | 044,

    OPC_SPECIAL3 = 037 << 26,
    OPC_EXT      = OPC_SPECIAL3 | 000,
    OPC_DEXTM    = OPC_SPECIAL3 | 001,
    OPC_DEXTU    = OPC_SPECIAL3 | 002,
    OPC_DEXT     = OPC_SPECIAL3 | 003,
    OPC_INS      = OPC_SPECIAL3 | 004,
    OPC_DINSM    = OPC_SPECIAL3 | 005,
    OPC_DINSU    = OPC_SPECIAL3 | 006,
    OPC_DINS     = OPC_SPECIAL3 | 007,
    OPC_WSBH     = OPC_SPECIAL3 | 00240,
    OPC_DSBH     = OPC_SPECIAL3 | 00244,
    OPC_DSHD     = OPC_SPECIAL3 | 00544,
    OPC_SEB      = OPC_SPECIAL3 | 02040,
    OPC_SEH      = OPC_SPECIAL3 | 03040,

    /* MIPS r6 doesn't have JR, JALR should be used instead */
    OPC_JR       = use_mips32r6_instructions ? OPC_JALR : OPC_JR_R5,

    /*
     * MIPS r6 replaces MUL with an alternative encoding which is
     * backwards-compatible at the assembly level.
     */
    OPC_MUL      = use_mips32r6_instructions ? OPC_MUL_R6 : OPC_MUL_R5,

    /* MIPS r6 introduced names for weaker variants of SYNC.  These are
       backward compatible to previous architecture revisions.  */
    OPC_SYNC_WMB     = OPC_SYNC | 0x04 << 5,
    OPC_SYNC_MB      = OPC_SYNC | 0x10 << 5,
    OPC_SYNC_ACQUIRE = OPC_SYNC | 0x11 << 5,
    OPC_SYNC_RELEASE = OPC_SYNC | 0x12 << 5,
    OPC_SYNC_RMB     = OPC_SYNC | 0x13 << 5,

    /* Aliases for convenience.  */
    ALIAS_PADD   = sizeof(void *) == 4 ? OPC_ADDU : OPC_DADDU,
    ALIAS_PADDI  = sizeof(void *) == 4 ? OPC_ADDIU : OPC_DADDIU,
    ALIAS_TSRL   = TARGET_LONG_BITS == 32 || TCG_TARGET_REG_BITS == 32
                   ? OPC_SRL : OPC_DSRL,
} MIPSInsn;

static inline void tcg_out_opc_reg(TCGContext *s, MIPSInsn opc,
                                   TCGReg rd, TCGReg rs, TCGReg rt)
{
    int32_t inst;

    inst = opc;
    inst |= (rs & 0x1F) << 21;
    inst |= (rt & 0x1F) << 16;
    inst |= (rd & 0x1F) << 11;
    tcg_out32(s, inst);
}

static inline void tcg_out_opc_imm(TCGContext *s, MIPSInsn opc,
                                   TCGReg rt, TCGReg rs, TCGArg imm)
{
    int32_t inst;

    inst = opc;
    inst |= (rs & 0x1F) << 21;
    inst |= (rt & 0x1F) << 16;
    inst |= (imm & 0xffff);
    tcg_out32(s, inst);
}

static inline void tcg_out_opc_bf(TCGContext *s, MIPSInsn opc, TCGReg rt,
                                  TCGReg rs, int msb, int lsb)
{
    int32_t inst;

    inst = opc;
    inst |= (rs & 0x1F) << 21;
    inst |= (rt & 0x1F) << 16;
    inst |= (msb & 0x1F) << 11;
    inst |= (lsb & 0x1F) << 6;
    tcg_out32(s, inst);
}

static inline void tcg_out_opc_bf64(TCGContext *s, MIPSInsn opc, MIPSInsn opm,
                                    MIPSInsn oph, TCGReg rt, TCGReg rs,
                                    int msb, int lsb)
{
    if (lsb >= 32) {
        opc = oph;
        msb -= 32;
        lsb -= 32;
    } else if (msb >= 32) {
        opc = opm;
        msb -= 32;
    }
    tcg_out_opc_bf(s, opc, rt, rs, msb, lsb);
}

static inline void tcg_out_opc_br(TCGContext *s, MIPSInsn opc,
                                  TCGReg rt, TCGReg rs)
{
    /* We pay attention here to not modify the branch target by reading
       the existing value and using it again. This ensures that caches and
       memory are kept coherent during retranslation. */
    uint16_t offset = (uint16_t)*s->code_ptr;

    tcg_out_opc_imm(s, opc, rt, rs, offset);
}

static inline void tcg_out_opc_sa(TCGContext *s, MIPSInsn opc,
                                  TCGReg rd, TCGReg rt, TCGArg sa)
{
    int32_t inst;

    inst = opc;
    inst |= (rt & 0x1F) << 16;
    inst |= (rd & 0x1F) << 11;
    inst |= (sa & 0x1F) << 6;
    tcg_out32(s, inst);
}

static void tcg_out_opc_sa64(TCGContext *s, MIPSInsn opc1, MIPSInsn opc2,
                             TCGReg rd, TCGReg rt, TCGArg sa)
{
    int32_t inst;

    inst = (sa & 32 ? opc2 : opc1);
    inst |= (rt & 0x1F) << 16;
    inst |= (rd & 0x1F) << 11;
    inst |= (sa & 0x1F) << 6;
    tcg_out32(s, inst);
}

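/* A jump-region reminder for the routine below: J/JAL can only reach
   targets within the same 256MB region as the delay slot, i.e. the top
   four address bits must match.  E.g. a jump whose delay slot sits at
   0x2fff_fffc can reach 0x2xxx_xxxx but not 0x3000_0000, hence the
   (from ^ dest) & -(1 << 28) test. */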
/* Returns true if the branch was in range and the insn was emitted.  */
static bool tcg_out_opc_jmp(TCGContext *s, MIPSInsn opc, void *target)
{
    uintptr_t dest = (uintptr_t)target;
    uintptr_t from = (uintptr_t)s->code_ptr + 4;
    int32_t inst;

    /* The pc-region branch happens within the 256MB region of
       the delay slot (thus the +4).  */
    if ((from ^ dest) & -(1 << 28)) {
        return false;
    }
    tcg_debug_assert((dest & 3) == 0);

    inst = opc;
    inst |= (dest >> 2) & 0x3ffffff;
    tcg_out32(s, inst);
    return true;
}

static inline void tcg_out_nop(TCGContext *s)
{
    tcg_out32(s, 0);
}

static inline void tcg_out_dsll(TCGContext *s, TCGReg rd, TCGReg rt, TCGArg sa)
{
    tcg_out_opc_sa64(s, OPC_DSLL, OPC_DSLL32, rd, rt, sa);
}

static inline void tcg_out_dsrl(TCGContext *s, TCGReg rd, TCGReg rt, TCGArg sa)
{
    tcg_out_opc_sa64(s, OPC_DSRL, OPC_DSRL32, rd, rt, sa);
}

static inline void tcg_out_dsra(TCGContext *s, TCGReg rd, TCGReg rt, TCGArg sa)
{
    tcg_out_opc_sa64(s, OPC_DSRA, OPC_DSRA32, rd, rt, sa);
}

static inline void tcg_out_mov(TCGContext *s, TCGType type,
                               TCGReg ret, TCGReg arg)
{
    /* Simple reg-reg move, optimising out the 'do nothing' case */
    if (ret != arg) {
        tcg_out_opc_reg(s, OPC_OR, ret, arg, TCG_REG_ZERO);
    }
}

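/* Constant synthesis example for tcg_out_movi below: the worst 64-bit
   case, e.g. arg = 0x1234_5678_9abc_def0, takes six insns -- LUI+ORI
   build the high 32 bits via the recursive call, DSLL/ORI/DSLL shift in
   bits 31..16, and a final ORI adds bits 15..0. */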
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg)
{
    if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
        arg = (int32_t)arg;
    }
    if (arg == (int16_t)arg) {
        tcg_out_opc_imm(s, OPC_ADDIU, ret, TCG_REG_ZERO, arg);
        return;
    }
    if (arg == (uint16_t)arg) {
        tcg_out_opc_imm(s, OPC_ORI, ret, TCG_REG_ZERO, arg);
        return;
    }
    if (TCG_TARGET_REG_BITS == 32 || arg == (int32_t)arg) {
        tcg_out_opc_imm(s, OPC_LUI, ret, TCG_REG_ZERO, arg >> 16);
    } else {
        tcg_out_movi(s, TCG_TYPE_I32, ret, arg >> 31 >> 1);
        if (arg & 0xffff0000ull) {
            tcg_out_dsll(s, ret, ret, 16);
            tcg_out_opc_imm(s, OPC_ORI, ret, ret, arg >> 16);
            tcg_out_dsll(s, ret, ret, 16);
        } else {
            tcg_out_dsll(s, ret, ret, 32);
        }
    }
    if (arg & 0xffff) {
        tcg_out_opc_imm(s, OPC_ORI, ret, ret, arg & 0xffff);
    }
}

static inline void tcg_out_bswap16(TCGContext *s, TCGReg ret, TCGReg arg)
{
    if (use_mips32r2_instructions) {
        tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg);
    } else {
        /* ret and arg can't be the AT register (TCG_TMP0) */
        if (ret == TCG_TMP0 || arg == TCG_TMP0) {
            tcg_abort();
        }
        tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, arg, 8);
        tcg_out_opc_sa(s, OPC_SLL, ret, arg, 8);
        tcg_out_opc_imm(s, OPC_ANDI, ret, ret, 0xff00);
        tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_TMP0);
    }
}

static inline void tcg_out_bswap16s(TCGContext *s, TCGReg ret, TCGReg arg)
{
    if (use_mips32r2_instructions) {
        tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg);
        tcg_out_opc_reg(s, OPC_SEH, ret, 0, ret);
    } else {
        /* ret and arg can't be the AT register (TCG_TMP0) */
        if (ret == TCG_TMP0 || arg == TCG_TMP0) {
            tcg_abort();
        }
        tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, arg, 8);
        tcg_out_opc_sa(s, OPC_SLL, ret, arg, 24);
        tcg_out_opc_sa(s, OPC_SRA, ret, ret, 16);
        tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_TMP0);
    }
}

static void tcg_out_bswap_subr(TCGContext *s, tcg_insn_unit *sub)
{
    bool ok = tcg_out_opc_jmp(s, OPC_JAL, sub);
    tcg_debug_assert(ok);
}

static void tcg_out_bswap32(TCGContext *s, TCGReg ret, TCGReg arg)
{
    if (use_mips32r2_instructions) {
        tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg);
        tcg_out_opc_sa(s, OPC_ROTR, ret, ret, 16);
    } else {
        tcg_out_bswap_subr(s, bswap32_addr);
        /* delay slot -- never omit the insn, like tcg_out_mov might.  */
        tcg_out_opc_reg(s, OPC_OR, TCG_TMP0, arg, TCG_REG_ZERO);
        tcg_out_mov(s, TCG_TYPE_I32, ret, TCG_TMP3);
    }
}

static void tcg_out_bswap32u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    if (use_mips32r2_instructions) {
        tcg_out_opc_reg(s, OPC_DSBH, ret, 0, arg);
        tcg_out_opc_reg(s, OPC_DSHD, ret, 0, ret);
        tcg_out_dsrl(s, ret, ret, 32);
    } else {
        tcg_out_bswap_subr(s, bswap32u_addr);
        /* delay slot -- never omit the insn, like tcg_out_mov might.  */
        tcg_out_opc_reg(s, OPC_OR, TCG_TMP0, arg, TCG_REG_ZERO);
        tcg_out_mov(s, TCG_TYPE_I32, ret, TCG_TMP3);
    }
}

static void tcg_out_bswap64(TCGContext *s, TCGReg ret, TCGReg arg)
{
    if (use_mips32r2_instructions) {
        tcg_out_opc_reg(s, OPC_DSBH, ret, 0, arg);
        tcg_out_opc_reg(s, OPC_DSHD, ret, 0, ret);
    } else {
        tcg_out_bswap_subr(s, bswap64_addr);
        /* delay slot -- never omit the insn, like tcg_out_mov might.  */
        tcg_out_opc_reg(s, OPC_OR, TCG_TMP0, arg, TCG_REG_ZERO);
        tcg_out_mov(s, TCG_TYPE_I32, ret, TCG_TMP3);
    }
}

static inline void tcg_out_ext8s(TCGContext *s, TCGReg ret, TCGReg arg)
{
    if (use_mips32r2_instructions) {
        tcg_out_opc_reg(s, OPC_SEB, ret, 0, arg);
    } else {
        tcg_out_opc_sa(s, OPC_SLL, ret, arg, 24);
        tcg_out_opc_sa(s, OPC_SRA, ret, ret, 24);
    }
}

static inline void tcg_out_ext16s(TCGContext *s, TCGReg ret, TCGReg arg)
{
    if (use_mips32r2_instructions) {
        tcg_out_opc_reg(s, OPC_SEH, ret, 0, arg);
    } else {
        tcg_out_opc_sa(s, OPC_SLL, ret, arg, 16);
        tcg_out_opc_sa(s, OPC_SRA, ret, ret, 16);
    }
}

static inline void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    if (use_mips32r2_instructions) {
        tcg_out_opc_bf(s, OPC_DEXT, ret, arg, 31, 0);
    } else {
        tcg_out_dsll(s, ret, arg, 32);
        tcg_out_dsrl(s, ret, ret, 32);
    }
}

static void tcg_out_ldst(TCGContext *s, MIPSInsn opc, TCGReg data,
                         TCGReg addr, intptr_t ofs)
{
    int16_t lo = ofs;
    if (ofs != lo) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, ofs - lo);
        if (addr != TCG_REG_ZERO) {
            tcg_out_opc_reg(s, ALIAS_PADD, TCG_TMP0, TCG_TMP0, addr);
        }
        addr = TCG_TMP0;
    }
    tcg_out_opc_imm(s, opc, data, addr, lo);
}

static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
                              TCGReg arg1, intptr_t arg2)
{
    MIPSInsn opc = OPC_LD;
    if (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32) {
        opc = OPC_LW;
    }
    tcg_out_ldst(s, opc, arg, arg1, arg2);
}

static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                              TCGReg arg1, intptr_t arg2)
{
    MIPSInsn opc = OPC_SD;
    if (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32) {
        opc = OPC_SW;
    }
    tcg_out_ldst(s, opc, arg, arg1, arg2);
}

static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                               TCGReg base, intptr_t ofs)
{
    if (val == 0) {
        tcg_out_st(s, type, TCG_REG_ZERO, base, ofs);
        return true;
    }
    return false;
}

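/* Sketch of the double-word arithmetic below: add2/sub2 produce a
   64-bit result in a lo:hi pair on a 32-bit host.  The carry out of the
   low part is recovered with an unsigned compare -- after rl = al + bl,
   SLTU tmp0, rl, bl yields 1 exactly when the addition wrapped -- and
   is then folded into the high part. */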
static void tcg_out_addsub2(TCGContext *s, TCGReg rl, TCGReg rh, TCGReg al,
                            TCGReg ah, TCGArg bl, TCGArg bh, bool cbl,
                            bool cbh, bool is_sub)
{
    TCGReg th = TCG_TMP1;

    /* If we have a negative constant such that negating it would
       make the high part zero, we can (usually) eliminate one insn.  */
    if (cbl && cbh && bh == -1 && bl != 0) {
        bl = -bl;
        bh = 0;
        is_sub = !is_sub;
    }

    /* By operating on the high part first, we get to use the final
       carry operation to move back from the temporary.  */
    if (!cbh) {
        tcg_out_opc_reg(s, (is_sub ? OPC_SUBU : OPC_ADDU), th, ah, bh);
    } else if (bh != 0 || ah == rl) {
        tcg_out_opc_imm(s, OPC_ADDIU, th, ah, (is_sub ? -bh : bh));
    } else {
        th = ah;
    }

    /* Note that tcg optimization should eliminate the bl == 0 case.  */
    if (is_sub) {
        if (cbl) {
            tcg_out_opc_imm(s, OPC_SLTIU, TCG_TMP0, al, bl);
            tcg_out_opc_imm(s, OPC_ADDIU, rl, al, -bl);
        } else {
            tcg_out_opc_reg(s, OPC_SLTU, TCG_TMP0, al, bl);
            tcg_out_opc_reg(s, OPC_SUBU, rl, al, bl);
        }
        tcg_out_opc_reg(s, OPC_SUBU, rh, th, TCG_TMP0);
    } else {
        if (cbl) {
            tcg_out_opc_imm(s, OPC_ADDIU, rl, al, bl);
            tcg_out_opc_imm(s, OPC_SLTIU, TCG_TMP0, rl, bl);
        } else if (rl == al && rl == bl) {
            tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, al, 31);
            tcg_out_opc_reg(s, OPC_ADDU, rl, al, bl);
        } else {
            tcg_out_opc_reg(s, OPC_ADDU, rl, al, bl);
            tcg_out_opc_reg(s, OPC_SLTU, TCG_TMP0, rl, (rl == bl ? al : bl));
        }
        tcg_out_opc_reg(s, OPC_ADDU, rh, th, TCG_TMP0);
    }
}

/* Bit 0 set if inversion required; bit 1 set if swapping required.  */
#define MIPS_CMP_INV  1
#define MIPS_CMP_SWAP 2

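/* Only the less-than comparisons (SLT/SLTU) exist in hardware; the
   other orderings are derived from them.  For example GT swaps the
   operands (a > b iff b < a), GE inverts the result (a >= b iff
   !(a < b)), and LE needs both a swap and an inversion. */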
static const uint8_t mips_cmp_map[16] = {
    [TCG_COND_LT]  = 0,
    [TCG_COND_LTU] = 0,
    [TCG_COND_GE]  = MIPS_CMP_INV,
    [TCG_COND_GEU] = MIPS_CMP_INV,
    [TCG_COND_LE]  = MIPS_CMP_INV | MIPS_CMP_SWAP,
    [TCG_COND_LEU] = MIPS_CMP_INV | MIPS_CMP_SWAP,
    [TCG_COND_GT]  = MIPS_CMP_SWAP,
    [TCG_COND_GTU] = MIPS_CMP_SWAP,
};

static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
                            TCGReg arg1, TCGReg arg2)
{
    MIPSInsn s_opc = OPC_SLTU;
    int cmp_map;

    switch (cond) {
    case TCG_COND_EQ:
        if (arg2 != 0) {
            tcg_out_opc_reg(s, OPC_XOR, ret, arg1, arg2);
            arg1 = ret;
        }
        tcg_out_opc_imm(s, OPC_SLTIU, ret, arg1, 1);
        break;

    case TCG_COND_NE:
        if (arg2 != 0) {
            tcg_out_opc_reg(s, OPC_XOR, ret, arg1, arg2);
            arg1 = ret;
        }
        tcg_out_opc_reg(s, OPC_SLTU, ret, TCG_REG_ZERO, arg1);
        break;

    case TCG_COND_LT:
    case TCG_COND_GE:
    case TCG_COND_LE:
    case TCG_COND_GT:
        s_opc = OPC_SLT;
        /* FALLTHRU */

    case TCG_COND_LTU:
    case TCG_COND_GEU:
    case TCG_COND_LEU:
    case TCG_COND_GTU:
        cmp_map = mips_cmp_map[cond];
        if (cmp_map & MIPS_CMP_SWAP) {
            TCGReg t = arg1;
            arg1 = arg2;
            arg2 = t;
        }
        tcg_out_opc_reg(s, s_opc, ret, arg1, arg2);
        if (cmp_map & MIPS_CMP_INV) {
            tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
        }
        break;

    default:
        tcg_abort();
        break;
    }
}

static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
                           TCGReg arg2, TCGLabel *l)
{
    static const MIPSInsn b_zero[16] = {
        [TCG_COND_LT] = OPC_BLTZ,
        [TCG_COND_GT] = OPC_BGTZ,
        [TCG_COND_LE] = OPC_BLEZ,
        [TCG_COND_GE] = OPC_BGEZ,
    };

    MIPSInsn s_opc = OPC_SLTU;
    MIPSInsn b_opc;
    int cmp_map;

    switch (cond) {
    case TCG_COND_EQ:
        b_opc = OPC_BEQ;
        break;
    case TCG_COND_NE:
        b_opc = OPC_BNE;
        break;

    case TCG_COND_LT:
    case TCG_COND_GT:
    case TCG_COND_LE:
    case TCG_COND_GE:
        if (arg2 == 0) {
            b_opc = b_zero[cond];
            arg2 = arg1;
            arg1 = 0;
            break;
        }
        s_opc = OPC_SLT;
        /* FALLTHRU */

    case TCG_COND_LTU:
    case TCG_COND_GTU:
    case TCG_COND_LEU:
    case TCG_COND_GEU:
        cmp_map = mips_cmp_map[cond];
        if (cmp_map & MIPS_CMP_SWAP) {
            TCGReg t = arg1;
            arg1 = arg2;
            arg2 = t;
        }
        tcg_out_opc_reg(s, s_opc, TCG_TMP0, arg1, arg2);
        b_opc = (cmp_map & MIPS_CMP_INV ? OPC_BEQ : OPC_BNE);
        arg1 = TCG_TMP0;
        arg2 = TCG_REG_ZERO;
        break;

    default:
        tcg_abort();
        break;
    }

    tcg_out_opc_br(s, b_opc, arg1, arg2);
    if (l->has_value) {
        reloc_pc16(s->code_ptr - 1, l->u.value_ptr);
    } else {
        tcg_out_reloc(s, s->code_ptr - 1, R_MIPS_PC16, l, 0);
    }
    tcg_out_nop(s);
}

static TCGReg tcg_out_reduce_eq2(TCGContext *s, TCGReg tmp0, TCGReg tmp1,
                                 TCGReg al, TCGReg ah,
                                 TCGReg bl, TCGReg bh)
{
    /* Merge highpart comparison into AH.  */
    if (bh != 0) {
        if (ah != 0) {
            tcg_out_opc_reg(s, OPC_XOR, tmp0, ah, bh);
            ah = tmp0;
        } else {
            ah = bh;
        }
    }

    /* Merge lowpart comparison into AL.  */
    if (bl != 0) {
        if (al != 0) {
            tcg_out_opc_reg(s, OPC_XOR, tmp1, al, bl);
            al = tmp1;
        } else {
            al = bl;
        }
    }

    /* Merge high and low part comparisons into AL.  */
    if (ah != 0) {
        if (al != 0) {
            tcg_out_opc_reg(s, OPC_OR, tmp0, ah, al);
            al = tmp0;
        } else {
            al = ah;
        }
    }
    return al;
}

static void tcg_out_setcond2(TCGContext *s, TCGCond cond, TCGReg ret,
                             TCGReg al, TCGReg ah, TCGReg bl, TCGReg bh)
{
    TCGReg tmp0 = TCG_TMP0;
    TCGReg tmp1 = ret;

    tcg_debug_assert(ret != TCG_TMP0);
    if (ret == ah || ret == bh) {
        tcg_debug_assert(ret != TCG_TMP1);
        tmp1 = TCG_TMP1;
    }

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        tmp1 = tcg_out_reduce_eq2(s, tmp0, tmp1, al, ah, bl, bh);
        tcg_out_setcond(s, cond, ret, tmp1, TCG_REG_ZERO);
        break;

    default:
        tcg_out_setcond(s, TCG_COND_EQ, tmp0, ah, bh);
        tcg_out_setcond(s, tcg_unsigned_cond(cond), tmp1, al, bl);
        tcg_out_opc_reg(s, OPC_AND, tmp1, tmp1, tmp0);
        tcg_out_setcond(s, tcg_high_cond(cond), tmp0, ah, bh);
        tcg_out_opc_reg(s, OPC_OR, ret, tmp1, tmp0);
        break;
    }
}

static void tcg_out_brcond2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah,
                            TCGReg bl, TCGReg bh, TCGLabel *l)
{
    TCGCond b_cond = TCG_COND_NE;
    TCGReg tmp = TCG_TMP1;

    /* With branches, we emit between 4 and 9 insns with 2 or 3 branches.
       With setcond, we emit between 3 and 10 insns and only 1 branch,
       which ought to get better branch prediction.  */
    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        b_cond = cond;
        tmp = tcg_out_reduce_eq2(s, TCG_TMP0, TCG_TMP1, al, ah, bl, bh);
        break;

    default:
        /* Minimize code size by preferring a compare not requiring INV.  */
        if (mips_cmp_map[cond] & MIPS_CMP_INV) {
            cond = tcg_invert_cond(cond);
            b_cond = TCG_COND_EQ;
        }
        tcg_out_setcond2(s, cond, tmp, al, ah, bl, bh);
        break;
    }

    tcg_out_brcond(s, b_cond, tmp, TCG_REG_ZERO, l);
}

static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret,
                            TCGReg c1, TCGReg c2, TCGReg v1, TCGReg v2)
{
    bool eqz = false;

    /* If one of the values is zero, put it last to match SEL*Z instructions */
    if (use_mips32r6_instructions && v1 == 0) {
        v1 = v2;
        v2 = 0;
        cond = tcg_invert_cond(cond);
    }

    switch (cond) {
    case TCG_COND_EQ:
        eqz = true;
        /* FALLTHRU */
    case TCG_COND_NE:
        if (c2 != 0) {
            tcg_out_opc_reg(s, OPC_XOR, TCG_TMP0, c1, c2);
            c1 = TCG_TMP0;
        }
        break;

    default:
        /* Minimize code size by preferring a compare not requiring INV.  */
        if (mips_cmp_map[cond] & MIPS_CMP_INV) {
            cond = tcg_invert_cond(cond);
            eqz = true;
        }
        tcg_out_setcond(s, cond, TCG_TMP0, c1, c2);
        c1 = TCG_TMP0;
        break;
    }

    if (use_mips32r6_instructions) {
        MIPSInsn m_opc_t = eqz ? OPC_SELEQZ : OPC_SELNEZ;
        MIPSInsn m_opc_f = eqz ? OPC_SELNEZ : OPC_SELEQZ;

        if (v2 != 0) {
            tcg_out_opc_reg(s, m_opc_f, TCG_TMP1, v2, c1);
        }
        tcg_out_opc_reg(s, m_opc_t, ret, v1, c1);
        if (v2 != 0) {
            tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_TMP1);
        }
    } else {
        MIPSInsn m_opc = eqz ? OPC_MOVZ : OPC_MOVN;

        tcg_out_opc_reg(s, m_opc, ret, v1, c1);

        /* This should be guaranteed via constraints */
        tcg_debug_assert(v2 == ret);
    }
}

static void tcg_out_call_int(TCGContext *s, tcg_insn_unit *arg, bool tail)
{
    /* Note that the ABI requires the called function's address to be
       loaded into T9, even if a direct branch is in range.  */
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T9, (uintptr_t)arg);

    /* But do try a direct branch, allowing the cpu better insn prefetch.  */
    if (tail) {
        if (!tcg_out_opc_jmp(s, OPC_J, arg)) {
            tcg_out_opc_reg(s, OPC_JR, 0, TCG_REG_T9, 0);
        }
    } else {
        if (!tcg_out_opc_jmp(s, OPC_JAL, arg)) {
            tcg_out_opc_reg(s, OPC_JALR, TCG_REG_RA, TCG_REG_T9, 0);
        }
    }
}

static void tcg_out_call(TCGContext *s, tcg_insn_unit *arg)
{
    tcg_out_call_int(s, arg, false);
    tcg_out_nop(s);
}

#if defined(CONFIG_SOFTMMU)
static void * const qemu_ld_helpers[16] = {
    [MO_UB]   = helper_ret_ldub_mmu,
    [MO_SB]   = helper_ret_ldsb_mmu,
    [MO_LEUW] = helper_le_lduw_mmu,
    [MO_LESW] = helper_le_ldsw_mmu,
    [MO_LEUL] = helper_le_ldul_mmu,
    [MO_LEQ]  = helper_le_ldq_mmu,
    [MO_BEUW] = helper_be_lduw_mmu,
    [MO_BESW] = helper_be_ldsw_mmu,
    [MO_BEUL] = helper_be_ldul_mmu,
    [MO_BEQ]  = helper_be_ldq_mmu,
#if TCG_TARGET_REG_BITS == 64
    [MO_LESL] = helper_le_ldsl_mmu,
    [MO_BESL] = helper_be_ldsl_mmu,
#endif
};

static void * const qemu_st_helpers[16] = {
    [MO_UB]   = helper_ret_stb_mmu,
    [MO_LEUW] = helper_le_stw_mmu,
    [MO_LEUL] = helper_le_stl_mmu,
    [MO_LEQ]  = helper_le_stq_mmu,
    [MO_BEUW] = helper_be_stw_mmu,
    [MO_BEUL] = helper_be_stl_mmu,
    [MO_BEQ]  = helper_be_stq_mmu,
};

/* Helper routines for marshalling helper function arguments into
 * the correct registers and stack.
 * I is where we want to put this argument, and is updated and returned
 * for the next call. ARG is the argument itself.
 *
 * We provide routines for arguments which are: immediate, 32 bit
 * value in register, 16 and 8 bit values in register (which must be zero
 * extended before use) and 64 bit value in a lo:hi register pair.
 */
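/* Illustration (assuming the standard O32 layout used here): for the
   qemu_ld slow path on a 32-bit softmmu host, marshalling starts at
   i = 1 because A0 is reserved for env, a 64-bit guest address is
   passed in the A2/A3 pair, and anything beyond the fourth slot spills
   to the stack at SP + 4*i. */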
static int tcg_out_call_iarg_reg(TCGContext *s, int i, TCGReg arg)
{
    if (i < ARRAY_SIZE(tcg_target_call_iarg_regs)) {
        tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[i], arg);
    } else {
        /* For N32 and N64, the initial offset is different.  But there
           we also have 8 argument registers so we don't run out here.  */
        tcg_debug_assert(TCG_TARGET_REG_BITS == 32);
        tcg_out_st(s, TCG_TYPE_REG, arg, TCG_REG_SP, 4 * i);
    }
    return i + 1;
}

static int tcg_out_call_iarg_reg8(TCGContext *s, int i, TCGReg arg)
{
    TCGReg tmp = TCG_TMP0;
    if (i < ARRAY_SIZE(tcg_target_call_iarg_regs)) {
        tmp = tcg_target_call_iarg_regs[i];
    }
    tcg_out_opc_imm(s, OPC_ANDI, tmp, arg, 0xff);
    return tcg_out_call_iarg_reg(s, i, tmp);
}

static int tcg_out_call_iarg_reg16(TCGContext *s, int i, TCGReg arg)
{
    TCGReg tmp = TCG_TMP0;
    if (i < ARRAY_SIZE(tcg_target_call_iarg_regs)) {
        tmp = tcg_target_call_iarg_regs[i];
    }
    tcg_out_opc_imm(s, OPC_ANDI, tmp, arg, 0xffff);
    return tcg_out_call_iarg_reg(s, i, tmp);
}

static int tcg_out_call_iarg_imm(TCGContext *s, int i, TCGArg arg)
{
    TCGReg tmp = TCG_TMP0;
    if (arg == 0) {
        tmp = TCG_REG_ZERO;
    } else {
        if (i < ARRAY_SIZE(tcg_target_call_iarg_regs)) {
            tmp = tcg_target_call_iarg_regs[i];
        }
        tcg_out_movi(s, TCG_TYPE_REG, tmp, arg);
    }
    return tcg_out_call_iarg_reg(s, i, tmp);
}

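/* The O32 calling convention passes 64-bit values in an even/odd
   register pair (a0/a1 or a2/a3), which is why the pair helper below
   first rounds the argument index up to an even value. */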
static int tcg_out_call_iarg_reg2(TCGContext *s, int i, TCGReg al, TCGReg ah)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 32);
    i = (i + 1) & ~1;
    i = tcg_out_call_iarg_reg(s, i, (MIPS_BE ? ah : al));
    i = tcg_out_call_iarg_reg(s, i, (MIPS_BE ? al : ah));
    return i;
}

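/* Outline of the softmmu fast path emitted below: shift the guest
   address down to the TLB index, mask it, add it to env to address the
   TLB entry, load the comparator, AND the address with the combined
   page/alignment mask, branch to the slow path on mismatch, and add
   the cached addend to form the host address.  The conditional
   branches are recorded in label_ptr[] so the slow path can be patched
   in later. */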
/* Perform the tlb comparison operation.  The complete host address is
   placed in BASE.  Clobbers TMP0, TMP1, TMP2, A0.  */
static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl,
                             TCGReg addrh, TCGMemOpIdx oi,
                             tcg_insn_unit *label_ptr[2], bool is_load)
{
    TCGMemOp opc = get_memop(oi);
    unsigned s_bits = opc & MO_SIZE;
    unsigned a_bits = get_alignment_bits(opc);
    target_ulong mask;
    int mem_index = get_mmuidx(oi);
    int cmp_off
        = (is_load
           ? offsetof(CPUArchState, tlb_table[mem_index][0].addr_read)
           : offsetof(CPUArchState, tlb_table[mem_index][0].addr_write));
    int add_off = offsetof(CPUArchState, tlb_table[mem_index][0].addend);

    tcg_out_opc_sa(s, ALIAS_TSRL, TCG_REG_A0, addrl,
                   TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
    tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_A0, TCG_REG_A0,
                    (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
    tcg_out_opc_reg(s, ALIAS_PADD, TCG_REG_A0, TCG_REG_A0, TCG_AREG0);

    /* Compensate for very large offsets.  */
    if (add_off >= 0x8000) {
        /* Most target envs are smaller than 32k; none are larger than 64k.
           Simplify the logic here merely to offset by 0x7ff0, giving us a
           range just shy of 64k.  Check this assumption.  */
        QEMU_BUILD_BUG_ON(offsetof(CPUArchState,
                                   tlb_table[NB_MMU_MODES - 1][1])
                          > 0x7ff0 + 0x7fff);
        tcg_out_opc_imm(s, ALIAS_PADDI, TCG_REG_A0, TCG_REG_A0, 0x7ff0);
        cmp_off -= 0x7ff0;
        add_off -= 0x7ff0;
    }

    /* We don't currently support unaligned accesses.
       We could do so with mips32r6.  */
    if (a_bits < s_bits) {
        a_bits = s_bits;
    }

    mask = (target_ulong)TARGET_PAGE_MASK | ((1 << a_bits) - 1);

    /* Load the (low half) tlb comparator.  Mask the page bits, keeping the
       alignment bits to compare against.  */
    if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
        tcg_out_ld(s, TCG_TYPE_I32, TCG_TMP0, TCG_REG_A0, cmp_off + LO_OFF);
        tcg_out_movi(s, TCG_TYPE_I32, TCG_TMP1, mask);
    } else {
        tcg_out_ldst(s,
                     (TARGET_LONG_BITS == 64 ? OPC_LD
                      : TCG_TARGET_REG_BITS == 64 ? OPC_LWU : OPC_LW),
                     TCG_TMP0, TCG_REG_A0, cmp_off);
        tcg_out_movi(s, TCG_TYPE_TL, TCG_TMP1, mask);
        /* No second compare is required here;
           load the tlb addend for the fast path.  */
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP2, TCG_REG_A0, add_off);
    }
    tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, addrl);

    /* Zero extend a 32-bit guest address for a 64-bit host. */
    if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
        tcg_out_ext32u(s, base, addrl);
        addrl = base;
    }
    label_ptr[0] = s->code_ptr;
    tcg_out_opc_br(s, OPC_BNE, TCG_TMP1, TCG_TMP0);

    /* Load and test the high half tlb comparator.  */
    if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
        /* delay slot */
        tcg_out_ld(s, TCG_TYPE_I32, TCG_TMP0, TCG_REG_A0, cmp_off + HI_OFF);

        /* Load the tlb addend for the fast path.  */
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP2, TCG_REG_A0, add_off);

        label_ptr[1] = s->code_ptr;
        tcg_out_opc_br(s, OPC_BNE, addrh, TCG_TMP0);
    }

    /* delay slot */
    tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_TMP2, addrl);
}

static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOpIdx oi,
                                TCGType ext,
                                TCGReg datalo, TCGReg datahi,
                                TCGReg addrlo, TCGReg addrhi,
                                void *raddr, tcg_insn_unit *label_ptr[2])
{
    TCGLabelQemuLdst *label = new_ldst_label(s);

    label->is_ld = is_ld;
    label->oi = oi;
    label->type = ext;
    label->datalo_reg = datalo;
    label->datahi_reg = datahi;
    label->addrlo_reg = addrlo;
    label->addrhi_reg = addrhi;
    label->raddr = raddr;
    label->label_ptr[0] = label_ptr[0];
    if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
        label->label_ptr[1] = label_ptr[1];
    }
}

static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    TCGMemOpIdx oi = l->oi;
    TCGMemOp opc = get_memop(oi);
    TCGReg v0;
    int i;

    /* resolve label address */
    reloc_pc16(l->label_ptr[0], s->code_ptr);
    if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
        reloc_pc16(l->label_ptr[1], s->code_ptr);
    }

    i = 1;
    if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
        i = tcg_out_call_iarg_reg2(s, i, l->addrlo_reg, l->addrhi_reg);
    } else {
        i = tcg_out_call_iarg_reg(s, i, l->addrlo_reg);
    }
    i = tcg_out_call_iarg_imm(s, i, oi);
    i = tcg_out_call_iarg_imm(s, i, (intptr_t)l->raddr);
    tcg_out_call_int(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SSIZE)], false);
    /* delay slot */
    tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);

    v0 = l->datalo_reg;
    if (TCG_TARGET_REG_BITS == 32 && (opc & MO_SIZE) == MO_64) {
        /* We eliminated V0 from the possible output registers, so it
           cannot be clobbered here.  So we must move V1 first.  */
        if (MIPS_BE) {
            tcg_out_mov(s, TCG_TYPE_I32, v0, TCG_REG_V1);
            v0 = l->datahi_reg;
        } else {
            tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_V1);
        }
    }

    reloc_pc16(s->code_ptr, l->raddr);
    tcg_out_opc_br(s, OPC_BEQ, TCG_REG_ZERO, TCG_REG_ZERO);
    /* delay slot */
    if (TCG_TARGET_REG_BITS == 64 && l->type == TCG_TYPE_I32) {
        /* we always sign-extend 32-bit loads */
        tcg_out_opc_sa(s, OPC_SLL, v0, TCG_REG_V0, 0);
    } else {
        tcg_out_opc_reg(s, OPC_OR, v0, TCG_REG_V0, TCG_REG_ZERO);
    }
}

static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    TCGMemOpIdx oi = l->oi;
    TCGMemOp opc = get_memop(oi);
    TCGMemOp s_bits = opc & MO_SIZE;
    int i;

    /* resolve label address */
    reloc_pc16(l->label_ptr[0], s->code_ptr);
    if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
        reloc_pc16(l->label_ptr[1], s->code_ptr);
    }

    i = 1;
    if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
        i = tcg_out_call_iarg_reg2(s, i, l->addrlo_reg, l->addrhi_reg);
    } else {
        i = tcg_out_call_iarg_reg(s, i, l->addrlo_reg);
    }
    switch (s_bits) {
    case MO_8:
        i = tcg_out_call_iarg_reg8(s, i, l->datalo_reg);
        break;
    case MO_16:
        i = tcg_out_call_iarg_reg16(s, i, l->datalo_reg);
        break;
    case MO_32:
        i = tcg_out_call_iarg_reg(s, i, l->datalo_reg);
        break;
    case MO_64:
        if (TCG_TARGET_REG_BITS == 32) {
            i = tcg_out_call_iarg_reg2(s, i, l->datalo_reg, l->datahi_reg);
        } else {
            i = tcg_out_call_iarg_reg(s, i, l->datalo_reg);
        }
        break;
    default:
        tcg_abort();
    }
    i = tcg_out_call_iarg_imm(s, i, oi);

    /* Tail call to the store helper.  Thus force the return address
       computation to take place in the return address register.  */
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RA, (intptr_t)l->raddr);
    i = tcg_out_call_iarg_reg(s, i, TCG_REG_RA);
    tcg_out_call_int(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)], true);
    /* delay slot */
    tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
}
#endif

static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi,
                                   TCGReg base, TCGMemOp opc, bool is_64)
{
    switch (opc & (MO_SSIZE | MO_BSWAP)) {
    case MO_UB:
        tcg_out_opc_imm(s, OPC_LBU, lo, base, 0);
        break;
    case MO_SB:
        tcg_out_opc_imm(s, OPC_LB, lo, base, 0);
        break;
    case MO_UW | MO_BSWAP:
        tcg_out_opc_imm(s, OPC_LHU, TCG_TMP1, base, 0);
        tcg_out_bswap16(s, lo, TCG_TMP1);
        break;
    case MO_UW:
        tcg_out_opc_imm(s, OPC_LHU, lo, base, 0);
        break;
    case MO_SW | MO_BSWAP:
        tcg_out_opc_imm(s, OPC_LHU, TCG_TMP1, base, 0);
        tcg_out_bswap16s(s, lo, TCG_TMP1);
        break;
    case MO_SW:
        tcg_out_opc_imm(s, OPC_LH, lo, base, 0);
        break;
    case MO_UL | MO_BSWAP:
        if (TCG_TARGET_REG_BITS == 64 && is_64) {
            if (use_mips32r2_instructions) {
                tcg_out_opc_imm(s, OPC_LWU, lo, base, 0);
                tcg_out_bswap32u(s, lo, lo);
            } else {
                tcg_out_bswap_subr(s, bswap32u_addr);
                /* delay slot */
                tcg_out_opc_imm(s, OPC_LWU, TCG_TMP0, base, 0);
                tcg_out_mov(s, TCG_TYPE_I64, lo, TCG_TMP3);
            }
            break;
        }
        /* FALLTHRU */
    case MO_SL | MO_BSWAP:
        if (use_mips32r2_instructions) {
            tcg_out_opc_imm(s, OPC_LW, lo, base, 0);
            tcg_out_bswap32(s, lo, lo);
        } else {
            tcg_out_bswap_subr(s, bswap32_addr);
            /* delay slot */
            tcg_out_opc_imm(s, OPC_LW, TCG_TMP0, base, 0);
            tcg_out_mov(s, TCG_TYPE_I32, lo, TCG_TMP3);
        }
        break;
    case MO_UL:
        if (TCG_TARGET_REG_BITS == 64 && is_64) {
            tcg_out_opc_imm(s, OPC_LWU, lo, base, 0);
            break;
        }
        /* FALLTHRU */
    case MO_SL:
        tcg_out_opc_imm(s, OPC_LW, lo, base, 0);
        break;
    case MO_Q | MO_BSWAP:
        if (TCG_TARGET_REG_BITS == 64) {
            if (use_mips32r2_instructions) {
                tcg_out_opc_imm(s, OPC_LD, lo, base, 0);
                tcg_out_bswap64(s, lo, lo);
            } else {
                tcg_out_bswap_subr(s, bswap64_addr);
                /* delay slot */
                tcg_out_opc_imm(s, OPC_LD, TCG_TMP0, base, 0);
                tcg_out_mov(s, TCG_TYPE_I64, lo, TCG_TMP3);
            }
        } else if (use_mips32r2_instructions) {
            tcg_out_opc_imm(s, OPC_LW, TCG_TMP0, base, 0);
            tcg_out_opc_imm(s, OPC_LW, TCG_TMP1, base, 4);
            tcg_out_opc_reg(s, OPC_WSBH, TCG_TMP0, 0, TCG_TMP0);
            tcg_out_opc_reg(s, OPC_WSBH, TCG_TMP1, 0, TCG_TMP1);
            tcg_out_opc_sa(s, OPC_ROTR, MIPS_BE ? lo : hi, TCG_TMP0, 16);
            tcg_out_opc_sa(s, OPC_ROTR, MIPS_BE ? hi : lo, TCG_TMP1, 16);
        } else {
            tcg_out_bswap_subr(s, bswap32_addr);
            /* delay slot */
            tcg_out_opc_imm(s, OPC_LW, TCG_TMP0, base, 0);
            tcg_out_opc_imm(s, OPC_LW, TCG_TMP0, base, 4);
            tcg_out_bswap_subr(s, bswap32_addr);
            /* delay slot */
            tcg_out_mov(s, TCG_TYPE_I32, MIPS_BE ? lo : hi, TCG_TMP3);
            tcg_out_mov(s, TCG_TYPE_I32, MIPS_BE ? hi : lo, TCG_TMP3);
        }
        break;
    case MO_Q:
        /* Prefer to load from offset 0 first, but allow for overlap.  */
        if (TCG_TARGET_REG_BITS == 64) {
            tcg_out_opc_imm(s, OPC_LD, lo, base, 0);
        } else if (MIPS_BE ? hi != base : lo == base) {
            tcg_out_opc_imm(s, OPC_LW, hi, base, HI_OFF);
            tcg_out_opc_imm(s, OPC_LW, lo, base, LO_OFF);
        } else {
            tcg_out_opc_imm(s, OPC_LW, lo, base, LO_OFF);
            tcg_out_opc_imm(s, OPC_LW, hi, base, HI_OFF);
        }
        break;
    default:
        tcg_abort();
    }
}

static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
{
    TCGReg addr_regl, addr_regh __attribute__((unused));
    TCGReg data_regl, data_regh;
    TCGMemOpIdx oi;
    TCGMemOp opc;
#if defined(CONFIG_SOFTMMU)
    tcg_insn_unit *label_ptr[2];
#endif
    TCGReg base = TCG_REG_A0;

    data_regl = *args++;
    data_regh = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
    addr_regl = *args++;
    addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
    oi = *args++;
    opc = get_memop(oi);

#if defined(CONFIG_SOFTMMU)
    tcg_out_tlb_load(s, base, addr_regl, addr_regh, oi, label_ptr, 1);
    tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64);
    add_qemu_ldst_label(s, 1, oi,
                        (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
                        data_regl, data_regh, addr_regl, addr_regh,
                        s->code_ptr, label_ptr);
#else
    if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
        tcg_out_ext32u(s, base, addr_regl);
        addr_regl = base;
    }
    if (guest_base == 0 && data_regl != addr_regl) {
        base = addr_regl;
    } else if (guest_base == (int16_t)guest_base) {
        tcg_out_opc_imm(s, ALIAS_PADDI, base, addr_regl, guest_base);
    } else {
        tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_GUEST_BASE_REG, addr_regl);
    }
    tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64);
#endif
}

*s
, TCGReg lo
, TCGReg hi
,
1561 TCGReg base
, TCGMemOp opc
)
1563 /* Don't clutter the code below with checks to avoid bswapping ZERO. */
1564 if ((lo
| hi
) == 0) {
1568 switch (opc
& (MO_SIZE
| MO_BSWAP
)) {
1570 tcg_out_opc_imm(s
, OPC_SB
, lo
, base
, 0);
1573 case MO_16
| MO_BSWAP
:
1574 tcg_out_opc_imm(s
, OPC_ANDI
, TCG_TMP1
, lo
, 0xffff);
1575 tcg_out_bswap16(s
, TCG_TMP1
, TCG_TMP1
);
1579 tcg_out_opc_imm(s
, OPC_SH
, lo
, base
, 0);
1582 case MO_32
| MO_BSWAP
:
1583 tcg_out_bswap32(s
, TCG_TMP3
, lo
);
1587 tcg_out_opc_imm(s
, OPC_SW
, lo
, base
, 0);
1590 case MO_64
| MO_BSWAP
:
1591 if (TCG_TARGET_REG_BITS
== 64) {
1592 tcg_out_bswap64(s
, TCG_TMP3
, lo
);
1593 tcg_out_opc_imm(s
, OPC_SD
, TCG_TMP3
, base
, 0);
1594 } else if (use_mips32r2_instructions
) {
1595 tcg_out_opc_reg(s
, OPC_WSBH
, TCG_TMP0
, 0, MIPS_BE
? lo
: hi
);
1596 tcg_out_opc_reg(s
, OPC_WSBH
, TCG_TMP1
, 0, MIPS_BE
? hi
: lo
);
1597 tcg_out_opc_sa(s
, OPC_ROTR
, TCG_TMP0
, TCG_TMP0
, 16);
1598 tcg_out_opc_sa(s
, OPC_ROTR
, TCG_TMP1
, TCG_TMP1
, 16);
1599 tcg_out_opc_imm(s
, OPC_SW
, TCG_TMP0
, base
, 0);
1600 tcg_out_opc_imm(s
, OPC_SW
, TCG_TMP1
, base
, 4);
1602 tcg_out_bswap32(s
, TCG_TMP3
, MIPS_BE
? lo
: hi
);
1603 tcg_out_opc_imm(s
, OPC_SW
, TCG_TMP3
, base
, 0);
1604 tcg_out_bswap32(s
, TCG_TMP3
, MIPS_BE
? hi
: lo
);
1605 tcg_out_opc_imm(s
, OPC_SW
, TCG_TMP3
, base
, 4);
1609 if (TCG_TARGET_REG_BITS
== 64) {
1610 tcg_out_opc_imm(s
, OPC_SD
, lo
, base
, 0);
1612 tcg_out_opc_imm(s
, OPC_SW
, MIPS_BE
? hi
: lo
, base
, 0);
1613 tcg_out_opc_imm(s
, OPC_SW
, MIPS_BE
? lo
: hi
, base
, 4);
static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
{
    TCGReg addr_regl, addr_regh __attribute__((unused));
    TCGReg data_regl, data_regh;
    TCGMemOpIdx oi;
    TCGMemOp opc;
#if defined(CONFIG_SOFTMMU)
    tcg_insn_unit *label_ptr[2];
#endif
    TCGReg base = TCG_REG_A0;

    data_regl = *args++;
    data_regh = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
    addr_regl = *args++;
    addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
    oi = *args++;
    opc = get_memop(oi);

#if defined(CONFIG_SOFTMMU)
    tcg_out_tlb_load(s, base, addr_regl, addr_regh, oi, label_ptr, 0);
    tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
    add_qemu_ldst_label(s, 0, oi,
                        (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
                        data_regl, data_regh, addr_regl, addr_regh,
                        s->code_ptr, label_ptr);
#else
    if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
        tcg_out_ext32u(s, base, addr_regl);
        addr_regl = base;
    }
    if (guest_base == 0) {
        base = addr_regl;
    } else if (guest_base == (int16_t)guest_base) {
        tcg_out_opc_imm(s, ALIAS_PADDI, base, addr_regl, guest_base);
    } else {
        tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_GUEST_BASE_REG, addr_regl);
    }
    tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
#endif
}

static void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    static const MIPSInsn sync[] = {
        /* Note that SYNC_MB is slightly weaker than SYNC 0,
           as the former is an ordering barrier and the latter
           is a completion barrier.  */
        [0 ... TCG_MO_ALL]            = OPC_SYNC_MB,
        [TCG_MO_LD_LD]                = OPC_SYNC_RMB,
        [TCG_MO_ST_ST]                = OPC_SYNC_WMB,
        [TCG_MO_LD_ST]                = OPC_SYNC_RELEASE,
        [TCG_MO_LD_ST | TCG_MO_ST_ST] = OPC_SYNC_RELEASE,
        [TCG_MO_LD_ST | TCG_MO_LD_LD] = OPC_SYNC_ACQUIRE,
    };
    tcg_out32(s, sync[a0 & TCG_MO_ALL]);
}

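/* TCG defines clz as a0 = (a1 ? clz(a1) : a2), whereas the MIPS CLZ
   insn returns the operand width for a zero input.  So the helper
   below only needs extra code when a2 is not already the width;
   e.g. clz_i32(0x0000ff00) = 16 on either path. */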
static void tcg_out_clz(TCGContext *s, MIPSInsn opcv2, MIPSInsn opcv6,
                        int width, TCGReg a0, TCGReg a1, TCGArg a2)
{
    if (use_mips32r6_instructions) {
        if (a2 == width) {
            tcg_out_opc_reg(s, opcv6, a0, a1, 0);
        } else {
            tcg_out_opc_reg(s, opcv6, TCG_TMP0, a1, 0);
            tcg_out_movcond(s, TCG_COND_EQ, a0, a1, 0, a2, TCG_TMP0);
        }
    } else {
        if (a2 == width) {
            tcg_out_opc_reg(s, opcv2, a0, a1, a1);
        } else if (a0 == a2) {
            tcg_out_opc_reg(s, opcv2, TCG_TMP0, a1, a1);
            tcg_out_opc_reg(s, OPC_MOVN, a0, TCG_TMP0, a1);
        } else if (a0 != a1) {
            tcg_out_opc_reg(s, opcv2, a0, a1, a1);
            tcg_out_opc_reg(s, OPC_MOVZ, a0, a2, a1);
        } else {
            tcg_out_opc_reg(s, opcv2, TCG_TMP0, a1, a1);
            tcg_out_opc_reg(s, OPC_MOVZ, TCG_TMP0, a2, a1);
            tcg_out_mov(s, TCG_TYPE_REG, a0, TCG_TMP0);
        }
    }
}

static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
                              const TCGArg *args, const int *const_args)
{
    MIPSInsn i1, i2;
    TCGArg a0, a1, a2;
    int c2;

    a0 = args[0];
    a1 = args[1];
    a2 = args[2];
    c2 = const_args[2];

    switch (opc) {
    case INDEX_op_exit_tb:
        {
            TCGReg b0 = TCG_REG_ZERO;

            a0 = (intptr_t)a0;
            if (a0 & ~0xffff) {
                tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_V0, a0 & ~0xffff);
                b0 = TCG_REG_V0;
            }
            if (!tcg_out_opc_jmp(s, OPC_J, tb_ret_addr)) {
                tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0,
                             (uintptr_t)tb_ret_addr);
                tcg_out_opc_reg(s, OPC_JR, 0, TCG_TMP0, 0);
            }
            tcg_out_opc_imm(s, OPC_ORI, TCG_REG_V0, b0, a0 & 0xffff);
        }
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_insn_offset) {
            /* direct jump method */
            s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
            /* Avoid clobbering the address during retranslation.  */
            tcg_out32(s, OPC_J | (*(uint32_t *)s->code_ptr & 0x3ffffff));
        } else {
            /* indirect jump method */
            tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_REG_ZERO,
                       (uintptr_t)(s->tb_jmp_target_addr + a0));
            tcg_out_opc_reg(s, OPC_JR, 0, TCG_TMP0, 0);
        }
        tcg_out_nop(s);
        s->tb_jmp_reset_offset[a0] = tcg_current_code_size(s);
        break;
    case INDEX_op_goto_ptr:
        /* jmp to the given host address (could be epilogue) */
        tcg_out_opc_reg(s, OPC_JR, 0, a0, 0);
        tcg_out_nop(s);
        break;
    case INDEX_op_br:
        tcg_out_brcond(s, TCG_COND_EQ, TCG_REG_ZERO, TCG_REG_ZERO,
                       arg_label(a0));
        break;

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
        i1 = OPC_LBU;
        goto do_ldst;
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
        i1 = OPC_LB;
        goto do_ldst;
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
        i1 = OPC_LHU;
        goto do_ldst;
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
        i1 = OPC_LH;
        goto do_ldst;
    case INDEX_op_ld_i32:
    case INDEX_op_ld32s_i64:
        i1 = OPC_LW;
        goto do_ldst;
    case INDEX_op_ld32u_i64:
        i1 = OPC_LWU;
        goto do_ldst;
    case INDEX_op_ld_i64:
        i1 = OPC_LD;
        goto do_ldst;
    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
        i1 = OPC_SB;
        goto do_ldst;
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
        i1 = OPC_SH;
        goto do_ldst;
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
        i1 = OPC_SW;
        goto do_ldst;
    case INDEX_op_st_i64:
        i1 = OPC_SD;
    do_ldst:
        tcg_out_ldst(s, i1, a0, a1, a2);
        break;

    case INDEX_op_add_i32:
        i1 = OPC_ADDU, i2 = OPC_ADDIU;
        goto do_binary;
    case INDEX_op_add_i64:
        i1 = OPC_DADDU, i2 = OPC_DADDIU;
        goto do_binary;
    case INDEX_op_or_i32:
    case INDEX_op_or_i64:
        i1 = OPC_OR, i2 = OPC_ORI;
        goto do_binary;
    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
        i1 = OPC_XOR, i2 = OPC_XORI;
    do_binary:
        if (c2) {
            tcg_out_opc_imm(s, i2, a0, a1, a2);
            break;
        }
    do_binaryv:
        tcg_out_opc_reg(s, i1, a0, a1, a2);
        break;

    case INDEX_op_sub_i32:
        i1 = OPC_SUBU, i2 = OPC_ADDIU;
        goto do_subtract;
    case INDEX_op_sub_i64:
        i1 = OPC_DSUBU, i2 = OPC_DADDIU;
    do_subtract:
        if (c2) {
            tcg_out_opc_imm(s, i2, a0, a1, -a2);
            break;
        }
        goto do_binaryv;

    case INDEX_op_and_i32:
        if (c2 && a2 != (uint16_t)a2) {
            int msb = ctz32(~a2) - 1;
            tcg_debug_assert(use_mips32r2_instructions);
            tcg_debug_assert(is_p2m1(a2));
            tcg_out_opc_bf(s, OPC_EXT, a0, a1, msb, 0);
            break;
        }
        i1 = OPC_AND, i2 = OPC_ANDI;
        goto do_binary;
    case INDEX_op_and_i64:
        if (c2 && a2 != (uint16_t)a2) {
            int msb = ctz64(~a2) - 1;
            tcg_debug_assert(use_mips32r2_instructions);
            tcg_debug_assert(is_p2m1(a2));
            tcg_out_opc_bf64(s, OPC_DEXT, OPC_DEXTM, OPC_DEXTU, a0, a1, msb, 0);
            break;
        }
        i1 = OPC_AND, i2 = OPC_ANDI;
        goto do_binary;
    case INDEX_op_nor_i32:
    case INDEX_op_nor_i64:
        i1 = OPC_NOR;
        goto do_binaryv;

    case INDEX_op_mul_i32:
        if (use_mips32_instructions) {
            tcg_out_opc_reg(s, OPC_MUL, a0, a1, a2);
            break;
        }
        i1 = OPC_MULT, i2 = OPC_MFLO;
        goto do_hilo1;
    case INDEX_op_mulsh_i32:
        if (use_mips32r6_instructions) {
            tcg_out_opc_reg(s, OPC_MUH, a0, a1, a2);
            break;
        }
        i1 = OPC_MULT, i2 = OPC_MFHI;
        goto do_hilo1;
    case INDEX_op_muluh_i32:
        if (use_mips32r6_instructions) {
            tcg_out_opc_reg(s, OPC_MUHU, a0, a1, a2);
            break;
        }
        i1 = OPC_MULTU, i2 = OPC_MFHI;
        goto do_hilo1;
    case INDEX_op_div_i32:
        if (use_mips32r6_instructions) {
            tcg_out_opc_reg(s, OPC_DIV_R6, a0, a1, a2);
            break;
        }
        i1 = OPC_DIV, i2 = OPC_MFLO;
        goto do_hilo1;
    case INDEX_op_divu_i32:
        if (use_mips32r6_instructions) {
            tcg_out_opc_reg(s, OPC_DIVU_R6, a0, a1, a2);
            break;
        }
        i1 = OPC_DIVU, i2 = OPC_MFLO;
        goto do_hilo1;
    case INDEX_op_rem_i32:
        if (use_mips32r6_instructions) {
            tcg_out_opc_reg(s, OPC_MOD, a0, a1, a2);
            break;
        }
        i1 = OPC_DIV, i2 = OPC_MFHI;
        goto do_hilo1;
    case INDEX_op_remu_i32:
        if (use_mips32r6_instructions) {
            tcg_out_opc_reg(s, OPC_MODU, a0, a1, a2);
            break;
        }
        i1 = OPC_DIVU, i2 = OPC_MFHI;
        goto do_hilo1;
    case INDEX_op_mul_i64:
        if (use_mips32r6_instructions) {
            tcg_out_opc_reg(s, OPC_DMUL, a0, a1, a2);
            break;
        }
        i1 = OPC_DMULT, i2 = OPC_MFLO;
        goto do_hilo1;
    case INDEX_op_mulsh_i64:
        if (use_mips32r6_instructions) {
            tcg_out_opc_reg(s, OPC_DMUH, a0, a1, a2);
            break;
        }
        i1 = OPC_DMULT, i2 = OPC_MFHI;
        goto do_hilo1;
    case INDEX_op_muluh_i64:
        if (use_mips32r6_instructions) {
            tcg_out_opc_reg(s, OPC_DMUHU, a0, a1, a2);
            break;
        }
        i1 = OPC_DMULTU, i2 = OPC_MFHI;
        goto do_hilo1;
    case INDEX_op_div_i64:
        if (use_mips32r6_instructions) {
            tcg_out_opc_reg(s, OPC_DDIV_R6, a0, a1, a2);
            break;
        }
        i1 = OPC_DDIV, i2 = OPC_MFLO;
        goto do_hilo1;
    case INDEX_op_divu_i64:
        if (use_mips32r6_instructions) {
            tcg_out_opc_reg(s, OPC_DDIVU_R6, a0, a1, a2);
            break;
        }
        i1 = OPC_DDIVU, i2 = OPC_MFLO;
        goto do_hilo1;
    case INDEX_op_rem_i64:
        if (use_mips32r6_instructions) {
            tcg_out_opc_reg(s, OPC_DMOD, a0, a1, a2);
            break;
        }
        i1 = OPC_DDIV, i2 = OPC_MFHI;
        goto do_hilo1;
    case INDEX_op_remu_i64:
        if (use_mips32r6_instructions) {
            tcg_out_opc_reg(s, OPC_DMODU, a0, a1, a2);
            break;
        }
        i1 = OPC_DDIVU, i2 = OPC_MFHI;
    do_hilo1:
        tcg_out_opc_reg(s, i1, 0, a1, a2);
        tcg_out_opc_reg(s, i2, a0, 0, 0);
        break;

    case INDEX_op_muls2_i32:
        i1 = OPC_MULT;
        goto do_hilo2;
    case INDEX_op_mulu2_i32:
        i1 = OPC_MULTU;
        goto do_hilo2;
    case INDEX_op_muls2_i64:
        i1 = OPC_DMULT;
        goto do_hilo2;
    case INDEX_op_mulu2_i64:
        i1 = OPC_DMULTU;
    do_hilo2:
        tcg_out_opc_reg(s, i1, 0, a2, args[3]);
        tcg_out_opc_reg(s, OPC_MFLO, a0, 0, 0);
        tcg_out_opc_reg(s, OPC_MFHI, a1, 0, 0);
        break;

    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
        i1 = OPC_NOR;
        goto do_unary;
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
        i1 = OPC_WSBH;
        goto do_unary;
    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
        i1 = OPC_SEB;
        goto do_unary;
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
        i1 = OPC_SEH;
    do_unary:
        tcg_out_opc_reg(s, i1, a0, TCG_REG_ZERO, a1);
        break;

    case INDEX_op_bswap32_i32:
        tcg_out_bswap32(s, a0, a1);
        break;
    case INDEX_op_bswap32_i64:
        tcg_out_bswap32u(s, a0, a1);
        break;
    case INDEX_op_bswap64_i64:
        tcg_out_bswap64(s, a0, a1);
        break;
    case INDEX_op_extrh_i64_i32:
        tcg_out_dsra(s, a0, a1, 32);
        break;
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extrl_i64_i32:
        tcg_out_opc_sa(s, OPC_SLL, a0, a1, 0);
        break;
    case INDEX_op_ext32u_i64:
    case INDEX_op_extu_i32_i64:
        tcg_out_ext32u(s, a0, a1);
        break;

    case INDEX_op_sar_i32:
        i1 = OPC_SRAV, i2 = OPC_SRA;
        goto do_shift;
    case INDEX_op_shl_i32:
        i1 = OPC_SLLV, i2 = OPC_SLL;
        goto do_shift;
    case INDEX_op_shr_i32:
        i1 = OPC_SRLV, i2 = OPC_SRL;
        goto do_shift;
    case INDEX_op_rotr_i32:
        i1 = OPC_ROTRV, i2 = OPC_ROTR;
    do_shift:
        if (c2) {
            tcg_out_opc_sa(s, i2, a0, a1, a2);
            break;
        }
    do_shiftv:
        tcg_out_opc_reg(s, i1, a0, a2, a1);
        break;
    case INDEX_op_rotl_i32:
        if (c2) {
            tcg_out_opc_sa(s, OPC_ROTR, a0, a1, 32 - a2);
        } else {
            tcg_out_opc_reg(s, OPC_SUBU, TCG_TMP0, TCG_REG_ZERO, a2);
            tcg_out_opc_reg(s, OPC_ROTRV, a0, TCG_TMP0, a1);
        }
        break;
    case INDEX_op_sar_i64:
        if (c2) {
            tcg_out_dsra(s, a0, a1, a2);
            break;
        }
        i1 = OPC_DSRAV;
        goto do_shiftv;
    case INDEX_op_shl_i64:
        if (c2) {
            tcg_out_dsll(s, a0, a1, a2);
            break;
        }
        i1 = OPC_DSLLV;
        goto do_shiftv;
    case INDEX_op_shr_i64:
        if (c2) {
            tcg_out_dsrl(s, a0, a1, a2);
            break;
        }
        i1 = OPC_DSRLV;
        goto do_shiftv;
    case INDEX_op_rotr_i64:
        if (c2) {
            tcg_out_opc_sa64(s, OPC_DROTR, OPC_DROTR32, a0, a1, a2);
            break;
        }
        i1 = OPC_DROTRV;
        goto do_shiftv;
    case INDEX_op_rotl_i64:
        if (c2) {
            tcg_out_opc_sa64(s, OPC_DROTR, OPC_DROTR32, a0, a1, 64 - a2);
        } else {
            tcg_out_opc_reg(s, OPC_DSUBU, TCG_TMP0, TCG_REG_ZERO, a2);
            tcg_out_opc_reg(s, OPC_DROTRV, a0, TCG_TMP0, a1);
        }
        break;

    case INDEX_op_clz_i32:
        tcg_out_clz(s, OPC_CLZ, OPC_CLZ_R6, 32, a0, a1, a2);
        break;
    case INDEX_op_clz_i64:
        tcg_out_clz(s, OPC_DCLZ, OPC_DCLZ_R6, 64, a0, a1, a2);
        break;

    case INDEX_op_deposit_i32:
        tcg_out_opc_bf(s, OPC_INS, a0, a2, args[3] + args[4] - 1, args[3]);
        break;
    case INDEX_op_deposit_i64:
        tcg_out_opc_bf64(s, OPC_DINS, OPC_DINSM, OPC_DINSU, a0, a2,
                         args[3] + args[4] - 1, args[3]);
        break;
    case INDEX_op_extract_i32:
        tcg_out_opc_bf(s, OPC_EXT, a0, a1, args[3] - 1, a2);
        break;
    case INDEX_op_extract_i64:
        tcg_out_opc_bf64(s, OPC_DEXT, OPC_DEXTM, OPC_DEXTU, a0, a1,
                         args[3] - 1, a2);
        break;

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        tcg_out_brcond(s, a2, a0, a1, arg_label(args[3]));
        break;
    case INDEX_op_brcond2_i32:
        tcg_out_brcond2(s, args[4], a0, a1, a2, args[3], arg_label(args[5]));
        break;

    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
        tcg_out_movcond(s, args[5], a0, a1, a2, args[3], args[4]);
        break;

    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
        tcg_out_setcond(s, args[3], a0, a1, a2);
        break;
    case INDEX_op_setcond2_i32:
        tcg_out_setcond2(s, args[5], a0, a1, a2, args[3], args[4]);
        break;

    case INDEX_op_qemu_ld_i32:
        tcg_out_qemu_ld(s, args, false);
        break;
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, args, true);
        break;
    case INDEX_op_qemu_st_i32:
        tcg_out_qemu_st(s, args, false);
        break;
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, args, true);
        break;

    case INDEX_op_add2_i32:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                        const_args[4], const_args[5], false);
        break;
    case INDEX_op_sub2_i32:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                        const_args[4], const_args[5], true);
        break;

    case INDEX_op_mb:
        tcg_out_mb(s, a0);
        break;
    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
    case INDEX_op_mov_i64:
    case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi.  */
    case INDEX_op_movi_i64:
    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
    default:
        tcg_abort();
    }
}

2166 static const TCGTargetOpDef mips_op_defs
[] = {
2167 { INDEX_op_exit_tb
, { } },
2168 { INDEX_op_goto_tb
, { } },
2169 { INDEX_op_br
, { } },
2170 { INDEX_op_goto_ptr
, { "r" } },
2172 { INDEX_op_ld8u_i32
, { "r", "r" } },
2173 { INDEX_op_ld8s_i32
, { "r", "r" } },
2174 { INDEX_op_ld16u_i32
, { "r", "r" } },
2175 { INDEX_op_ld16s_i32
, { "r", "r" } },
2176 { INDEX_op_ld_i32
, { "r", "r" } },
2177 { INDEX_op_st8_i32
, { "rZ", "r" } },
2178 { INDEX_op_st16_i32
, { "rZ", "r" } },
2179 { INDEX_op_st_i32
, { "rZ", "r" } },
2181 { INDEX_op_add_i32
, { "r", "rZ", "rJ" } },
2182 { INDEX_op_mul_i32
, { "r", "rZ", "rZ" } },
2183 #if !use_mips32r6_instructions
2184 { INDEX_op_muls2_i32
, { "r", "r", "rZ", "rZ" } },
2185 { INDEX_op_mulu2_i32
, { "r", "r", "rZ", "rZ" } },
2187 { INDEX_op_mulsh_i32
, { "r", "rZ", "rZ" } },
2188 { INDEX_op_muluh_i32
, { "r", "rZ", "rZ" } },
2189 { INDEX_op_div_i32
, { "r", "rZ", "rZ" } },
2190 { INDEX_op_divu_i32
, { "r", "rZ", "rZ" } },
2191 { INDEX_op_rem_i32
, { "r", "rZ", "rZ" } },
2192 { INDEX_op_remu_i32
, { "r", "rZ", "rZ" } },
2193 { INDEX_op_sub_i32
, { "r", "rZ", "rN" } },
2195 { INDEX_op_and_i32
, { "r", "rZ", "rIK" } },
2196 { INDEX_op_nor_i32
, { "r", "rZ", "rZ" } },
2197 { INDEX_op_not_i32
, { "r", "rZ" } },
2198 { INDEX_op_or_i32
, { "r", "rZ", "rIZ" } },
2199 { INDEX_op_xor_i32
, { "r", "rZ", "rIZ" } },
2201 { INDEX_op_shl_i32
, { "r", "rZ", "ri" } },
2202 { INDEX_op_shr_i32
, { "r", "rZ", "ri" } },
2203 { INDEX_op_sar_i32
, { "r", "rZ", "ri" } },
2204 { INDEX_op_rotr_i32
, { "r", "rZ", "ri" } },
2205 { INDEX_op_rotl_i32
, { "r", "rZ", "ri" } },
2206 { INDEX_op_clz_i32
, { "r", "r", "rWZ" } },
2208 { INDEX_op_bswap16_i32
, { "r", "r" } },
2209 { INDEX_op_bswap32_i32
, { "r", "r" } },
2211 { INDEX_op_ext8s_i32
, { "r", "rZ" } },
2212 { INDEX_op_ext16s_i32
, { "r", "rZ" } },
2214 { INDEX_op_deposit_i32
, { "r", "0", "rZ" } },
2215 { INDEX_op_extract_i32
, { "r", "r" } },
2217 { INDEX_op_brcond_i32
, { "rZ", "rZ" } },
2218 #if use_mips32r6_instructions
2219 { INDEX_op_movcond_i32
, { "r", "rZ", "rZ", "rZ", "rZ" } },
2221 { INDEX_op_movcond_i32
, { "r", "rZ", "rZ", "rZ", "0" } },
2223 { INDEX_op_setcond_i32
, { "r", "rZ", "rZ" } },
2225 #if TCG_TARGET_REG_BITS == 32
2226 { INDEX_op_add2_i32
, { "r", "r", "rZ", "rZ", "rN", "rN" } },
2227 { INDEX_op_sub2_i32
, { "r", "r", "rZ", "rZ", "rN", "rN" } },
2228 { INDEX_op_setcond2_i32
, { "r", "rZ", "rZ", "rZ", "rZ" } },
2229 { INDEX_op_brcond2_i32
, { "rZ", "rZ", "rZ", "rZ" } },
2232 #if TCG_TARGET_REG_BITS == 64
2233 { INDEX_op_ld8u_i64
, { "r", "r" } },
2234 { INDEX_op_ld8s_i64
, { "r", "r" } },
2235 { INDEX_op_ld16u_i64
, { "r", "r" } },
2236 { INDEX_op_ld16s_i64
, { "r", "r" } },
2237 { INDEX_op_ld32s_i64
, { "r", "r" } },
2238 { INDEX_op_ld32u_i64
, { "r", "r" } },
2239 { INDEX_op_ld_i64
, { "r", "r" } },
2240 { INDEX_op_st8_i64
, { "rZ", "r" } },
2241 { INDEX_op_st16_i64
, { "rZ", "r" } },
2242 { INDEX_op_st32_i64
, { "rZ", "r" } },
2243 { INDEX_op_st_i64
, { "rZ", "r" } },
2245 { INDEX_op_add_i64
, { "r", "rZ", "rJ" } },
2246 { INDEX_op_mul_i64
, { "r", "rZ", "rZ" } },
2247 #if !use_mips32r6_instructions
2248 { INDEX_op_muls2_i64
, { "r", "r", "rZ", "rZ" } },
2249 { INDEX_op_mulu2_i64
, { "r", "r", "rZ", "rZ" } },
2251 { INDEX_op_mulsh_i64
, { "r", "rZ", "rZ" } },
2252 { INDEX_op_muluh_i64
, { "r", "rZ", "rZ" } },
2253 { INDEX_op_div_i64
, { "r", "rZ", "rZ" } },
2254 { INDEX_op_divu_i64
, { "r", "rZ", "rZ" } },
2255 { INDEX_op_rem_i64
, { "r", "rZ", "rZ" } },
2256 { INDEX_op_remu_i64
, { "r", "rZ", "rZ" } },
2257 { INDEX_op_sub_i64
, { "r", "rZ", "rN" } },
2259 { INDEX_op_and_i64
, { "r", "rZ", "rIK" } },
2260 { INDEX_op_nor_i64
, { "r", "rZ", "rZ" } },
2261 { INDEX_op_not_i64
, { "r", "rZ" } },
2262 { INDEX_op_or_i64
, { "r", "rZ", "rI" } },
2263 { INDEX_op_xor_i64
, { "r", "rZ", "rI" } },
2265 { INDEX_op_shl_i64
, { "r", "rZ", "ri" } },
2266 { INDEX_op_shr_i64
, { "r", "rZ", "ri" } },
2267 { INDEX_op_sar_i64
, { "r", "rZ", "ri" } },
2268 { INDEX_op_rotr_i64
, { "r", "rZ", "ri" } },
2269 { INDEX_op_rotl_i64
, { "r", "rZ", "ri" } },
2270 { INDEX_op_clz_i64
, { "r", "r", "rWZ" } },
2272 { INDEX_op_bswap16_i64
, { "r", "r" } },
2273 { INDEX_op_bswap32_i64
, { "r", "r" } },
2274 { INDEX_op_bswap64_i64
, { "r", "r" } },
2276 { INDEX_op_ext8s_i64
, { "r", "rZ" } },
2277 { INDEX_op_ext16s_i64
, { "r", "rZ" } },
2278 { INDEX_op_ext32s_i64
, { "r", "rZ" } },
2279 { INDEX_op_ext32u_i64
, { "r", "rZ" } },
2280 { INDEX_op_ext_i32_i64
, { "r", "rZ" } },
2281 { INDEX_op_extu_i32_i64
, { "r", "rZ" } },
2282 { INDEX_op_extrl_i64_i32
, { "r", "rZ" } },
2283 { INDEX_op_extrh_i64_i32
, { "r", "rZ" } },
2285 { INDEX_op_deposit_i64
, { "r", "0", "rZ" } },
2286 { INDEX_op_extract_i64
, { "r", "r" } },
2288 { INDEX_op_brcond_i64
, { "rZ", "rZ" } },
2289 #if use_mips32r6_instructions
2290 { INDEX_op_movcond_i64
, { "r", "rZ", "rZ", "rZ", "rZ" } },
2292 { INDEX_op_movcond_i64
, { "r", "rZ", "rZ", "rZ", "0" } },
2294 { INDEX_op_setcond_i64
, { "r", "rZ", "rZ" } },
2296 { INDEX_op_qemu_ld_i32
, { "r", "LZ" } },
2297 { INDEX_op_qemu_st_i32
, { "SZ", "SZ" } },
2298 { INDEX_op_qemu_ld_i64
, { "r", "LZ" } },
2299 { INDEX_op_qemu_st_i64
, { "SZ", "SZ" } },
2300 #elif TARGET_LONG_BITS == 32
2301 { INDEX_op_qemu_ld_i32
, { "r", "LZ" } },
2302 { INDEX_op_qemu_st_i32
, { "SZ", "SZ" } },
2303 { INDEX_op_qemu_ld_i64
, { "r", "r", "LZ" } },
2304 { INDEX_op_qemu_st_i64
, { "SZ", "SZ", "SZ" } },
2306 { INDEX_op_qemu_ld_i32
, { "r", "LZ", "LZ" } },
2307 { INDEX_op_qemu_st_i32
, { "SZ", "SZ", "SZ" } },
2308 { INDEX_op_qemu_ld_i64
, { "r", "r", "LZ", "LZ" } },
2309 { INDEX_op_qemu_st_i64
, { "SZ", "SZ", "SZ", "SZ" } },
2312 { INDEX_op_mb
, { } },
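/*
 * Reading the table above: each entry pairs an opcode with one constraint
 * string per output/input operand.  For instance, { INDEX_op_add_i32,
 * { "r", "rZ", "rJ" } } allows any register for the output, a register or
 * the constant 0 (folded to $zero) for the first input, and a register or
 * a signed 16-bit immediate (matching ADDIU) for the second input, per the
 * backend's constraint letters.
 */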
static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
{
    int i, n = ARRAY_SIZE(mips_op_defs);

    /* Linear search; the table is small and this runs at translation
       time only. */
    for (i = 0; i < n; ++i) {
        if (mips_op_defs[i].op == op) {
            return &mips_op_defs[i];
        }
    }
    return NULL;
}
static int tcg_target_callee_save_regs[] = {
    TCG_REG_S0,       /* used for the global env (TCG_AREG0) */
    TCG_REG_S1,
    TCG_REG_S2,
    TCG_REG_S3,
    TCG_REG_S4,
    TCG_REG_S5,
    TCG_REG_S6,
    TCG_REG_S7,
    TCG_REG_S8,
    TCG_REG_RA,       /* should be last for ABI compliance */
};
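/*
 * These are stored in ascending order at SAVE_OFS in the prologue below,
 * so (assuming a 64-bit host, REG_SIZE == 8) the RA slot is the highest
 * one, at SAVE_OFS + 9 * 8, just below the top of the frame where ABI
 * unwinders expect to find it.
 */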
/* The Linux kernel doesn't provide any information about the available
   instruction set.  Probe it using a signal handler.  */

#ifndef use_movnz_instructions
bool use_movnz_instructions = false;
#endif

#ifndef use_mips32_instructions
bool use_mips32_instructions = false;
#endif

#ifndef use_mips32r2_instructions
bool use_mips32r2_instructions = false;
#endif

static volatile sig_atomic_t got_sigill;

static void sigill_handler(int signo, siginfo_t *si, void *data)
{
    /* Skip the faulty instruction; every MIPS instruction is 4 bytes.  */
    ucontext_t *uc = (ucontext_t *)data;
    uc->uc_mcontext.pc += 4;

    got_sigill = 1;
}
static void tcg_target_detect_isa(void)
{
    struct sigaction sa_old, sa_new;

    memset(&sa_new, 0, sizeof(sa_new));
    sa_new.sa_flags = SA_SIGINFO;
    sa_new.sa_sigaction = sigill_handler;
    sigaction(SIGILL, &sa_new, &sa_old);

    /* Probe for movn/movz, necessary to implement movcond.  */
#ifndef use_movnz_instructions
    got_sigill = 0;
    asm volatile(".set push\n"
                 ".set mips32\n"
                 "movn $zero, $zero, $zero\n"
                 "movz $zero, $zero, $zero\n"
                 ".set pop\n"
                 );
    use_movnz_instructions = !got_sigill;
#endif

    /* Probe for MIPS32 instructions.  As no subsetting is allowed
       by the specification, it is only necessary to probe for one
       of the instructions.  */
#ifndef use_mips32_instructions
    got_sigill = 0;
    asm volatile(".set push\n"
                 ".set mips32\n"
                 "mul $zero, $zero\n"
                 ".set pop\n"
                 );
    use_mips32_instructions = !got_sigill;
#endif

    /* Probe for MIPS32r2 instructions if MIPS32 instructions are
       available.  As no subsetting is allowed by the specification,
       it is only necessary to probe for one of the instructions.  */
#ifndef use_mips32r2_instructions
    if (use_mips32_instructions) {
        got_sigill = 0;
        asm volatile(".set push\n"
                     ".set mips32r2\n"
                     "seb $zero, $zero\n"
                     ".set pop\n"
                     );
        use_mips32r2_instructions = !got_sigill;
    }
#endif

    sigaction(SIGILL, &sa_old, NULL);
}
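/*
 * Illustrative outcomes (assumptions, not from this file): on a MIPS32r2
 * core such as a 24Kc all three probes succeed; on a pre-MIPS32 core that
 * already has conditional moves (MIPS IV, e.g. an R5000) only the
 * movn/movz probe succeeds, so movcond is usable but the MIPS32 mul and
 * the r2 seb paths are not.
 */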
static tcg_insn_unit *align_code_ptr(TCGContext *s)
{
    uintptr_t p = (uintptr_t)s->code_ptr;
    if (p & 15) {
        p = (p + 15) & -16;
        s->code_ptr = (void *)p;
    }
    return s->code_ptr;
}
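/*
 * For example, a code_ptr of 0x1004 is rounded up to the next 16-byte
 * boundary, 0x1010, while 0x1010 itself is left unchanged.
 */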
/* Stack frame parameters.  */
#define REG_SIZE (TCG_TARGET_REG_BITS / 8)
#define SAVE_SIZE ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * REG_SIZE)
#define TEMP_SIZE (CPU_TEMP_BUF_NLONGS * (int)sizeof(long))

#define FRAME_SIZE ((TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE + SAVE_SIZE \
                     + TCG_TARGET_STACK_ALIGN - 1) \
                    & -TCG_TARGET_STACK_ALIGN)
#define SAVE_OFS (TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE)

/* We're expecting to be able to use an immediate for frame allocation.  */
QEMU_BUILD_BUG_ON(FRAME_SIZE > 0x7fff);
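/*
 * Worked example (values assumed, not defined in this file): on a 64-bit
 * host with TCG_STATIC_CALL_ARGS_SIZE == 128 and CPU_TEMP_BUF_NLONGS == 128,
 * REG_SIZE = 8, SAVE_SIZE = 10 * 8 = 80 and TEMP_SIZE = 128 * 8 = 1024, so
 * SAVE_OFS = 1152 and FRAME_SIZE = 1232 (already 16-byte aligned), well
 * below the 0x7fff ADDIU immediate limit checked above.
 */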
/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int i;

    tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE, TEMP_SIZE);

    /* TB prologue */
    tcg_out_opc_imm(s, ALIAS_PADDI, TCG_REG_SP, TCG_REG_SP, -FRAME_SIZE);
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
    }

#ifndef CONFIG_SOFTMMU
    if (guest_base) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    /* Call generated code */
    tcg_out_opc_reg(s, OPC_JR, 0, tcg_target_call_iarg_regs[1], 0);
    /* delay slot */
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);

    /*
     * Return path for goto_ptr. Set return value to 0, a-la exit_tb,
     * and fall through to the rest of the epilogue.
     */
    s->code_gen_epilogue = s->code_ptr;
    tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_V0, TCG_REG_ZERO);

    /* TB epilogue */
    tb_ret_addr = s->code_ptr;
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
    }

    tcg_out_opc_reg(s, OPC_JR, 0, TCG_REG_RA, 0);
    /* delay slot */
    tcg_out_opc_imm(s, ALIAS_PADDI, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE);

    /* R2 hosts implement bswap inline (wsbh/rotr), so the out-of-line
       subroutines below are only needed on pre-r2 hosts.  */
    if (use_mips32r2_instructions) {
        return;
    }

    /* Bswap subroutines: Input in TCG_TMP0, output in TCG_TMP3;
       clobbers TCG_TMP1, TCG_TMP2.  */

    /*
     * bswap32 -- 32-bit swap (signed result for mips64).  a0 = abcd.
     */
    bswap32_addr = align_code_ptr(s);
    /* t3 = (ssss)d000 */
    tcg_out_opc_sa(s, OPC_SLL, TCG_TMP3, TCG_TMP0, 24);
    /* t1 = 000a */
    tcg_out_opc_sa(s, OPC_SRL, TCG_TMP1, TCG_TMP0, 24);
    /* t2 = 00c0 */
    tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP2, TCG_TMP0, 0xff00);
    /* t3 = d00a */
    tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP1);
    /* t1 = 0abc */
    tcg_out_opc_sa(s, OPC_SRL, TCG_TMP1, TCG_TMP0, 8);
    /* t2 = 0c00 */
    tcg_out_opc_sa(s, OPC_SLL, TCG_TMP2, TCG_TMP2, 8);
    /* t1 = 00b0 */
    tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP1, TCG_TMP1, 0xff00);
    /* t3 = dc0a */
    tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP2);
    tcg_out_opc_reg(s, OPC_JR, 0, TCG_REG_RA, 0);
    /* t3 = dcba -- delay slot */
    tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP1);
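    /*
     * Concrete trace (illustrative): for a0 = 0x11223344 the steps above
     * give t3 = 0x44000000, then t3 |= 0x00000011, t3 |= 0x00330000, and
     * finally t3 |= 0x00002200 in the delay slot, yielding 0x44332211.
     */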
    if (TCG_TARGET_REG_BITS == 32) {
        return;
    }

    /*
     * bswap32u -- unsigned 32-bit swap.  a0 = ....abcd.
     * Same shuffle as bswap32, but built with DSLL so the upper 32 bits
     * of the result stay clear, i.e. the result is zero-extended.
     */
    bswap32u_addr = align_code_ptr(s);
    /* t1 = (0000)000d */
    tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP1, TCG_TMP0, 0xff);
    /* t3 = 000a */
    tcg_out_opc_sa(s, OPC_SRL, TCG_TMP3, TCG_TMP0, 24);
    /* t1 = (0000)d000 */
    tcg_out_dsll(s, TCG_TMP1, TCG_TMP1, 24);
    /* t2 = 00c0 */
    tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP2, TCG_TMP0, 0xff00);
    /* t3 = d00a */
    tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP1);
    /* t1 = 0abc */
    tcg_out_opc_sa(s, OPC_SRL, TCG_TMP1, TCG_TMP0, 8);
    /* t2 = 0c00 */
    tcg_out_opc_sa(s, OPC_SLL, TCG_TMP2, TCG_TMP2, 8);
    /* t1 = 00b0 */
    tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP1, TCG_TMP1, 0xff00);
    /* t3 = dc0a */
    tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP2);
    tcg_out_opc_reg(s, OPC_JR, 0, TCG_REG_RA, 0);
    /* t3 = dcba -- delay slot */
    tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP1);
    /*
     * bswap64 -- 64-bit swap.  a0 = abcdefgh
     */
    bswap64_addr = align_code_ptr(s);
    /* t3 = h0000000 */
    tcg_out_dsll(s, TCG_TMP3, TCG_TMP0, 56);
    /* t1 = 0000000a */
    tcg_out_dsrl(s, TCG_TMP1, TCG_TMP0, 56);

    /* t2 = 000000g0 */
    tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP2, TCG_TMP0, 0xff00);
    /* t3 = h000000a */
    tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP1);
    /* t1 = 00000abc */
    tcg_out_dsrl(s, TCG_TMP1, TCG_TMP0, 40);
    /* t2 = 0g000000 */
    tcg_out_dsll(s, TCG_TMP2, TCG_TMP2, 40);
    /* t1 = 000000b0 */
    tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP1, TCG_TMP1, 0xff00);

    /* t3 = hg00000a */
    tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP2);
    /* t2 = 0000abcd */
    tcg_out_dsrl(s, TCG_TMP2, TCG_TMP0, 32);
    /* t3 = hg0000ba */
    tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP1);

    /* t1 = 000000c0 */
    tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP1, TCG_TMP2, 0xff00);
    /* t2 = 0000000d */
    tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP2, TCG_TMP2, 0x00ff);
    /* t1 = 00000c00 */
    tcg_out_dsll(s, TCG_TMP1, TCG_TMP1, 8);
    /* t2 = 0000d000 */
    tcg_out_dsll(s, TCG_TMP2, TCG_TMP2, 24);

    /* t3 = hg000cba */
    tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP1);
    /* t1 = 00abcdef */
    tcg_out_dsrl(s, TCG_TMP1, TCG_TMP0, 16);
    /* t3 = hg00dcba */
    tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP2);

    /* t2 = 0000000f */
    tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP2, TCG_TMP1, 0x00ff);
    /* t1 = 000000e0 */
    tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP1, TCG_TMP1, 0xff00);
    /* t2 = 00f00000 */
    tcg_out_dsll(s, TCG_TMP2, TCG_TMP2, 40);
    /* t1 = 000e0000 */
    tcg_out_dsll(s, TCG_TMP1, TCG_TMP1, 24);

    /* t3 = hgf0dcba */
    tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP2);
    tcg_out_opc_reg(s, OPC_JR, 0, TCG_REG_RA, 0);
    /* t3 = hgfedcba -- delay slot */
    tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP1);
}
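/*
 * For reference only: the out-of-line bswap64 sequence above computes the
 * same byte permutation as this C sketch.  The helper name is hypothetical
 * and nothing in the backend calls it; it is here purely to document the
 * intended semantics.
 */
static inline uint64_t mips_bswap64_reference(uint64_t x)
{
    /* Swap adjacent bytes, then adjacent 16-bit units, then halves.  */
    x = ((x & 0x00ff00ff00ff00ffull) << 8) | ((x >> 8) & 0x00ff00ff00ff00ffull);
    x = ((x & 0x0000ffff0000ffffull) << 16) | ((x >> 16) & 0x0000ffff0000ffffull);
    return (x << 32) | (x >> 32);
}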
static void tcg_target_init(TCGContext *s)
{
    tcg_target_detect_isa();
    tcg_regset_set(tcg_target_available_regs[TCG_TYPE_I32], 0xffffffff);
    if (TCG_TARGET_REG_BITS == 64) {
        tcg_regset_set(tcg_target_available_regs[TCG_TYPE_I64], 0xffffffff);
    }
    tcg_regset_set(tcg_target_call_clobber_regs,
                   (1 << TCG_REG_V0) |
                   (1 << TCG_REG_V1) |
                   (1 << TCG_REG_A0) |
                   (1 << TCG_REG_A1) |
                   (1 << TCG_REG_A2) |
                   (1 << TCG_REG_A3) |
                   (1 << TCG_REG_T0) |
                   (1 << TCG_REG_T1) |
                   (1 << TCG_REG_T2) |
                   (1 << TCG_REG_T3) |
                   (1 << TCG_REG_T4) |
                   (1 << TCG_REG_T5) |
                   (1 << TCG_REG_T6) |
                   (1 << TCG_REG_T7) |
                   (1 << TCG_REG_T8) |
                   (1 << TCG_REG_T9));

    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO); /* zero register */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_K0);   /* kernel use only */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_K1);   /* kernel use only */
    tcg_regset_set_reg(s->reserved_regs, TCG_TMP0);     /* internal use */
    tcg_regset_set_reg(s->reserved_regs, TCG_TMP1);     /* internal use */
    tcg_regset_set_reg(s->reserved_regs, TCG_TMP2);     /* internal use */
    tcg_regset_set_reg(s->reserved_regs, TCG_TMP3);     /* internal use */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_RA);   /* return address */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP);   /* stack pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_GP);   /* global pointer */
}
void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    atomic_set((uint32_t *)jmp_addr, deposit32(OPC_J, 0, 26, addr >> 2));
    flush_icache_range(jmp_addr, jmp_addr + 4);
}
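/*
 * Informational note on the encoding: J carries a 26-bit word index, so the
 * patched jump can only reach targets within the same 256MB-aligned region
 * as the delay-slot PC.  For example, deposit32(OPC_J, 0, 26, 0x2c001234 >> 2)
 * keeps only the low 26 bits of the shifted address; the high 4 bits come
 * from the PC at execution time.
 */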
typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2];
} DebugFrame;

#define ELF_HOST_MACHINE EM_MIPS
/* GDB doesn't appear to require proper setting of ELF_HOST_FLAGS,
   which is good because they're really quite complicated for MIPS.  */
static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = -(TCG_TARGET_REG_BITS / 8) & 0x7f, /* sleb128 */
    .h.cie.return_column = TCG_REG_RA,

    /* Total FDE size does not include the "len" member.  */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
        12, TCG_REG_SP,                 /* DW_CFA_def_cfa sp, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde_reg_ofs = {
        0x80 + 16, 9,                   /* DW_CFA_offset, s0, -72 */
        0x80 + 17, 8,                   /* DW_CFA_offset, s1, -64 */
        0x80 + 18, 7,                   /* DW_CFA_offset, s2, -56 */
        0x80 + 19, 6,                   /* DW_CFA_offset, s3, -48 */
        0x80 + 20, 5,                   /* DW_CFA_offset, s4, -40 */
        0x80 + 21, 4,                   /* DW_CFA_offset, s5, -32 */
        0x80 + 22, 3,                   /* DW_CFA_offset, s6, -24 */
        0x80 + 30, 2,                   /* DW_CFA_offset, s8, -16 */
        0x80 + 31, 1,                   /* DW_CFA_offset, ra,  -8 */
    }
};
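/*
 * Worked encoding example (FRAME_SIZE value assumed, per the frame example
 * above): with FRAME_SIZE == 1232 the two-byte uleb128 emitted by
 * .fde_def_cfa is { 0xd0, 0x09 }, i.e. (1232 & 0x7f) | 0x80 followed by
 * 1232 >> 7.  The register offsets are factored by data_align, so the
 * value 9 for s0 denotes CFA - 72 when REG_SIZE is 8.
 */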
void tcg_register_jit(void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}