/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "../tcg-pool.inc.c"
#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
#if TCG_TARGET_REG_BITS == 64
    "%rax", "%rcx", "%rdx", "%rbx", "%rsp", "%rbp", "%rsi", "%rdi",
#else
    "%eax", "%ecx", "%edx", "%ebx", "%esp", "%ebp", "%esi", "%edi",
#endif
    "%r8",  "%r9",  "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
    "%xmm0", "%xmm1", "%xmm2", "%xmm3", "%xmm4", "%xmm5", "%xmm6", "%xmm7",
#if TCG_TARGET_REG_BITS == 64
    "%xmm8", "%xmm9", "%xmm10", "%xmm11",
    "%xmm12", "%xmm13", "%xmm14", "%xmm15",
#endif
};
#endif
static const int tcg_target_reg_alloc_order[] = {
#if TCG_TARGET_REG_BITS == 64
    TCG_REG_RBP,
    TCG_REG_RBX,
    TCG_REG_R12,
    TCG_REG_R13,
    TCG_REG_R14,
    TCG_REG_R15,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R9,
    TCG_REG_R8,
    TCG_REG_RCX,
    TCG_REG_RDX,
    TCG_REG_RSI,
    TCG_REG_RDI,
    TCG_REG_RAX,
#else
    TCG_REG_EBX,
    TCG_REG_ESI,
    TCG_REG_EDI,
    TCG_REG_EBP,
    TCG_REG_ECX,
    TCG_REG_EDX,
    TCG_REG_EAX,
#endif
    TCG_REG_XMM0,
    TCG_REG_XMM1,
    TCG_REG_XMM2,
    TCG_REG_XMM3,
    TCG_REG_XMM4,
    TCG_REG_XMM5,
#ifndef _WIN64
    /* The Win64 ABI has xmm6-xmm15 as callee-saves, and we do not save
       any of them.  Therefore only allow xmm0-xmm5 to be allocated.  */
    TCG_REG_XMM6,
    TCG_REG_XMM7,
#if TCG_TARGET_REG_BITS == 64
    TCG_REG_XMM8,
    TCG_REG_XMM9,
    TCG_REG_XMM10,
    TCG_REG_XMM11,
    TCG_REG_XMM12,
    TCG_REG_XMM13,
    TCG_REG_XMM14,
    TCG_REG_XMM15,
#endif
#endif
};
static const int tcg_target_call_iarg_regs[] = {
#if TCG_TARGET_REG_BITS == 64
#if defined(_WIN64)
    TCG_REG_RCX,
    TCG_REG_RDX,
#else
    TCG_REG_RDI,
    TCG_REG_RSI,
    TCG_REG_RDX,
    TCG_REG_RCX,
#endif
    TCG_REG_R8,
    TCG_REG_R9,
#else
    /* 32 bit mode uses stack based calling convention (GCC default). */
#endif
};
static const int tcg_target_call_oarg_regs[] = {
    TCG_REG_EAX,
#if TCG_TARGET_REG_BITS == 32
    TCG_REG_EDX
#endif
};
/* Constants we accept.  */
#define TCG_CT_CONST_S32 0x100
#define TCG_CT_CONST_U32 0x200
#define TCG_CT_CONST_I32 0x400
#define TCG_CT_CONST_WSZ 0x800
/* Registers used with L constraint, which are the first argument
   registers on x86_64, and two random call clobbered registers on
   i386. */
#if TCG_TARGET_REG_BITS == 64
# define TCG_REG_L0 tcg_target_call_iarg_regs[0]
# define TCG_REG_L1 tcg_target_call_iarg_regs[1]
#else
# define TCG_REG_L0 TCG_REG_EAX
# define TCG_REG_L1 TCG_REG_EDX
#endif
/* The host compiler should supply <cpuid.h> to enable runtime features
   detection, as we're not going to go so far as our own inline assembly.
   If not available, default values will be assumed.  */
#if defined(CONFIG_CPUID_H)
#include "qemu/cpuid.h"
#endif
/* For 64-bit, we always know that CMOV is available.  */
#if TCG_TARGET_REG_BITS == 64
# define have_cmov 1
#elif defined(CONFIG_CPUID_H)
static bool have_cmov;
#else
# define have_cmov 0
#endif
/* We need these symbols in tcg-target.h, and we can't properly conditionalize
   it there.  Therefore we always define the variable.  */
#ifdef CONFIG_CPUID_H
static bool have_movbe;
static bool have_bmi2;
static bool have_lzcnt;
#else
# define have_movbe 0
# define have_bmi2 0
# define have_lzcnt 0
#endif
static tcg_insn_unit *tb_ret_addr;
static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    value += addend;
    switch(type) {
    case R_386_PC32:
        value -= (uintptr_t)code_ptr;
        if (value != (int32_t)value) {
            return false;
        }
        /* FALLTHRU */
    case R_386_32:
        tcg_patch32(code_ptr, value);
        break;
    case R_386_PC8:
        value -= (uintptr_t)code_ptr;
        if (value != (int8_t)value) {
            return false;
        }
        tcg_patch8(code_ptr, value);
        break;
    default:
        tcg_abort();
    }
    return true;
}
#if TCG_TARGET_REG_BITS == 64
#define ALL_GENERAL_REGS   0x0000ffffu
#define ALL_VECTOR_REGS    0xffff0000u
#else
#define ALL_GENERAL_REGS   0x000000ffu
#define ALL_VECTOR_REGS    0x00ff0000u
#endif
/* parse target specific constraints */
static const char *target_parse_constraint(TCGArgConstraint *ct,
                                           const char *ct_str, TCGType type)
{
    switch(*ct_str++) {
    case 'a':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_EAX);
        break;
    case 'b':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_EBX);
        break;
    case 'c':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_ECX);
        break;
    case 'd':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_EDX);
        break;
    case 'S':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_ESI);
        break;
    case 'D':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_EDI);
        break;
    case 'q':
        /* A register that can be used as a byte operand.  */
        ct->ct |= TCG_CT_REG;
        ct->u.regs = TCG_TARGET_REG_BITS == 64 ? 0xffff : 0xf;
        break;
    case 'Q':
        /* A register with an addressable second byte (e.g. %ah).  */
        ct->ct |= TCG_CT_REG;
        ct->u.regs = 0xf;
        break;
    case 'r':
        /* A general register.  */
        ct->ct |= TCG_CT_REG;
        ct->u.regs |= ALL_GENERAL_REGS;
        break;
    case 'W':
        /* With TZCNT/LZCNT, we can have operand-size as an input.  */
        ct->ct |= TCG_CT_CONST_WSZ;
        break;
    case 'x':
        /* A vector register.  */
        ct->ct |= TCG_CT_REG;
        ct->u.regs |= ALL_VECTOR_REGS;
        break;

        /* qemu_ld/st address constraint */
    case 'L':
        ct->ct |= TCG_CT_REG;
        ct->u.regs = TCG_TARGET_REG_BITS == 64 ? 0xffff : 0xff;
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_L0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_L1);
        break;

    case 'e':
        ct->ct |= (type == TCG_TYPE_I32 ? TCG_CT_CONST : TCG_CT_CONST_S32);
        break;
    case 'Z':
        ct->ct |= (type == TCG_TYPE_I32 ? TCG_CT_CONST : TCG_CT_CONST_U32);
        break;
    case 'I':
        ct->ct |= (type == TCG_TYPE_I32 ? TCG_CT_CONST : TCG_CT_CONST_I32);
        break;

    default:
        return NULL;
    }
    return ct_str;
}
/* test if a constant matches the constraint */
static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
                                         const TCGArgConstraint *arg_ct)
{
    int ct = arg_ct->ct;
    if (ct & TCG_CT_CONST) {
        return 1;
    }
    if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
        return 1;
    }
    if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val) {
        return 1;
    }
    if ((ct & TCG_CT_CONST_I32) && ~val == (int32_t)~val) {
        return 1;
    }
    if ((ct & TCG_CT_CONST_WSZ) && val == (type == TCG_TYPE_I32 ? 32 : 64)) {
        return 1;
    }
    return 0;
}
# define LOWREGMASK(x) ((x) & 7)
#define P_EXT           0x100       /* 0x0f opcode prefix */
#define P_EXT38         0x200       /* 0x0f 0x38 opcode prefix */
#define P_DATA16        0x400       /* 0x66 opcode prefix */
#if TCG_TARGET_REG_BITS == 64
# define P_REXW         0x1000      /* Set REX.W = 1 */
# define P_REXB_R       0x2000      /* REG field as byte register */
# define P_REXB_RM      0x4000      /* R/M field as byte register */
# define P_GS           0x8000      /* gs segment override */
#else
# define P_REXW         0
# define P_REXB_R       0
# define P_REXB_RM      0
# define P_GS           0
#endif
#define P_EXT3A         0x10000     /* 0x0f 0x3a opcode prefix */
#define P_SIMDF3        0x20000     /* 0xf3 opcode prefix */
#define P_SIMDF2        0x40000     /* 0xf2 opcode prefix */
#define P_VEXL          0x80000     /* Set VEX.L = 1 */
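/*
 * The P_* bits above sit outside the low opcode byte and select the
 * prefix/escape bytes emitted by tcg_out_opc.  For example, OPC_MOVZBL
 * below is (0xb6 | P_EXT) and is emitted as the escape sequence 0f b6,
 * while OPC_PADDB is (0xfc | P_EXT | P_DATA16) and is emitted as 66 0f fc.
 */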
#define OPC_ARITH_EvIz  (0x81)
#define OPC_ARITH_EvIb  (0x83)
#define OPC_ARITH_GvEv  (0x03)      /* ... plus (ARITH_FOO << 3) */
#define OPC_ANDN        (0xf2 | P_EXT38)
#define OPC_ADD_GvEv    (OPC_ARITH_GvEv | (ARITH_ADD << 3))
#define OPC_AND_GvEv    (OPC_ARITH_GvEv | (ARITH_AND << 3))
#define OPC_BLENDPS     (0x0c | P_EXT3A | P_DATA16)
#define OPC_BSF         (0xbc | P_EXT)
#define OPC_BSR         (0xbd | P_EXT)
#define OPC_BSWAP       (0xc8 | P_EXT)
#define OPC_CALL_Jz     (0xe8)
#define OPC_CMOVCC      (0x40 | P_EXT)  /* ... plus condition code */
#define OPC_CMP_GvEv    (OPC_ARITH_GvEv | (ARITH_CMP << 3))
#define OPC_DEC_r32     (0x48)
#define OPC_IMUL_GvEv   (0xaf | P_EXT)
#define OPC_IMUL_GvEvIb (0x6b)
#define OPC_IMUL_GvEvIz (0x69)
#define OPC_INC_r32     (0x40)
#define OPC_JCC_long    (0x80 | P_EXT)  /* ... plus condition code */
#define OPC_JCC_short   (0x70)          /* ... plus condition code */
#define OPC_JMP_long    (0xe9)
#define OPC_JMP_short   (0xeb)
#define OPC_LEA         (0x8d)
#define OPC_LZCNT       (0xbd | P_EXT | P_SIMDF3)
#define OPC_MOVB_EvGv   (0x88)      /* stores, more or less */
#define OPC_MOVL_EvGv   (0x89)      /* stores, more or less */
#define OPC_MOVL_GvEv   (0x8b)      /* loads, more or less */
#define OPC_MOVB_EvIz   (0xc6)
#define OPC_MOVL_EvIz   (0xc7)
#define OPC_MOVL_Iv     (0xb8)
#define OPC_MOVBE_GyMy  (0xf0 | P_EXT38)
#define OPC_MOVBE_MyGy  (0xf1 | P_EXT38)
#define OPC_MOVD_VyEy   (0x6e | P_EXT | P_DATA16)
#define OPC_MOVD_EyVy   (0x7e | P_EXT | P_DATA16)
#define OPC_MOVDDUP     (0x12 | P_EXT | P_SIMDF2)
#define OPC_MOVDQA_VxWx (0x6f | P_EXT | P_DATA16)
#define OPC_MOVDQA_WxVx (0x7f | P_EXT | P_DATA16)
#define OPC_MOVDQU_VxWx (0x6f | P_EXT | P_SIMDF3)
#define OPC_MOVDQU_WxVx (0x7f | P_EXT | P_SIMDF3)
#define OPC_MOVQ_VqWq   (0x7e | P_EXT | P_SIMDF3)
#define OPC_MOVQ_WqVq   (0xd6 | P_EXT | P_DATA16)
#define OPC_MOVSBL      (0xbe | P_EXT)
#define OPC_MOVSWL      (0xbf | P_EXT)
#define OPC_MOVSLQ      (0x63 | P_REXW)
#define OPC_MOVZBL      (0xb6 | P_EXT)
#define OPC_MOVZWL      (0xb7 | P_EXT)
#define OPC_PABSB       (0x1c | P_EXT38 | P_DATA16)
#define OPC_PABSW       (0x1d | P_EXT38 | P_DATA16)
#define OPC_PABSD       (0x1e | P_EXT38 | P_DATA16)
#define OPC_PACKSSDW    (0x6b | P_EXT | P_DATA16)
#define OPC_PACKSSWB    (0x63 | P_EXT | P_DATA16)
#define OPC_PACKUSDW    (0x2b | P_EXT38 | P_DATA16)
#define OPC_PACKUSWB    (0x67 | P_EXT | P_DATA16)
#define OPC_PADDB       (0xfc | P_EXT | P_DATA16)
#define OPC_PADDW       (0xfd | P_EXT | P_DATA16)
#define OPC_PADDD       (0xfe | P_EXT | P_DATA16)
#define OPC_PADDQ       (0xd4 | P_EXT | P_DATA16)
#define OPC_PADDSB      (0xec | P_EXT | P_DATA16)
#define OPC_PADDSW      (0xed | P_EXT | P_DATA16)
#define OPC_PADDUB      (0xdc | P_EXT | P_DATA16)
#define OPC_PADDUW      (0xdd | P_EXT | P_DATA16)
#define OPC_PAND        (0xdb | P_EXT | P_DATA16)
#define OPC_PANDN       (0xdf | P_EXT | P_DATA16)
#define OPC_PBLENDW     (0x0e | P_EXT3A | P_DATA16)
#define OPC_PCMPEQB     (0x74 | P_EXT | P_DATA16)
#define OPC_PCMPEQW     (0x75 | P_EXT | P_DATA16)
#define OPC_PCMPEQD     (0x76 | P_EXT | P_DATA16)
#define OPC_PCMPEQQ     (0x29 | P_EXT38 | P_DATA16)
#define OPC_PCMPGTB     (0x64 | P_EXT | P_DATA16)
#define OPC_PCMPGTW     (0x65 | P_EXT | P_DATA16)
#define OPC_PCMPGTD     (0x66 | P_EXT | P_DATA16)
#define OPC_PCMPGTQ     (0x37 | P_EXT38 | P_DATA16)
#define OPC_PMAXSB      (0x3c | P_EXT38 | P_DATA16)
#define OPC_PMAXSW      (0xee | P_EXT | P_DATA16)
#define OPC_PMAXSD      (0x3d | P_EXT38 | P_DATA16)
#define OPC_PMAXUB      (0xde | P_EXT | P_DATA16)
#define OPC_PMAXUW      (0x3e | P_EXT38 | P_DATA16)
#define OPC_PMAXUD      (0x3f | P_EXT38 | P_DATA16)
#define OPC_PMINSB      (0x38 | P_EXT38 | P_DATA16)
#define OPC_PMINSW      (0xea | P_EXT | P_DATA16)
#define OPC_PMINSD      (0x39 | P_EXT38 | P_DATA16)
#define OPC_PMINUB      (0xda | P_EXT | P_DATA16)
#define OPC_PMINUW      (0x3a | P_EXT38 | P_DATA16)
#define OPC_PMINUD      (0x3b | P_EXT38 | P_DATA16)
#define OPC_PMOVSXBW    (0x20 | P_EXT38 | P_DATA16)
#define OPC_PMOVSXWD    (0x23 | P_EXT38 | P_DATA16)
#define OPC_PMOVSXDQ    (0x25 | P_EXT38 | P_DATA16)
#define OPC_PMOVZXBW    (0x30 | P_EXT38 | P_DATA16)
#define OPC_PMOVZXWD    (0x33 | P_EXT38 | P_DATA16)
#define OPC_PMOVZXDQ    (0x35 | P_EXT38 | P_DATA16)
#define OPC_PMULLW      (0xd5 | P_EXT | P_DATA16)
#define OPC_PMULLD      (0x40 | P_EXT38 | P_DATA16)
#define OPC_POR         (0xeb | P_EXT | P_DATA16)
#define OPC_PSHUFB      (0x00 | P_EXT38 | P_DATA16)
#define OPC_PSHUFD      (0x70 | P_EXT | P_DATA16)
#define OPC_PSHUFLW     (0x70 | P_EXT | P_SIMDF2)
#define OPC_PSHUFHW     (0x70 | P_EXT | P_SIMDF3)
#define OPC_PSHIFTW_Ib  (0x71 | P_EXT | P_DATA16) /* /2 /6 /4 */
#define OPC_PSHIFTD_Ib  (0x72 | P_EXT | P_DATA16) /* /2 /6 /4 */
#define OPC_PSHIFTQ_Ib  (0x73 | P_EXT | P_DATA16) /* /2 /6 /4 */
#define OPC_PSLLW       (0xf1 | P_EXT | P_DATA16)
#define OPC_PSLLD       (0xf2 | P_EXT | P_DATA16)
#define OPC_PSLLQ       (0xf3 | P_EXT | P_DATA16)
#define OPC_PSRAW       (0xe1 | P_EXT | P_DATA16)
#define OPC_PSRAD       (0xe2 | P_EXT | P_DATA16)
#define OPC_PSRLW       (0xd1 | P_EXT | P_DATA16)
#define OPC_PSRLD       (0xd2 | P_EXT | P_DATA16)
#define OPC_PSRLQ       (0xd3 | P_EXT | P_DATA16)
#define OPC_PSUBB       (0xf8 | P_EXT | P_DATA16)
#define OPC_PSUBW       (0xf9 | P_EXT | P_DATA16)
#define OPC_PSUBD       (0xfa | P_EXT | P_DATA16)
#define OPC_PSUBQ       (0xfb | P_EXT | P_DATA16)
#define OPC_PSUBSB      (0xe8 | P_EXT | P_DATA16)
#define OPC_PSUBSW      (0xe9 | P_EXT | P_DATA16)
#define OPC_PSUBUB      (0xd8 | P_EXT | P_DATA16)
#define OPC_PSUBUW      (0xd9 | P_EXT | P_DATA16)
#define OPC_PUNPCKLBW   (0x60 | P_EXT | P_DATA16)
#define OPC_PUNPCKLWD   (0x61 | P_EXT | P_DATA16)
#define OPC_PUNPCKLDQ   (0x62 | P_EXT | P_DATA16)
#define OPC_PUNPCKLQDQ  (0x6c | P_EXT | P_DATA16)
#define OPC_PUNPCKHBW   (0x68 | P_EXT | P_DATA16)
#define OPC_PUNPCKHWD   (0x69 | P_EXT | P_DATA16)
#define OPC_PUNPCKHDQ   (0x6a | P_EXT | P_DATA16)
#define OPC_PUNPCKHQDQ  (0x6d | P_EXT | P_DATA16)
#define OPC_PXOR        (0xef | P_EXT | P_DATA16)
#define OPC_POP_r32     (0x58)
#define OPC_POPCNT      (0xb8 | P_EXT | P_SIMDF3)
#define OPC_PUSH_r32    (0x50)
#define OPC_PUSH_Iv     (0x68)
#define OPC_PUSH_Ib     (0x6a)
#define OPC_RET         (0xc3)
#define OPC_SETCC       (0x90 | P_EXT | P_REXB_RM) /* ... plus cc */
#define OPC_SHIFT_1     (0xd1)
#define OPC_SHIFT_Ib    (0xc1)
#define OPC_SHIFT_cl    (0xd3)
#define OPC_SARX        (0xf7 | P_EXT38 | P_SIMDF3)
#define OPC_SHUFPS      (0xc6 | P_EXT)
#define OPC_SHLX        (0xf7 | P_EXT38 | P_DATA16)
#define OPC_SHRX        (0xf7 | P_EXT38 | P_SIMDF2)
#define OPC_SHRD_Ib     (0xac | P_EXT)
#define OPC_TESTL       (0x85)
#define OPC_TZCNT       (0xbc | P_EXT | P_SIMDF3)
#define OPC_UD2         (0x0b | P_EXT)
#define OPC_VPBLENDD    (0x02 | P_EXT3A | P_DATA16)
#define OPC_VPBLENDVB   (0x4c | P_EXT3A | P_DATA16)
#define OPC_VPINSRB     (0x20 | P_EXT3A | P_DATA16)
#define OPC_VPINSRW     (0xc4 | P_EXT | P_DATA16)
#define OPC_VBROADCASTSS (0x18 | P_EXT38 | P_DATA16)
#define OPC_VBROADCASTSD (0x19 | P_EXT38 | P_DATA16)
#define OPC_VPBROADCASTB (0x78 | P_EXT38 | P_DATA16)
#define OPC_VPBROADCASTW (0x79 | P_EXT38 | P_DATA16)
#define OPC_VPBROADCASTD (0x58 | P_EXT38 | P_DATA16)
#define OPC_VPBROADCASTQ (0x59 | P_EXT38 | P_DATA16)
#define OPC_VPERMQ      (0x00 | P_EXT3A | P_DATA16 | P_REXW)
#define OPC_VPERM2I128  (0x46 | P_EXT3A | P_DATA16 | P_VEXL)
#define OPC_VPSLLVD     (0x47 | P_EXT38 | P_DATA16)
#define OPC_VPSLLVQ     (0x47 | P_EXT38 | P_DATA16 | P_REXW)
#define OPC_VPSRAVD     (0x46 | P_EXT38 | P_DATA16)
#define OPC_VPSRLVD     (0x45 | P_EXT38 | P_DATA16)
#define OPC_VPSRLVQ     (0x45 | P_EXT38 | P_DATA16 | P_REXW)
#define OPC_VZEROUPPER  (0x77 | P_EXT)
#define OPC_XCHG_ax_r32 (0x90)

#define OPC_GRP3_Ev     (0xf7)
#define OPC_GRP5        (0xff)
#define OPC_GRP14       (0x73 | P_EXT | P_DATA16)
/* Group 1 opcode extensions for 0x80-0x83.
   These are also used as modifiers for OPC_ARITH.  */
#define ARITH_ADD 0
#define ARITH_OR  1
#define ARITH_ADC 2
#define ARITH_SBB 3
#define ARITH_AND 4
#define ARITH_SUB 5
#define ARITH_XOR 6
#define ARITH_CMP 7

/* Group 2 opcode extensions for 0xc0, 0xc1, 0xd0-0xd3.  */
#define SHIFT_ROL 0
#define SHIFT_ROR 1
#define SHIFT_SHL 4
#define SHIFT_SHR 5
#define SHIFT_SAR 7

/* Group 3 opcode extensions for 0xf6, 0xf7.  To be used with OPC_GRP3.  */
#define EXT3_NOT   2
#define EXT3_NEG   3
#define EXT3_MUL   4
#define EXT3_IMUL  5
#define EXT3_DIV   6
#define EXT3_IDIV  7

/* Group 5 opcode extensions for 0xff.  To be used with OPC_GRP5.  */
#define EXT5_INC_Ev    0
#define EXT5_DEC_Ev    1
#define EXT5_CALLN_Ev  2
#define EXT5_JMPN_Ev   4
/* Condition codes to be added to OPC_JCC_{long,short}.  */
#define JCC_JMP (-1)
#define JCC_JB  0x2
#define JCC_JAE 0x3
#define JCC_JE  0x4
#define JCC_JNE 0x5
#define JCC_JBE 0x6
#define JCC_JA  0x7
#define JCC_JL  0xc
#define JCC_JGE 0xd
#define JCC_JLE 0xe
#define JCC_JG  0xf

static const uint8_t tcg_cond_to_jcc[] = {
    [TCG_COND_EQ] = JCC_JE,
    [TCG_COND_NE] = JCC_JNE,
    [TCG_COND_LT] = JCC_JL,
    [TCG_COND_GE] = JCC_JGE,
    [TCG_COND_LE] = JCC_JLE,
    [TCG_COND_GT] = JCC_JG,
    [TCG_COND_LTU] = JCC_JB,
    [TCG_COND_GEU] = JCC_JAE,
    [TCG_COND_LEU] = JCC_JBE,
    [TCG_COND_GTU] = JCC_JA,
};
#if TCG_TARGET_REG_BITS == 64
static void tcg_out_opc(TCGContext *s, int opc, int r, int rm, int x)
{
    int rex;

    if (opc & P_GS) {
        tcg_out8(s, 0x65);
    }
    if (opc & P_DATA16) {
        /* We should never be asking for both 16 and 64-bit operation.  */
        tcg_debug_assert((opc & P_REXW) == 0);
        tcg_out8(s, 0x66);
    }
    if (opc & P_SIMDF3) {
        tcg_out8(s, 0xf3);
    } else if (opc & P_SIMDF2) {
        tcg_out8(s, 0xf2);
    }

    rex = 0;
    rex |= (opc & P_REXW) ? 0x8 : 0x0;  /* REX.W */
    rex |= (r & 8) >> 1;                /* REX.R */
    rex |= (x & 8) >> 2;                /* REX.X */
    rex |= (rm & 8) >> 3;               /* REX.B */

    /* P_REXB_{R,RM} indicates that the given register is the low byte.
       For %[abcd]l we need no REX prefix, but for %{si,di,bp,sp}l we do,
       as otherwise the encoding indicates %[abcd]h.  Note that the values
       that are ORed in merely indicate that the REX byte must be present;
       those bits get discarded in output.  */
    rex |= opc & (r >= 4 ? P_REXB_R : 0);
    rex |= opc & (rm >= 4 ? P_REXB_RM : 0);

    if (rex) {
        tcg_out8(s, (uint8_t)(rex | 0x40));
    }

    if (opc & (P_EXT | P_EXT38 | P_EXT3A)) {
        tcg_out8(s, 0x0f);
        if (opc & P_EXT38) {
            tcg_out8(s, 0x38);
        } else if (opc & P_EXT3A) {
            tcg_out8(s, 0x3a);
        }
    }

    tcg_out8(s, opc);
}
#else
static void tcg_out_opc(TCGContext *s, int opc)
{
    if (opc & P_DATA16) {
        tcg_out8(s, 0x66);
    }
    if (opc & P_SIMDF3) {
        tcg_out8(s, 0xf3);
    } else if (opc & P_SIMDF2) {
        tcg_out8(s, 0xf2);
    }
    if (opc & (P_EXT | P_EXT38 | P_EXT3A)) {
        tcg_out8(s, 0x0f);
        if (opc & P_EXT38) {
            tcg_out8(s, 0x38);
        } else if (opc & P_EXT3A) {
            tcg_out8(s, 0x3a);
        }
    }
    tcg_out8(s, opc);
}
/* Discard the register arguments to tcg_out_opc early, so as not to penalize
   the 32-bit compilation paths.  This method works with all versions of gcc,
   whereas relying on optimization may not be able to exclude them.  */
#define tcg_out_opc(s, opc, r, rm, x)  (tcg_out_opc)(s, opc)
#endif
static void tcg_out_modrm(TCGContext *s, int opc, int r, int rm)
{
    tcg_out_opc(s, opc, r, rm, 0);
    tcg_out8(s, 0xc0 | (LOWREGMASK(r) << 3) | LOWREGMASK(rm));
}
static void tcg_out_vex_opc(TCGContext *s, int opc, int r, int v,
                            int rm, int index)
{
    int tmp;

    /* Use the two byte form if possible, which cannot encode
       VEX.W, VEX.B, VEX.X, or an m-mmmm field other than P_EXT.  */
    if ((opc & (P_EXT | P_EXT38 | P_EXT3A | P_REXW)) == P_EXT
        && ((rm | index) & 8) == 0) {
        /* Two byte VEX prefix.  */
        tcg_out8(s, 0xc5);

        tmp = (r & 8 ? 0 : 0x80);              /* VEX.R */
    } else {
        /* Three byte VEX prefix.  */
        tcg_out8(s, 0xc4);

        /* VEX.m-mmmm */
        if (opc & P_EXT3A) {
            tmp = 3;
        } else if (opc & P_EXT38) {
            tmp = 2;
        } else if (opc & P_EXT) {
            tmp = 1;
        } else {
            g_assert_not_reached();
        }
        tmp |= (r & 8 ? 0 : 0x80);             /* VEX.R */
        tmp |= (index & 8 ? 0 : 0x40);         /* VEX.X */
        tmp |= (rm & 8 ? 0 : 0x20);            /* VEX.B */
        tcg_out8(s, tmp);

        tmp = (opc & P_REXW ? 0x80 : 0);       /* VEX.W */
    }

    tmp |= (opc & P_VEXL ? 0x04 : 0);          /* VEX.L */
    /* VEX.pp */
    if (opc & P_DATA16) {
        tmp |= 0x1;                            /* 0x66 */
    } else if (opc & P_SIMDF3) {
        tmp |= 0x2;                            /* 0xf3 */
    } else if (opc & P_SIMDF2) {
        tmp |= 0x3;                            /* 0xf2 */
    }
    tmp |= (~v & 15) << 3;                     /* VEX.vvvv */
    tcg_out8(s, tmp);
    tcg_out8(s, opc);
}
static void tcg_out_vex_modrm(TCGContext *s, int opc, int r, int v, int rm)
{
    tcg_out_vex_opc(s, opc, r, v, rm, 0);
    tcg_out8(s, 0xc0 | (LOWREGMASK(r) << 3) | LOWREGMASK(rm));
}
/* Output an opcode with a full "rm + (index<<shift) + offset" address mode.
   We handle either RM and INDEX missing with a negative value.  In 64-bit
   mode for absolute addresses, ~RM is the size of the immediate operand
   that will follow the instruction.  */
static void tcg_out_sib_offset(TCGContext *s, int r, int rm, int index,
                               int shift, intptr_t offset)
{
    int mod, len;

    if (index < 0 && rm < 0) {
        if (TCG_TARGET_REG_BITS == 64) {
            /* Try for a rip-relative addressing mode.  This has replaced
               the 32-bit-mode absolute addressing encoding.  */
            intptr_t pc = (intptr_t)s->code_ptr + 5 + ~rm;
            intptr_t disp = offset - pc;
            if (disp == (int32_t)disp) {
                tcg_out8(s, (LOWREGMASK(r) << 3) | 5);
                tcg_out32(s, disp);
                return;
            }

            /* Try for an absolute address encoding.  This requires the
               use of the MODRM+SIB encoding and is therefore larger than
               rip-relative addressing.  */
            if (offset == (int32_t)offset) {
                tcg_out8(s, (LOWREGMASK(r) << 3) | 4);
                tcg_out8(s, (4 << 3) | 5);
                tcg_out32(s, offset);
                return;
            }

            /* ??? The memory isn't directly addressable.  */
            g_assert_not_reached();
        } else {
            /* Absolute address.  */
            tcg_out8(s, (r << 3) | 5);
            tcg_out32(s, offset);
            return;
        }
    }

    /* Find the length of the immediate addend.  Note that the encoding
       that would be used for (%ebp) indicates absolute addressing.  */
    if (rm < 0) {
        mod = 0, len = 4, rm = 5;
    } else if (offset == 0 && LOWREGMASK(rm) != TCG_REG_EBP) {
        mod = 0, len = 0;
    } else if (offset == (int8_t)offset) {
        mod = 0x40, len = 1;
    } else {
        mod = 0x80, len = 4;
    }

    /* Use a single byte MODRM format if possible.  Note that the encoding
       that would be used for %esp is the escape to the two byte form.  */
    if (index < 0 && LOWREGMASK(rm) != TCG_REG_ESP) {
        /* Single byte MODRM format.  */
        tcg_out8(s, mod | (LOWREGMASK(r) << 3) | LOWREGMASK(rm));
    } else {
        /* Two byte MODRM+SIB format.  */

        /* Note that the encoding that would place %esp into the index
           field indicates no index register.  In 64-bit mode, the REX.X
           bit counts, so %r12 can be used as the index.  */
        if (index < 0) {
            index = 4;
        } else {
            tcg_debug_assert(index != TCG_REG_ESP);
        }

        tcg_out8(s, mod | (LOWREGMASK(r) << 3) | 4);
        tcg_out8(s, (shift << 6) | (LOWREGMASK(index) << 3) | LOWREGMASK(rm));
    }

    if (len == 1) {
        tcg_out8(s, offset);
    } else if (len == 4) {
        tcg_out32(s, offset);
    }
}
static void tcg_out_modrm_sib_offset(TCGContext *s, int opc, int r, int rm,
                                     int index, int shift, intptr_t offset)
{
    tcg_out_opc(s, opc, r, rm < 0 ? 0 : rm, index < 0 ? 0 : index);
    tcg_out_sib_offset(s, r, rm, index, shift, offset);
}
static void tcg_out_vex_modrm_sib_offset(TCGContext *s, int opc, int r, int v,
                                         int rm, int index, int shift,
                                         intptr_t offset)
{
    tcg_out_vex_opc(s, opc, r, v, rm < 0 ? 0 : rm, index < 0 ? 0 : index);
    tcg_out_sib_offset(s, r, rm, index, shift, offset);
}
/* A simplification of the above with no index or shift.  */
static inline void tcg_out_modrm_offset(TCGContext *s, int opc, int r,
                                        int rm, intptr_t offset)
{
    tcg_out_modrm_sib_offset(s, opc, r, rm, -1, 0, offset);
}
static inline void tcg_out_vex_modrm_offset(TCGContext *s, int opc, int r,
                                            int v, int rm, intptr_t offset)
{
    tcg_out_vex_modrm_sib_offset(s, opc, r, v, rm, -1, 0, offset);
}
/* Output an opcode with an expected reference to the constant pool.  */
static inline void tcg_out_modrm_pool(TCGContext *s, int opc, int r)
{
    tcg_out_opc(s, opc, r, 0, 0);
    /* Absolute for 32-bit, pc-relative for 64-bit.  */
    tcg_out8(s, LOWREGMASK(r) << 3 | 5);
    tcg_out32(s, 0);
}
/* Output an opcode with an expected reference to the constant pool.  */
static inline void tcg_out_vex_modrm_pool(TCGContext *s, int opc, int r)
{
    tcg_out_vex_opc(s, opc, r, 0, 0, 0);
    /* Absolute for 32-bit, pc-relative for 64-bit.  */
    tcg_out8(s, LOWREGMASK(r) << 3 | 5);
    tcg_out32(s, 0);
}
/* Generate dest op= src.  Uses the same ARITH_* codes as tgen_arithi.  */
static inline void tgen_arithr(TCGContext *s, int subop, int dest, int src)
{
    /* Propagate an opcode prefix, such as P_REXW.  */
    int ext = subop & ~0x7;
    subop &= 0x7;

    tcg_out_modrm(s, OPC_ARITH_GvEv + (subop << 3) + ext, dest, src);
}
static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    int rexw = 0;

    if (arg == ret) {
        return true;
    }
    switch (type) {
    case TCG_TYPE_I64:
        rexw = P_REXW;
        /* fallthru */
    case TCG_TYPE_I32:
        if (ret < 16) {
            if (arg < 16) {
                tcg_out_modrm(s, OPC_MOVL_GvEv + rexw, ret, arg);
            } else {
                tcg_out_vex_modrm(s, OPC_MOVD_EyVy + rexw, arg, 0, ret);
            }
        } else {
            if (arg < 16) {
                tcg_out_vex_modrm(s, OPC_MOVD_VyEy + rexw, ret, 0, arg);
            } else {
                tcg_out_vex_modrm(s, OPC_MOVQ_VqWq, ret, 0, arg);
            }
        }
        break;

    case TCG_TYPE_V64:
        tcg_debug_assert(ret >= 16 && arg >= 16);
        tcg_out_vex_modrm(s, OPC_MOVQ_VqWq, ret, 0, arg);
        break;
    case TCG_TYPE_V128:
        tcg_debug_assert(ret >= 16 && arg >= 16);
        tcg_out_vex_modrm(s, OPC_MOVDQA_VxWx, ret, 0, arg);
        break;
    case TCG_TYPE_V256:
        tcg_debug_assert(ret >= 16 && arg >= 16);
        tcg_out_vex_modrm(s, OPC_MOVDQA_VxWx | P_VEXL, ret, 0, arg);
        break;

    default:
        g_assert_not_reached();
    }
    return true;
}
static const int avx2_dup_insn[4] = {
    OPC_VPBROADCASTB, OPC_VPBROADCASTW,
    OPC_VPBROADCASTD, OPC_VPBROADCASTQ,
};
static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                            TCGReg r, TCGReg a)
{
    if (have_avx2) {
        int vex_l = (type == TCG_TYPE_V256 ? P_VEXL : 0);
        tcg_out_vex_modrm(s, avx2_dup_insn[vece] + vex_l, r, 0, a);
    } else {
        switch (vece) {
        case MO_8:
            /* ??? With zero in a register, use PSHUFB.  */
            tcg_out_vex_modrm(s, OPC_PUNPCKLBW, r, a, a);
            a = r;
            /* FALLTHRU */
        case MO_16:
            tcg_out_vex_modrm(s, OPC_PUNPCKLWD, r, a, a);
            a = r;
            /* FALLTHRU */
        case MO_32:
            tcg_out_vex_modrm(s, OPC_PSHUFD, r, 0, a);
            /* imm8 operand: all output lanes selected from input lane 0.  */
            tcg_out8(s, 0);
            break;
        case MO_64:
            tcg_out_vex_modrm(s, OPC_PUNPCKLQDQ, r, a, a);
            break;
        default:
            g_assert_not_reached();
        }
    }
    return true;
}
static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg r, TCGReg base, intptr_t offset)
{
    if (have_avx2) {
        int vex_l = (type == TCG_TYPE_V256 ? P_VEXL : 0);
        tcg_out_vex_modrm_offset(s, avx2_dup_insn[vece] + vex_l,
                                 r, 0, base, offset);
    } else {
        switch (vece) {
        case MO_64:
            tcg_out_vex_modrm_offset(s, OPC_MOVDDUP, r, 0, base, offset);
            break;
        case MO_32:
            tcg_out_vex_modrm_offset(s, OPC_VBROADCASTSS, r, 0, base, offset);
            break;
        case MO_16:
            tcg_out_vex_modrm_offset(s, OPC_VPINSRW, r, r, base, offset);
            tcg_out8(s, 0); /* imm8 */
            tcg_out_dup_vec(s, type, vece, r, r);
            break;
        case MO_8:
            tcg_out_vex_modrm_offset(s, OPC_VPINSRB, r, r, base, offset);
            tcg_out8(s, 0); /* imm8 */
            tcg_out_dup_vec(s, type, vece, r, r);
            break;
        default:
            g_assert_not_reached();
        }
    }
    return true;
}
static void tcg_out_dupi_vec(TCGContext *s, TCGType type,
                             TCGReg ret, tcg_target_long arg)
{
    int vex_l = (type == TCG_TYPE_V256 ? P_VEXL : 0);

    if (arg == 0) {
        tcg_out_vex_modrm(s, OPC_PXOR, ret, ret, ret);
        return;
    }
    if (arg == -1) {
        tcg_out_vex_modrm(s, OPC_PCMPEQB + vex_l, ret, ret, ret);
        return;
    }

    if (TCG_TARGET_REG_BITS == 64) {
        if (type == TCG_TYPE_V64) {
            tcg_out_vex_modrm_pool(s, OPC_MOVQ_VqWq, ret);
        } else if (have_avx2) {
            tcg_out_vex_modrm_pool(s, OPC_VPBROADCASTQ + vex_l, ret);
        } else {
            tcg_out_vex_modrm_pool(s, OPC_MOVDDUP, ret);
        }
        new_pool_label(s, arg, R_386_PC32, s->code_ptr - 4, -4);
    } else {
        if (have_avx2) {
            tcg_out_vex_modrm_pool(s, OPC_VPBROADCASTW + vex_l, ret);
        } else {
            tcg_out_vex_modrm_pool(s, OPC_VBROADCASTSS, ret);
        }
        new_pool_label(s, arg, R_386_32, s->code_ptr - 4, 0);
    }
}
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg)
{
    tcg_target_long diff;

    switch (type) {
    case TCG_TYPE_I32:
#if TCG_TARGET_REG_BITS == 64
    case TCG_TYPE_I64:
#endif
        if (ret < 16) {
            break;
        }
        /* fallthru */
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
    case TCG_TYPE_V256:
        tcg_debug_assert(ret >= 16);
        tcg_out_dupi_vec(s, type, ret, arg);
        return;
    default:
        g_assert_not_reached();
    }

    if (arg == 0) {
        tgen_arithr(s, ARITH_XOR, ret, ret);
        return;
    }
    if (arg == (uint32_t)arg || type == TCG_TYPE_I32) {
        tcg_out_opc(s, OPC_MOVL_Iv + LOWREGMASK(ret), 0, ret, 0);
        tcg_out32(s, arg);
        return;
    }
    if (arg == (int32_t)arg) {
        tcg_out_modrm(s, OPC_MOVL_EvIz + P_REXW, 0, ret);
        tcg_out32(s, arg);
        return;
    }

    /* Try a 7 byte pc-relative lea before the 10 byte movq.  */
    diff = arg - ((uintptr_t)s->code_ptr + 7);
    if (diff == (int32_t)diff) {
        tcg_out_opc(s, OPC_LEA | P_REXW, ret, 0, 0);
        tcg_out8(s, (LOWREGMASK(ret) << 3) | 5);
        tcg_out32(s, diff);
        return;
    }

    tcg_out_opc(s, OPC_MOVL_Iv + P_REXW + LOWREGMASK(ret), 0, ret, 0);
    tcg_out64(s, arg);
}
static inline void tcg_out_pushi(TCGContext *s, tcg_target_long val)
{
    if (val == (int8_t)val) {
        tcg_out_opc(s, OPC_PUSH_Ib, 0, 0, 0);
        tcg_out8(s, val);
    } else if (val == (int32_t)val) {
        tcg_out_opc(s, OPC_PUSH_Iv, 0, 0, 0);
        tcg_out32(s, val);
    } else {
        tcg_abort();
    }
}
static inline void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    /* Given the strength of x86 memory ordering, we only need care for
       store-load ordering.  Experimentally, "lock orl $0,0(%esp)" is
       faster than "mfence", so don't bother with the sse insn.  */
    if (a0 & TCG_MO_ST_LD) {
        tcg_out8(s, 0xf0);
        tcg_out_modrm_offset(s, OPC_ARITH_EvIb, ARITH_OR, TCG_REG_ESP, 0);
        tcg_out8(s, 0);
    }
}
static inline void tcg_out_push(TCGContext *s, int reg)
{
    tcg_out_opc(s, OPC_PUSH_r32 + LOWREGMASK(reg), 0, reg, 0);
}
static inline void tcg_out_pop(TCGContext *s, int reg)
{
    tcg_out_opc(s, OPC_POP_r32 + LOWREGMASK(reg), 0, reg, 0);
}
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
                       TCGReg arg1, intptr_t arg2)
{
    switch (type) {
    case TCG_TYPE_I32:
        if (ret < 16) {
            tcg_out_modrm_offset(s, OPC_MOVL_GvEv, ret, arg1, arg2);
        } else {
            tcg_out_vex_modrm_offset(s, OPC_MOVD_VyEy, ret, 0, arg1, arg2);
        }
        break;
    case TCG_TYPE_I64:
        if (ret < 16) {
            tcg_out_modrm_offset(s, OPC_MOVL_GvEv | P_REXW, ret, arg1, arg2);
            break;
        }
        /* FALLTHRU */
    case TCG_TYPE_V64:
        /* There is no instruction that can validate 8-byte alignment.  */
        tcg_debug_assert(ret >= 16);
        tcg_out_vex_modrm_offset(s, OPC_MOVQ_VqWq, ret, 0, arg1, arg2);
        break;
    case TCG_TYPE_V128:
        /*
         * The gvec infrastructure asserts that v128 vector loads
         * and stores use a 16-byte aligned offset.  Validate that the
         * final pointer is aligned by using an insn that will SIGSEGV.
         */
        tcg_debug_assert(ret >= 16);
        tcg_out_vex_modrm_offset(s, OPC_MOVDQA_VxWx, ret, 0, arg1, arg2);
        break;
    case TCG_TYPE_V256:
        /*
         * The gvec infrastructure only requires 16-byte alignment,
         * so here we must use an unaligned load.
         */
        tcg_debug_assert(ret >= 16);
        tcg_out_vex_modrm_offset(s, OPC_MOVDQU_VxWx | P_VEXL,
                                 ret, 0, arg1, arg2);
        break;
    default:
        g_assert_not_reached();
    }
}
static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    switch (type) {
    case TCG_TYPE_I32:
        if (arg < 16) {
            tcg_out_modrm_offset(s, OPC_MOVL_EvGv, arg, arg1, arg2);
        } else {
            tcg_out_vex_modrm_offset(s, OPC_MOVD_EyVy, arg, 0, arg1, arg2);
        }
        break;
    case TCG_TYPE_I64:
        if (arg < 16) {
            tcg_out_modrm_offset(s, OPC_MOVL_EvGv | P_REXW, arg, arg1, arg2);
            break;
        }
        /* FALLTHRU */
    case TCG_TYPE_V64:
        /* There is no instruction that can validate 8-byte alignment.  */
        tcg_debug_assert(arg >= 16);
        tcg_out_vex_modrm_offset(s, OPC_MOVQ_WqVq, arg, 0, arg1, arg2);
        break;
    case TCG_TYPE_V128:
        /*
         * The gvec infrastructure asserts that v128 vector loads
         * and stores use a 16-byte aligned offset.  Validate that the
         * final pointer is aligned by using an insn that will SIGSEGV.
         */
        tcg_debug_assert(arg >= 16);
        tcg_out_vex_modrm_offset(s, OPC_MOVDQA_WxVx, arg, 0, arg1, arg2);
        break;
    case TCG_TYPE_V256:
        /*
         * The gvec infrastructure only requires 16-byte alignment,
         * so here we must use an unaligned store.
         */
        tcg_debug_assert(arg >= 16);
        tcg_out_vex_modrm_offset(s, OPC_MOVDQU_WxVx | P_VEXL,
                                 arg, 0, arg1, arg2);
        break;
    default:
        g_assert_not_reached();
    }
}
static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs)
{
    int rexw = 0;
    if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I64) {
        if (val != (int32_t)val) {
            return false;
        }
        rexw = P_REXW;
    } else if (type != TCG_TYPE_I32) {
        return false;
    }
    tcg_out_modrm_offset(s, OPC_MOVL_EvIz | rexw, 0, base, ofs);
    tcg_out32(s, val);
    return true;
}
static void tcg_out_shifti(TCGContext *s, int subopc, int reg, int count)
{
    /* Propagate an opcode prefix, such as P_DATA16.  */
    int ext = subopc & ~0x7;
    subopc &= 0x7;

    if (count == 1) {
        tcg_out_modrm(s, OPC_SHIFT_1 + ext, subopc, reg);
    } else {
        tcg_out_modrm(s, OPC_SHIFT_Ib + ext, subopc, reg);
        tcg_out8(s, count);
    }
}
static inline void tcg_out_bswap32(TCGContext *s, int reg)
{
    tcg_out_opc(s, OPC_BSWAP + LOWREGMASK(reg), 0, reg, 0);
}
static inline void tcg_out_rolw_8(TCGContext *s, int reg)
{
    tcg_out_shifti(s, SHIFT_ROL + P_DATA16, reg, 8);
}
static inline void tcg_out_ext8u(TCGContext *s, int dest, int src)
{
    /* movzbl */
    tcg_debug_assert(src < 4 || TCG_TARGET_REG_BITS == 64);
    tcg_out_modrm(s, OPC_MOVZBL + P_REXB_RM, dest, src);
}
static void tcg_out_ext8s(TCGContext *s, int dest, int src, int rexw)
{
    /* movsbl */
    tcg_debug_assert(src < 4 || TCG_TARGET_REG_BITS == 64);
    tcg_out_modrm(s, OPC_MOVSBL + P_REXB_RM + rexw, dest, src);
}
static inline void tcg_out_ext16u(TCGContext *s, int dest, int src)
{
    /* movzwl */
    tcg_out_modrm(s, OPC_MOVZWL, dest, src);
}
static inline void tcg_out_ext16s(TCGContext *s, int dest, int src, int rexw)
{
    /* movsw[lq] */
    tcg_out_modrm(s, OPC_MOVSWL + rexw, dest, src);
}
static inline void tcg_out_ext32u(TCGContext *s, int dest, int src)
{
    /* 32-bit mov zero extends.  */
    tcg_out_modrm(s, OPC_MOVL_GvEv, dest, src);
}
static inline void tcg_out_ext32s(TCGContext *s, int dest, int src)
{
    tcg_out_modrm(s, OPC_MOVSLQ, dest, src);
}
static inline void tcg_out_bswap64(TCGContext *s, int reg)
{
    tcg_out_opc(s, OPC_BSWAP + P_REXW + LOWREGMASK(reg), 0, reg, 0);
}
static void tgen_arithi(TCGContext *s, int c, int r0,
                        tcg_target_long val, int cf)
{
    int rexw = 0;

    if (TCG_TARGET_REG_BITS == 64) {
        rexw = c & -8;
        c &= 7;
    }

    /* ??? While INC is 2 bytes shorter than ADDL $1, they also induce
       partial flags update stalls on Pentium4 and are not recommended
       by current Intel optimization manuals.  */
    if (!cf && (c == ARITH_ADD || c == ARITH_SUB) && (val == 1 || val == -1)) {
        int is_inc = (c == ARITH_ADD) ^ (val < 0);
        if (TCG_TARGET_REG_BITS == 64) {
            /* The single-byte increment encodings are re-tasked as the
               REX prefixes.  Use the MODRM encoding.  */
            tcg_out_modrm(s, OPC_GRP5 + rexw,
                          (is_inc ? EXT5_INC_Ev : EXT5_DEC_Ev), r0);
        } else {
            tcg_out8(s, (is_inc ? OPC_INC_r32 : OPC_DEC_r32) + r0);
        }
        return;
    }

    if (c == ARITH_AND) {
        if (TCG_TARGET_REG_BITS == 64) {
            if (val == 0xffffffffu) {
                tcg_out_ext32u(s, r0, r0);
                return;
            }
            if (val == (uint32_t)val) {
                /* AND with no high bits set can use a 32-bit operation.  */
                rexw = 0;
            }
        }
        if (val == 0xffu && (r0 < 4 || TCG_TARGET_REG_BITS == 64)) {
            tcg_out_ext8u(s, r0, r0);
            return;
        }
        if (val == 0xffffu) {
            tcg_out_ext16u(s, r0, r0);
            return;
        }
    }

    if (val == (int8_t)val) {
        tcg_out_modrm(s, OPC_ARITH_EvIb + rexw, c, r0);
        tcg_out8(s, val);
        return;
    }
    if (rexw == 0 || val == (int32_t)val) {
        tcg_out_modrm(s, OPC_ARITH_EvIz + rexw, c, r0);
        tcg_out32(s, val);
        return;
    }

    tcg_abort();
}
static void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
{
    if (val != 0) {
        tgen_arithi(s, ARITH_ADD + P_REXW, reg, val, 0);
    }
}
/* Use SMALL != 0 to force a short forward branch.  */
static void tcg_out_jxx(TCGContext *s, int opc, TCGLabel *l, int small)
{
    int32_t val, val1;

    if (l->has_value) {
        val = tcg_pcrel_diff(s, l->u.value_ptr);
        val1 = val - 2;
        if ((int8_t)val1 == val1) {
            if (opc == -1) {
                tcg_out8(s, OPC_JMP_short);
            } else {
                tcg_out8(s, OPC_JCC_short + opc);
            }
            tcg_out8(s, val1);
        } else {
            if (small) {
                tcg_abort();
            }
            if (opc == -1) {
                tcg_out8(s, OPC_JMP_long);
                tcg_out32(s, val - 5);
            } else {
                tcg_out_opc(s, OPC_JCC_long + opc, 0, 0, 0);
                tcg_out32(s, val - 6);
            }
        }
    } else if (small) {
        if (opc == -1) {
            tcg_out8(s, OPC_JMP_short);
        } else {
            tcg_out8(s, OPC_JCC_short + opc);
        }
        tcg_out_reloc(s, s->code_ptr, R_386_PC8, l, -1);
        s->code_ptr += 1;
    } else {
        if (opc == -1) {
            tcg_out8(s, OPC_JMP_long);
        } else {
            tcg_out_opc(s, OPC_JCC_long + opc, 0, 0, 0);
        }
        tcg_out_reloc(s, s->code_ptr, R_386_PC32, l, -4);
        s->code_ptr += 4;
    }
}
static void tcg_out_cmp(TCGContext *s, TCGArg arg1, TCGArg arg2,
                        int const_arg2, int rexw)
{
    if (const_arg2) {
        if (arg2 == 0) {
            /* test r, r */
            tcg_out_modrm(s, OPC_TESTL + rexw, arg1, arg1);
        } else {
            tgen_arithi(s, ARITH_CMP + rexw, arg1, arg2, 0);
        }
    } else {
        tgen_arithr(s, ARITH_CMP + rexw, arg1, arg2);
    }
}
static void tcg_out_brcond32(TCGContext *s, TCGCond cond,
                             TCGArg arg1, TCGArg arg2, int const_arg2,
                             TCGLabel *label, int small)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2, 0);
    tcg_out_jxx(s, tcg_cond_to_jcc[cond], label, small);
}
#if TCG_TARGET_REG_BITS == 64
static void tcg_out_brcond64(TCGContext *s, TCGCond cond,
                             TCGArg arg1, TCGArg arg2, int const_arg2,
                             TCGLabel *label, int small)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2, P_REXW);
    tcg_out_jxx(s, tcg_cond_to_jcc[cond], label, small);
}
#else
/* XXX: we implement it at the target level to avoid having to
   handle cross basic blocks temporaries */
static void tcg_out_brcond2(TCGContext *s, const TCGArg *args,
                            const int *const_args, int small)
{
    TCGLabel *label_next = gen_new_label();
    TCGLabel *label_this = arg_label(args[5]);

    switch(args[4]) {
    case TCG_COND_EQ:
        tcg_out_brcond32(s, TCG_COND_NE, args[0], args[2], const_args[2],
                         label_next, 1);
        tcg_out_brcond32(s, TCG_COND_EQ, args[1], args[3], const_args[3],
                         label_this, small);
        break;
    case TCG_COND_NE:
        tcg_out_brcond32(s, TCG_COND_NE, args[0], args[2], const_args[2],
                         label_this, small);
        tcg_out_brcond32(s, TCG_COND_NE, args[1], args[3], const_args[3],
                         label_this, small);
        break;
    case TCG_COND_LT:
        tcg_out_brcond32(s, TCG_COND_LT, args[1], args[3], const_args[3],
                         label_this, small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_LTU, args[0], args[2], const_args[2],
                         label_this, small);
        break;
    case TCG_COND_LE:
        tcg_out_brcond32(s, TCG_COND_LT, args[1], args[3], const_args[3],
                         label_this, small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_LEU, args[0], args[2], const_args[2],
                         label_this, small);
        break;
    case TCG_COND_GT:
        tcg_out_brcond32(s, TCG_COND_GT, args[1], args[3], const_args[3],
                         label_this, small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_GTU, args[0], args[2], const_args[2],
                         label_this, small);
        break;
    case TCG_COND_GE:
        tcg_out_brcond32(s, TCG_COND_GT, args[1], args[3], const_args[3],
                         label_this, small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_GEU, args[0], args[2], const_args[2],
                         label_this, small);
        break;
    case TCG_COND_LTU:
        tcg_out_brcond32(s, TCG_COND_LTU, args[1], args[3], const_args[3],
                         label_this, small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_LTU, args[0], args[2], const_args[2],
                         label_this, small);
        break;
    case TCG_COND_LEU:
        tcg_out_brcond32(s, TCG_COND_LTU, args[1], args[3], const_args[3],
                         label_this, small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_LEU, args[0], args[2], const_args[2],
                         label_this, small);
        break;
    case TCG_COND_GTU:
        tcg_out_brcond32(s, TCG_COND_GTU, args[1], args[3], const_args[3],
                         label_this, small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_GTU, args[0], args[2], const_args[2],
                         label_this, small);
        break;
    case TCG_COND_GEU:
        tcg_out_brcond32(s, TCG_COND_GTU, args[1], args[3], const_args[3],
                         label_this, small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_GEU, args[0], args[2], const_args[2],
                         label_this, small);
        break;
    default:
        tcg_abort();
    }
    tcg_out_label(s, label_next, s->code_ptr);
}
#endif
static void tcg_out_setcond32(TCGContext *s, TCGCond cond, TCGArg dest,
                              TCGArg arg1, TCGArg arg2, int const_arg2)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2, 0);
    tcg_out_modrm(s, OPC_SETCC | tcg_cond_to_jcc[cond], 0, dest);
    tcg_out_ext8u(s, dest, dest);
}
#if TCG_TARGET_REG_BITS == 64
static void tcg_out_setcond64(TCGContext *s, TCGCond cond, TCGArg dest,
                              TCGArg arg1, TCGArg arg2, int const_arg2)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2, P_REXW);
    tcg_out_modrm(s, OPC_SETCC | tcg_cond_to_jcc[cond], 0, dest);
    tcg_out_ext8u(s, dest, dest);
}
#else
static void tcg_out_setcond2(TCGContext *s, const TCGArg *args,
                             const int *const_args)
{
    TCGArg new_args[6];
    TCGLabel *label_true, *label_over;

    memcpy(new_args, args+1, 5*sizeof(TCGArg));

    if (args[0] == args[1] || args[0] == args[2]
        || (!const_args[3] && args[0] == args[3])
        || (!const_args[4] && args[0] == args[4])) {
        /* When the destination overlaps with one of the argument
           registers, don't do anything tricky.  */
        label_true = gen_new_label();
        label_over = gen_new_label();

        new_args[5] = label_arg(label_true);
        tcg_out_brcond2(s, new_args, const_args+1, 1);

        tcg_out_movi(s, TCG_TYPE_I32, args[0], 0);
        tcg_out_jxx(s, JCC_JMP, label_over, 1);
        tcg_out_label(s, label_true, s->code_ptr);

        tcg_out_movi(s, TCG_TYPE_I32, args[0], 1);
        tcg_out_label(s, label_over, s->code_ptr);
    } else {
        /* When the destination does not overlap one of the arguments,
           clear the destination first, jump if cond false, and emit an
           increment in the true case.  This results in smaller code.  */
        tcg_out_movi(s, TCG_TYPE_I32, args[0], 0);

        label_over = gen_new_label();
        new_args[4] = tcg_invert_cond(new_args[4]);
        new_args[5] = label_arg(label_over);
        tcg_out_brcond2(s, new_args, const_args+1, 1);

        tgen_arithi(s, ARITH_ADD, args[0], 1, 0);
        tcg_out_label(s, label_over, s->code_ptr);
    }
}
#endif
static void tcg_out_cmov(TCGContext *s, TCGCond cond, int rexw,
                         TCGReg dest, TCGReg v1)
{
    if (have_cmov) {
        tcg_out_modrm(s, OPC_CMOVCC | tcg_cond_to_jcc[cond] | rexw, dest, v1);
    } else {
        TCGLabel *over = gen_new_label();
        tcg_out_jxx(s, tcg_cond_to_jcc[tcg_invert_cond(cond)], over, 1);
        tcg_out_mov(s, TCG_TYPE_I32, dest, v1);
        tcg_out_label(s, over, s->code_ptr);
    }
}
static void tcg_out_movcond32(TCGContext *s, TCGCond cond, TCGReg dest,
                              TCGReg c1, TCGArg c2, int const_c2,
                              TCGReg v1)
{
    tcg_out_cmp(s, c1, c2, const_c2, 0);
    tcg_out_cmov(s, cond, 0, dest, v1);
}
#if TCG_TARGET_REG_BITS == 64
static void tcg_out_movcond64(TCGContext *s, TCGCond cond, TCGReg dest,
                              TCGReg c1, TCGArg c2, int const_c2,
                              TCGReg v1)
{
    tcg_out_cmp(s, c1, c2, const_c2, P_REXW);
    tcg_out_cmov(s, cond, P_REXW, dest, v1);
}
#endif
static void tcg_out_ctz(TCGContext *s, int rexw, TCGReg dest, TCGReg arg1,
                        TCGArg arg2, bool const_a2)
{
    if (have_bmi1) {
        tcg_out_modrm(s, OPC_TZCNT + rexw, dest, arg1);
        if (const_a2) {
            tcg_debug_assert(arg2 == (rexw ? 64 : 32));
        } else {
            tcg_debug_assert(dest != arg2);
            tcg_out_cmov(s, TCG_COND_LTU, rexw, dest, arg2);
        }
    } else {
        tcg_debug_assert(dest != arg2);
        tcg_out_modrm(s, OPC_BSF + rexw, dest, arg1);
        tcg_out_cmov(s, TCG_COND_EQ, rexw, dest, arg2);
    }
}
static void tcg_out_clz(TCGContext *s, int rexw, TCGReg dest, TCGReg arg1,
                        TCGArg arg2, bool const_a2)
{
    if (have_lzcnt) {
        tcg_out_modrm(s, OPC_LZCNT + rexw, dest, arg1);
        if (const_a2) {
            tcg_debug_assert(arg2 == (rexw ? 64 : 32));
        } else {
            tcg_debug_assert(dest != arg2);
            tcg_out_cmov(s, TCG_COND_LTU, rexw, dest, arg2);
        }
    } else {
        tcg_debug_assert(!const_a2);
        tcg_debug_assert(dest != arg1);
        tcg_debug_assert(dest != arg2);

        /* Recall that the output of BSR is the index not the count.  */
        tcg_out_modrm(s, OPC_BSR + rexw, dest, arg1);
        tgen_arithi(s, ARITH_XOR + rexw, dest, rexw ? 63 : 31, 0);

        /* Since we have destroyed the flags from BSR, we have to re-test.  */
        tcg_out_cmp(s, arg1, 0, 1, rexw);
        tcg_out_cmov(s, TCG_COND_EQ, rexw, dest, arg2);
    }
}
static void tcg_out_branch(TCGContext *s, int call, tcg_insn_unit *dest)
{
    intptr_t disp = tcg_pcrel_diff(s, dest) - 5;

    if (disp == (int32_t)disp) {
        tcg_out_opc(s, call ? OPC_CALL_Jz : OPC_JMP_long, 0, 0, 0);
        tcg_out32(s, disp);
    } else {
        /* rip-relative addressing into the constant pool.
           This is 6 + 8 = 14 bytes, as compared to using an
           immediate load 10 + 6 = 16 bytes, plus we may
           be able to re-use the pool constant for more calls.  */
        tcg_out_opc(s, OPC_GRP5, 0, 0, 0);
        tcg_out8(s, (call ? EXT5_CALLN_Ev : EXT5_JMPN_Ev) << 3 | 5);
        new_pool_label(s, (uintptr_t)dest, R_386_PC32, s->code_ptr, -4);
        tcg_out32(s, 0);
    }
}
static inline void tcg_out_call(TCGContext *s, tcg_insn_unit *dest)
{
    tcg_out_branch(s, 1, dest);
}
static void tcg_out_jmp(TCGContext *s, tcg_insn_unit *dest)
{
    tcg_out_branch(s, 0, dest);
}
static void tcg_out_nopn(TCGContext *s, int n)
{
    int i;
    /* Emit 1 or 2 operand size prefixes for the standard one byte nop,
     * "xchg %eax,%eax", forming "xchg %ax,%ax". All cores accept the
     * duplicate prefix, and all of the interesting recent cores can
     * decode and discard the duplicates in a single cycle.
     */
    tcg_debug_assert(n >= 1);
    for (i = 1; i < n; ++i) {
        tcg_out8(s, 0x66);
    }
    tcg_out8(s, 0x90);
}
#if defined(CONFIG_SOFTMMU)
#include "../tcg-ldst.inc.c"
/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
 *                                     int mmu_idx, uintptr_t ra)
 */
static void * const qemu_ld_helpers[16] = {
    [MO_UB]   = helper_ret_ldub_mmu,
    [MO_LEUW] = helper_le_lduw_mmu,
    [MO_LEUL] = helper_le_ldul_mmu,
    [MO_LEQ]  = helper_le_ldq_mmu,
    [MO_BEUW] = helper_be_lduw_mmu,
    [MO_BEUL] = helper_be_ldul_mmu,
    [MO_BEQ]  = helper_be_ldq_mmu,
};
/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
 *                                     uintxx_t val, int mmu_idx, uintptr_t ra)
 */
static void * const qemu_st_helpers[16] = {
    [MO_UB]   = helper_ret_stb_mmu,
    [MO_LEUW] = helper_le_stw_mmu,
    [MO_LEUL] = helper_le_stl_mmu,
    [MO_LEQ]  = helper_le_stq_mmu,
    [MO_BEUW] = helper_be_stw_mmu,
    [MO_BEUL] = helper_be_stl_mmu,
    [MO_BEQ]  = helper_be_stq_mmu,
};
/* Perform the TLB load and compare.

   Inputs:
   ADDRLO and ADDRHI contain the low and high part of the address.

   MEM_INDEX and S_BITS are the memory context and log2 size of the load.

   WHICH is the offset into the CPUTLBEntry structure of the slot to read.
   This should be offsetof addr_read or addr_write.

   Outputs:
   LABEL_PTRS is filled with 1 (32-bit addresses) or 2 (64-bit addresses)
   positions of the displacements of forward jumps to the TLB miss case.

   Second argument register is loaded with the low part of the address.
   In the TLB hit case, it has been adjusted as indicated by the TLB
   and so is a host address.  In the TLB miss case, it continues to
   hold a guest address.

   First argument register is clobbered.  */
static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
                                    int mem_index, MemOp opc,
                                    tcg_insn_unit **label_ptr, int which)
{
    const TCGReg r0 = TCG_REG_L0;
    const TCGReg r1 = TCG_REG_L1;
    TCGType ttype = TCG_TYPE_I32;
    TCGType tlbtype = TCG_TYPE_I32;
    int trexw = 0, hrexw = 0, tlbrexw = 0;
    unsigned a_bits = get_alignment_bits(opc);
    unsigned s_bits = opc & MO_SIZE;
    unsigned a_mask = (1 << a_bits) - 1;
    unsigned s_mask = (1 << s_bits) - 1;
    target_ulong tlb_mask;

    if (TCG_TARGET_REG_BITS == 64) {
        if (TARGET_LONG_BITS == 64) {
            ttype = TCG_TYPE_I64;
            trexw = P_REXW;
        }
        if (TCG_TYPE_PTR == TCG_TYPE_I64) {
            hrexw = P_REXW;
            if (TARGET_PAGE_BITS + CPU_TLB_DYN_MAX_BITS > 32) {
                tlbtype = TCG_TYPE_I64;
                tlbrexw = P_REXW;
            }
        }
    }

    tcg_out_mov(s, tlbtype, r0, addrlo);
    tcg_out_shifti(s, SHIFT_SHR + tlbrexw, r0,
                   TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);

    tcg_out_modrm_offset(s, OPC_AND_GvEv + trexw, r0, TCG_AREG0,
                         TLB_MASK_TABLE_OFS(mem_index) +
                         offsetof(CPUTLBDescFast, mask));

    tcg_out_modrm_offset(s, OPC_ADD_GvEv + hrexw, r0, TCG_AREG0,
                         TLB_MASK_TABLE_OFS(mem_index) +
                         offsetof(CPUTLBDescFast, table));

    /* If the required alignment is at least as large as the access, simply
       copy the address and mask.  For lesser alignments, check that we don't
       cross pages for the complete access.  */
    if (a_bits >= s_bits) {
        tcg_out_mov(s, ttype, r1, addrlo);
    } else {
        tcg_out_modrm_offset(s, OPC_LEA + trexw, r1, addrlo, s_mask - a_mask);
    }
    tlb_mask = (target_ulong)TARGET_PAGE_MASK | a_mask;
    tgen_arithi(s, ARITH_AND + trexw, r1, tlb_mask, 0);

    /* cmp 0(r0), r1 */
    tcg_out_modrm_offset(s, OPC_CMP_GvEv + trexw, r1, r0, which);

    /* Prepare for both the fast path add of the tlb addend, and the slow
       path function argument setup.  */
    tcg_out_mov(s, ttype, r1, addrlo);

    /* jne slow_path */
    tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
    label_ptr[0] = s->code_ptr;
    s->code_ptr += 4;

    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
        /* cmp 4(r0), addrhi */
        tcg_out_modrm_offset(s, OPC_CMP_GvEv, addrhi, r0, which + 4);

        /* jne slow_path */
        tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
        label_ptr[1] = s->code_ptr;
        s->code_ptr += 4;
    }

    /* TLB Hit.  */

    /* add addend(r0), r1 */
    tcg_out_modrm_offset(s, OPC_ADD_GvEv + hrexw, r1, r0,
                         offsetof(CPUTLBEntry, addend));
}
/*
 * Record the context of a call to the out of line helper code for the slow path
 * for a load or store, so that we can later generate the correct helper code
 */
static void add_qemu_ldst_label(TCGContext *s, bool is_ld, bool is_64,
                                TCGMemOpIdx oi,
                                TCGReg datalo, TCGReg datahi,
                                TCGReg addrlo, TCGReg addrhi,
                                tcg_insn_unit *raddr,
                                tcg_insn_unit **label_ptr)
{
    TCGLabelQemuLdst *label = new_ldst_label(s);

    label->is_ld = is_ld;
    label->oi = oi;
    label->type = is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
    label->datalo_reg = datalo;
    label->datahi_reg = datahi;
    label->addrlo_reg = addrlo;
    label->addrhi_reg = addrhi;
    label->raddr = raddr;
    label->label_ptr[0] = label_ptr[0];
    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
        label->label_ptr[1] = label_ptr[1];
    }
}
/*
 * Generate code for the slow path for a load at the end of block
 */
static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    TCGMemOpIdx oi = l->oi;
    MemOp opc = get_memop(oi);
    TCGReg data_reg;
    tcg_insn_unit **label_ptr = &l->label_ptr[0];
    int rexw = (l->type == TCG_TYPE_I64 ? P_REXW : 0);

    /* resolve label address */
    tcg_patch32(label_ptr[0], s->code_ptr - label_ptr[0] - 4);
    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
        tcg_patch32(label_ptr[1], s->code_ptr - label_ptr[1] - 4);
    }

    if (TCG_TARGET_REG_BITS == 32) {
        int ofs = 0;

        tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs);
        ofs += 4;

        tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs);
        ofs += 4;

        if (TARGET_LONG_BITS == 64) {
            tcg_out_st(s, TCG_TYPE_I32, l->addrhi_reg, TCG_REG_ESP, ofs);
            ofs += 4;
        }

        tcg_out_sti(s, TCG_TYPE_I32, oi, TCG_REG_ESP, ofs);
        ofs += 4;

        tcg_out_sti(s, TCG_TYPE_PTR, (uintptr_t)l->raddr, TCG_REG_ESP, ofs);
    } else {
        tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
        /* The second argument is already loaded with addrlo.  */
        tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[2], oi);
        tcg_out_movi(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[3],
                     (uintptr_t)l->raddr);
    }

    tcg_out_call(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SIZE)]);

    data_reg = l->datalo_reg;
    switch (opc & MO_SSIZE) {
    case MO_SB:
        tcg_out_ext8s(s, data_reg, TCG_REG_EAX, rexw);
        break;
    case MO_SW:
        tcg_out_ext16s(s, data_reg, TCG_REG_EAX, rexw);
        break;
#if TCG_TARGET_REG_BITS == 64
    case MO_SL:
        tcg_out_ext32s(s, data_reg, TCG_REG_EAX);
        break;
#endif
    case MO_UB:
    case MO_UW:
        /* Note that the helpers have zero-extended to tcg_target_long.  */
    case MO_UL:
        tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX);
        break;
    case MO_Q:
        if (TCG_TARGET_REG_BITS == 64) {
            tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_RAX);
        } else if (data_reg == TCG_REG_EDX) {
            /* xchg %edx, %eax */
            tcg_out_opc(s, OPC_XCHG_ax_r32 + TCG_REG_EDX, 0, 0, 0);
            tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_EAX);
        } else {
            tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX);
            tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_EDX);
        }
        break;
    default:
        tcg_abort();
    }

    /* Jump to the code corresponding to the next IR of the qemu_ld.  */
    tcg_out_jmp(s, l->raddr);
    return true;
}
/*
 * Generate code for the slow path for a store at the end of block
 */
static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    TCGMemOpIdx oi = l->oi;
    MemOp opc = get_memop(oi);
    MemOp s_bits = opc & MO_SIZE;
    tcg_insn_unit **label_ptr = &l->label_ptr[0];
    TCGReg retaddr;

    /* resolve label address */
    tcg_patch32(label_ptr[0], s->code_ptr - label_ptr[0] - 4);
    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
        tcg_patch32(label_ptr[1], s->code_ptr - label_ptr[1] - 4);
    }

    if (TCG_TARGET_REG_BITS == 32) {
        int ofs = 0;

        tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs);
        ofs += 4;

        tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs);
        ofs += 4;

        if (TARGET_LONG_BITS == 64) {
            tcg_out_st(s, TCG_TYPE_I32, l->addrhi_reg, TCG_REG_ESP, ofs);
            ofs += 4;
        }

        tcg_out_st(s, TCG_TYPE_I32, l->datalo_reg, TCG_REG_ESP, ofs);
        ofs += 4;

        if (s_bits == MO_64) {
            tcg_out_st(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_ESP, ofs);
            ofs += 4;
        }

        tcg_out_sti(s, TCG_TYPE_I32, oi, TCG_REG_ESP, ofs);
        ofs += 4;

        retaddr = TCG_REG_EAX;
        tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr);
        tcg_out_st(s, TCG_TYPE_PTR, retaddr, TCG_REG_ESP, ofs);
    } else {
        tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
        /* The second argument is already loaded with addrlo.  */
        tcg_out_mov(s, (s_bits == MO_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
                    tcg_target_call_iarg_regs[2], l->datalo_reg);
        tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3], oi);

        if (ARRAY_SIZE(tcg_target_call_iarg_regs) > 4) {
            retaddr = tcg_target_call_iarg_regs[4];
            tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr);
        } else {
            retaddr = TCG_REG_RAX;
            tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr);
            tcg_out_st(s, TCG_TYPE_PTR, retaddr, TCG_REG_ESP,
                       TCG_TARGET_CALL_STACK_OFFSET);
        }
    }

    /* "Tail call" to the helper, with the return address back inline.  */
    tcg_out_push(s, retaddr);
    tcg_out_jmp(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);
    return true;
}
#elif TCG_TARGET_REG_BITS == 32
# define x86_guest_base_seg     0
# define x86_guest_base_index   -1
# define x86_guest_base_offset  guest_base
#else
static int x86_guest_base_seg;
static int x86_guest_base_index = -1;
static int32_t x86_guest_base_offset;
# if defined(__x86_64__) && defined(__linux__)
#  include <asm/prctl.h>
#  include <sys/prctl.h>
int arch_prctl(int code, unsigned long addr);
static inline int setup_guest_base_seg(void)
{
    if (arch_prctl(ARCH_SET_GS, guest_base) == 0) {
        return P_GS;
    }
    return 0;
}
# elif defined (__FreeBSD__) || defined (__FreeBSD_kernel__)
#  include <machine/sysarch.h>
static inline int setup_guest_base_seg(void)
{
    if (sysarch(AMD64_SET_GSBASE, &guest_base) == 0) {
        return P_GS;
    }
    return 0;
}
# else
static inline int setup_guest_base_seg(void)
{
    return 0;
}
# endif
#endif /* SOFTMMU */
static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
                                   TCGReg base, int index, intptr_t ofs,
                                   int seg, bool is64, MemOp memop)
{
    const MemOp real_bswap = memop & MO_BSWAP;
    MemOp bswap = real_bswap;
    int rexw = is64 * P_REXW;
    int movop = OPC_MOVL_GvEv;

    if (have_movbe && real_bswap) {
        bswap = 0;
        movop = OPC_MOVBE_GyMy;
    }

    switch (memop & MO_SSIZE) {
    case MO_UB:
        tcg_out_modrm_sib_offset(s, OPC_MOVZBL + seg, datalo,
                                 base, index, 0, ofs);
        break;
    case MO_SB:
        tcg_out_modrm_sib_offset(s, OPC_MOVSBL + rexw + seg, datalo,
                                 base, index, 0, ofs);
        break;
    case MO_UW:
        tcg_out_modrm_sib_offset(s, OPC_MOVZWL + seg, datalo,
                                 base, index, 0, ofs);
        if (real_bswap) {
            tcg_out_rolw_8(s, datalo);
        }
        break;
    case MO_SW:
        if (real_bswap) {
            if (have_movbe) {
                tcg_out_modrm_sib_offset(s, OPC_MOVBE_GyMy + P_DATA16 + seg,
                                         datalo, base, index, 0, ofs);
            } else {
                tcg_out_modrm_sib_offset(s, OPC_MOVZWL + seg, datalo,
                                         base, index, 0, ofs);
                tcg_out_rolw_8(s, datalo);
            }
            tcg_out_modrm(s, OPC_MOVSWL + rexw, datalo, datalo);
        } else {
            tcg_out_modrm_sib_offset(s, OPC_MOVSWL + rexw + seg,
                                     datalo, base, index, 0, ofs);
        }
        break;
    case MO_UL:
        tcg_out_modrm_sib_offset(s, movop + seg, datalo, base, index, 0, ofs);
        if (bswap) {
            tcg_out_bswap32(s, datalo);
        }
        break;
#if TCG_TARGET_REG_BITS == 64
    case MO_SL:
        if (real_bswap) {
            tcg_out_modrm_sib_offset(s, movop + seg, datalo,
                                     base, index, 0, ofs);
            if (bswap) {
                tcg_out_bswap32(s, datalo);
            }
            tcg_out_ext32s(s, datalo, datalo);
        } else {
            tcg_out_modrm_sib_offset(s, OPC_MOVSLQ + seg, datalo,
                                     base, index, 0, ofs);
        }
        break;
#endif
    case MO_Q:
        if (TCG_TARGET_REG_BITS == 64) {
            tcg_out_modrm_sib_offset(s, movop + P_REXW + seg, datalo,
                                     base, index, 0, ofs);
            if (bswap) {
                tcg_out_bswap64(s, datalo);
            }
        } else {
            if (real_bswap) {
                int t = datalo;
                datalo = datahi;
                datahi = t;
            }
            if (base != datalo) {
                tcg_out_modrm_sib_offset(s, movop + seg, datalo,
                                         base, index, 0, ofs);
                tcg_out_modrm_sib_offset(s, movop + seg, datahi,
                                         base, index, 0, ofs + 4);
            } else {
                tcg_out_modrm_sib_offset(s, movop + seg, datahi,
                                         base, index, 0, ofs + 4);
                tcg_out_modrm_sib_offset(s, movop + seg, datalo,
                                         base, index, 0, ofs);
            }
            if (bswap) {
                tcg_out_bswap32(s, datalo);
                tcg_out_bswap32(s, datahi);
            }
        }
        break;
    default:
        tcg_abort();
    }
}
/* XXX: qemu_ld and qemu_st could be modified to clobber only EDX and
   EAX.  It will be useful once fixed-register globals are less
   common.  */
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
{
    TCGReg datalo, datahi, addrlo;
    TCGReg addrhi __attribute__((unused));
    TCGMemOpIdx oi;
    MemOp opc;
#if defined(CONFIG_SOFTMMU)
    int mem_index;
    tcg_insn_unit *label_ptr[2];
#endif

    datalo = *args++;
    datahi = (TCG_TARGET_REG_BITS == 32 && is64 ? *args++ : 0);
    addrlo = *args++;
    addrhi = (TARGET_LONG_BITS > TCG_TARGET_REG_BITS ? *args++ : 0);
    oi = *args++;
    opc = get_memop(oi);

#if defined(CONFIG_SOFTMMU)
    mem_index = get_mmuidx(oi);

    tcg_out_tlb_load(s, addrlo, addrhi, mem_index, opc,
                     label_ptr, offsetof(CPUTLBEntry, addr_read));

    /* TLB Hit.  */
    tcg_out_qemu_ld_direct(s, datalo, datahi, TCG_REG_L1, -1, 0, 0, is64, opc);

    /* Record the current context of a load into ldst label */
    add_qemu_ldst_label(s, true, is64, oi, datalo, datahi, addrlo, addrhi,
                        s->code_ptr, label_ptr);
#else
    tcg_out_qemu_ld_direct(s, datalo, datahi, addrlo, x86_guest_base_index,
                           x86_guest_base_offset, x86_guest_base_seg,
                           is64, opc);
#endif
}
*s
, TCGReg datalo
, TCGReg datahi
,
2139 TCGReg base
, int index
, intptr_t ofs
,
2140 int seg
, MemOp memop
)
2142 /* ??? Ideally we wouldn't need a scratch register. For user-only,
2143 we could perform the bswap twice to restore the original value
2144 instead of moving to the scratch. But as it is, the L constraint
2145 means that TCG_REG_L0 is definitely free here. */
2146 const TCGReg scratch
= TCG_REG_L0
;
2147 const MemOp real_bswap
= memop
& MO_BSWAP
;
2148 MemOp bswap
= real_bswap
;
2149 int movop
= OPC_MOVL_EvGv
;
2151 if (have_movbe
&& real_bswap
) {
2153 movop
= OPC_MOVBE_MyGy
;
2156 switch (memop
& MO_SIZE
) {
2158 /* In 32-bit mode, 8-bit stores can only happen from [abcd]x.
2159 Use the scratch register if necessary. */
2160 if (TCG_TARGET_REG_BITS
== 32 && datalo
>= 4) {
2161 tcg_out_mov(s
, TCG_TYPE_I32
, scratch
, datalo
);
2164 tcg_out_modrm_sib_offset(s
, OPC_MOVB_EvGv
+ P_REXB_R
+ seg
,
2165 datalo
, base
, index
, 0, ofs
);
2169 tcg_out_mov(s
, TCG_TYPE_I32
, scratch
, datalo
);
2170 tcg_out_rolw_8(s
, scratch
);
2173 tcg_out_modrm_sib_offset(s
, movop
+ P_DATA16
+ seg
, datalo
,
2174 base
, index
, 0, ofs
);
2178 tcg_out_mov(s
, TCG_TYPE_I32
, scratch
, datalo
);
2179 tcg_out_bswap32(s
, scratch
);
2182 tcg_out_modrm_sib_offset(s
, movop
+ seg
, datalo
, base
, index
, 0, ofs
);
2185 if (TCG_TARGET_REG_BITS
== 64) {
2187 tcg_out_mov(s
, TCG_TYPE_I64
, scratch
, datalo
);
2188 tcg_out_bswap64(s
, scratch
);
2191 tcg_out_modrm_sib_offset(s
, movop
+ P_REXW
+ seg
, datalo
,
2192 base
, index
, 0, ofs
);
2194 tcg_out_mov(s
, TCG_TYPE_I32
, scratch
, datahi
);
2195 tcg_out_bswap32(s
, scratch
);
2196 tcg_out_modrm_sib_offset(s
, OPC_MOVL_EvGv
+ seg
, scratch
,
2197 base
, index
, 0, ofs
);
2198 tcg_out_mov(s
, TCG_TYPE_I32
, scratch
, datalo
);
2199 tcg_out_bswap32(s
, scratch
);
2200 tcg_out_modrm_sib_offset(s
, OPC_MOVL_EvGv
+ seg
, scratch
,
2201 base
, index
, 0, ofs
+ 4);
2208 tcg_out_modrm_sib_offset(s
, movop
+ seg
, datalo
,
2209 base
, index
, 0, ofs
);
2210 tcg_out_modrm_sib_offset(s
, movop
+ seg
, datahi
,
2211 base
, index
, 0, ofs
+ 4);
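/*
 * Example of the MO_16 byte-swap path above: "rolw $8, %scratch"
 * rotates the 16-bit register by 8, i.e. exchanges its two bytes, so
 * the following 16-bit store writes the swapped value without needing
 * a full 32-bit bswap.
 */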
static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
{
    TCGReg datalo, datahi, addrlo;
    TCGReg addrhi __attribute__((unused));
    TCGMemOpIdx oi;
    MemOp opc;
#if defined(CONFIG_SOFTMMU)
    int mem_index;
    tcg_insn_unit *label_ptr[2];
#endif

    datalo = *args++;
    datahi = (TCG_TARGET_REG_BITS == 32 && is64 ? *args++ : 0);
    addrlo = *args++;
    addrhi = (TARGET_LONG_BITS > TCG_TARGET_REG_BITS ? *args++ : 0);
    oi = *args++;
    opc = get_memop(oi);

#if defined(CONFIG_SOFTMMU)
    mem_index = get_mmuidx(oi);

    tcg_out_tlb_load(s, addrlo, addrhi, mem_index, opc,
                     label_ptr, offsetof(CPUTLBEntry, addr_write));

    /* TLB Hit.  */
    tcg_out_qemu_st_direct(s, datalo, datahi, TCG_REG_L1, -1, 0, 0, opc);

    /* Record the current context of a store into ldst label */
    add_qemu_ldst_label(s, false, is64, oi, datalo, datahi, addrlo, addrhi,
                        s->code_ptr, label_ptr);
#else
    tcg_out_qemu_st_direct(s, datalo, datahi, addrlo, x86_guest_base_index,
                           x86_guest_base_offset, x86_guest_base_seg, opc);
#endif
}
static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
                              const TCGArg *args, const int *const_args)
{
    TCGArg a0, a1, a2;
    int c, const_a2, vexop, rexw = 0;

#if TCG_TARGET_REG_BITS == 64
# define OP_32_64(x) \
        case glue(glue(INDEX_op_, x), _i64): \
            rexw = P_REXW; /* FALLTHRU */ \
        case glue(glue(INDEX_op_, x), _i32)
#else
# define OP_32_64(x) \
        case glue(glue(INDEX_op_, x), _i32)
#endif

    /* Hoist the loads of the most common arguments.  */
    a0 = args[0];
    a1 = args[1];
    a2 = args[2];
    const_a2 = const_args[2];

    switch (opc) {
    case INDEX_op_exit_tb:
        /* Reuse the zeroing that exists for goto_ptr.  */
        if (a0 == 0) {
            tcg_out_jmp(s, s->code_gen_epilogue);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_EAX, a0);
            tcg_out_jmp(s, tb_ret_addr);
        }
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_insn_offset) {
            /* direct jump method */
            int gap;
            /* jump displacement must be aligned for atomic patching;
             * see if we need to add extra nops before jump
             */
            gap = tcg_pcrel_diff(s, QEMU_ALIGN_PTR_UP(s->code_ptr + 1, 4));
            if (gap != 1) {
                tcg_out_nopn(s, gap - 1);
            }
            tcg_out8(s, OPC_JMP_long); /* jmp im */
            s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
            tcg_out32(s, 0);
        } else {
            /* indirect jump method */
            tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, -1,
                                 (intptr_t)(s->tb_jmp_target_addr + a0));
        }
        set_jmp_reset_offset(s, a0);
        break;
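    /*
     * The nop padding above ensures the 32-bit displacement of the
     * direct "jmp im" is naturally aligned: the opcode byte lands at an
     * address with addr % 4 == 3, so the displacement that follows
     * starts on a 4-byte boundary and can be patched with a single
     * aligned 32-bit store.
     */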
    case INDEX_op_goto_ptr:
        /* jmp to the given host address (could be epilogue) */
        tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, a0);
        break;
    case INDEX_op_br:
        tcg_out_jxx(s, JCC_JMP, arg_label(a0), 0);
        break;
    OP_32_64(ld8u):
        /* Note that we can ignore REXW for the zero-extend to 64-bit.  */
        tcg_out_modrm_offset(s, OPC_MOVZBL, a0, a1, a2);
        break;
    OP_32_64(ld8s):
        tcg_out_modrm_offset(s, OPC_MOVSBL + rexw, a0, a1, a2);
        break;
    OP_32_64(ld16u):
        /* Note that we can ignore REXW for the zero-extend to 64-bit.  */
        tcg_out_modrm_offset(s, OPC_MOVZWL, a0, a1, a2);
        break;
    OP_32_64(ld16s):
        tcg_out_modrm_offset(s, OPC_MOVSWL + rexw, a0, a1, a2);
        break;
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_ld32u_i64:
#endif
    case INDEX_op_ld_i32:
        tcg_out_ld(s, TCG_TYPE_I32, a0, a1, a2);
        break;
    OP_32_64(st8):
        if (const_args[0]) {
            tcg_out_modrm_offset(s, OPC_MOVB_EvIz, 0, a1, a2);
            tcg_out8(s, a0);
        } else {
            tcg_out_modrm_offset(s, OPC_MOVB_EvGv | P_REXB_R, a0, a1, a2);
        }
        break;
    OP_32_64(st16):
        if (const_args[0]) {
            tcg_out_modrm_offset(s, OPC_MOVL_EvIz | P_DATA16, 0, a1, a2);
            tcg_out16(s, a0);
        } else {
            tcg_out_modrm_offset(s, OPC_MOVL_EvGv | P_DATA16, a0, a1, a2);
        }
        break;
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_st32_i64:
#endif
    case INDEX_op_st_i32:
        if (const_args[0]) {
            tcg_out_modrm_offset(s, OPC_MOVL_EvIz, 0, a1, a2);
            tcg_out32(s, a0);
        } else {
            tcg_out_st(s, TCG_TYPE_I32, a0, a1, a2);
        }
        break;
    OP_32_64(add):
        /* For 3-operand addition, use LEA.  */
        if (a0 != a1) {
            TCGArg c3 = 0;

            if (const_a2) {
                c3 = a2, a2 = -1;
            } else if (a0 == a2) {
                /* Watch out for dest = src + dest, since we've removed
                   the matching constraint on the add.  */
                tgen_arithr(s, ARITH_ADD + rexw, a0, a1);
                break;
            }

            tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, a1, a2, 0, c3);
            break;
        }
        c = ARITH_ADD;
        goto gen_arith;
    OP_32_64(sub):
        c = ARITH_SUB;
        goto gen_arith;
    OP_32_64(and):
        c = ARITH_AND;
        goto gen_arith;
    OP_32_64(or):
        c = ARITH_OR;
        goto gen_arith;
    OP_32_64(xor):
        c = ARITH_XOR;
        goto gen_arith;
    gen_arith:
        if (const_a2) {
            tgen_arithi(s, c + rexw, a0, a2, 0);
        } else {
            tgen_arithr(s, c + rexw, a0, a2);
        }
        break;
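    /*
     * Examples of the LEA selection above, in AT&T syntax:
     *     add a0, a1, a2  ->  lea (%a1,%a2), %a0   (a0 != a1, register a2)
     *     add a0, a1, $c  ->  lea c(%a1), %a0      (constant a2)
     * Only for a0 == a1 (or the a0 == a2 special case) does it fall
     * back to the two-operand ADD.
     */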
    OP_32_64(andc):
        if (const_a2) {
            tcg_out_mov(s, rexw ? TCG_TYPE_I64 : TCG_TYPE_I32, a0, a1);
            tgen_arithi(s, ARITH_AND + rexw, a0, ~a2, 0);
        } else {
            tcg_out_vex_modrm(s, OPC_ANDN + rexw, a0, a2, a1);
        }
        break;
    OP_32_64(mul):
        if (const_a2) {
            int32_t val;
            val = a2;
            if (val == (int8_t)val) {
                tcg_out_modrm(s, OPC_IMUL_GvEvIb + rexw, a0, a0);
                tcg_out8(s, val);
            } else {
                tcg_out_modrm(s, OPC_IMUL_GvEvIz + rexw, a0, a0);
                tcg_out32(s, val);
            }
        } else {
            tcg_out_modrm(s, OPC_IMUL_GvEv + rexw, a0, a2);
        }
        break;
    OP_32_64(div2):
        tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_IDIV, args[4]);
        break;
    OP_32_64(divu2):
        tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_DIV, args[4]);
        break;
    OP_32_64(shl):
        /* For small constant 3-operand shift, use LEA.  */
        if (const_a2 && a0 != a1 && (a2 - 1) < 3) {
            if (a2 - 1 == 0) {
                /* shl $1,a1,a0 -> lea (a1,a1),a0 */
                tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, a1, a1, 0, 0);
            } else {
                /* shl $n,a1,a0 -> lea 0(,a1,n),a0 */
                tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, -1, a1, a2, 0);
            }
            break;
        }
        c = SHIFT_SHL;
        vexop = OPC_SHLX;
        goto gen_shift_maybe_vex;
    OP_32_64(shr):
        c = SHIFT_SHR;
        vexop = OPC_SHRX;
        goto gen_shift_maybe_vex;
    OP_32_64(sar):
        c = SHIFT_SAR;
        vexop = OPC_SARX;
        goto gen_shift_maybe_vex;
    OP_32_64(rotl):
        c = SHIFT_ROL;
        goto gen_shift;
    OP_32_64(rotr):
        c = SHIFT_ROR;
        goto gen_shift;
    gen_shift_maybe_vex:
        if (have_bmi2) {
            if (!const_a2) {
                tcg_out_vex_modrm(s, vexop + rexw, a0, a2, a1);
                break;
            }
            tcg_out_mov(s, rexw ? TCG_TYPE_I64 : TCG_TYPE_I32, a0, a1);
        }
        /* FALLTHRU */
    gen_shift:
        if (const_a2) {
            tcg_out_shifti(s, c + rexw, a0, a2);
        } else {
            tcg_out_modrm(s, OPC_SHIFT_cl + rexw, c, a0);
        }
        break;
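    /*
     * With BMI2, SHLX/SHRX/SARX take the count in an arbitrary register
     * and write a separate destination, so the variable-count case
     * above needs neither the %cl constraint nor a mov into the
     * destination; hence tcg_target_op_def below returns r_r_ri with
     * BMI2 and r_0_ci (count forced into %ecx) without it.
     */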
    OP_32_64(ctz):
        tcg_out_ctz(s, rexw, args[0], args[1], args[2], const_args[2]);
        break;
    OP_32_64(clz):
        tcg_out_clz(s, rexw, args[0], args[1], args[2], const_args[2]);
        break;
    OP_32_64(ctpop):
        tcg_out_modrm(s, OPC_POPCNT + rexw, a0, a1);
        break;
    case INDEX_op_brcond_i32:
        tcg_out_brcond32(s, a2, a0, a1, const_args[1], arg_label(args[3]), 0);
        break;
    case INDEX_op_setcond_i32:
        tcg_out_setcond32(s, args[3], a0, a1, a2, const_a2);
        break;
    case INDEX_op_movcond_i32:
        tcg_out_movcond32(s, args[5], a0, a1, a2, const_a2, args[3]);
        break;
    OP_32_64(bswap16):
        tcg_out_rolw_8(s, a0);
        break;
    OP_32_64(bswap32):
        tcg_out_bswap32(s, a0);
        break;

    OP_32_64(neg):
        tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NEG, a0);
        break;
    OP_32_64(not):
        tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NOT, a0);
        break;

    OP_32_64(ext8s):
        tcg_out_ext8s(s, a0, a1, rexw);
        break;
    OP_32_64(ext16s):
        tcg_out_ext16s(s, a0, a1, rexw);
        break;
    OP_32_64(ext8u):
        tcg_out_ext8u(s, a0, a1);
        break;
    OP_32_64(ext16u):
        tcg_out_ext16u(s, a0, a1);
        break;
    case INDEX_op_qemu_ld_i32:
        tcg_out_qemu_ld(s, args, 0);
        break;
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, args, 1);
        break;
    case INDEX_op_qemu_st_i32:
        tcg_out_qemu_st(s, args, 0);
        break;
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, args, 1);
        break;
    OP_32_64(mulu2):
        tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_MUL, args[3]);
        break;
    OP_32_64(muls2):
        tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_IMUL, args[3]);
        break;
    OP_32_64(add2):
        if (const_args[4]) {
            tgen_arithi(s, ARITH_ADD + rexw, a0, args[4], 1);
        } else {
            tgen_arithr(s, ARITH_ADD + rexw, a0, args[4]);
        }
        if (const_args[5]) {
            tgen_arithi(s, ARITH_ADC + rexw, a1, args[5], 1);
        } else {
            tgen_arithr(s, ARITH_ADC + rexw, a1, args[5]);
        }
        break;
    OP_32_64(sub2):
        if (const_args[4]) {
            tgen_arithi(s, ARITH_SUB + rexw, a0, args[4], 1);
        } else {
            tgen_arithr(s, ARITH_SUB + rexw, a0, args[4]);
        }
        if (const_args[5]) {
            tgen_arithi(s, ARITH_SBB + rexw, a1, args[5], 1);
        } else {
            tgen_arithr(s, ARITH_SBB + rexw, a1, args[5]);
        }
        break;
#if TCG_TARGET_REG_BITS == 32
    case INDEX_op_brcond2_i32:
        tcg_out_brcond2(s, args, const_args, 0);
        break;
    case INDEX_op_setcond2_i32:
        tcg_out_setcond2(s, args, const_args);
        break;
#else /* TCG_TARGET_REG_BITS == 64 */
    case INDEX_op_ld32s_i64:
        tcg_out_modrm_offset(s, OPC_MOVSLQ, a0, a1, a2);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ld(s, TCG_TYPE_I64, a0, a1, a2);
        break;
    case INDEX_op_st_i64:
        if (const_args[0]) {
            tcg_out_modrm_offset(s, OPC_MOVL_EvIz | P_REXW, 0, a1, a2);
            tcg_out32(s, a0);
        } else {
            tcg_out_st(s, TCG_TYPE_I64, a0, a1, a2);
        }
        break;

    case INDEX_op_brcond_i64:
        tcg_out_brcond64(s, a2, a0, a1, const_args[1], arg_label(args[3]), 0);
        break;
    case INDEX_op_setcond_i64:
        tcg_out_setcond64(s, args[3], a0, a1, a2, const_a2);
        break;
    case INDEX_op_movcond_i64:
        tcg_out_movcond64(s, args[5], a0, a1, a2, const_a2, args[3]);
        break;

    case INDEX_op_bswap64_i64:
        tcg_out_bswap64(s, a0);
        break;
    case INDEX_op_extu_i32_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_extrl_i64_i32:
        tcg_out_ext32u(s, a0, a1);
        break;
    case INDEX_op_ext_i32_i64:
    case INDEX_op_ext32s_i64:
        tcg_out_ext32s(s, a0, a1);
        break;
    case INDEX_op_extrh_i64_i32:
        tcg_out_shifti(s, SHIFT_SHR + P_REXW, a0, 32);
        break;
#endif
    OP_32_64(deposit):
        if (args[3] == 0 && args[4] == 8) {
            /* load bits 0..7 */
            tcg_out_modrm(s, OPC_MOVB_EvGv | P_REXB_R | P_REXB_RM, a2, a0);
        } else if (args[3] == 8 && args[4] == 8) {
            /* load bits 8..15 */
            tcg_out_modrm(s, OPC_MOVB_EvGv, a2, a0 + 4);
        } else if (args[3] == 0 && args[4] == 16) {
            /* load bits 0..15 */
            tcg_out_modrm(s, OPC_MOVL_EvGv | P_DATA16, a2, a0);
        } else {
            tcg_abort();
        }
        break;
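    /*
     * The "a0 + 4" above relies on legacy ModRM byte-register encoding:
     * without a REX prefix, register numbers 4..7 select %ah/%ch/%dh/%bh,
     * i.e. bits 8..15 of registers 0..3.  This is also why deposit uses
     * the "Q" constraint (a register with a second byte).
     */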
    case INDEX_op_extract_i64:
        if (a2 + args[3] == 32) {
            /* This is a 32-bit zero-extending right shift.  */
            tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
            tcg_out_shifti(s, SHIFT_SHR, a0, a2);
            break;
        }
        /* FALLTHRU */
    case INDEX_op_extract_i32:
        /* On the off-chance that we can use the high-byte registers.
           Otherwise we emit the same ext16 + shift pattern that we
           would have gotten from the normal tcg-op.c expansion.  */
        tcg_debug_assert(a2 == 8 && args[3] == 8);
        if (a1 < 4 && a0 < 8) {
            tcg_out_modrm(s, OPC_MOVZBL, a0, a1 + 4);
        } else {
            tcg_out_ext16u(s, a0, a1);
            tcg_out_shifti(s, SHIFT_SHR, a0, 8);
        }
        break;
    case INDEX_op_sextract_i32:
        /* We don't implement sextract_i64, as we cannot sign-extend to
           64-bits without using the REX prefix that explicitly excludes
           access to the high-byte registers.  */
        tcg_debug_assert(a2 == 8 && args[3] == 8);
        if (a1 < 4 && a0 < 8) {
            tcg_out_modrm(s, OPC_MOVSBL, a0, a1 + 4);
        } else {
            tcg_out_ext16s(s, a0, a1, 0);
            tcg_out_shifti(s, SHIFT_SAR, a0, 8);
        }
        break;
    OP_32_64(extract2):
        /* Note that SHRD outputs to the r/m operand.  */
        tcg_out_modrm(s, OPC_SHRD_Ib + rexw, a2, a0);
        tcg_out8(s, args[3]);
        break;

    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
    case INDEX_op_mov_i64:
    case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi.  */
    case INDEX_op_movi_i64:
    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
    default:
        tcg_abort();
    }

#undef OP_32_64
}
static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
                           unsigned vecl, unsigned vece,
                           const TCGArg *args, const int *const_args)
{
    static int const add_insn[4] = {
        OPC_PADDB, OPC_PADDW, OPC_PADDD, OPC_PADDQ
    };
    static int const ssadd_insn[4] = {
        OPC_PADDSB, OPC_PADDSW, OPC_UD2, OPC_UD2
    };
    static int const usadd_insn[4] = {
        OPC_PADDUB, OPC_PADDUW, OPC_UD2, OPC_UD2
    };
    static int const sub_insn[4] = {
        OPC_PSUBB, OPC_PSUBW, OPC_PSUBD, OPC_PSUBQ
    };
    static int const sssub_insn[4] = {
        OPC_PSUBSB, OPC_PSUBSW, OPC_UD2, OPC_UD2
    };
    static int const ussub_insn[4] = {
        OPC_PSUBUB, OPC_PSUBUW, OPC_UD2, OPC_UD2
    };
    static int const mul_insn[4] = {
        OPC_UD2, OPC_PMULLW, OPC_PMULLD, OPC_UD2
    };
    static int const shift_imm_insn[4] = {
        OPC_UD2, OPC_PSHIFTW_Ib, OPC_PSHIFTD_Ib, OPC_PSHIFTQ_Ib
    };
    static int const cmpeq_insn[4] = {
        OPC_PCMPEQB, OPC_PCMPEQW, OPC_PCMPEQD, OPC_PCMPEQQ
    };
    static int const cmpgt_insn[4] = {
        OPC_PCMPGTB, OPC_PCMPGTW, OPC_PCMPGTD, OPC_PCMPGTQ
    };
    static int const punpckl_insn[4] = {
        OPC_PUNPCKLBW, OPC_PUNPCKLWD, OPC_PUNPCKLDQ, OPC_PUNPCKLQDQ
    };
    static int const punpckh_insn[4] = {
        OPC_PUNPCKHBW, OPC_PUNPCKHWD, OPC_PUNPCKHDQ, OPC_PUNPCKHQDQ
    };
    static int const packss_insn[4] = {
        OPC_PACKSSWB, OPC_PACKSSDW, OPC_UD2, OPC_UD2
    };
    static int const packus_insn[4] = {
        OPC_PACKUSWB, OPC_PACKUSDW, OPC_UD2, OPC_UD2
    };
    static int const smin_insn[4] = {
        OPC_PMINSB, OPC_PMINSW, OPC_PMINSD, OPC_UD2
    };
    static int const smax_insn[4] = {
        OPC_PMAXSB, OPC_PMAXSW, OPC_PMAXSD, OPC_UD2
    };
    static int const umin_insn[4] = {
        OPC_PMINUB, OPC_PMINUW, OPC_PMINUD, OPC_UD2
    };
    static int const umax_insn[4] = {
        OPC_PMAXUB, OPC_PMAXUW, OPC_PMAXUD, OPC_UD2
    };
    static int const shlv_insn[4] = {
        /* TODO: AVX512 adds support for MO_16.  */
        OPC_UD2, OPC_UD2, OPC_VPSLLVD, OPC_VPSLLVQ
    };
    static int const shrv_insn[4] = {
        /* TODO: AVX512 adds support for MO_16.  */
        OPC_UD2, OPC_UD2, OPC_VPSRLVD, OPC_VPSRLVQ
    };
    static int const sarv_insn[4] = {
        /* TODO: AVX512 adds support for MO_16, MO_64.  */
        OPC_UD2, OPC_UD2, OPC_VPSRAVD, OPC_UD2
    };
    static int const shls_insn[4] = {
        OPC_UD2, OPC_PSLLW, OPC_PSLLD, OPC_PSLLQ
    };
    static int const shrs_insn[4] = {
        OPC_UD2, OPC_PSRLW, OPC_PSRLD, OPC_PSRLQ
    };
    static int const sars_insn[4] = {
        OPC_UD2, OPC_PSRAW, OPC_PSRAD, OPC_UD2
    };
    static int const abs_insn[4] = {
        /* TODO: AVX512 adds support for MO_64.  */
        OPC_PABSB, OPC_PABSW, OPC_PABSD, OPC_UD2
    };

    TCGType type = vecl + TCG_TYPE_V64;
    int insn, sub;
    TCGArg a0, a1, a2;

    a0 = args[0];
    a1 = args[1];
    a2 = args[2];
    switch (opc) {
    case INDEX_op_add_vec:
        insn = add_insn[vece];
        goto gen_simd;
    case INDEX_op_ssadd_vec:
        insn = ssadd_insn[vece];
        goto gen_simd;
    case INDEX_op_usadd_vec:
        insn = usadd_insn[vece];
        goto gen_simd;
    case INDEX_op_sub_vec:
        insn = sub_insn[vece];
        goto gen_simd;
    case INDEX_op_sssub_vec:
        insn = sssub_insn[vece];
        goto gen_simd;
    case INDEX_op_ussub_vec:
        insn = ussub_insn[vece];
        goto gen_simd;
    case INDEX_op_mul_vec:
        insn = mul_insn[vece];
        goto gen_simd;
    case INDEX_op_and_vec:
        insn = OPC_PAND;
        goto gen_simd;
    case INDEX_op_or_vec:
        insn = OPC_POR;
        goto gen_simd;
    case INDEX_op_xor_vec:
        insn = OPC_PXOR;
        goto gen_simd;
    case INDEX_op_smin_vec:
        insn = smin_insn[vece];
        goto gen_simd;
    case INDEX_op_umin_vec:
        insn = umin_insn[vece];
        goto gen_simd;
    case INDEX_op_smax_vec:
        insn = smax_insn[vece];
        goto gen_simd;
    case INDEX_op_umax_vec:
        insn = umax_insn[vece];
        goto gen_simd;
    case INDEX_op_shlv_vec:
        insn = shlv_insn[vece];
        goto gen_simd;
    case INDEX_op_shrv_vec:
        insn = shrv_insn[vece];
        goto gen_simd;
    case INDEX_op_sarv_vec:
        insn = sarv_insn[vece];
        goto gen_simd;
    case INDEX_op_shls_vec:
        insn = shls_insn[vece];
        goto gen_simd;
    case INDEX_op_shrs_vec:
        insn = shrs_insn[vece];
        goto gen_simd;
    case INDEX_op_sars_vec:
        insn = sars_insn[vece];
        goto gen_simd;
    case INDEX_op_x86_punpckl_vec:
        insn = punpckl_insn[vece];
        goto gen_simd;
    case INDEX_op_x86_punpckh_vec:
        insn = punpckh_insn[vece];
        goto gen_simd;
    case INDEX_op_x86_packss_vec:
        insn = packss_insn[vece];
        goto gen_simd;
    case INDEX_op_x86_packus_vec:
        insn = packus_insn[vece];
        goto gen_simd;
#if TCG_TARGET_REG_BITS == 32
    case INDEX_op_dup2_vec:
        /* First merge the two 32-bit inputs to a single 64-bit element. */
        tcg_out_vex_modrm(s, OPC_PUNPCKLDQ, a0, a1, a2);
        /* Then replicate the 64-bit elements across the rest of the vector. */
        if (type != TCG_TYPE_V64) {
            tcg_out_dup_vec(s, type, MO_64, a0, a0);
        }
        break;
#endif
    case INDEX_op_abs_vec:
        insn = abs_insn[vece];
        a2 = a1;
        a1 = 0;
        goto gen_simd;

    gen_simd:
        tcg_debug_assert(insn != OPC_UD2);
        if (type == TCG_TYPE_V256) {
            insn |= P_VEXL;
        }
        tcg_out_vex_modrm(s, insn, a0, a1, a2);
        break;
    case INDEX_op_cmp_vec:
        sub = args[3];
        if (sub == TCG_COND_EQ) {
            insn = cmpeq_insn[vece];
        } else if (sub == TCG_COND_GT) {
            insn = cmpgt_insn[vece];
        } else {
            g_assert_not_reached();
        }
        goto gen_simd;

    case INDEX_op_andc_vec:
        insn = OPC_PANDN;
        if (type == TCG_TYPE_V256) {
            insn |= P_VEXL;
        }
        tcg_out_vex_modrm(s, insn, a0, a2, a1);
        break;
    case INDEX_op_shli_vec:
        sub = 6;
        goto gen_shift;
    case INDEX_op_shri_vec:
        sub = 2;
        goto gen_shift;
    case INDEX_op_sari_vec:
        tcg_debug_assert(vece != MO_64);
        sub = 4;
    gen_shift:
        tcg_debug_assert(vece != MO_8);
        insn = shift_imm_insn[vece];
        if (type == TCG_TYPE_V256) {
            insn |= P_VEXL;
        }
        tcg_out_vex_modrm(s, insn, sub, a0, a1);
        tcg_out8(s, a2);
        break;
    case INDEX_op_ld_vec:
        tcg_out_ld(s, type, a0, a1, a2);
        break;
    case INDEX_op_st_vec:
        tcg_out_st(s, type, a0, a1, a2);
        break;
    case INDEX_op_dupm_vec:
        tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
        break;
    case INDEX_op_x86_shufps_vec:
        insn = OPC_SHUFPS;
        sub = args[3];
        goto gen_simd_imm8;
    case INDEX_op_x86_blend_vec:
        if (vece == MO_16) {
            insn = OPC_PBLENDW;
        } else if (vece == MO_32) {
            insn = (have_avx2 ? OPC_VPBLENDD : OPC_BLENDPS);
        } else {
            g_assert_not_reached();
        }
        sub = args[3];
        goto gen_simd_imm8;
    case INDEX_op_x86_vperm2i128_vec:
        insn = OPC_VPERM2I128;
        sub = args[3];
        goto gen_simd_imm8;
    gen_simd_imm8:
        if (type == TCG_TYPE_V256) {
            insn |= P_VEXL;
        }
        tcg_out_vex_modrm(s, insn, a0, a1, a2);
        tcg_out8(s, sub);
        break;

    case INDEX_op_x86_vpblendvb_vec:
        insn = OPC_VPBLENDVB;
        if (type == TCG_TYPE_V256) {
            insn |= P_VEXL;
        }
        tcg_out_vex_modrm(s, insn, a0, a1, a2);
        tcg_out8(s, args[3] << 4);
        break;

    case INDEX_op_x86_psrldq_vec:
        tcg_out_vex_modrm(s, OPC_GRP14, 3, a0, a1);
        tcg_out8(s, a2);
        break;

    case INDEX_op_mov_vec:  /* Always emitted via tcg_out_mov.  */
    case INDEX_op_dupi_vec: /* Always emitted via tcg_out_movi.  */
    case INDEX_op_dup_vec:  /* Always emitted via tcg_out_dup_vec.  */
    default:
        g_assert_not_reached();
    }
}
static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
{
    static const TCGTargetOpDef r = { .args_ct_str = { "r" } };
    static const TCGTargetOpDef ri_r = { .args_ct_str = { "ri", "r" } };
    static const TCGTargetOpDef re_r = { .args_ct_str = { "re", "r" } };
    static const TCGTargetOpDef qi_r = { .args_ct_str = { "qi", "r" } };
    static const TCGTargetOpDef r_r = { .args_ct_str = { "r", "r" } };
    static const TCGTargetOpDef r_q = { .args_ct_str = { "r", "q" } };
    static const TCGTargetOpDef r_re = { .args_ct_str = { "r", "re" } };
    static const TCGTargetOpDef r_0 = { .args_ct_str = { "r", "0" } };
    static const TCGTargetOpDef r_r_ri = { .args_ct_str = { "r", "r", "ri" } };
    static const TCGTargetOpDef r_r_re = { .args_ct_str = { "r", "r", "re" } };
    static const TCGTargetOpDef r_0_r = { .args_ct_str = { "r", "0", "r" } };
    static const TCGTargetOpDef r_0_re = { .args_ct_str = { "r", "0", "re" } };
    static const TCGTargetOpDef r_0_ci = { .args_ct_str = { "r", "0", "ci" } };
    static const TCGTargetOpDef r_L = { .args_ct_str = { "r", "L" } };
    static const TCGTargetOpDef L_L = { .args_ct_str = { "L", "L" } };
    static const TCGTargetOpDef r_L_L = { .args_ct_str = { "r", "L", "L" } };
    static const TCGTargetOpDef r_r_L = { .args_ct_str = { "r", "r", "L" } };
    static const TCGTargetOpDef L_L_L = { .args_ct_str = { "L", "L", "L" } };
    static const TCGTargetOpDef r_r_L_L
        = { .args_ct_str = { "r", "r", "L", "L" } };
    static const TCGTargetOpDef L_L_L_L
        = { .args_ct_str = { "L", "L", "L", "L" } };
    static const TCGTargetOpDef x_x = { .args_ct_str = { "x", "x" } };
    static const TCGTargetOpDef x_x_x = { .args_ct_str = { "x", "x", "x" } };
    static const TCGTargetOpDef x_x_x_x
        = { .args_ct_str = { "x", "x", "x", "x" } };
    static const TCGTargetOpDef x_r = { .args_ct_str = { "x", "r" } };
    switch (op) {
    case INDEX_op_goto_ptr:
        return &r;

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld_i32:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
        return &r_r;

    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
        return &qi_r;
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
        return &ri_r;
    case INDEX_op_st_i64:
        return &re_r;

    case INDEX_op_add_i32:
    case INDEX_op_add_i64:
        return &r_r_re;
    case INDEX_op_sub_i32:
    case INDEX_op_sub_i64:
    case INDEX_op_mul_i32:
    case INDEX_op_mul_i64:
    case INDEX_op_or_i32:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
        return &r_0_re;

    case INDEX_op_and_i32:
    case INDEX_op_and_i64:
        {
            static const TCGTargetOpDef and
                = { .args_ct_str = { "r", "0", "reZ" } };
            return &and;
        }
        break;
    case INDEX_op_andc_i32:
    case INDEX_op_andc_i64:
        {
            static const TCGTargetOpDef andc
                = { .args_ct_str = { "r", "r", "rI" } };
            return &andc;
        }
        break;

    case INDEX_op_shl_i32:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i32:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i32:
    case INDEX_op_sar_i64:
        return have_bmi2 ? &r_r_ri : &r_0_ci;
    case INDEX_op_rotl_i32:
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i32:
    case INDEX_op_rotr_i64:
        return &r_0_ci;

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        return &r_re;

    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
    case INDEX_op_neg_i32:
    case INDEX_op_neg_i64:
    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
    case INDEX_op_extrh_i64_i32:
        return &r_0;

    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
        return &r_q;
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext16u_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_extract_i32:
    case INDEX_op_extract_i64:
    case INDEX_op_sextract_i32:
    case INDEX_op_ctpop_i32:
    case INDEX_op_ctpop_i64:
        return &r_r;
    case INDEX_op_extract2_i32:
    case INDEX_op_extract2_i64:
        return &r_0_r;

    case INDEX_op_deposit_i32:
    case INDEX_op_deposit_i64:
        {
            static const TCGTargetOpDef dep
                = { .args_ct_str = { "Q", "0", "Q" } };
            return &dep;
        }
    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
        {
            static const TCGTargetOpDef setc
                = { .args_ct_str = { "q", "r", "re" } };
            return &setc;
        }
    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
        {
            static const TCGTargetOpDef movc
                = { .args_ct_str = { "r", "r", "re", "r", "0" } };
            return &movc;
        }
    case INDEX_op_div2_i32:
    case INDEX_op_div2_i64:
    case INDEX_op_divu2_i32:
    case INDEX_op_divu2_i64:
        {
            static const TCGTargetOpDef div2
                = { .args_ct_str = { "a", "d", "0", "1", "r" } };
            return &div2;
        }
    case INDEX_op_mulu2_i32:
    case INDEX_op_mulu2_i64:
    case INDEX_op_muls2_i32:
    case INDEX_op_muls2_i64:
        {
            static const TCGTargetOpDef mul2
                = { .args_ct_str = { "a", "d", "a", "r" } };
            return &mul2;
        }
    case INDEX_op_add2_i32:
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i32:
    case INDEX_op_sub2_i64:
        {
            static const TCGTargetOpDef arith2
                = { .args_ct_str = { "r", "r", "0", "1", "re", "re" } };
            return &arith2;
        }
    case INDEX_op_ctz_i32:
    case INDEX_op_ctz_i64:
        {
            static const TCGTargetOpDef ctz[2] = {
                { .args_ct_str = { "&r", "r", "r" } },
                { .args_ct_str = { "&r", "r", "rW" } },
            };
            return &ctz[have_bmi1];
        }
    case INDEX_op_clz_i32:
    case INDEX_op_clz_i64:
        {
            static const TCGTargetOpDef clz[2] = {
                { .args_ct_str = { "&r", "r", "r" } },
                { .args_ct_str = { "&r", "r", "rW" } },
            };
            return &clz[have_lzcnt];
        }

    case INDEX_op_qemu_ld_i32:
        return TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &r_L : &r_L_L;
    case INDEX_op_qemu_st_i32:
        return TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &L_L : &L_L_L;
    case INDEX_op_qemu_ld_i64:
        return (TCG_TARGET_REG_BITS == 64 ? &r_L
                : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &r_r_L
                : &r_r_L_L);
    case INDEX_op_qemu_st_i64:
        return (TCG_TARGET_REG_BITS == 64 ? &L_L
                : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &L_L_L
                : &L_L_L_L);

    case INDEX_op_brcond2_i32:
        {
            static const TCGTargetOpDef b2
                = { .args_ct_str = { "r", "r", "ri", "ri" } };
            return &b2;
        }
    case INDEX_op_setcond2_i32:
        {
            static const TCGTargetOpDef s2
                = { .args_ct_str = { "r", "r", "r", "ri", "ri" } };
            return &s2;
        }

    case INDEX_op_ld_vec:
    case INDEX_op_st_vec:
    case INDEX_op_dupm_vec:
        return &x_r;

    case INDEX_op_add_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_mul_vec:
    case INDEX_op_and_vec:
    case INDEX_op_or_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_andc_vec:
    case INDEX_op_ssadd_vec:
    case INDEX_op_usadd_vec:
    case INDEX_op_sssub_vec:
    case INDEX_op_ussub_vec:
    case INDEX_op_smin_vec:
    case INDEX_op_umin_vec:
    case INDEX_op_smax_vec:
    case INDEX_op_umax_vec:
    case INDEX_op_shlv_vec:
    case INDEX_op_shrv_vec:
    case INDEX_op_sarv_vec:
    case INDEX_op_shls_vec:
    case INDEX_op_shrs_vec:
    case INDEX_op_sars_vec:
    case INDEX_op_rotls_vec:
    case INDEX_op_cmp_vec:
    case INDEX_op_x86_shufps_vec:
    case INDEX_op_x86_blend_vec:
    case INDEX_op_x86_packss_vec:
    case INDEX_op_x86_packus_vec:
    case INDEX_op_x86_vperm2i128_vec:
    case INDEX_op_x86_punpckl_vec:
    case INDEX_op_x86_punpckh_vec:
#if TCG_TARGET_REG_BITS == 32
    case INDEX_op_dup2_vec:
#endif
        return &x_x_x;
    case INDEX_op_abs_vec:
    case INDEX_op_dup_vec:
    case INDEX_op_shli_vec:
    case INDEX_op_shri_vec:
    case INDEX_op_sari_vec:
    case INDEX_op_x86_psrldq_vec:
        return &x_x;
    case INDEX_op_x86_vpblendvb_vec:
        return &x_x_x_x;

    default:
        break;
    }
    return NULL;
}
int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
{
    switch (opc) {
    case INDEX_op_add_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_and_vec:
    case INDEX_op_or_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_andc_vec:
        return 1;
    case INDEX_op_rotli_vec:
    case INDEX_op_cmp_vec:
    case INDEX_op_cmpsel_vec:
        return -1;

    case INDEX_op_shli_vec:
    case INDEX_op_shri_vec:
        /* We must expand the operation for MO_8.  */
        return vece == MO_8 ? -1 : 1;

    case INDEX_op_sari_vec:
        /* We must expand the operation for MO_8.  */
        if (vece == MO_8) {
            return -1;
        }
        /* We can emulate this for MO_64, but it does not pay off
           unless we're producing at least 4 values.  */
        if (vece == MO_64) {
            return type >= TCG_TYPE_V256 ? -1 : 0;
        }
        return 1;

    case INDEX_op_shls_vec:
    case INDEX_op_shrs_vec:
        return vece >= MO_16;
    case INDEX_op_sars_vec:
        return vece >= MO_16 && vece <= MO_32;
    case INDEX_op_rotls_vec:
        return vece >= MO_16 ? -1 : 0;

    case INDEX_op_shlv_vec:
    case INDEX_op_shrv_vec:
        return have_avx2 && vece >= MO_32;
    case INDEX_op_sarv_vec:
        return have_avx2 && vece == MO_32;
    case INDEX_op_rotlv_vec:
    case INDEX_op_rotrv_vec:
        return have_avx2 && vece >= MO_32 ? -1 : 0;

    case INDEX_op_mul_vec:
        if (vece == MO_8) {
            /* We can expand the operation for MO_8.  */
            return -1;
        }
        if (vece == MO_64) {
            return 0;
        }
        return 1;

    case INDEX_op_ssadd_vec:
    case INDEX_op_usadd_vec:
    case INDEX_op_sssub_vec:
    case INDEX_op_ussub_vec:
        return vece <= MO_16;
    case INDEX_op_smin_vec:
    case INDEX_op_smax_vec:
    case INDEX_op_umin_vec:
    case INDEX_op_umax_vec:
    case INDEX_op_abs_vec:
        return vece <= MO_32;

    default:
        return 0;
    }
}
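/*
 * Return values above follow the tcg_can_emit_vec_op convention:
 * 1 means the operation is supported directly for this type and
 * element size, 0 means not supported at all, and -1 means it can be
 * implemented only via the tcg_expand_vec_op expansions below.
 */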
static void expand_vec_shi(TCGType type, unsigned vece, TCGOpcode opc,
                           TCGv_vec v0, TCGv_vec v1, TCGArg imm)
{
    TCGv_vec t1, t2;

    tcg_debug_assert(vece == MO_8);

    t1 = tcg_temp_new_vec(type);
    t2 = tcg_temp_new_vec(type);

    /*
     * Unpack to W, shift, and repack.  Tricky bits:
     * (1) Use punpck*bw x,x to produce DDCCBBAA,
     *     i.e. duplicate in other half of the 16-bit lane.
     * (2) For right-shift, add 8 so that the high half of the lane
     *     becomes zero.  For left-shift, and left-rotate, we must
     *     shift up and down again.
     * (3) Step 2 leaves high half zero such that PACKUSWB
     *     (pack with unsigned saturation) does not modify
     *     the quantity.
     */
    vec_gen_3(INDEX_op_x86_punpckl_vec, type, MO_8,
              tcgv_vec_arg(t1), tcgv_vec_arg(v1), tcgv_vec_arg(v1));
    vec_gen_3(INDEX_op_x86_punpckh_vec, type, MO_8,
              tcgv_vec_arg(t2), tcgv_vec_arg(v1), tcgv_vec_arg(v1));

    if (opc != INDEX_op_rotli_vec) {
        imm += 8;
    }
    if (opc == INDEX_op_shri_vec) {
        tcg_gen_shri_vec(MO_16, t1, t1, imm);
        tcg_gen_shri_vec(MO_16, t2, t2, imm);
    } else {
        tcg_gen_shli_vec(MO_16, t1, t1, imm);
        tcg_gen_shli_vec(MO_16, t2, t2, imm);
        tcg_gen_shri_vec(MO_16, t1, t1, 8);
        tcg_gen_shri_vec(MO_16, t2, t2, 8);
    }

    vec_gen_3(INDEX_op_x86_packus_vec, type, MO_8,
              tcgv_vec_arg(v0), tcgv_vec_arg(t1), tcgv_vec_arg(t2));
    tcg_temp_free_vec(t1);
    tcg_temp_free_vec(t2);
}
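/*
 * Worked example of the expansion above: an MO_8 logical right shift
 * by 3.  punpcklbw of v1 with itself duplicates each byte into both
 * halves of its 16-bit lane (0xAB -> 0xABAB); shifting right by
 * 3 + 8 = 11 yields 0x0015 (0xAB >> 3 = 0x15) with a zero high byte,
 * so the final PACKUSWB repacks the bytes unmodified.
 */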
static void expand_vec_sari(TCGType type, unsigned vece,
                            TCGv_vec v0, TCGv_vec v1, TCGArg imm)
{
    TCGv_vec t1, t2;

    switch (vece) {
    case MO_8:
        /* Unpack to W, shift, and repack, as in expand_vec_shi.  */
        t1 = tcg_temp_new_vec(type);
        t2 = tcg_temp_new_vec(type);
        vec_gen_3(INDEX_op_x86_punpckl_vec, type, MO_8,
                  tcgv_vec_arg(t1), tcgv_vec_arg(v1), tcgv_vec_arg(v1));
        vec_gen_3(INDEX_op_x86_punpckh_vec, type, MO_8,
                  tcgv_vec_arg(t2), tcgv_vec_arg(v1), tcgv_vec_arg(v1));
        tcg_gen_sari_vec(MO_16, t1, t1, imm + 8);
        tcg_gen_sari_vec(MO_16, t2, t2, imm + 8);
        vec_gen_3(INDEX_op_x86_packss_vec, type, MO_8,
                  tcgv_vec_arg(v0), tcgv_vec_arg(t1), tcgv_vec_arg(t2));
        tcg_temp_free_vec(t1);
        tcg_temp_free_vec(t2);
        break;

    case MO_64:
        if (imm <= 32) {
            /*
             * We can emulate a small sign extend by performing an arithmetic
             * 32-bit shift and overwriting the high half of a 64-bit logical
             * shift.  Note that the ISA says shift of 32 is valid, but TCG
             * does not, so we have to bound the smaller shift -- we get the
             * same result in the high half either way.
             */
            t1 = tcg_temp_new_vec(type);
            tcg_gen_sari_vec(MO_32, t1, v1, MIN(imm, 31));
            tcg_gen_shri_vec(MO_64, v0, v1, imm);
            vec_gen_4(INDEX_op_x86_blend_vec, type, MO_32,
                      tcgv_vec_arg(v0), tcgv_vec_arg(v0),
                      tcgv_vec_arg(t1), 0xaa);
            tcg_temp_free_vec(t1);
        } else {
            /* Otherwise we will need to use a compare vs 0 to produce
             * the sign-extend, shift and merge.
             */
            t1 = tcg_const_zeros_vec(type);
            tcg_gen_cmp_vec(TCG_COND_GT, MO_64, t1, t1, v1);
            tcg_gen_shri_vec(MO_64, v0, v1, imm);
            tcg_gen_shli_vec(MO_64, t1, t1, 64 - imm);
            tcg_gen_or_vec(MO_64, v0, v0, t1);
            tcg_temp_free_vec(t1);
        }
        break;

    default:
        g_assert_not_reached();
    }
}
static void expand_vec_rotli(TCGType type, unsigned vece,
                             TCGv_vec v0, TCGv_vec v1, TCGArg imm)
{
    TCGv_vec t;

    if (vece == MO_8) {
        expand_vec_shi(type, vece, INDEX_op_rotli_vec, v0, v1, imm);
        return;
    }

    t = tcg_temp_new_vec(type);
    tcg_gen_shli_vec(vece, t, v1, imm);
    tcg_gen_shri_vec(vece, v0, v1, (8 << vece) - imm);
    tcg_gen_or_vec(vece, v0, v0, t);
    tcg_temp_free_vec(t);
}
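/*
 * This is the standard rotate identity,
 *     rotl(x, n) = (x << n) | (x >> (w - n)),   w = 8 << vece,
 * composed from two shifts because x86 has no vector rotate
 * instruction before AVX512.
 */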
static void expand_vec_rotls(TCGType type, unsigned vece,
                             TCGv_vec v0, TCGv_vec v1, TCGv_i32 lsh)
{
    TCGv_i32 rsh;
    TCGv_vec t;

    tcg_debug_assert(vece != MO_8);

    t = tcg_temp_new_vec(type);
    rsh = tcg_temp_new_i32();

    tcg_gen_neg_i32(rsh, lsh);
    tcg_gen_andi_i32(rsh, rsh, (8 << vece) - 1);
    tcg_gen_shls_vec(vece, t, v1, lsh);
    tcg_gen_shrs_vec(vece, v0, v1, rsh);
    tcg_gen_or_vec(vece, v0, v0, t);
    tcg_temp_free_vec(t);
    tcg_temp_free_i32(rsh);
}
static void expand_vec_rotv(TCGType type, unsigned vece, TCGv_vec v0,
                            TCGv_vec v1, TCGv_vec sh, bool right)
{
    TCGv_vec t = tcg_temp_new_vec(type);

    tcg_gen_dupi_vec(vece, t, 8 << vece);
    tcg_gen_sub_vec(vece, t, t, sh);
    if (right) {
        tcg_gen_shlv_vec(vece, t, v1, t);
        tcg_gen_shrv_vec(vece, v0, v1, sh);
    } else {
        tcg_gen_shrv_vec(vece, t, v1, t);
        tcg_gen_shlv_vec(vece, v0, v1, sh);
    }
    tcg_gen_or_vec(vece, v0, v0, t);
    tcg_temp_free_vec(t);
}
static void expand_vec_mul(TCGType type, unsigned vece,
                           TCGv_vec v0, TCGv_vec v1, TCGv_vec v2)
{
    TCGv_vec t1, t2, t3, t4;

    tcg_debug_assert(vece == MO_8);

    /*
     * Unpack v1 bytes to words, 0 | x.
     * Unpack v2 bytes to words, y | 0.
     * This leaves the 8-bit result, x * y, with 8 bits of right padding.
     * Shift logical right by 8 bits to clear the high 8 bytes before
     * using an unsigned saturated pack.
     *
     * The difference between the V64, V128 and V256 cases is merely how
     * we distribute the expansion between temporaries.
     */
    switch (type) {
    case TCG_TYPE_V64:
        t1 = tcg_temp_new_vec(TCG_TYPE_V128);
        t2 = tcg_temp_new_vec(TCG_TYPE_V128);
        tcg_gen_dup16i_vec(t2, 0);
        vec_gen_3(INDEX_op_x86_punpckl_vec, TCG_TYPE_V128, MO_8,
                  tcgv_vec_arg(t1), tcgv_vec_arg(v1), tcgv_vec_arg(t2));
        vec_gen_3(INDEX_op_x86_punpckl_vec, TCG_TYPE_V128, MO_8,
                  tcgv_vec_arg(t2), tcgv_vec_arg(t2), tcgv_vec_arg(v2));
        tcg_gen_mul_vec(MO_16, t1, t1, t2);
        tcg_gen_shri_vec(MO_16, t1, t1, 8);
        vec_gen_3(INDEX_op_x86_packus_vec, TCG_TYPE_V128, MO_8,
                  tcgv_vec_arg(v0), tcgv_vec_arg(t1), tcgv_vec_arg(t1));
        tcg_temp_free_vec(t1);
        tcg_temp_free_vec(t2);
        break;

    case TCG_TYPE_V128:
    case TCG_TYPE_V256:
        t1 = tcg_temp_new_vec(type);
        t2 = tcg_temp_new_vec(type);
        t3 = tcg_temp_new_vec(type);
        t4 = tcg_temp_new_vec(type);
        tcg_gen_dup16i_vec(t4, 0);
        vec_gen_3(INDEX_op_x86_punpckl_vec, type, MO_8,
                  tcgv_vec_arg(t1), tcgv_vec_arg(v1), tcgv_vec_arg(t4));
        vec_gen_3(INDEX_op_x86_punpckl_vec, type, MO_8,
                  tcgv_vec_arg(t2), tcgv_vec_arg(t4), tcgv_vec_arg(v2));
        vec_gen_3(INDEX_op_x86_punpckh_vec, type, MO_8,
                  tcgv_vec_arg(t3), tcgv_vec_arg(v1), tcgv_vec_arg(t4));
        vec_gen_3(INDEX_op_x86_punpckh_vec, type, MO_8,
                  tcgv_vec_arg(t4), tcgv_vec_arg(t4), tcgv_vec_arg(v2));
        tcg_gen_mul_vec(MO_16, t1, t1, t2);
        tcg_gen_mul_vec(MO_16, t3, t3, t4);
        tcg_gen_shri_vec(MO_16, t1, t1, 8);
        tcg_gen_shri_vec(MO_16, t3, t3, 8);
        vec_gen_3(INDEX_op_x86_packus_vec, type, MO_8,
                  tcgv_vec_arg(v0), tcgv_vec_arg(t1), tcgv_vec_arg(t3));
        tcg_temp_free_vec(t1);
        tcg_temp_free_vec(t2);
        tcg_temp_free_vec(t3);
        tcg_temp_free_vec(t4);
        break;

    default:
        g_assert_not_reached();
    }
}
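/*
 * Worked example of the interleave trick above, one byte lane with
 * x = 20, y = 30: t1 holds x in the low byte of its word (0x0014),
 * t2 holds y << 8 (0x1E00).  The 16-bit product is (x * y) << 8
 * truncated to 16 bits: 600 = 0x258 gives 0x5800; the shift right by 8
 * leaves 0x58 = 600 % 256, ready for the unsigned saturated pack.
 */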
static bool expand_vec_cmp_noinv(TCGType type, unsigned vece, TCGv_vec v0,
                                 TCGv_vec v1, TCGv_vec v2, TCGCond cond)
{
    enum {
        NEED_INV  = 1,
        NEED_SWAP = 2,
        NEED_BIAS = 4,
        NEED_UMIN = 8,
        NEED_UMAX = 16,
    };
    TCGv_vec t1, t2;
    uint8_t fixup;

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_GT:
        fixup = 0;
        break;
    case TCG_COND_NE:
    case TCG_COND_LE:
        fixup = NEED_INV;
        break;
    case TCG_COND_LT:
        fixup = NEED_SWAP;
        break;
    case TCG_COND_GE:
        fixup = NEED_SWAP | NEED_INV;
        break;
    case TCG_COND_LEU:
        if (vece <= MO_32) {
            fixup = NEED_UMIN;
        } else {
            fixup = NEED_BIAS | NEED_INV;
        }
        break;
    case TCG_COND_GTU:
        if (vece <= MO_32) {
            fixup = NEED_UMIN | NEED_INV;
        } else {
            fixup = NEED_BIAS;
        }
        break;
    case TCG_COND_GEU:
        if (vece <= MO_32) {
            fixup = NEED_UMAX;
        } else {
            fixup = NEED_BIAS | NEED_SWAP | NEED_INV;
        }
        break;
    case TCG_COND_LTU:
        if (vece <= MO_32) {
            fixup = NEED_UMAX | NEED_INV;
        } else {
            fixup = NEED_BIAS | NEED_SWAP;
        }
        break;
    default:
        g_assert_not_reached();
    }

    if (fixup & NEED_INV) {
        cond = tcg_invert_cond(cond);
    }
    if (fixup & NEED_SWAP) {
        t1 = v1, v1 = v2, v2 = t1;
        cond = tcg_swap_cond(cond);
    }

    t1 = t2 = NULL;
    if (fixup & (NEED_UMIN | NEED_UMAX)) {
        t1 = tcg_temp_new_vec(type);
        if (fixup & NEED_UMIN) {
            tcg_gen_umin_vec(vece, t1, v1, v2);
        } else {
            tcg_gen_umax_vec(vece, t1, v1, v2);
        }
        v2 = t1;
        cond = TCG_COND_EQ;
    } else if (fixup & NEED_BIAS) {
        t1 = tcg_temp_new_vec(type);
        t2 = tcg_temp_new_vec(type);
        tcg_gen_dupi_vec(vece, t2, 1ull << ((8 << vece) - 1));
        tcg_gen_sub_vec(vece, t1, v1, t2);
        tcg_gen_sub_vec(vece, t2, v2, t2);
        v1 = t1;
        v2 = t2;
        cond = tcg_signed_cond(cond);
    }

    tcg_debug_assert(cond == TCG_COND_EQ || cond == TCG_COND_GT);
    /* Expand directly; do not recurse.  */
    vec_gen_4(INDEX_op_cmp_vec, type, vece,
              tcgv_vec_arg(v0), tcgv_vec_arg(v1), tcgv_vec_arg(v2), cond);

    if (t1) {
        tcg_temp_free_vec(t1);
        if (t2) {
            tcg_temp_free_vec(t2);
        }
    }
    return fixup & NEED_INV;
}
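/*
 * NEED_BIAS above is the usual trick for doing unsigned compares with
 * only signed compare instructions: x <u y iff (x - bias) <s (y - bias)
 * with bias = 1 << ((8 << vece) - 1), e.g. subtracting 0x80 from both
 * operands for MO_8.
 */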
static void expand_vec_cmp(TCGType type, unsigned vece, TCGv_vec v0,
                           TCGv_vec v1, TCGv_vec v2, TCGCond cond)
{
    if (expand_vec_cmp_noinv(type, vece, v0, v1, v2, cond)) {
        tcg_gen_not_vec(vece, v0, v0);
    }
}
static void expand_vec_cmpsel(TCGType type, unsigned vece, TCGv_vec v0,
                              TCGv_vec c1, TCGv_vec c2,
                              TCGv_vec v3, TCGv_vec v4, TCGCond cond)
{
    TCGv_vec t = tcg_temp_new_vec(type);

    if (expand_vec_cmp_noinv(type, vece, t, c1, c2, cond)) {
        /* Invert the sense of the compare by swapping arguments.  */
        TCGv_vec x;
        x = v3, v3 = v4, v4 = x;
    }
    vec_gen_4(INDEX_op_x86_vpblendvb_vec, type, vece,
              tcgv_vec_arg(v0), tcgv_vec_arg(v4),
              tcgv_vec_arg(v3), tcgv_vec_arg(t));
    tcg_temp_free_vec(t);
}
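/*
 * VPBLENDVB picks each byte from its second source when the top bit of
 * the corresponding mask byte is set, and the compare above yields
 * all-ones or all-zeros per element; so when expand_vec_cmp_noinv asks
 * for inversion it suffices to swap v3/v4 instead of inverting the mask.
 */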
void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
                       TCGArg a0, ...)
{
    va_list va;
    TCGArg a2;
    TCGv_vec v0, v1, v2, v3, v4;

    va_start(va, a0);
    v0 = temp_tcgv_vec(arg_temp(a0));
    v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
    a2 = va_arg(va, TCGArg);

    switch (opc) {
    case INDEX_op_shli_vec:
    case INDEX_op_shri_vec:
        expand_vec_shi(type, vece, opc, v0, v1, a2);
        break;

    case INDEX_op_sari_vec:
        expand_vec_sari(type, vece, v0, v1, a2);
        break;

    case INDEX_op_rotli_vec:
        expand_vec_rotli(type, vece, v0, v1, a2);
        break;

    case INDEX_op_rotls_vec:
        expand_vec_rotls(type, vece, v0, v1, temp_tcgv_i32(arg_temp(a2)));
        break;

    case INDEX_op_rotlv_vec:
        v2 = temp_tcgv_vec(arg_temp(a2));
        expand_vec_rotv(type, vece, v0, v1, v2, false);
        break;
    case INDEX_op_rotrv_vec:
        v2 = temp_tcgv_vec(arg_temp(a2));
        expand_vec_rotv(type, vece, v0, v1, v2, true);
        break;

    case INDEX_op_mul_vec:
        v2 = temp_tcgv_vec(arg_temp(a2));
        expand_vec_mul(type, vece, v0, v1, v2);
        break;

    case INDEX_op_cmp_vec:
        v2 = temp_tcgv_vec(arg_temp(a2));
        expand_vec_cmp(type, vece, v0, v1, v2, va_arg(va, TCGArg));
        break;

    case INDEX_op_cmpsel_vec:
        v2 = temp_tcgv_vec(arg_temp(a2));
        v3 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
        v4 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
        expand_vec_cmpsel(type, vece, v0, v1, v2, v3, v4, va_arg(va, TCGArg));
        break;

    default:
        break;
    }

    va_end(va);
}
static const int tcg_target_callee_save_regs[] = {
#if TCG_TARGET_REG_BITS == 64
    TCG_REG_RBP,
    TCG_REG_RBX,
#if defined(_WIN64)
    TCG_REG_RDI,
    TCG_REG_RSI,
#endif
    TCG_REG_R12,
    TCG_REG_R13,
    TCG_REG_R14, /* Currently used for the global env. */
    TCG_REG_R15,
#else
    TCG_REG_EBP, /* Currently used for the global env. */
    TCG_REG_EBX,
    TCG_REG_ESI,
    TCG_REG_EDI,
#endif
};

/* Compute frame size via macros, to share between tcg_target_qemu_prologue
   and tcg_register_jit.  */

#define PUSH_SIZE \
    ((1 + ARRAY_SIZE(tcg_target_callee_save_regs)) \
     * (TCG_TARGET_REG_BITS / 8))

#define FRAME_SIZE \
    ((PUSH_SIZE \
      + TCG_STATIC_CALL_ARGS_SIZE \
      + CPU_TEMP_BUF_NLONGS * sizeof(long) \
      + TCG_TARGET_STACK_ALIGN - 1) \
     & ~(TCG_TARGET_STACK_ALIGN - 1))
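/*
 * Rough worked example, assuming the usual TCG_STATIC_CALL_ARGS_SIZE of
 * 128 bytes and CPU_TEMP_BUF_NLONGS of 128: on non-Windows x86-64 there
 * are six callee-saved registers, so with the return address PUSH_SIZE
 * is 7 * 8 = 56, and FRAME_SIZE rounds 56 + 128 + 1024 up to the
 * 16-byte stack alignment: 1216 bytes.
 */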
/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int i, stack_addend;

    /* Reserve some stack space, also for TCG temps.  */
    stack_addend = FRAME_SIZE - PUSH_SIZE;
    tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
                  CPU_TEMP_BUF_NLONGS * sizeof(long));

    /* Save all callee saved registers.  */
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_push(s, tcg_target_callee_save_regs[i]);
    }

#if TCG_TARGET_REG_BITS == 32
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP,
               (ARRAY_SIZE(tcg_target_callee_save_regs) + 1) * 4);
    tcg_out_addi(s, TCG_REG_ESP, -stack_addend);
    /* jmp *tb.  */
    tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, TCG_REG_ESP,
                         (ARRAY_SIZE(tcg_target_callee_save_regs) + 2) * 4
                         + stack_addend);
#else
# if !defined(CONFIG_SOFTMMU) && TCG_TARGET_REG_BITS == 64
    if (guest_base) {
        int seg = setup_guest_base_seg();
        if (seg != 0) {
            x86_guest_base_seg = seg;
        } else if (guest_base == (int32_t)guest_base) {
            x86_guest_base_offset = guest_base;
        } else {
            /* Choose R12 because, as a base, it requires a SIB byte. */
            x86_guest_base_index = TCG_REG_R12;
            tcg_out_movi(s, TCG_TYPE_PTR, x86_guest_base_index, guest_base);
            tcg_regset_set_reg(s->reserved_regs, x86_guest_base_index);
        }
    }
# endif

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
    tcg_out_addi(s, TCG_REG_ESP, -stack_addend);
    /* jmp *tb.  */
    tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, tcg_target_call_iarg_regs[1]);
#endif

    /*
     * Return path for goto_ptr. Set return value to 0, a-la exit_tb,
     * and fall through to the rest of the epilogue.
     */
    s->code_gen_epilogue = s->code_ptr;
    tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_EAX, 0);

    /* TB epilogue */
    tb_ret_addr = s->code_ptr;

    tcg_out_addi(s, TCG_REG_CALL_STACK, stack_addend);

    if (have_avx2) {
        tcg_out_vex_opc(s, OPC_VZEROUPPER, 0, 0, 0, 0);
    }
    for (i = ARRAY_SIZE(tcg_target_callee_save_regs) - 1; i >= 0; i--) {
        tcg_out_pop(s, tcg_target_callee_save_regs[i]);
    }
    tcg_out_opc(s, OPC_RET, 0, 0, 0);
}
static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    memset(p, 0x90, count);
}
static void tcg_target_init(TCGContext *s)
{
#ifdef CONFIG_CPUID_H
    unsigned a, b, c, d, b7 = 0;
    int max = __get_cpuid_max(0, 0);

    if (max >= 7) {
        /* BMI1 is available on AMD Piledriver and Intel Haswell CPUs.  */
        __cpuid_count(7, 0, a, b7, c, d);
        have_bmi1 = (b7 & bit_BMI) != 0;
        have_bmi2 = (b7 & bit_BMI2) != 0;
    }

    if (max >= 1) {
        __cpuid(1, a, b, c, d);
#ifndef have_cmov
        /* For 32-bit, 99% certainty that we're running on hardware that
           supports cmov, but we still need to check.  In case cmov is not
           available, we'll use a small forward branch.  */
        have_cmov = (d & bit_CMOV) != 0;
#endif

        /* MOVBE is only available on Intel Atom and Haswell CPUs, so we
           need to probe for it.  */
        have_movbe = (c & bit_MOVBE) != 0;
        have_popcnt = (c & bit_POPCNT) != 0;

        /* There are a number of things we must check before we can be
           sure of not hitting invalid opcode.  */
        if (c & bit_OSXSAVE) {
            unsigned xcrl, xcrh;
            /* The xgetbv instruction is not available to older versions of
             * the assembler, so we encode the instruction manually.
             */
            asm(".byte 0x0f, 0x01, 0xd0" : "=a" (xcrl), "=d" (xcrh) : "c" (0));
            if ((xcrl & 6) == 6) {
                have_avx1 = (c & bit_AVX) != 0;
                have_avx2 = (b7 & bit_AVX2) != 0;
            }
        }
    }

    max = __get_cpuid_max(0x8000000, 0);
    if (max >= 1) {
        __cpuid(0x80000001, a, b, c, d);
        /* LZCNT was introduced with AMD Barcelona and Intel Haswell CPUs.  */
        have_lzcnt = (c & bit_LZCNT) != 0;
    }
#endif /* CONFIG_CPUID_H */

    tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
    if (TCG_TARGET_REG_BITS == 64) {
        tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS;
    }
    if (have_avx1) {
        tcg_target_available_regs[TCG_TYPE_V64] = ALL_VECTOR_REGS;
        tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS;
    }
    if (have_avx2) {
        tcg_target_available_regs[TCG_TYPE_V256] = ALL_VECTOR_REGS;
    }

    tcg_target_call_clobber_regs = ALL_VECTOR_REGS;
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_EAX);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_EDX);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_ECX);
    if (TCG_TARGET_REG_BITS == 64) {
#if !defined(_WIN64)
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RDI);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RSI);
#endif
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R8);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R9);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R10);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R11);
    }

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
}
typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[14];
} DebugFrame;

/* We're expecting a 2 byte uleb128 encoded value.  */
QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));

#if !defined(__ELF__)
    /* Host machine without ELF. */
#elif TCG_TARGET_REG_BITS == 64
#define ELF_HOST_MACHINE EM_X86_64
static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = 0x78,             /* sleb128 -8 */
    .h.cie.return_column = 16,

    /* Total FDE size does not include the "len" member.  */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
        12, 7,                          /* DW_CFA_def_cfa %rsp, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde_reg_ofs = {
        0x90, 1,                        /* DW_CFA_offset, %rip, -8 */
        /* The following ordering must match tcg_target_callee_save_regs.  */
        0x86, 2,                        /* DW_CFA_offset, %rbp, -16 */
        0x83, 3,                        /* DW_CFA_offset, %rbx, -24 */
        0x8c, 4,                        /* DW_CFA_offset, %r12, -32 */
        0x8d, 5,                        /* DW_CFA_offset, %r13, -40 */
        0x8e, 6,                        /* DW_CFA_offset, %r14, -48 */
        0x8f, 7,                        /* DW_CFA_offset, %r15, -56 */
    }
};
#else
#define ELF_HOST_MACHINE EM_386
static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = 0x7c,             /* sleb128 -4 */
    .h.cie.return_column = 8,

    /* Total FDE size does not include the "len" member.  */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
        12, 4,                          /* DW_CFA_def_cfa %esp, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde_reg_ofs = {
        0x88, 1,                        /* DW_CFA_offset, %eip, -4 */
        /* The following ordering must match tcg_target_callee_save_regs.  */
        0x85, 2,                        /* DW_CFA_offset, %ebp, -8 */
        0x83, 3,                        /* DW_CFA_offset, %ebx, -12 */
        0x86, 4,                        /* DW_CFA_offset, %esi, -16 */
        0x87, 5,                        /* DW_CFA_offset, %edi, -20 */
    }
};
#endif

#if defined(ELF_HOST_MACHINE)
void tcg_register_jit(void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));