/*
 *    Stack-less Just-In-Time compiler
 *
 *    Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification, are
 * permitted provided that the following conditions are met:
 *
 *   1. Redistributions of source code must retain the above copyright notice, this list of
 *      conditions and the following disclaimer.
 *
 *   2. Redistributions in binary form must reproduce the above copyright notice, this list
 *      of conditions and the following disclaimer in the documentation and/or other materials
 *      provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
 * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/* x86 64-bit arch dependent functions. */
static sljit_s32 emit_load_imm64(struct sljit_compiler *compiler, sljit_s32 reg, sljit_sw imm)
{
	sljit_u8 *inst;

	inst = (sljit_u8*)ensure_buf(compiler, 1 + 2 + sizeof(sljit_sw));
	FAIL_IF(!inst);
	INC_SIZE(2 + sizeof(sljit_sw));
	*inst++ = REX_W | ((reg_map[reg] <= 7) ? 0 : REX_B);
	*inst++ = MOV_r_i32 + (reg_map[reg] & 0x7);
	sljit_unaligned_store_sw(inst, imm);
	return SLJIT_SUCCESS;
}
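
/* Note: the two bytes emitted above are a REX prefix (REX.W, plus REX.B when
   the register maps to r8-r15) followed by the B8+r opcode, so the resulting
   instruction is the 10 byte "movabs reg, imm64" form. */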
static sljit_u8* generate_far_jump_code(struct sljit_jump *jump, sljit_u8 *code_ptr)
{
	sljit_s32 type = jump->flags >> TYPE_SHIFT;

	int short_addr = !(jump->flags & SLJIT_REWRITABLE_JUMP) && !(jump->flags & JUMP_LABEL) && (jump->u.target <= 0xffffffff);

	/* The relative jump below is specialized for this case. */
	SLJIT_ASSERT(reg_map[TMP_REG2] >= 8);

	if (type < SLJIT_JUMP) {
		/* Invert the condition and skip the indirect jump sequence. */
		*code_ptr++ = get_jump_code(type ^ 0x1) - 0x10;
		*code_ptr++ = short_addr ? (6 + 3) : (10 + 3);
	}

	*code_ptr++ = short_addr ? REX_B : (REX_W | REX_B);
	*code_ptr++ = MOV_r_i32 | reg_lmap[TMP_REG2];
	jump->addr = (sljit_uw)code_ptr;

	if (jump->flags & JUMP_LABEL)
		jump->flags |= PATCH_MD;
	else if (short_addr)
		sljit_unaligned_store_s32(code_ptr, (sljit_s32)jump->u.target);
	else
		sljit_unaligned_store_sw(code_ptr, jump->u.target);

	code_ptr += short_addr ? sizeof(sljit_s32) : sizeof(sljit_sw);

	*code_ptr++ = REX_B;
	*code_ptr++ = GROUP_FF;
	*code_ptr++ = MOD_REG | (type >= SLJIT_FAST_CALL ? CALL_rm : JMP_rm) | reg_lmap[TMP_REG2];

	return code_ptr;
}
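
/* The sequence generated above is "mov TMP_REG2, target" followed by an
   indirect "jmp/call TMP_REG2". Conditional jumps are prefixed with a short
   jump of the inverted condition that skips the whole sequence, which is why
   the skip offsets are 6 + 3 bytes (32 bit address) or 10 + 3 bytes (64 bit
   address). */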
static sljit_u8* generate_put_label_code(struct sljit_put_label *put_label, sljit_u8 *code_ptr, sljit_uw max_label)
{
	if (max_label > HALFWORD_MAX) {
		put_label->addr -= put_label->flags;
		put_label->flags = PATCH_MD;
		return code_ptr;
	}

	if (put_label->flags == 0) {
		/* Destination is register. */
		code_ptr = (sljit_u8*)put_label->addr - 2 - sizeof(sljit_uw);

		SLJIT_ASSERT((code_ptr[0] & 0xf8) == REX_W);
		SLJIT_ASSERT((code_ptr[1] & 0xf8) == MOV_r_i32);

		if ((code_ptr[0] & 0x07) != 0) {
			code_ptr[0] = (sljit_u8)(code_ptr[0] & ~0x08);
			code_ptr += 2 + sizeof(sljit_s32);
		}
		else {
			code_ptr[0] = code_ptr[1];
			code_ptr += 1 + sizeof(sljit_s32);
		}

		put_label->addr = (sljit_uw)code_ptr;
		return code_ptr;
	}

	code_ptr -= put_label->flags + (2 + sizeof(sljit_uw));
	SLJIT_MEMMOVE(code_ptr, code_ptr + (2 + sizeof(sljit_uw)), put_label->flags);

	SLJIT_ASSERT((code_ptr[0] & 0xf8) == REX_W);

	if ((code_ptr[1] & 0xf8) == MOV_r_i32) {
		code_ptr += 2 + sizeof(sljit_uw);
		SLJIT_ASSERT((code_ptr[0] & 0xf8) == REX_W);
	}

	SLJIT_ASSERT(code_ptr[1] == MOV_rm_r);

	code_ptr[0] = (sljit_u8)(code_ptr[0] & ~0x4);
	code_ptr[1] = MOV_rm_i32;
	code_ptr[2] = (sljit_u8)(code_ptr[2] & ~(0x7 << 3));

	code_ptr = (sljit_u8*)(put_label->addr - (2 + sizeof(sljit_uw)) + sizeof(sljit_s32));
	put_label->addr = (sljit_uw)code_ptr;
	put_label->flags = 0;
	return code_ptr;
}
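
/* When every label fits in 32 bits (max_label <= HALFWORD_MAX), the code
   above shrinks the previously emitted 64 bit "mov" that loads the label
   address into its 32 bit immediate form and updates put_label->addr to the
   new immediate location; otherwise the instruction is kept unchanged and
   only marked with PATCH_MD for later patching. */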
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compiler,
	sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
	sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
{
	sljit_s32 args, i, tmp, size, saved_register_size;
	sljit_u8 *inst;

	CHECK_ERROR();
	CHECK(check_sljit_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
	set_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);

	/* Emit ENDBR64 at function entry if needed. */
	FAIL_IF(emit_endbranch(compiler));

	compiler->mode32 = 0;

#ifdef _WIN64
	/* Two/four register slots for parameters plus space for xmm6 register if needed. */
	if (fscratches >= 6 || fsaveds >= 1)
		compiler->locals_offset = 6 * sizeof(sljit_sw);
	else
		compiler->locals_offset = ((scratches > 2) ? 4 : 2) * sizeof(sljit_sw);
#endif

	/* Including the return address saved by the call instruction. */
	saved_register_size = GET_SAVED_REGISTERS_SIZE(scratches, saveds, 1);

	tmp = saveds < SLJIT_NUMBER_OF_SAVED_REGISTERS ? (SLJIT_S0 + 1 - saveds) : SLJIT_FIRST_SAVED_REG;
	for (i = SLJIT_S0; i >= tmp; i--) {
		size = reg_map[i] >= 8 ? 2 : 1;
		inst = (sljit_u8*)ensure_buf(compiler, 1 + size);
		FAIL_IF(!inst);
		INC_SIZE(size);
		if (reg_map[i] >= 8)
			*inst++ = REX_B;
		PUSH_REG(reg_lmap[i]);
	}

	for (i = scratches; i >= SLJIT_FIRST_SAVED_REG; i--) {
		size = reg_map[i] >= 8 ? 2 : 1;
		inst = (sljit_u8*)ensure_buf(compiler, 1 + size);
		FAIL_IF(!inst);
		INC_SIZE(size);
		if (reg_map[i] >= 8)
			*inst++ = REX_B;
		PUSH_REG(reg_lmap[i]);
	}

	args = get_arg_count(arg_types);

	if (args > 0) {
		size = args * 3;
		inst = (sljit_u8*)ensure_buf(compiler, 1 + size);
		FAIL_IF(!inst);

		INC_SIZE(size);

#ifndef _WIN64
		if (args > 0) {
			inst[0] = REX_W;
			inst[1] = MOV_r_rm;
			inst[2] = MOD_REG | (reg_map[SLJIT_S0] << 3) | 0x7 /* rdi */;
			inst += 3;
		}
		if (args > 1) {
			inst[0] = REX_W | REX_R;
			inst[1] = MOV_r_rm;
			inst[2] = MOD_REG | (reg_lmap[SLJIT_S1] << 3) | 0x6 /* rsi */;
			inst += 3;
		}
		if (args > 2) {
			inst[0] = REX_W | REX_R;
			inst[1] = MOV_r_rm;
			inst[2] = MOD_REG | (reg_lmap[SLJIT_S2] << 3) | 0x2 /* rdx */;
		}
#else
		if (args > 0) {
			inst[0] = REX_W;
			inst[1] = MOV_r_rm;
			inst[2] = MOD_REG | (reg_map[SLJIT_S0] << 3) | 0x1 /* rcx */;
			inst += 3;
		}
		if (args > 1) {
			inst[0] = REX_W;
			inst[1] = MOV_r_rm;
			inst[2] = MOD_REG | (reg_map[SLJIT_S1] << 3) | 0x2 /* rdx */;
			inst += 3;
		}
		if (args > 2) {
			inst[0] = REX_W | REX_B;
			inst[1] = MOV_r_rm;
			inst[2] = MOD_REG | (reg_map[SLJIT_S2] << 3) | 0x0 /* r8 */;
		}
#endif
	}

	local_size = ((local_size + SLJIT_LOCALS_OFFSET + saved_register_size + 15) & ~15) - saved_register_size;
	compiler->local_size = local_size;

#ifdef _WIN64
	if (local_size > 0) {
		if (local_size <= 4 * 4096) {
			if (local_size > 4096)
				EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(SLJIT_SP), -4096);
			if (local_size > 2 * 4096)
				EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(SLJIT_SP), -4096 * 2);
			if (local_size > 3 * 4096)
				EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(SLJIT_SP), -4096 * 3);
		}
		else {
			EMIT_MOV(compiler, SLJIT_R0, 0, SLJIT_SP, 0);
			EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_IMM, (local_size - 1) >> 12);

			SLJIT_ASSERT (reg_map[SLJIT_R0] == 0);

			EMIT_MOV(compiler, TMP_REG2, 0, SLJIT_MEM1(SLJIT_R0), -4096);
			FAIL_IF(emit_non_cum_binary(compiler, BINARY_OPCODE(SUB),
				SLJIT_R0, 0, SLJIT_R0, 0, SLJIT_IMM, 4096));
			FAIL_IF(emit_non_cum_binary(compiler, BINARY_OPCODE(SUB),
				TMP_REG1, 0, TMP_REG1, 0, SLJIT_IMM, 1));

			inst = (sljit_u8*)ensure_buf(compiler, 1 + 2);
			FAIL_IF(!inst);

			INC_SIZE(2);
			inst[0] = JNE_i8;
			inst[1] = (sljit_s8) -19;
		}

		EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(SLJIT_SP), -local_size);
	}
#endif

	if (local_size > 0) {
		FAIL_IF(emit_non_cum_binary(compiler, BINARY_OPCODE(SUB),
			SLJIT_SP, 0, SLJIT_SP, 0, SLJIT_IMM, local_size));
	}

#ifdef _WIN64
	/* Save xmm6 register: movaps [rsp + 0x20], xmm6 */
	if (fscratches >= 6 || fsaveds >= 1) {
		inst = (sljit_u8*)ensure_buf(compiler, 1 + 5);
		FAIL_IF(!inst);
		INC_SIZE(5);
		*inst++ = GROUP_0F;
		sljit_unaligned_store_s32(inst, 0x20247429);
	}
#endif

	return SLJIT_SUCCESS;
}
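
/* The prologue built above is: optional ENDBR64, pushes for the registers
   that must be preserved, moves of the incoming argument registers into
   SLJIT_S0..SLJIT_S2, and a 16 byte aligned stack allocation. On Windows the
   allocation is preceded by one memory touch per 4 KB page, since the OS
   grows the stack with guard pages that have to be hit in order. */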
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *compiler,
	sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
	sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
{
	sljit_s32 saved_register_size;

	CHECK_ERROR();
	CHECK(check_sljit_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
	set_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);

#ifdef _WIN64
	/* Two/four register slots for parameters plus space for xmm6 register if needed. */
	if (fscratches >= 6 || fsaveds >= 1)
		compiler->locals_offset = 6 * sizeof(sljit_sw);
	else
		compiler->locals_offset = ((scratches > 2) ? 4 : 2) * sizeof(sljit_sw);
#endif

	/* Including the return address saved by the call instruction. */
	saved_register_size = GET_SAVED_REGISTERS_SIZE(scratches, saveds, 1);
	compiler->local_size = ((local_size + SLJIT_LOCALS_OFFSET + saved_register_size + 15) & ~15) - saved_register_size;
	return SLJIT_SUCCESS;
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 src, sljit_sw srcw)
{
	sljit_s32 i, tmp, size;
	sljit_u8 *inst;

	CHECK_ERROR();
	CHECK(check_sljit_emit_return(compiler, op, src, srcw));

	FAIL_IF(emit_mov_before_return(compiler, op, src, srcw));

#ifdef _WIN64
	/* Restore xmm6 register: movaps xmm6, [rsp + 0x20] */
	if (compiler->fscratches >= 6 || compiler->fsaveds >= 1) {
		inst = (sljit_u8*)ensure_buf(compiler, 1 + 5);
		FAIL_IF(!inst);
		INC_SIZE(5);
		*inst++ = GROUP_0F;
		sljit_unaligned_store_s32(inst, 0x20247428);
	}
#endif

	if (compiler->local_size > 0) {
		if (compiler->local_size <= 127) {
			inst = (sljit_u8*)ensure_buf(compiler, 1 + 4);
			FAIL_IF(!inst);
			INC_SIZE(4);
			*inst++ = REX_W;
			*inst++ = GROUP_BINARY_83;
			*inst++ = MOD_REG | ADD | 4;
			*inst = compiler->local_size;
		}
		else {
			inst = (sljit_u8*)ensure_buf(compiler, 1 + 7);
			FAIL_IF(!inst);
			INC_SIZE(7);
			*inst++ = REX_W;
			*inst++ = GROUP_BINARY_81;
			*inst++ = MOD_REG | ADD | 4;
			sljit_unaligned_store_s32(inst, compiler->local_size);
		}
	}

	tmp = compiler->scratches;
	for (i = SLJIT_FIRST_SAVED_REG; i <= tmp; i++) {
		size = reg_map[i] >= 8 ? 2 : 1;
		inst = (sljit_u8*)ensure_buf(compiler, 1 + size);
		FAIL_IF(!inst);
		INC_SIZE(size);
		if (reg_map[i] >= 8)
			*inst++ = REX_B;
		POP_REG(reg_lmap[i]);
	}

	tmp = compiler->saveds < SLJIT_NUMBER_OF_SAVED_REGISTERS ? (SLJIT_S0 + 1 - compiler->saveds) : SLJIT_FIRST_SAVED_REG;
	for (i = tmp; i <= SLJIT_S0; i++) {
		size = reg_map[i] >= 8 ? 2 : 1;
		inst = (sljit_u8*)ensure_buf(compiler, 1 + size);
		FAIL_IF(!inst);
		INC_SIZE(size);
		if (reg_map[i] >= 8)
			*inst++ = REX_B;
		POP_REG(reg_lmap[i]);
	}

	inst = (sljit_u8*)ensure_buf(compiler, 1 + 1);
	FAIL_IF(!inst);
	INC_SIZE(1);
	RET();
	return SLJIT_SUCCESS;
}
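
/* The epilogue mirrors sljit_emit_enter: restore xmm6 on Windows, release the
   locals with "add rsp, local_size", pop the saved registers in reverse
   order, then "ret". */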
/* --------------------------------------------------------------------- */
/*  Operators                                                             */
/* --------------------------------------------------------------------- */
static sljit_s32 emit_do_imm32(struct sljit_compiler *compiler, sljit_u8 rex, sljit_u8 opcode, sljit_sw imm)
{
	sljit_u8 *inst;
	sljit_s32 length = 1 + (rex ? 1 : 0) + sizeof(sljit_s32);

	inst = (sljit_u8*)ensure_buf(compiler, 1 + length);
	FAIL_IF(!inst);
	INC_SIZE(length);
	if (rex)
		*inst++ = rex;
	*inst++ = opcode;
	sljit_unaligned_store_s32(inst, imm);
	return SLJIT_SUCCESS;
}
static sljit_u8* emit_x86_instruction(struct sljit_compiler *compiler, sljit_s32 size,
	/* The register or immediate operand. */
	sljit_s32 a, sljit_sw imma,
	/* The general operand (not immediate). */
	sljit_s32 b, sljit_sw immb)
{
	sljit_u8 *inst;
	sljit_u8 *buf_ptr;
	sljit_u8 rex = 0;
	sljit_s32 flags = size & ~0xf;
	sljit_s32 inst_size;

	/* The immediate operand must be 32 bit. */
	SLJIT_ASSERT(!(a & SLJIT_IMM) || compiler->mode32 || IS_HALFWORD(imma));
	/* Both cannot be switched on. */
	SLJIT_ASSERT((flags & (EX86_BIN_INS | EX86_SHIFT_INS)) != (EX86_BIN_INS | EX86_SHIFT_INS));
	/* Size flags not allowed for typed instructions. */
	SLJIT_ASSERT(!(flags & (EX86_BIN_INS | EX86_SHIFT_INS)) || (flags & (EX86_BYTE_ARG | EX86_HALF_ARG)) == 0);
	/* Both size flags cannot be switched on. */
	SLJIT_ASSERT((flags & (EX86_BYTE_ARG | EX86_HALF_ARG)) != (EX86_BYTE_ARG | EX86_HALF_ARG));
	/* SSE2 and immediate is not possible. */
	SLJIT_ASSERT(!(a & SLJIT_IMM) || !(flags & EX86_SSE2));
	SLJIT_ASSERT((flags & (EX86_PREF_F2 | EX86_PREF_F3)) != (EX86_PREF_F2 | EX86_PREF_F3)
		&& (flags & (EX86_PREF_F2 | EX86_PREF_66)) != (EX86_PREF_F2 | EX86_PREF_66)
		&& (flags & (EX86_PREF_F3 | EX86_PREF_66)) != (EX86_PREF_F3 | EX86_PREF_66));

	size &= 0xf;
	inst_size = size;

	if (!compiler->mode32 && !(flags & EX86_NO_REXW))
		rex |= REX_W;
	else if (flags & EX86_REX)
		rex |= REX;

	if (flags & (EX86_PREF_F2 | EX86_PREF_F3))
		inst_size++;
	if (flags & EX86_PREF_66)
		inst_size++;

	/* Calculate size of b. */
	inst_size += 1; /* mod r/m byte. */
	if (b & SLJIT_MEM) {
		if (!(b & OFFS_REG_MASK)) {
			if (NOT_HALFWORD(immb)) {
				PTR_FAIL_IF(emit_load_imm64(compiler, TMP_REG2, immb));
				immb = 0;
				if (b & REG_MASK)
					b |= TO_OFFS_REG(TMP_REG2);
				else
					b |= TMP_REG2;
			}
			else if (reg_lmap[b & REG_MASK] == 4)
				b |= TO_OFFS_REG(SLJIT_SP);
		}

		if ((b & REG_MASK) == SLJIT_UNUSED)
			inst_size += 1 + sizeof(sljit_s32); /* SIB byte required to avoid RIP based addressing. */
		else {
			if (reg_map[b & REG_MASK] >= 8)
				rex |= REX_B;

			if (immb != 0 && (!(b & OFFS_REG_MASK) || (b & OFFS_REG_MASK) == TO_OFFS_REG(SLJIT_SP))) {
				/* Immediate operand. */
				if (immb <= 127 && immb >= -128)
					inst_size += sizeof(sljit_s8);
				else
					inst_size += sizeof(sljit_s32);
			}
			else if (reg_lmap[b & REG_MASK] == 5)
				inst_size += sizeof(sljit_s8);

			if ((b & OFFS_REG_MASK) != SLJIT_UNUSED) {
				inst_size += 1; /* SIB byte. */
				if (reg_map[OFFS_REG(b)] >= 8)
					rex |= REX_X;
			}
		}
	}
	else if (!(flags & EX86_SSE2_OP2)) {
		if (reg_map[b] >= 8)
			rex |= REX_B;
	}
	else if (freg_map[b] >= 8)
		rex |= REX_B;

	if (a & SLJIT_IMM) {
		if (flags & EX86_BIN_INS) {
			if (imma <= 127 && imma >= -128) {
				inst_size += 1;
				flags |= EX86_BYTE_ARG;
			} else
				inst_size += 4;
		}
		else if (flags & EX86_SHIFT_INS) {
			imma &= compiler->mode32 ? 0x1f : 0x3f;
			if (imma != 1) {
				inst_size++;
				flags |= EX86_BYTE_ARG;
			}
		} else if (flags & EX86_BYTE_ARG)
			inst_size++;
		else if (flags & EX86_HALF_ARG)
			inst_size += sizeof(short);
		else
			inst_size += sizeof(sljit_s32);
	}
	else {
		SLJIT_ASSERT(!(flags & EX86_SHIFT_INS) || a == SLJIT_PREF_SHIFT_REG);
		/* reg_map[SLJIT_PREF_SHIFT_REG] is less than 8. */
		if (!(flags & EX86_SSE2_OP1)) {
			if (reg_map[a] >= 8)
				rex |= REX_R;
		}
		else if (freg_map[a] >= 8)
			rex |= REX_R;
	}

	if (rex)
		inst_size++;

	inst = (sljit_u8*)ensure_buf(compiler, 1 + inst_size);
	PTR_FAIL_IF(!inst);

	/* Encoding the byte. */
	INC_SIZE(inst_size);
	if (flags & EX86_PREF_F2)
		*inst++ = 0xf2;
	if (flags & EX86_PREF_F3)
		*inst++ = 0xf3;
	if (flags & EX86_PREF_66)
		*inst++ = 0x66;
	if (rex)
		*inst++ = rex;
	buf_ptr = inst + size;

	/* Encode mod/rm byte. */
	if (!(flags & EX86_SHIFT_INS)) {
		if ((flags & EX86_BIN_INS) && (a & SLJIT_IMM))
			*inst = (flags & EX86_BYTE_ARG) ? GROUP_BINARY_83 : GROUP_BINARY_81;

		if (a & SLJIT_IMM)
			*buf_ptr = 0;
		else if (!(flags & EX86_SSE2_OP1))
			*buf_ptr = reg_lmap[a] << 3;
		else
			*buf_ptr = freg_lmap[a] << 3;
	}
	else {
		if (a & SLJIT_IMM) {
			if (imma == 1)
				*inst = GROUP_SHIFT_1;
			else
				*inst = GROUP_SHIFT_N;
		} else
			*inst = GROUP_SHIFT_CL;
		*buf_ptr = 0;
	}

	if (!(b & SLJIT_MEM))
		*buf_ptr++ |= MOD_REG + ((!(flags & EX86_SSE2_OP2)) ? reg_lmap[b] : freg_lmap[b]);
	else if ((b & REG_MASK) != SLJIT_UNUSED) {
		if ((b & OFFS_REG_MASK) == SLJIT_UNUSED || (b & OFFS_REG_MASK) == TO_OFFS_REG(SLJIT_SP)) {
			if (immb != 0 || reg_lmap[b & REG_MASK] == 5) {
				if (immb <= 127 && immb >= -128)
					*buf_ptr |= 0x40;
				else
					*buf_ptr |= 0x80;
			}

			if ((b & OFFS_REG_MASK) == SLJIT_UNUSED)
				*buf_ptr++ |= reg_lmap[b & REG_MASK];
			else {
				*buf_ptr++ |= 0x04;
				*buf_ptr++ = reg_lmap[b & REG_MASK] | (reg_lmap[OFFS_REG(b)] << 3);
			}

			if (immb != 0 || reg_lmap[b & REG_MASK] == 5) {
				if (immb <= 127 && immb >= -128)
					*buf_ptr++ = immb; /* 8 bit displacement. */
				else {
					sljit_unaligned_store_s32(buf_ptr, immb); /* 32 bit displacement. */
					buf_ptr += sizeof(sljit_s32);
				}
			}
		}
		else {
			if (reg_lmap[b & REG_MASK] == 5)
				*buf_ptr |= 0x40;
			*buf_ptr++ |= 0x04;
			*buf_ptr++ = reg_lmap[b & REG_MASK] | (reg_lmap[OFFS_REG(b)] << 3) | (immb << 6);
			if (reg_lmap[b & REG_MASK] == 5)
				*buf_ptr++ = 0;
		}
	}
	else {
		*buf_ptr++ |= 0x04;
		*buf_ptr++ = 0x25;
		sljit_unaligned_store_s32(buf_ptr, immb); /* 32 bit displacement. */
		buf_ptr += sizeof(sljit_s32);
	}

	if (a & SLJIT_IMM) {
		if (flags & EX86_BYTE_ARG)
			*buf_ptr = imma;
		else if (flags & EX86_HALF_ARG)
			sljit_unaligned_store_s16(buf_ptr, imma);
		else if (!(flags & EX86_SHIFT_INS))
			sljit_unaligned_store_s32(buf_ptr, imma);
	}

	return !(flags & EX86_SHIFT_INS) ? inst : (inst + 1);
}
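
/* The returned pointer points at the opcode position: the caller stores the
   opcode byte(s) there, while the prefixes, the ModR/M byte, the optional SIB
   byte, the displacement and the immediate have already been laid out around
   it by this function. */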
/* --------------------------------------------------------------------- */
/*  Call / return instructions                                            */
/* --------------------------------------------------------------------- */
#ifndef _WIN64

static sljit_s32 call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_types, sljit_s32 *src_ptr, sljit_sw srcw)
{
	sljit_s32 src = src_ptr ? (*src_ptr) : 0;
	sljit_s32 word_arg_count = 0;

	SLJIT_ASSERT(reg_map[SLJIT_R1] == 6 && reg_map[SLJIT_R3] == 1 && reg_map[TMP_REG1] == 2);

	compiler->mode32 = 0;

	/* Remove return value. */
	arg_types >>= SLJIT_DEF_SHIFT;

	while (arg_types) {
		if ((arg_types & SLJIT_DEF_MASK) < SLJIT_ARG_TYPE_F32)
			word_arg_count++;
		arg_types >>= SLJIT_DEF_SHIFT;
	}

	if (word_arg_count == 0)
		return SLJIT_SUCCESS;

	if (src & SLJIT_MEM) {
		ADJUST_LOCAL_OFFSET(src, srcw);
		EMIT_MOV(compiler, TMP_REG2, 0, src, srcw);
		*src_ptr = TMP_REG2;
	}
	else if (src == SLJIT_R2 && word_arg_count >= SLJIT_R2)
		*src_ptr = TMP_REG1;

	if (word_arg_count >= 3)
		EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_R2, 0);
	return emit_mov(compiler, SLJIT_R2, 0, SLJIT_R0, 0);
}
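
/* Reading of the shuffle above: System V AMD64 passes the first integer
   arguments in rdi, rsi, rdx, rcx, r8 and r9. The asserts pin SLJIT_R1 to rsi
   and TMP_REG1 to rdx, so the second argument is already in place; the code
   only copies the first argument into SLJIT_R2 (the register that backs the
   first argument slot here) and, when there are three word arguments, the
   third one into rdx via TMP_REG1 first. */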
#else

static sljit_s32 call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_types, sljit_s32 *src_ptr, sljit_sw srcw)
{
	sljit_s32 src = src_ptr ? (*src_ptr) : 0;
	sljit_s32 arg_count = 0;
	sljit_s32 word_arg_count = 0;
	sljit_s32 float_arg_count = 0;
	sljit_s32 types = 0;
	sljit_s32 data_transfer = 0;
	static sljit_u8 word_arg_regs[5] = { 0, SLJIT_R3, SLJIT_R1, SLJIT_R2, TMP_REG1 };

	SLJIT_ASSERT(reg_map[SLJIT_R3] == 1 && reg_map[SLJIT_R1] == 2 && reg_map[SLJIT_R2] == 8 && reg_map[TMP_REG1] == 9);

	compiler->mode32 = 0;
	arg_types >>= SLJIT_DEF_SHIFT;

	while (arg_types) {
		types = (types << SLJIT_DEF_SHIFT) | (arg_types & SLJIT_DEF_MASK);

		switch (arg_types & SLJIT_DEF_MASK) {
		case SLJIT_ARG_TYPE_F32:
		case SLJIT_ARG_TYPE_F64:
			arg_count++;
			float_arg_count++;

			if (arg_count != float_arg_count)
				data_transfer = 1;
			break;
		default:
			arg_count++;
			word_arg_count++;

			if (arg_count != word_arg_count || arg_count != word_arg_regs[arg_count]) {
				data_transfer = 1;

				if (src == word_arg_regs[arg_count]) {
					EMIT_MOV(compiler, TMP_REG2, 0, src, 0);
					*src_ptr = TMP_REG2;
				}
			}
			break;
		}

		arg_types >>= SLJIT_DEF_SHIFT;
	}

	if (!data_transfer)
		return SLJIT_SUCCESS;

	if (src & SLJIT_MEM) {
		ADJUST_LOCAL_OFFSET(src, srcw);
		EMIT_MOV(compiler, TMP_REG2, 0, src, srcw);
		*src_ptr = TMP_REG2;
	}

	while (types) {
		switch (types & SLJIT_DEF_MASK) {
		case SLJIT_ARG_TYPE_F32:
			if (arg_count != float_arg_count)
				FAIL_IF(emit_sse2_load(compiler, 1, arg_count, float_arg_count, 0));
			arg_count--;
			float_arg_count--;
			break;
		case SLJIT_ARG_TYPE_F64:
			if (arg_count != float_arg_count)
				FAIL_IF(emit_sse2_load(compiler, 0, arg_count, float_arg_count, 0));
			arg_count--;
			float_arg_count--;
			break;
		default:
			if (arg_count != word_arg_count || arg_count != word_arg_regs[arg_count])
				EMIT_MOV(compiler, word_arg_regs[arg_count], 0, word_arg_count, 0);
			arg_count--;
			word_arg_count--;
			break;
		}

		types >>= SLJIT_DEF_SHIFT;
	}

	return SLJIT_SUCCESS;
}

#endif
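
/* On Windows x64 the first four arguments travel in rcx, rdx, r8 and r9 (or
   in xmm0-xmm3 for floats), and each argument consumes one slot regardless of
   its type. The word_arg_regs table in the Windows variant above maps each
   argument slot to the SLJIT register backed by the matching hardware
   register, as pinned down by the assert on reg_map. */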
SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compiler *compiler, sljit_s32 type,
	sljit_s32 arg_types)
{
	CHECK_ERROR_PTR();
	CHECK_PTR(check_sljit_emit_call(compiler, type, arg_types));

	PTR_FAIL_IF(call_with_args(compiler, arg_types, NULL, 0));

#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
		|| (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
	compiler->skip_checks = 1;
#endif

	return sljit_emit_jump(compiler, type);
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compiler, sljit_s32 type,
	sljit_s32 arg_types,
	sljit_s32 src, sljit_sw srcw)
{
	CHECK_ERROR();
	CHECK(check_sljit_emit_icall(compiler, type, arg_types, src, srcw));

	FAIL_IF(call_with_args(compiler, arg_types, &src, srcw));

#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
		|| (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
	compiler->skip_checks = 1;
#endif

	return sljit_emit_ijump(compiler, type, src, srcw);
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw)
{
	sljit_u8 *inst;

	CHECK_ERROR();
	CHECK(check_sljit_emit_fast_enter(compiler, dst, dstw));
	ADJUST_LOCAL_OFFSET(dst, dstw);

	/* For UNUSED dst. Uncommon, but possible. */
	if (dst == SLJIT_UNUSED)
		dst = TMP_REG1;

	if (FAST_IS_REG(dst)) {
		if (reg_map[dst] < 8) {
			inst = (sljit_u8*)ensure_buf(compiler, 1 + 1);
			FAIL_IF(!inst);
			INC_SIZE(1);
			POP_REG(reg_lmap[dst]);
			return SLJIT_SUCCESS;
		}

		inst = (sljit_u8*)ensure_buf(compiler, 1 + 2);
		FAIL_IF(!inst);
		INC_SIZE(2);
		*inst++ = REX_B;
		POP_REG(reg_lmap[dst]);
		return SLJIT_SUCCESS;
	}

	/* REX_W is not necessary (src is not immediate). */
	compiler->mode32 = 1;
	inst = emit_x86_instruction(compiler, 1, 0, 0, dst, dstw);
	FAIL_IF(!inst);
	*inst++ = POP_rm;
	return SLJIT_SUCCESS;
}
static sljit_s32 emit_fast_return(struct sljit_compiler *compiler, sljit_s32 src, sljit_sw srcw)
{
	sljit_u8 *inst;

	if (FAST_IS_REG(src)) {
		if (reg_map[src] < 8) {
			inst = (sljit_u8*)ensure_buf(compiler, 1 + 1 + 1);
			FAIL_IF(!inst);

			INC_SIZE(1 + 1);
			PUSH_REG(reg_lmap[src]);
		}
		else {
			inst = (sljit_u8*)ensure_buf(compiler, 1 + 2 + 1);
			FAIL_IF(!inst);

			INC_SIZE(2 + 1);
			*inst++ = REX_B;
			PUSH_REG(reg_lmap[src]);
		}
	}
	else {
		/* REX_W is not necessary (src is not immediate). */
		compiler->mode32 = 1;
		inst = emit_x86_instruction(compiler, 1, 0, 0, src, srcw);
		FAIL_IF(!inst);
		*inst++ = GROUP_FF;
		*inst |= PUSH_rm;

		inst = (sljit_u8*)ensure_buf(compiler, 1 + 1);
		FAIL_IF(!inst);
		INC_SIZE(1);
	}

	RET();
	return SLJIT_SUCCESS;
}
/* --------------------------------------------------------------------- */
/*  Extend input                                                          */
/* --------------------------------------------------------------------- */
static sljit_s32 emit_mov_int(struct sljit_compiler *compiler, sljit_s32 sign,
	sljit_s32 dst, sljit_sw dstw,
	sljit_s32 src, sljit_sw srcw)
{
	sljit_u8 *inst;
	sljit_s32 dst_r;

	compiler->mode32 = 0;

	if (dst == SLJIT_UNUSED && !(src & SLJIT_MEM))
		return SLJIT_SUCCESS; /* Empty instruction. */

	if (src & SLJIT_IMM) {
		if (FAST_IS_REG(dst)) {
			if (sign || ((sljit_uw)srcw <= 0x7fffffff)) {
				inst = emit_x86_instruction(compiler, 1, SLJIT_IMM, (sljit_sw)(sljit_s32)srcw, dst, dstw);
				FAIL_IF(!inst);
				*inst = MOV_rm_i32;
				return SLJIT_SUCCESS;
			}
			return emit_load_imm64(compiler, dst, srcw);
		}
		compiler->mode32 = 1;
		inst = emit_x86_instruction(compiler, 1, SLJIT_IMM, (sljit_sw)(sljit_s32)srcw, dst, dstw);
		FAIL_IF(!inst);
		*inst = MOV_rm_i32;
		compiler->mode32 = 0;
		return SLJIT_SUCCESS;
	}

	dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1;

	if ((dst & SLJIT_MEM) && FAST_IS_REG(src))
		dst_r = src;

	if (sign) {
		inst = emit_x86_instruction(compiler, 1, dst_r, 0, src, srcw);
		FAIL_IF(!inst);
		*inst++ = MOVSXD_r_rm;
	}
	else {
		compiler->mode32 = 1;
		FAIL_IF(emit_mov(compiler, dst_r, 0, src, srcw));
		compiler->mode32 = 0;
	}

	if (dst & SLJIT_MEM) {
		compiler->mode32 = 1;
		inst = emit_x86_instruction(compiler, 1, dst_r, 0, dst, dstw);
		FAIL_IF(!inst);
		*inst = MOV_rm_r;
		compiler->mode32 = 0;
	}

	return SLJIT_SUCCESS;
}
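
/* Only the signed path needs an explicit MOVSXD: on x86-64, writing a 32 bit
   register already zero extends the value into the full 64 bit register, so
   the unsigned path can use a plain 32 bit mov (mode32 = 1). */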
static sljit_s32 skip_frames_before_return(struct sljit_compiler *compiler)
{
	sljit_s32 tmp, size;

	/* Don't adjust shadow stack if it isn't enabled. */
	if (!cpu_has_shadow_stack())
		return SLJIT_SUCCESS;

	size = compiler->local_size;
	tmp = compiler->scratches;
	if (tmp >= SLJIT_FIRST_SAVED_REG)
		size += (tmp - SLJIT_FIRST_SAVED_REG + 1) * sizeof(sljit_uw);
	tmp = compiler->saveds < SLJIT_NUMBER_OF_SAVED_REGISTERS ? (SLJIT_S0 + 1 - compiler->saveds) : SLJIT_FIRST_SAVED_REG;
	if (tmp <= SLJIT_S0)
		size += (SLJIT_S0 - tmp + 1) * sizeof(sljit_uw);

	return adjust_shadow_stack(compiler, SLJIT_UNUSED, 0, SLJIT_SP, size);
}