/*
 *    Stack-less Just-In-Time compiler
 *
 *    Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification, are
 * permitted provided that the following conditions are met:
 *
 *   1. Redistributions of source code must retain the above copyright notice, this list of
 *      conditions and the following disclaimer.
 *
 *   2. Redistributions in binary form must reproduce the above copyright notice, this list
 *      of conditions and the following disclaimer in the documentation and/or other materials
 *      provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
 * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* x86 64-bit arch dependent functions. */

/* --------------------------------------------------------------------- */
/* --------------------------------------------------------------------- */

static sljit_s32 emit_load_imm64(struct sljit_compiler *compiler, sljit_s32 reg, sljit_sw imm)
{
	sljit_u8 *inst;

	inst = (sljit_u8*)ensure_buf(compiler, 1 + 2 + sizeof(sljit_sw));
	FAIL_IF(!inst);
	INC_SIZE(2 + sizeof(sljit_sw));
	*inst++ = REX_W | ((reg_map[reg] <= 7) ? 0 : REX_B);
	*inst++ = U8(MOV_r_i32 | (reg_map[reg] & 0x7));
	sljit_unaligned_store_sw(inst, imm);
	return SLJIT_SUCCESS;
}
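
/* Note: emit_load_imm64 produces the 10 byte pattern
   REX.W [| REX.B], B8+rd (MOV_r_i32), imm64 - i.e. "mov reg, imm64" -
   which is exactly the 2 + sizeof(sljit_sw) bytes reserved above. */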

static sljit_s32 emit_do_imm32(struct sljit_compiler *compiler, sljit_u8 rex, sljit_u8 opcode, sljit_sw imm)
{
	sljit_u8 *inst;
	sljit_uw length = (rex ? 2 : 1) + sizeof(sljit_s32);

	inst = (sljit_u8*)ensure_buf(compiler, 1 + length);
	FAIL_IF(!inst);
	INC_SIZE(length);
	if (rex)
		*inst++ = rex;
	*inst++ = opcode;
	sljit_unaligned_store_s32(inst, (sljit_s32)imm);
	return SLJIT_SUCCESS;
}

static sljit_u8* emit_x86_instruction(struct sljit_compiler *compiler, sljit_uw size,
	/* The register or immediate operand. */
	sljit_s32 a, sljit_sw imma,
	/* The general operand (not immediate). */
	sljit_s32 b, sljit_sw immb)
{
	sljit_u8 *inst;
	sljit_u8 *buf_ptr;
	sljit_u8 rex = 0;
	sljit_u8 reg_lmap_b;
	sljit_uw flags = size;
	sljit_uw inst_size;

	/* The immediate operand must be 32 bit. */
	SLJIT_ASSERT(!(a & SLJIT_IMM) || compiler->mode32 || IS_HALFWORD(imma));
	/* Both cannot be switched on. */
	SLJIT_ASSERT((flags & (EX86_BIN_INS | EX86_SHIFT_INS)) != (EX86_BIN_INS | EX86_SHIFT_INS));
	/* Size flags not allowed for typed instructions. */
	SLJIT_ASSERT(!(flags & (EX86_BIN_INS | EX86_SHIFT_INS)) || (flags & (EX86_BYTE_ARG | EX86_HALF_ARG)) == 0);
	/* Both size flags cannot be switched on. */
	SLJIT_ASSERT((flags & (EX86_BYTE_ARG | EX86_HALF_ARG)) != (EX86_BYTE_ARG | EX86_HALF_ARG));
	/* SSE2 and immediate is not possible. */
	SLJIT_ASSERT(!(a & SLJIT_IMM) || !(flags & EX86_SSE2));
	SLJIT_ASSERT((flags & (EX86_PREF_F2 | EX86_PREF_F3)) != (EX86_PREF_F2 | EX86_PREF_F3)
		&& (flags & (EX86_PREF_F2 | EX86_PREF_66)) != (EX86_PREF_F2 | EX86_PREF_66)
		&& (flags & (EX86_PREF_F3 | EX86_PREF_66)) != (EX86_PREF_F3 | EX86_PREF_66));

	size &= 0xf;
	inst_size = size;

	if (!compiler->mode32 && !(flags & EX86_NO_REXW))
		rex |= REX_W;
	else if (flags & EX86_REX)
		rex |= REX;

	if (flags & (EX86_PREF_F2 | EX86_PREF_F3))
		inst_size++;
	if (flags & EX86_PREF_66)
		inst_size++;

	/* Calculate size of b. */
	inst_size += 1; /* mod r/m byte. */
	if (b & SLJIT_MEM) {
		if (!(b & OFFS_REG_MASK)) {
			if (NOT_HALFWORD(immb)) {
				PTR_FAIL_IF(emit_load_imm64(compiler, TMP_REG2, immb));
				immb = 0;
				if (b & REG_MASK)
					b |= TO_OFFS_REG(TMP_REG2);
				else
					b |= TMP_REG2;
			}
			else if (reg_lmap[b & REG_MASK] == 4)
				b |= TO_OFFS_REG(SLJIT_SP);
		}

		if (!(b & REG_MASK))
			inst_size += 1 + sizeof(sljit_s32); /* SIB byte required to avoid RIP based addressing. */
		else {
			if (reg_map[b & REG_MASK] >= 8)
				rex |= REX_B;

			if (immb != 0 && (!(b & OFFS_REG_MASK) || (b & OFFS_REG_MASK) == TO_OFFS_REG(SLJIT_SP))) {
				/* Immediate operand. */
				if (immb <= 127 && immb >= -128)
					inst_size += sizeof(sljit_s8);
				else
					inst_size += sizeof(sljit_s32);
			}
			else if (reg_lmap[b & REG_MASK] == 5)
				inst_size += sizeof(sljit_s8);
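
			/* ModRM displacement sizing: a base register whose low three bits are 5
			   (rbp or r13) has no "no displacement" encoding, so at least a zero disp8
			   must be reserved for it; otherwise immb selects a disp8 for [-128, 127]
			   and a full disp32 for anything larger. */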

			if (b & OFFS_REG_MASK) {
				inst_size += 1; /* SIB byte. */
				if (reg_map[OFFS_REG(b)] >= 8)
					rex |= REX_X;
			}
		}
	}
	else if (!(flags & EX86_SSE2_OP2)) {
		if (reg_map[b] >= 8)
			rex |= REX_B;
	}
	else if (freg_map[b] >= 8)
		rex |= REX_B;

	if (a & SLJIT_IMM) {
		if (flags & EX86_BIN_INS) {
			if (imma <= 127 && imma >= -128) {
				inst_size += 1;
				flags |= EX86_BYTE_ARG;
			} else
				inst_size += 4;
		}
		else if (flags & EX86_SHIFT_INS) {
			imma &= compiler->mode32 ? 0x1f : 0x3f;
			if (imma != 1) {
				inst_size++;
				flags |= EX86_BYTE_ARG;
			}
		} else if (flags & EX86_BYTE_ARG)
			inst_size++;
		else if (flags & EX86_HALF_ARG)
			inst_size += sizeof(short);
		else
			inst_size += sizeof(sljit_s32);
	}
	else {
		SLJIT_ASSERT(!(flags & EX86_SHIFT_INS) || a == SLJIT_PREF_SHIFT_REG);
		/* reg_map[SLJIT_PREF_SHIFT_REG] is less than 8. */
		if (!(flags & EX86_SSE2_OP1)) {
			if (reg_map[a] >= 8)
				rex |= REX_R;
		}
		else if (freg_map[a] >= 8)
			rex |= REX_R;
	}

	if (rex)
		inst_size++;

	inst = (sljit_u8*)ensure_buf(compiler, 1 + inst_size);
	PTR_FAIL_IF(!inst);

	/* Encoding the byte. */
	INC_SIZE(inst_size);
	if (flags & EX86_PREF_F2)
		*inst++ = 0xf2;
	if (flags & EX86_PREF_F3)
		*inst++ = 0xf3;
	if (flags & EX86_PREF_66)
		*inst++ = 0x66;
	if (rex)
		*inst++ = rex;
	buf_ptr = inst + size;

	/* Encode mod/rm byte. */
	if (!(flags & EX86_SHIFT_INS)) {
		if ((flags & EX86_BIN_INS) && (a & SLJIT_IMM))
			*inst = (flags & EX86_BYTE_ARG) ? GROUP_BINARY_83 : GROUP_BINARY_81;

		if (a & SLJIT_IMM)
			*buf_ptr = 0;
		else if (!(flags & EX86_SSE2_OP1))
			*buf_ptr = U8(reg_lmap[a] << 3);
		else
			*buf_ptr = U8(freg_lmap[a] << 3);
	}
	else {
		if (a & SLJIT_IMM) {
			if (imma == 1)
				*inst = GROUP_SHIFT_1;
			else
				*inst = GROUP_SHIFT_N;
		} else
			*inst = GROUP_SHIFT_CL;
		*buf_ptr = 0;
	}

	if (!(b & SLJIT_MEM)) {
		*buf_ptr = U8(*buf_ptr | MOD_REG | (!(flags & EX86_SSE2_OP2) ? reg_lmap[b] : freg_lmap[b]));
		buf_ptr++;
	} else if (b & REG_MASK) {
		reg_lmap_b = reg_lmap[b & REG_MASK];

		if (!(b & OFFS_REG_MASK) || (b & OFFS_REG_MASK) == TO_OFFS_REG(SLJIT_SP) || reg_lmap_b == 5) {
			if (immb != 0 || reg_lmap_b == 5) {
				if (immb <= 127 && immb >= -128)
					*buf_ptr |= 0x40;
				else
					*buf_ptr |= 0x80;
			}

			if (!(b & OFFS_REG_MASK))
				*buf_ptr++ |= reg_lmap_b;
			else {
				*buf_ptr++ |= 0x04;
				*buf_ptr++ = U8(reg_lmap_b | (reg_lmap[OFFS_REG(b)] << 3));
			}

			if (immb != 0 || reg_lmap_b == 5) {
				if (immb <= 127 && immb >= -128)
					*buf_ptr++ = U8(immb); /* 8 bit displacement. */
				else {
					sljit_unaligned_store_s32(buf_ptr, (sljit_s32)immb); /* 32 bit displacement. */
					buf_ptr += sizeof(sljit_s32);
				}
			}
		}
		else {
			*buf_ptr++ |= 0x04;
			*buf_ptr++ = U8(reg_lmap_b | (reg_lmap[OFFS_REG(b)] << 3) | (immb << 6));
		}
	}
	else {
		*buf_ptr++ |= 0x04;
		*buf_ptr++ = 0x25;
		sljit_unaligned_store_s32(buf_ptr, (sljit_s32)immb); /* 32 bit displacement. */
		buf_ptr += sizeof(sljit_s32);
	}

	if (a & SLJIT_IMM) {
		if (flags & EX86_BYTE_ARG)
			*buf_ptr = U8(imma);
		else if (flags & EX86_HALF_ARG)
			sljit_unaligned_store_s16(buf_ptr, (sljit_s16)imma);
		else if (!(flags & EX86_SHIFT_INS))
			sljit_unaligned_store_s32(buf_ptr, (sljit_s32)imma);
	}

	return !(flags & EX86_SHIFT_INS) ? inst : (inst + 1);
}
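
/* The buffer filled by emit_x86_instruction is laid out as
   [66/F2/F3 prefix] [REX] [size opcode bytes] [ModRM] [SIB] [disp8/32] [imm8/16/32].
   The returned pointer addresses the reserved opcode byte(s); callers normally store
   the opcode there (for the binary-immediate and shift forms the group opcode has
   already been filled in above, and shifts receive the slot following it). */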

/* --------------------------------------------------------------------- */
/* --------------------------------------------------------------------- */

static sljit_u8* generate_far_jump_code(struct sljit_jump *jump, sljit_u8 *code_ptr)
{
	sljit_uw type = jump->flags >> TYPE_SHIFT;

	int short_addr = !(jump->flags & SLJIT_REWRITABLE_JUMP) && !(jump->flags & JUMP_LABEL) && (jump->u.target <= 0xffffffff);

	/* The relative jump below is specialized for this case. */
	SLJIT_ASSERT(reg_map[TMP_REG2] >= 8);

	if (type < SLJIT_JUMP) {
		*code_ptr++ = U8(get_jump_code(type ^ 0x1) - 0x10);
		*code_ptr++ = short_addr ? (6 + 3) : (10 + 3);
	}

	*code_ptr++ = short_addr ? REX_B : (REX_W | REX_B);
	*code_ptr++ = MOV_r_i32 | reg_lmap[TMP_REG2];
	jump->addr = (sljit_uw)code_ptr;

	if (jump->flags & JUMP_LABEL)
		jump->flags |= PATCH_MD;
	else if (short_addr)
		sljit_unaligned_store_s32(code_ptr, (sljit_s32)jump->u.target);
	else
		sljit_unaligned_store_sw(code_ptr, (sljit_sw)jump->u.target);

	code_ptr += short_addr ? sizeof(sljit_s32) : sizeof(sljit_sw);

	*code_ptr++ = REX_B;
	*code_ptr++ = GROUP_FF;
	*code_ptr++ = U8(MOD_REG | (type >= SLJIT_FAST_CALL ? CALL_rm : JMP_rm) | reg_lmap[TMP_REG2]);

	return code_ptr;
}
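
/* A far jump is "mov TMP_REG2, target" followed by "call/jmp TMP_REG2" (FF /2 or /4).
   For conditional jumps the inverted Jcc emitted above skips this whole sequence:
   6 + 3 bytes when the target fits in 32 bits (REX.B, B8+rd, imm32, then the 3 byte
   indirect jump), 10 + 3 bytes when a full 64 bit immediate is needed. */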

static sljit_u8* generate_put_label_code(struct sljit_put_label *put_label, sljit_u8 *code_ptr, sljit_uw max_label)
{
	if (max_label > HALFWORD_MAX) {
		put_label->addr -= put_label->flags;
		put_label->flags = PATCH_MD;
		return code_ptr;
	}

	if (put_label->flags == 0) {
		/* Destination is register. */
		code_ptr = (sljit_u8*)put_label->addr - 2 - sizeof(sljit_uw);

		SLJIT_ASSERT((code_ptr[0] & 0xf8) == REX_W);
		SLJIT_ASSERT((code_ptr[1] & 0xf8) == MOV_r_i32);

		if ((code_ptr[0] & 0x07) != 0) {
			code_ptr[0] = U8(code_ptr[0] & ~0x08);
			code_ptr += 2 + sizeof(sljit_s32);
		}
		else {
			code_ptr[0] = code_ptr[1];
			code_ptr += 1 + sizeof(sljit_s32);
		}

		put_label->addr = (sljit_uw)code_ptr;
		return code_ptr;
	}

	code_ptr -= put_label->flags + (2 + sizeof(sljit_uw));
	SLJIT_MEMMOVE(code_ptr, code_ptr + (2 + sizeof(sljit_uw)), put_label->flags);

	SLJIT_ASSERT((code_ptr[0] & 0xf8) == REX_W);

	if ((code_ptr[1] & 0xf8) == MOV_r_i32) {
		code_ptr += 2 + sizeof(sljit_uw);
		SLJIT_ASSERT((code_ptr[0] & 0xf8) == REX_W);
	}

	SLJIT_ASSERT(code_ptr[1] == MOV_rm_r);

	code_ptr[0] = U8(code_ptr[0] & ~0x4);
	code_ptr[1] = MOV_rm_i32;
	code_ptr[2] = U8(code_ptr[2] & ~(0x7 << 3));

	code_ptr = (sljit_u8*)(put_label->addr - (2 + sizeof(sljit_uw)) + sizeof(sljit_s32));
	put_label->addr = (sljit_uw)code_ptr;
	put_label->flags = 0;
	return code_ptr;
}

SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compiler,
	sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
	sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
{
	sljit_uw size;
	sljit_s32 word_arg_count = 0;
	sljit_s32 saved_arg_count = 0;
	sljit_s32 saved_regs_size, tmp, i;
#ifdef _WIN64
	sljit_s32 saved_float_regs_size;
	sljit_s32 saved_float_regs_offset = 0;
	sljit_s32 float_arg_count = 0;
#endif /* _WIN64 */
	sljit_u8 *inst;

	CHECK_ERROR();
	CHECK(check_sljit_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
	set_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);

	/* Emit ENDBR64 at function entry if needed. */
	FAIL_IF(emit_endbranch(compiler));

	compiler->mode32 = 0;

	/* Including the return address saved by the call instruction. */
	saved_regs_size = GET_SAVED_REGISTERS_SIZE(scratches, saveds, 1);

	tmp = SLJIT_S0 - saveds;
	for (i = SLJIT_S0; i > tmp; i--) {
		size = reg_map[i] >= 8 ? 2 : 1;
		inst = (sljit_u8*)ensure_buf(compiler, 1 + size);
		FAIL_IF(!inst);
		INC_SIZE(size);
		if (reg_map[i] >= 8)
			*inst++ = REX_B;
		PUSH_REG(reg_lmap[i]);
	}

	for (i = scratches; i >= SLJIT_FIRST_SAVED_REG; i--) {
		size = reg_map[i] >= 8 ? 2 : 1;
		inst = (sljit_u8*)ensure_buf(compiler, 1 + size);
		FAIL_IF(!inst);
		INC_SIZE(size);
		if (reg_map[i] >= 8)
			*inst++ = REX_B;
		PUSH_REG(reg_lmap[i]);
	}

#ifdef _WIN64
	local_size += SLJIT_LOCALS_OFFSET;
	saved_float_regs_size = GET_SAVED_FLOAT_REGISTERS_SIZE(fscratches, fsaveds, 16);

	if (saved_float_regs_size > 0) {
		saved_float_regs_offset = ((local_size + 0xf) & ~0xf);
		local_size = saved_float_regs_offset + saved_float_regs_size;
	}
#else /* !_WIN64 */
	SLJIT_ASSERT(SLJIT_LOCALS_OFFSET == 0);
#endif /* _WIN64 */

	arg_types >>= SLJIT_ARG_SHIFT;

	while (arg_types > 0) {
		if ((arg_types & SLJIT_ARG_MASK) < SLJIT_ARG_TYPE_F64) {
			tmp = 0;
#ifndef _WIN64
			switch (word_arg_count) {
			case 0:
				tmp = SLJIT_R2;
				break;
			case 1:
				tmp = SLJIT_R1;
				break;
			case 2:
				tmp = TMP_REG1;
				break;
			default:
				tmp = SLJIT_R3;
				break;
			}
#else /* _WIN64 */
			switch (word_arg_count + float_arg_count) {
			case 0:
				tmp = SLJIT_R3;
				break;
			case 1:
				tmp = SLJIT_R1;
				break;
			case 2:
				tmp = SLJIT_R2;
				break;
			default:
				tmp = TMP_REG1;
				break;
			}
#endif /* _WIN64 */
			if (arg_types & SLJIT_ARG_TYPE_SCRATCH_REG) {
				if (tmp != SLJIT_R0 + word_arg_count)
					EMIT_MOV(compiler, SLJIT_R0 + word_arg_count, 0, tmp, 0);
			} else {
				EMIT_MOV(compiler, SLJIT_S0 - saved_arg_count, 0, tmp, 0);
				saved_arg_count++;
			}
			word_arg_count++;
		} else {
#ifdef _WIN64
			SLJIT_COMPILE_ASSERT(SLJIT_FR0 == 1, float_register_index_start);
			float_arg_count++;
			if (float_arg_count != float_arg_count + word_arg_count)
				FAIL_IF(emit_sse2_load(compiler, (arg_types & SLJIT_ARG_MASK) == SLJIT_ARG_TYPE_F32,
					float_arg_count, float_arg_count + word_arg_count, 0));
#endif /* _WIN64 */
		}

		arg_types >>= SLJIT_ARG_SHIFT;
	}

	local_size = ((local_size + saved_regs_size + 0xf) & ~0xf) - saved_regs_size;
	compiler->local_size = local_size;
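
	/* The rounding above keeps the whole frame (return address + pushed registers
	   + locals) a multiple of 16. For example, with saved_regs_size == 40 (four pushes
	   plus the return address) and a requested local_size of 100:
	   ((100 + 40 + 15) & ~15) - 40 == 104, so the prologue lowers the stack by
	   40 + 104 == 144 bytes and outgoing calls still see a 16 byte aligned stack. */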

#ifdef _WIN64
	if (local_size > 0) {
		if (local_size <= 4 * 4096) {
			if (local_size > 4096)
				EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(SLJIT_SP), -4096);
			if (local_size > 2 * 4096)
				EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(SLJIT_SP), -4096 * 2);
			if (local_size > 3 * 4096)
				EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(SLJIT_SP), -4096 * 3);
		}
		else {
			EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_IMM, local_size >> 12);

			EMIT_MOV(compiler, TMP_REG2, 0, SLJIT_MEM1(SLJIT_SP), -4096);
			BINARY_IMM32(SUB, 4096, SLJIT_SP, 0);
			BINARY_IMM32(SUB, 1, TMP_REG1, 0);

			inst = (sljit_u8*)ensure_buf(compiler, 1 + 2);
			FAIL_IF(!inst);

			INC_SIZE(2);
			inst[0] = JNE_i8;
			inst[1] = (sljit_u8)-21;
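
			/* Stack probing: Windows commits stack pages on demand, so every 4096 byte
			   page of a large frame is touched once before rsp is moved past it. The -21
			   is the rel8 distance of this jne back to the probing mov above, so the loop
			   runs local_size >> 12 times (TMP_REG1 is the page counter). */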
			local_size &= 0xfff;
		}

		if (local_size > 0)
			EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(SLJIT_SP), -local_size);
	}
#endif /* _WIN64 */

	if (local_size > 0)
		BINARY_IMM32(SUB, local_size, SLJIT_SP, 0);

#ifdef _WIN64
	if (saved_float_regs_size > 0) {
		compiler->mode32 = 1;

		tmp = SLJIT_FS0 - fsaveds;
		for (i = SLJIT_FS0; i > tmp; i--) {
			inst = emit_x86_instruction(compiler, 2 | EX86_SSE2, i, 0, SLJIT_MEM1(SLJIT_SP), saved_float_regs_offset);
			*inst++ = GROUP_0F;
			*inst = MOVAPS_xm_x;
			saved_float_regs_offset += 16;
		}

		for (i = fscratches; i >= SLJIT_FIRST_SAVED_FLOAT_REG; i--) {
			inst = emit_x86_instruction(compiler, 2 | EX86_SSE2, i, 0, SLJIT_MEM1(SLJIT_SP), saved_float_regs_offset);
			*inst++ = GROUP_0F;
			*inst = MOVAPS_xm_x;
			saved_float_regs_offset += 16;
		}
	}
#endif /* _WIN64 */

	return SLJIT_SUCCESS;
}

SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *compiler,
	sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
	sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
{
	sljit_s32 saved_regs_size;
#ifdef _WIN64
	sljit_s32 saved_float_regs_size;
#endif /* _WIN64 */

	CHECK_ERROR();
	CHECK(check_sljit_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
	set_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);

#ifdef _WIN64
	local_size += SLJIT_LOCALS_OFFSET;
	saved_float_regs_size = GET_SAVED_FLOAT_REGISTERS_SIZE(fscratches, fsaveds, 16);

	if (saved_float_regs_size > 0)
		local_size = ((local_size + 0xf) & ~0xf) + saved_float_regs_size;
#else /* !_WIN64 */
	SLJIT_ASSERT(SLJIT_LOCALS_OFFSET == 0);
#endif /* _WIN64 */

	/* Including the return address saved by the call instruction. */
	saved_regs_size = GET_SAVED_REGISTERS_SIZE(scratches, saveds, 1);
	compiler->local_size = ((local_size + saved_regs_size + 0xf) & ~0xf) - saved_regs_size;
	return SLJIT_SUCCESS;
}

static sljit_s32 emit_stack_frame_release(struct sljit_compiler *compiler)
{
	sljit_uw size;
	sljit_s32 i, tmp;
	sljit_u8 *inst;
#ifdef _WIN64
	sljit_s32 saved_float_regs_offset;
	sljit_s32 fscratches = compiler->fscratches;
	sljit_s32 fsaveds = compiler->fsaveds;
#endif /* _WIN64 */

#ifdef _WIN64
	saved_float_regs_offset = GET_SAVED_FLOAT_REGISTERS_SIZE(fscratches, fsaveds, 16);

	if (saved_float_regs_offset > 0) {
		compiler->mode32 = 1;
		saved_float_regs_offset = (compiler->local_size - saved_float_regs_offset) & ~0xf;

		tmp = SLJIT_FS0 - fsaveds;
		for (i = SLJIT_FS0; i > tmp; i--) {
			inst = emit_x86_instruction(compiler, 2 | EX86_SSE2, i, 0, SLJIT_MEM1(SLJIT_SP), saved_float_regs_offset);
			*inst++ = GROUP_0F;
			*inst = MOVAPS_x_xm;
			saved_float_regs_offset += 16;
		}

		for (i = fscratches; i >= SLJIT_FIRST_SAVED_FLOAT_REG; i--) {
			inst = emit_x86_instruction(compiler, 2 | EX86_SSE2, i, 0, SLJIT_MEM1(SLJIT_SP), saved_float_regs_offset);
			*inst++ = GROUP_0F;
			*inst = MOVAPS_x_xm;
			saved_float_regs_offset += 16;
		}
	}
#endif /* _WIN64 */

	if (compiler->local_size > 0) {
		if (compiler->local_size <= 127) {
			inst = (sljit_u8*)ensure_buf(compiler, 1 + 4);
			FAIL_IF(!inst);
			INC_SIZE(4);
			*inst++ = REX_W;
			*inst++ = GROUP_BINARY_83;
			*inst++ = MOD_REG | ADD | 4;
			*inst = U8(compiler->local_size);
		}
		else {
			inst = (sljit_u8*)ensure_buf(compiler, 1 + 7);
			FAIL_IF(!inst);
			INC_SIZE(7);
			*inst++ = REX_W;
			*inst++ = GROUP_BINARY_81;
			*inst++ = MOD_REG | ADD | 4;
			sljit_unaligned_store_s32(inst, compiler->local_size);
		}
	}

	tmp = compiler->scratches;
	for (i = SLJIT_FIRST_SAVED_REG; i <= tmp; i++) {
		size = reg_map[i] >= 8 ? 2 : 1;
		inst = (sljit_u8*)ensure_buf(compiler, 1 + size);
		FAIL_IF(!inst);
		INC_SIZE(size);
		if (reg_map[i] >= 8)
			*inst++ = REX_B;
		POP_REG(reg_lmap[i]);
	}

	tmp = compiler->saveds < SLJIT_NUMBER_OF_SAVED_REGISTERS ? (SLJIT_S0 + 1 - compiler->saveds) : SLJIT_FIRST_SAVED_REG;
	for (i = tmp; i <= SLJIT_S0; i++) {
		size = reg_map[i] >= 8 ? 2 : 1;
		inst = (sljit_u8*)ensure_buf(compiler, 1 + size);
		FAIL_IF(!inst);
		INC_SIZE(size);
		if (reg_map[i] >= 8)
			*inst++ = REX_B;
		POP_REG(reg_lmap[i]);
	}

	return SLJIT_SUCCESS;
}

SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return_void(struct sljit_compiler *compiler)
{
	sljit_u8 *inst;

	CHECK_ERROR();
	CHECK(check_sljit_emit_return_void(compiler));

	FAIL_IF(emit_stack_frame_release(compiler));

	inst = (sljit_u8*)ensure_buf(compiler, 1 + 1);
	FAIL_IF(!inst);
	INC_SIZE(1);
	RET();
	return SLJIT_SUCCESS;
}

/* --------------------------------------------------------------------- */
/*  Call / return instructions                                            */
/* --------------------------------------------------------------------- */

#ifndef _WIN64

static sljit_s32 call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_types, sljit_s32 *src_ptr)
{
	sljit_s32 src = src_ptr ? (*src_ptr) : 0;
	sljit_s32 word_arg_count = 0;

	SLJIT_ASSERT(reg_map[SLJIT_R1] == 6 && reg_map[SLJIT_R3] == 1 && reg_map[TMP_REG1] == 2);
	SLJIT_ASSERT(!(src & SLJIT_MEM));

	/* Remove return value. */
	arg_types >>= SLJIT_ARG_SHIFT;

	while (arg_types) {
		if ((arg_types & SLJIT_ARG_MASK) < SLJIT_ARG_TYPE_F64)
			word_arg_count++;
		arg_types >>= SLJIT_ARG_SHIFT;
	}

	if (word_arg_count == 0)
		return SLJIT_SUCCESS;

	if (word_arg_count >= 3) {
		if (src == SLJIT_R2)
			*src_ptr = TMP_REG1;
		EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_R2, 0);
	}

	return emit_mov(compiler, SLJIT_R2, 0, SLJIT_R0, 0);
}
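
/* On System V the first integer arguments travel in rdi, rsi, rdx and rcx. The
   assert above pins rsi, rcx and rdx to SLJIT_R1, SLJIT_R3 and TMP_REG1, and the
   final move assumes SLJIT_R2 aliases rdi, so at most two moves are needed: the
   third argument is parked in TMP_REG1 (rdx) before the first argument is copied
   from SLJIT_R0 into SLJIT_R2 (rdi). */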

#else /* _WIN64 */

static sljit_s32 call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_types, sljit_s32 *src_ptr)
{
	sljit_s32 src = src_ptr ? (*src_ptr) : 0;
	sljit_s32 arg_count = 0;
	sljit_s32 word_arg_count = 0;
	sljit_s32 float_arg_count = 0;
	sljit_s32 types = 0;
	sljit_s32 data_transfer = 0;
	static sljit_u8 word_arg_regs[5] = { 0, SLJIT_R3, SLJIT_R1, SLJIT_R2, TMP_REG1 };

	SLJIT_ASSERT(reg_map[SLJIT_R3] == 1 && reg_map[SLJIT_R1] == 2 && reg_map[SLJIT_R2] == 8 && reg_map[TMP_REG1] == 9);
	SLJIT_ASSERT(!(src & SLJIT_MEM));
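
	/* Microsoft x64 calling convention: the first four arguments, integer or float
	   interleaved, occupy rcx, rdx, r8, r9 / xmm0-xmm3 by position. word_arg_regs[]
	   maps an argument position to the sljit register aliasing that slot
	   (rcx = SLJIT_R3, rdx = SLJIT_R1, r8 = SLJIT_R2, r9 = TMP_REG1, see the assert above). */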

	arg_types >>= SLJIT_ARG_SHIFT;

	while (arg_types) {
		types = (types << SLJIT_ARG_SHIFT) | (arg_types & SLJIT_ARG_MASK);

		switch (arg_types & SLJIT_ARG_MASK) {
		case SLJIT_ARG_TYPE_F64:
		case SLJIT_ARG_TYPE_F32:
			arg_count++;
			float_arg_count++;

			if (arg_count != float_arg_count)
				data_transfer = 1;
			break;
		default:
			arg_count++;
			word_arg_count++;

			if (arg_count != word_arg_count || arg_count != word_arg_regs[arg_count]) {
				data_transfer = 1;

				if (src == word_arg_regs[arg_count]) {
					EMIT_MOV(compiler, TMP_REG2, 0, src, 0);
					*src_ptr = TMP_REG2;
				}
			}
			break;
		}

		arg_types >>= SLJIT_ARG_SHIFT;
	}

	if (!data_transfer)
		return SLJIT_SUCCESS;

	while (types) {
		switch (types & SLJIT_ARG_MASK) {
		case SLJIT_ARG_TYPE_F64:
			if (arg_count != float_arg_count)
				FAIL_IF(emit_sse2_load(compiler, 0, arg_count, float_arg_count, 0));
			arg_count--;
			float_arg_count--;
			break;
		case SLJIT_ARG_TYPE_F32:
			if (arg_count != float_arg_count)
				FAIL_IF(emit_sse2_load(compiler, 1, arg_count, float_arg_count, 0));
			arg_count--;
			float_arg_count--;
			break;
		default:
			if (arg_count != word_arg_count || arg_count != word_arg_regs[arg_count])
				EMIT_MOV(compiler, word_arg_regs[arg_count], 0, word_arg_count, 0);
			arg_count--;
			word_arg_count--;
			break;
		}

		types >>= SLJIT_ARG_SHIFT;
	}

	return SLJIT_SUCCESS;
}

#endif /* _WIN64 */

SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compiler *compiler, sljit_s32 type,
	sljit_s32 arg_types)
{
	CHECK_ERROR_PTR();
	CHECK_PTR(check_sljit_emit_call(compiler, type, arg_types));

	compiler->mode32 = 0;

	PTR_FAIL_IF(call_with_args(compiler, arg_types, NULL));

	if (type & SLJIT_CALL_RETURN) {
		PTR_FAIL_IF(emit_stack_frame_release(compiler));
		type = SLJIT_JUMP | (type & SLJIT_REWRITABLE_JUMP);
	}

#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
		|| (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
	compiler->skip_checks = 1;
#endif

	return sljit_emit_jump(compiler, type);
}

SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compiler, sljit_s32 type,
	sljit_s32 arg_types,
	sljit_s32 src, sljit_sw srcw)
{
	CHECK_ERROR();
	CHECK(check_sljit_emit_icall(compiler, type, arg_types, src, srcw));

	compiler->mode32 = 0;

	if (src & SLJIT_MEM) {
		ADJUST_LOCAL_OFFSET(src, srcw);
		EMIT_MOV(compiler, TMP_REG2, 0, src, srcw);
		src = TMP_REG2;
	}

	if (type & SLJIT_CALL_RETURN) {
		if (src >= SLJIT_FIRST_SAVED_REG && src <= SLJIT_S0) {
			EMIT_MOV(compiler, TMP_REG2, 0, src, srcw);
			src = TMP_REG2;
		}

		FAIL_IF(emit_stack_frame_release(compiler));
		type = SLJIT_JUMP;
	}

	FAIL_IF(call_with_args(compiler, arg_types, &src));

#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
		|| (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
	compiler->skip_checks = 1;
#endif

	return sljit_emit_ijump(compiler, type, src, srcw);
}

SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw)
{
	sljit_u8 *inst;

	CHECK_ERROR();
	CHECK(check_sljit_emit_fast_enter(compiler, dst, dstw));
	ADJUST_LOCAL_OFFSET(dst, dstw);

	if (FAST_IS_REG(dst)) {
		if (reg_map[dst] < 8) {
			inst = (sljit_u8*)ensure_buf(compiler, 1 + 1);
			FAIL_IF(!inst);
			INC_SIZE(1);
			POP_REG(reg_lmap[dst]);
			return SLJIT_SUCCESS;
		}

		inst = (sljit_u8*)ensure_buf(compiler, 1 + 2);
		FAIL_IF(!inst);
		INC_SIZE(2);
		*inst++ = REX_B;
		POP_REG(reg_lmap[dst]);
		return SLJIT_SUCCESS;
	}

	/* REX_W is not necessary (src is not immediate). */
	compiler->mode32 = 1;
	inst = emit_x86_instruction(compiler, 1, 0, 0, dst, dstw);
	FAIL_IF(!inst);
	*inst = POP_rm;
	return SLJIT_SUCCESS;
}
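
/* sljit_emit_fast_enter stores the return address pushed by the matching fast call:
   a one byte "pop reg" (58+rd, with a REX.B prefix for r8-r15), or a pop into
   memory (8F /0) when dst is not a register. */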

static sljit_s32 emit_fast_return(struct sljit_compiler *compiler, sljit_s32 src, sljit_sw srcw)
{
	sljit_u8 *inst;

	if (FAST_IS_REG(src)) {
		if (reg_map[src] < 8) {
			inst = (sljit_u8*)ensure_buf(compiler, 1 + 1 + 1);
			FAIL_IF(!inst);

			INC_SIZE(1 + 1);
			PUSH_REG(reg_lmap[src]);
		}
		else {
			inst = (sljit_u8*)ensure_buf(compiler, 1 + 2 + 1);
			FAIL_IF(!inst);

			INC_SIZE(2 + 1);
			*inst++ = REX_B;
			PUSH_REG(reg_lmap[src]);
		}
	}
	else {
		/* REX_W is not necessary (src is not immediate). */
		compiler->mode32 = 1;
		inst = emit_x86_instruction(compiler, 1, 0, 0, src, srcw);
		FAIL_IF(!inst);
		*inst++ = GROUP_FF;
		*inst |= PUSH_rm;

		inst = (sljit_u8*)ensure_buf(compiler, 1 + 1);
		FAIL_IF(!inst);
		INC_SIZE(1);
	}

	RET();
	return SLJIT_SUCCESS;
}

/* --------------------------------------------------------------------- */
/* --------------------------------------------------------------------- */

static sljit_s32 emit_mov_int(struct sljit_compiler *compiler, sljit_s32 sign,
	sljit_s32 dst, sljit_sw dstw,
	sljit_s32 src, sljit_sw srcw)
{
	sljit_u8 *inst;
	sljit_s32 dst_r;

	compiler->mode32 = 0;

	if (src & SLJIT_IMM) {
		if (FAST_IS_REG(dst)) {
			if (sign || ((sljit_uw)srcw <= 0x7fffffff)) {
				inst = emit_x86_instruction(compiler, 1, SLJIT_IMM, (sljit_sw)(sljit_s32)srcw, dst, dstw);
				FAIL_IF(!inst);
				*inst = MOV_rm_i32;
				return SLJIT_SUCCESS;
			}
			return emit_load_imm64(compiler, dst, srcw);
		}
		compiler->mode32 = 1;
		inst = emit_x86_instruction(compiler, 1, SLJIT_IMM, (sljit_sw)(sljit_s32)srcw, dst, dstw);
		FAIL_IF(!inst);
		*inst = MOV_rm_i32;
		compiler->mode32 = 0;
		return SLJIT_SUCCESS;
	}

	dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1;

	if ((dst & SLJIT_MEM) && FAST_IS_REG(src))
		dst_r = src;
	else {
		if (sign) {
			inst = emit_x86_instruction(compiler, 1, dst_r, 0, src, srcw);
			FAIL_IF(!inst);
			*inst++ = MOVSXD_r_rm;
		} else {
			compiler->mode32 = 1;
			FAIL_IF(emit_mov(compiler, dst_r, 0, src, srcw));
			compiler->mode32 = 0;
		}
	}

	if (dst & SLJIT_MEM) {
		compiler->mode32 = 1;
		inst = emit_x86_instruction(compiler, 1, dst_r, 0, dst, dstw);
		FAIL_IF(!inst);
		*inst = MOV_rm_r;
		compiler->mode32 = 0;
	}

	return SLJIT_SUCCESS;
}
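
/* MOVSXD (63 /r) sign extends a 32 bit source into a 64 bit register, while the
   mode32 path relies on the architectural rule that every 32 bit mov already zero
   extends the upper half of its destination; that is why the unsigned case needs
   no extra instruction. */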

static sljit_s32 skip_frames_before_return(struct sljit_compiler *compiler)
{
	sljit_s32 tmp, size;

	/* Don't adjust shadow stack if it isn't enabled. */
	if (!cpu_has_shadow_stack())
		return SLJIT_SUCCESS;

	size = compiler->local_size;
	tmp = compiler->scratches;
	if (tmp >= SLJIT_FIRST_SAVED_REG)
		size += (tmp - SLJIT_FIRST_SAVED_REG + 1) * SSIZE_OF(sw);
	tmp = compiler->saveds < SLJIT_NUMBER_OF_SAVED_REGISTERS ? (SLJIT_S0 + 1 - compiler->saveds) : SLJIT_FIRST_SAVED_REG;
	if (SLJIT_S0 >= tmp)
		size += (SLJIT_S0 - tmp + 1) * SSIZE_OF(sw);

	return adjust_shadow_stack(compiler, SLJIT_MEM1(SLJIT_SP), size);
}
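
/* size accumulates local_size plus one machine word per register the epilogue will
   pop, i.e. while the frame is still in place [SLJIT_SP + size] is the slot holding
   this function's return address; adjust_shadow_stack() is given that operand so the
   CET shadow stack can be brought back in sync before control leaves the frame. */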
);