/*
 *    Stack-less Just-In-Time compiler
 *
 *    Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification, are
 * permitted provided that the following conditions are met:
 *
 *   1. Redistributions of source code must retain the above copyright notice, this list of
 *      conditions and the following disclaimer.
 *
 *   2. Redistributions in binary form must reproduce the above copyright notice, this list
 *      of conditions and the following disclaimer in the documentation and/or other materials
 *      provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
 * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifdef __SOFTFP__
#define ARM_ABI_INFO " ABI:softfp"
#else
#define ARM_ABI_INFO " ABI:hardfp"
#endif

SLJIT_API_FUNC_ATTRIBUTE const char* sljit_get_platform_name(void)
{
#if (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7)
	return "ARMv7" SLJIT_CPUINFO ARM_ABI_INFO;
#elif (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
	return "ARMv5" SLJIT_CPUINFO ARM_ABI_INFO;
#else
#error "Internal error: Unknown ARM architecture"
#endif
}

/* Last register + 1. */
#define TMP_REG1	(SLJIT_NUMBER_OF_REGISTERS + 2)
#define TMP_REG2	(SLJIT_NUMBER_OF_REGISTERS + 3)
#define TMP_PC		(SLJIT_NUMBER_OF_REGISTERS + 4)

#define TMP_FREG1	(SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1)
#define TMP_FREG2	(SLJIT_NUMBER_OF_FLOAT_REGISTERS + 2)

/* In ARM instruction words.
   Cache lines are usually 32 byte aligned. */
#define CONST_POOL_ALIGNMENT	8
#define CONST_POOL_EMPTY	0xffffffff

#define ALIGN_INSTRUCTION(ptr) \
	(sljit_uw*)(((sljit_uw)(ptr) + (CONST_POOL_ALIGNMENT * sizeof(sljit_uw)) - 1) & ~((CONST_POOL_ALIGNMENT * sizeof(sljit_uw)) - 1))
#define MAX_DIFFERENCE(max_diff) \
	(((max_diff) / (sljit_s32)sizeof(sljit_uw)) - (CONST_POOL_ALIGNMENT - 1))
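
/* Worked example (illustrative): with CONST_POOL_ALIGNMENT == 8 and 4 byte
   instruction words, ALIGN_INSTRUCTION rounds a pointer up to a 32 byte
   boundary, e.g. 0x1004 becomes 0x1020. MAX_DIFFERENCE(4092) is
   4092 / 4 - 7 == 1016 words, i.e. the pool is flushed while every pending
   pc-relative load (12 bit byte offset, at most 4095) can still reach it,
   even after alignment padding. */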

/* See sljit_emit_enter and sljit_emit_op0 if you want to change them. */
static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 5] = {
	0, 0, 1, 2, 3, 11, 10, 9, 8, 7, 6, 5, 4, 13, 12, 14, 15
};

static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = {
	0, 0, 1, 2, 3, 4, 5, 15, 14, 13, 12, 11, 10, 9, 8, 6, 7
};

#define RM(rm) ((sljit_uw)reg_map[rm])
#define RM8(rm) ((sljit_uw)reg_map[rm] << 8)
#define RD(rd) ((sljit_uw)reg_map[rd] << 12)
#define RN(rn) ((sljit_uw)reg_map[rn] << 16)

#define VM(rm) ((sljit_uw)freg_map[rm])
#define VD(rd) ((sljit_uw)freg_map[rd] << 12)
#define VN(rn) ((sljit_uw)freg_map[rn] << 16)
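
/* Example (illustrative): a full instruction word is composed by or-ing the
   register fields into an opcode pattern, e.g.
       ADD | RD(dst) | RN(src1) | RM(src2)
   encodes "add rD, rN, rM"; the AL condition is already part of the opcode. */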

/* --------------------------------------------------------------------- */
/*  Instruction forms                                                     */
/* --------------------------------------------------------------------- */

/* The instruction includes the AL condition.
   INST_NAME - CONDITIONAL removes this flag. */
#define COND_MASK	0xf0000000
#define CONDITIONAL	0xe0000000
#define PUSH_POOL	0xff000000

#define ADC		0xe0a00000
#define ADD		0xe0800000
#define AND		0xe0000000
#define B		0xea000000
#define BIC		0xe1c00000
#define BL		0xeb000000
#define BLX		0xe12fff30
#define BX		0xe12fff10
#define CLZ		0xe16f0f10
#define CMN		0xe1600000
#define CMP		0xe1400000
#define BKPT		0xe1200070
#define EOR		0xe0200000
#define LDR		0xe5100000
#define MOV		0xe1a00000
#define MUL		0xe0000090
#define MVN		0xe1e00000
#define NOP		0xe1a00000
#define ORR		0xe1800000
#define PUSH		0xe92d0000
#define POP		0xe8bd0000
#define RSB		0xe0600000
#define RSC		0xe0e00000
#define SBC		0xe0c00000
#define SMULL		0xe0c00090
#define STR		0xe5000000
#define SUB		0xe0400000
#define TST		0xe1000000
#define UMULL		0xe0800090
#define VABS_F32	0xeeb00ac0
#define VADD_F32	0xee300a00
#define VCMP_F32	0xeeb40a40
#define VCVT_F32_S32	0xeeb80ac0
#define VCVT_F64_F32	0xeeb70ac0
#define VCVT_S32_F32	0xeebd0ac0
#define VDIV_F32	0xee800a00
#define VLDR_F32	0xed100a00
#define VMOV_F32	0xeeb00a40
#define VMOV		0xee000a10
#define VMOV2		0xec400a10
#define VMRS		0xeef1fa10
#define VMUL_F32	0xee200a00
#define VNEG_F32	0xeeb10a40
#define VPOP		0xecbd0b00
#define VPUSH		0xed2d0b00
#define VSTR_F32	0xed000a00
#define VSUB_F32	0xee300a40

#if (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7)
/* Arm v7 specific instructions. */
#define MOVW		0xe3000000
#define MOVT		0xe3400000
#define SXTB		0xe6af0070
#define SXTH		0xe6bf0070
#define UXTB		0xe6ef0070
#define UXTH		0xe6ff0070
#endif

#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)

static sljit_s32 push_cpool(struct sljit_compiler *compiler)
{
	/* Pushing the constant pool into the instruction stream. */
	sljit_uw* inst;
	sljit_uw* cpool_ptr;
	sljit_uw* cpool_end;
	sljit_s32 i;

	/* The label could point to the address after the constant pool. */
	if (compiler->last_label && compiler->last_label->size == compiler->size)
		compiler->last_label->size += compiler->cpool_fill + (CONST_POOL_ALIGNMENT - 1) + 1;

	SLJIT_ASSERT(compiler->cpool_fill > 0 && compiler->cpool_fill <= CPOOL_SIZE);
	inst = (sljit_uw*)ensure_buf(compiler, sizeof(sljit_uw));
	FAIL_IF(!inst);
	compiler->size++;
	*inst = 0xff000000 | compiler->cpool_fill;

	for (i = 0; i < CONST_POOL_ALIGNMENT - 1; i++) {
		inst = (sljit_uw*)ensure_buf(compiler, sizeof(sljit_uw));
		FAIL_IF(!inst);
		compiler->size++;
		*inst = 0;
	}

	cpool_ptr = compiler->cpool;
	cpool_end = cpool_ptr + compiler->cpool_fill;
	while (cpool_ptr < cpool_end) {
		inst = (sljit_uw*)ensure_buf(compiler, sizeof(sljit_uw));
		FAIL_IF(!inst);
		compiler->size++;
		*inst = *cpool_ptr++;
	}
	compiler->cpool_diff = CONST_POOL_EMPTY;
	compiler->cpool_fill = 0;
	return SLJIT_SUCCESS;
}
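
/* Sketch of a flushed pool in the instruction stream (illustrative, assuming
   cpool_fill == 2): the 0xff000000 | 2 marker word, CONST_POOL_ALIGNMENT - 1
   filler words, then the two literals. sljit_generate_code() later turns the
   marker into a branch that jumps over the aligned pool. */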

static sljit_s32 push_inst(struct sljit_compiler *compiler, sljit_uw inst)
{
	sljit_uw* ptr;

	if (SLJIT_UNLIKELY(compiler->cpool_diff != CONST_POOL_EMPTY && compiler->size - compiler->cpool_diff >= MAX_DIFFERENCE(4092)))
		FAIL_IF(push_cpool(compiler));

	ptr = (sljit_uw*)ensure_buf(compiler, sizeof(sljit_uw));
	FAIL_IF(!ptr);
	compiler->size++;
	*ptr = inst;
	return SLJIT_SUCCESS;
}

static sljit_s32 push_inst_with_literal(struct sljit_compiler *compiler, sljit_uw inst, sljit_uw literal)
{
	sljit_uw* ptr;
	sljit_uw cpool_index = CPOOL_SIZE;
	sljit_uw* cpool_ptr;
	sljit_uw* cpool_end;
	sljit_u8* cpool_unique_ptr;

	if (SLJIT_UNLIKELY(compiler->cpool_diff != CONST_POOL_EMPTY && compiler->size - compiler->cpool_diff >= MAX_DIFFERENCE(4092)))
		FAIL_IF(push_cpool(compiler));
	else if (compiler->cpool_fill > 0) {
		cpool_ptr = compiler->cpool;
		cpool_end = cpool_ptr + compiler->cpool_fill;
		cpool_unique_ptr = compiler->cpool_unique;
		do {
			if ((*cpool_ptr == literal) && !(*cpool_unique_ptr)) {
				cpool_index = (sljit_uw)(cpool_ptr - compiler->cpool);
				break;
			}
			cpool_ptr++;
			cpool_unique_ptr++;
		} while (cpool_ptr < cpool_end);
	}

	if (cpool_index == CPOOL_SIZE) {
		/* Must allocate a new entry in the literal pool. */
		if (compiler->cpool_fill < CPOOL_SIZE) {
			cpool_index = compiler->cpool_fill;
			compiler->cpool_fill++;
		}
		else {
			FAIL_IF(push_cpool(compiler));
			cpool_index = 0;
			compiler->cpool_fill = 1;
		}
	}

	SLJIT_ASSERT((inst & 0xfff) == 0);
	ptr = (sljit_uw*)ensure_buf(compiler, sizeof(sljit_uw));
	FAIL_IF(!ptr);
	compiler->size++;
	*ptr = inst | cpool_index;

	compiler->cpool[cpool_index] = literal;
	compiler->cpool_unique[cpool_index] = 0;
	if (compiler->cpool_diff == CONST_POOL_EMPTY)
		compiler->cpool_diff = compiler->size;
	return SLJIT_SUCCESS;
}
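
/* Usage sketch (illustrative): emitting
       push_inst_with_literal(compiler, EMIT_DATA_TRANSFER(WORD_SIZE | LOAD_DATA, 1, reg, TMP_PC, 0), 0x12345678)
   twice stores 0x12345678 in the pool only once; both pc-relative loads get
   the same pool index, and the real byte offset is fixed up later by
   patch_pc_relative_loads(). */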

static sljit_s32 push_inst_with_unique_literal(struct sljit_compiler *compiler, sljit_uw inst, sljit_uw literal)
{
	sljit_uw* ptr;

	if (SLJIT_UNLIKELY((compiler->cpool_diff != CONST_POOL_EMPTY && compiler->size - compiler->cpool_diff >= MAX_DIFFERENCE(4092)) || compiler->cpool_fill >= CPOOL_SIZE))
		FAIL_IF(push_cpool(compiler));

	SLJIT_ASSERT(compiler->cpool_fill < CPOOL_SIZE && (inst & 0xfff) == 0);
	ptr = (sljit_uw*)ensure_buf(compiler, sizeof(sljit_uw));
	FAIL_IF(!ptr);
	compiler->size++;
	*ptr = inst | compiler->cpool_fill;

	compiler->cpool[compiler->cpool_fill] = literal;
	compiler->cpool_unique[compiler->cpool_fill] = 1;
	compiler->cpool_fill++;
	if (compiler->cpool_diff == CONST_POOL_EMPTY)
		compiler->cpool_diff = compiler->size;
	return SLJIT_SUCCESS;
}

static SLJIT_INLINE sljit_s32 prepare_blx(struct sljit_compiler *compiler)
{
	/* Place for at least two instructions (it does not matter whether the first has a literal). */
	if (SLJIT_UNLIKELY(compiler->cpool_diff != CONST_POOL_EMPTY && compiler->size - compiler->cpool_diff >= MAX_DIFFERENCE(4088)))
		return push_cpool(compiler);
	return SLJIT_SUCCESS;
}

static SLJIT_INLINE sljit_s32 emit_blx(struct sljit_compiler *compiler)
{
	/* Must closely follow the previous instruction (to be able to convert it to a bl instruction). */
	SLJIT_ASSERT(compiler->cpool_diff == CONST_POOL_EMPTY || compiler->size - compiler->cpool_diff < MAX_DIFFERENCE(4092));
	SLJIT_ASSERT(reg_map[TMP_REG1] != 14);

	return push_inst(compiler, BLX | RM(TMP_REG1));
}

static sljit_uw patch_pc_relative_loads(sljit_uw *last_pc_patch, sljit_uw *code_ptr, sljit_uw* const_pool, sljit_uw cpool_size)
{
	sljit_uw diff;
	sljit_uw ind;
	sljit_uw counter = 0;
	sljit_uw* clear_const_pool = const_pool;
	sljit_uw* clear_const_pool_end = const_pool + cpool_size;

	SLJIT_ASSERT(const_pool - code_ptr <= CONST_POOL_ALIGNMENT);
	/* Set unused flag for all literals in the constant pool.
	   I.e.: unused literals can belong to branches, which can be encoded as B or BL.
	   We can "compress" the constant pool by discarding these literals. */
	while (clear_const_pool < clear_const_pool_end)
		*clear_const_pool++ = (sljit_uw)(-1);

	while (last_pc_patch < code_ptr) {
		/* Data transfer instruction with Rn == r15. */
		if ((*last_pc_patch & 0x0c0f0000) == 0x040f0000) {
			diff = (sljit_uw)(const_pool - last_pc_patch);
			ind = (*last_pc_patch) & 0xfff;

			/* Must be a load instruction with immediate offset. */
			SLJIT_ASSERT(ind < cpool_size && !(*last_pc_patch & (1 << 25)) && (*last_pc_patch & (1 << 20)));
			if ((sljit_s32)const_pool[ind] < 0) {
				const_pool[ind] = counter;
				ind = counter;
				counter++;
			}
			else
				ind = const_pool[ind];

			SLJIT_ASSERT(diff >= 1);
			if (diff >= 2 || ind > 0) {
				diff = (diff + (sljit_uw)ind - 2) << 2;
				SLJIT_ASSERT(diff <= 0xfff);
				*last_pc_patch = (*last_pc_patch & ~(sljit_uw)0xfff) | diff;
			}
			else
				*last_pc_patch = (*last_pc_patch & ~(sljit_uw)(0xfff | (1 << 23))) | 0x004;
		}
		last_pc_patch++;
	}
	return counter;
}
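
/* Worked example (illustrative): a load 4 words before the pool (diff == 4)
   whose literal lands at pool index 1 (ind == 1) gets the byte offset
   (4 + 1 - 2) << 2 == 12, matching the ARM convention that pc reads as the
   load address + 8 (two words). */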

/* In some rare occasions we may need future patches. The probability is close to 0 in practice. */
struct future_patch {
	struct future_patch* next;
	sljit_s32 index;
	sljit_s32 value;
};

static sljit_s32 resolve_const_pool_index(struct sljit_compiler *compiler, struct future_patch **first_patch, sljit_uw cpool_current_index, sljit_uw *cpool_start_address, sljit_uw *buf_ptr)
{
	sljit_uw value;
	struct future_patch *curr_patch, *prev_patch;

	SLJIT_UNUSED_ARG(compiler);

	/* Using the values generated by patch_pc_relative_loads. */
	if (!*first_patch)
		value = cpool_start_address[cpool_current_index];
	else {
		curr_patch = *first_patch;
		prev_patch = NULL;
		while (1) {
			if (!curr_patch) {
				value = cpool_start_address[cpool_current_index];
				break;
			}
			if ((sljit_uw)curr_patch->index == cpool_current_index) {
				value = (sljit_uw)curr_patch->value;
				if (prev_patch)
					prev_patch->next = curr_patch->next;
				else
					*first_patch = curr_patch->next;
				SLJIT_FREE(curr_patch, compiler->allocator_data);
				break;
			}
			prev_patch = curr_patch;
			curr_patch = curr_patch->next;
		}
	}

	if ((sljit_sw)value >= 0) {
		if (value > cpool_current_index) {
			curr_patch = (struct future_patch*)SLJIT_MALLOC(sizeof(struct future_patch), compiler->allocator_data);
			if (!curr_patch) {
				while (*first_patch) {
					curr_patch = *first_patch;
					*first_patch = (*first_patch)->next;
					SLJIT_FREE(curr_patch, compiler->allocator_data);
				}
				return SLJIT_ERR_ALLOC_FAILED;
			}
			curr_patch->next = *first_patch;
			curr_patch->index = (sljit_sw)value;
			curr_patch->value = (sljit_sw)cpool_start_address[value];
			*first_patch = curr_patch;
		}
		cpool_start_address[value] = *buf_ptr;
	}
	return SLJIT_SUCCESS;
}

#else /* !SLJIT_CONFIG_ARM_V5 */

static sljit_s32 push_inst(struct sljit_compiler *compiler, sljit_uw inst)
{
	sljit_uw* ptr;

	ptr = (sljit_uw*)ensure_buf(compiler, sizeof(sljit_uw));
	FAIL_IF(!ptr);
	compiler->size++;
	*ptr = inst;
	return SLJIT_SUCCESS;
}

static SLJIT_INLINE sljit_s32 emit_imm(struct sljit_compiler *compiler, sljit_s32 reg, sljit_sw imm)
{
	FAIL_IF(push_inst(compiler, MOVW | RD(reg) | ((imm << 4) & 0xf0000) | ((sljit_u32)imm & 0xfff)));
	return push_inst(compiler, MOVT | RD(reg) | ((imm >> 12) & 0xf0000) | (((sljit_u32)imm >> 16) & 0xfff));
}

#endif /* SLJIT_CONFIG_ARM_V5 */
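
/* Worked example (illustrative): emit_imm(compiler, reg, 0x12345678) emits
       movw reg, #0x5678   (imm16 split into a 4 bit high and 12 bit low part)
       movt reg, #0x1234
   so the pair materializes any 32 bit value without a literal pool. */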

static SLJIT_INLINE sljit_s32 detect_jump_type(struct sljit_jump *jump, sljit_uw *code_ptr, sljit_uw *code, sljit_sw executable_offset)
{
	sljit_sw diff;

	if (jump->flags & SLJIT_REWRITABLE_JUMP)
		return 0;

#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
	if (jump->flags & IS_BL)
		code_ptr--;

	if (jump->flags & JUMP_ADDR)
		diff = ((sljit_sw)jump->u.target - (sljit_sw)(code_ptr + 2) - executable_offset);
	else {
		SLJIT_ASSERT(jump->flags & JUMP_LABEL);
		diff = ((sljit_sw)(code + jump->u.label->size) - (sljit_sw)(code_ptr + 2));
	}

	/* Branch to Thumb code has not been optimized yet. */
	if (diff & 0x3)
		return 0;

	if (jump->flags & IS_BL) {
		if (diff <= 0x01ffffff && diff >= -0x02000000) {
			*code_ptr = (BL - CONDITIONAL) | (*(code_ptr + 1) & COND_MASK);
			jump->flags |= PATCH_B;
			return 1;
		}
	}
	else {
		if (diff <= 0x01ffffff && diff >= -0x02000000) {
			*code_ptr = (B - CONDITIONAL) | (*code_ptr & COND_MASK);
			jump->flags |= PATCH_B;
		}
	}
#else /* !SLJIT_CONFIG_ARM_V5 */
	if (jump->flags & JUMP_ADDR)
		diff = ((sljit_sw)jump->u.target - (sljit_sw)code_ptr - executable_offset);
	else {
		SLJIT_ASSERT(jump->flags & JUMP_LABEL);
		diff = ((sljit_sw)(code + jump->u.label->size) - (sljit_sw)code_ptr);
	}

	/* Branch to Thumb code has not been optimized yet. */
	if (diff & 0x3)
		return 0;

	if (diff <= 0x01ffffff && diff >= -0x02000000) {
		code_ptr -= 2;
		*code_ptr = ((jump->flags & IS_BL) ? (BL - CONDITIONAL) : (B - CONDITIONAL)) | (code_ptr[2] & COND_MASK);
		jump->flags |= PATCH_B;
		return 1;
	}
#endif /* SLJIT_CONFIG_ARM_V5 */
	return 0;
}

static SLJIT_INLINE void inline_set_jump_addr(sljit_uw jump_ptr, sljit_sw executable_offset, sljit_uw new_addr, sljit_s32 flush_cache)
{
#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
	sljit_uw *ptr = (sljit_uw *)jump_ptr;
	sljit_uw *inst = (sljit_uw *)ptr[0];
	sljit_uw mov_pc = ptr[1];
	sljit_s32 bl = (mov_pc & 0x0000f000) != RD(TMP_PC);
	sljit_sw diff = (sljit_sw)(((sljit_sw)new_addr - (sljit_sw)(inst + 2) - executable_offset) >> 2);

	SLJIT_UNUSED_ARG(executable_offset);

	if (diff <= 0x7fffff && diff >= -0x800000) {
		/* Turn to branch. */
		if (!bl) {
			if (flush_cache) {
				SLJIT_UPDATE_WX_FLAGS(inst, inst + 1, 0);
			}
			inst[0] = (mov_pc & COND_MASK) | (B - CONDITIONAL) | (diff & 0xffffff);
			if (flush_cache) {
				SLJIT_UPDATE_WX_FLAGS(inst, inst + 1, 1);
				inst = (sljit_uw *)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset);
				SLJIT_CACHE_FLUSH(inst, inst + 1);
			}
		} else {
			if (flush_cache) {
				SLJIT_UPDATE_WX_FLAGS(inst, inst + 2, 0);
			}
			inst[0] = (mov_pc & COND_MASK) | (BL - CONDITIONAL) | (diff & 0xffffff);
			inst[1] = NOP;
			if (flush_cache) {
				SLJIT_UPDATE_WX_FLAGS(inst, inst + 2, 1);
				inst = (sljit_uw *)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset);
				SLJIT_CACHE_FLUSH(inst, inst + 2);
			}
		}
	} else {
		/* Get the position of the constant. */
		if (mov_pc & (1 << 23))
			ptr = inst + ((mov_pc & 0xfff) >> 2) + 2;
		else
			ptr = inst + 1;

		if (*inst != mov_pc) {
			if (flush_cache) {
				SLJIT_UPDATE_WX_FLAGS(inst, inst + (!bl ? 1 : 2), 0);
			}
			inst[0] = mov_pc;
			if (!bl) {
				if (flush_cache) {
					SLJIT_UPDATE_WX_FLAGS(inst, inst + 1, 1);
					inst = (sljit_uw *)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset);
					SLJIT_CACHE_FLUSH(inst, inst + 1);
				}
			} else {
				inst[1] = BLX | RM(TMP_REG1);
				if (flush_cache) {
					SLJIT_UPDATE_WX_FLAGS(inst, inst + 2, 1);
					inst = (sljit_uw *)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset);
					SLJIT_CACHE_FLUSH(inst, inst + 2);
				}
			}
		}

		if (flush_cache) {
			SLJIT_UPDATE_WX_FLAGS(ptr, ptr + 1, 0);
		}

		*ptr = new_addr;

		if (flush_cache) {
			SLJIT_UPDATE_WX_FLAGS(ptr, ptr + 1, 1);
		}
	}
#else /* !SLJIT_CONFIG_ARM_V5 */
	sljit_uw *inst = (sljit_uw*)jump_ptr;

	SLJIT_UNUSED_ARG(executable_offset);

	SLJIT_ASSERT((inst[0] & 0xfff00000) == MOVW && (inst[1] & 0xfff00000) == MOVT);

	if (flush_cache) {
		SLJIT_UPDATE_WX_FLAGS(inst, inst + 2, 0);
	}

	inst[0] = MOVW | (inst[0] & 0xf000) | ((new_addr << 4) & 0xf0000) | (new_addr & 0xfff);
	inst[1] = MOVT | (inst[1] & 0xf000) | ((new_addr >> 12) & 0xf0000) | ((new_addr >> 16) & 0xfff);

	if (flush_cache) {
		SLJIT_UPDATE_WX_FLAGS(inst, inst + 2, 1);
		inst = (sljit_uw*)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset);
		SLJIT_CACHE_FLUSH(inst, inst + 2);
	}
#endif /* SLJIT_CONFIG_ARM_V5 */
}

static sljit_uw get_imm(sljit_uw imm);
static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 reg, sljit_uw imm);

static SLJIT_INLINE void inline_set_const(sljit_uw addr, sljit_sw executable_offset, sljit_uw new_constant, sljit_s32 flush_cache)
{
#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
	sljit_uw *ptr = (sljit_uw*)addr;
	sljit_uw *inst = (sljit_uw*)ptr[0];
	sljit_uw ldr_literal = ptr[1];
	sljit_uw src2;

	SLJIT_UNUSED_ARG(executable_offset);

	src2 = get_imm(new_constant);
	if (src2) {
		if (flush_cache) {
			SLJIT_UPDATE_WX_FLAGS(inst, inst + 1, 0);
		}

		*inst = 0xe3a00000 | (ldr_literal & 0xf000) | src2;

		if (flush_cache) {
			SLJIT_UPDATE_WX_FLAGS(inst, inst + 1, 1);
			inst = (sljit_uw*)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset);
			SLJIT_CACHE_FLUSH(inst, inst + 1);
		}
		return;
	}

	src2 = get_imm(~new_constant);
	if (src2) {
		if (flush_cache) {
			SLJIT_UPDATE_WX_FLAGS(inst, inst + 1, 0);
		}

		*inst = 0xe3e00000 | (ldr_literal & 0xf000) | src2;

		if (flush_cache) {
			SLJIT_UPDATE_WX_FLAGS(inst, inst + 1, 1);
			inst = (sljit_uw*)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset);
			SLJIT_CACHE_FLUSH(inst, inst + 1);
		}
		return;
	}

	if (ldr_literal & (1 << 23))
		ptr = inst + ((ldr_literal & 0xfff) >> 2) + 2;
	else
		ptr = inst + 1;

	if (*inst != ldr_literal) {
		if (flush_cache) {
			SLJIT_UPDATE_WX_FLAGS(inst, inst + 1, 0);
		}

		*inst = ldr_literal;

		if (flush_cache) {
			SLJIT_UPDATE_WX_FLAGS(inst, inst + 1, 1);
			inst = (sljit_uw*)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset);
			SLJIT_CACHE_FLUSH(inst, inst + 1);
		}
	}

	if (flush_cache) {
		SLJIT_UPDATE_WX_FLAGS(ptr, ptr + 1, 0);
	}

	*ptr = new_constant;

	if (flush_cache) {
		SLJIT_UPDATE_WX_FLAGS(ptr, ptr + 1, 1);
	}
#else /* !SLJIT_CONFIG_ARM_V5 */
	sljit_uw *inst = (sljit_uw*)addr;

	SLJIT_UNUSED_ARG(executable_offset);

	SLJIT_ASSERT((inst[0] & 0xfff00000) == MOVW && (inst[1] & 0xfff00000) == MOVT);

	if (flush_cache) {
		SLJIT_UPDATE_WX_FLAGS(inst, inst + 2, 0);
	}

	inst[0] = MOVW | (inst[0] & 0xf000) | ((new_constant << 4) & 0xf0000) | (new_constant & 0xfff);
	inst[1] = MOVT | (inst[1] & 0xf000) | ((new_constant >> 12) & 0xf0000) | ((new_constant >> 16) & 0xfff);

	if (flush_cache) {
		SLJIT_UPDATE_WX_FLAGS(inst, inst + 2, 1);
		inst = (sljit_uw*)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset);
		SLJIT_CACHE_FLUSH(inst, inst + 2);
	}
#endif /* SLJIT_CONFIG_ARM_V5 */
}

SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compiler)
{
	struct sljit_memory_fragment *buf;
	sljit_uw *code;
	sljit_uw *code_ptr;
	sljit_uw *buf_ptr;
	sljit_uw *buf_end;
	sljit_uw size;
	sljit_uw word_count;
	sljit_uw next_addr;
	sljit_sw executable_offset;
	sljit_uw addr;
#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
	sljit_uw cpool_size;
	sljit_uw cpool_skip_alignment;
	sljit_uw cpool_current_index;
	sljit_uw *cpool_start_address;
	sljit_uw *last_pc_patch;
	struct future_patch *first_patch;
#endif

	struct sljit_label *label;
	struct sljit_jump *jump;
	struct sljit_const *const_;
	struct sljit_put_label *put_label;

	CHECK_ERROR_PTR();
	CHECK_PTR(check_sljit_generate_code(compiler));
	reverse_buf(compiler);

	/* Second code generation pass. */
#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
	size = compiler->size + (compiler->patches << 1);
	if (compiler->cpool_fill > 0)
		size += compiler->cpool_fill + CONST_POOL_ALIGNMENT - 1;
#else /* !SLJIT_CONFIG_ARM_V5 */
	size = compiler->size;
#endif /* SLJIT_CONFIG_ARM_V5 */
	code = (sljit_uw*)SLJIT_MALLOC_EXEC(size * sizeof(sljit_uw), compiler->exec_allocator_data);
	PTR_FAIL_WITH_EXEC_IF(code);
	buf = compiler->buf;

#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
	cpool_size = 0;
	cpool_skip_alignment = 0;
	cpool_current_index = 0;
	cpool_start_address = NULL;
	first_patch = NULL;
	last_pc_patch = code;
#endif /* SLJIT_CONFIG_ARM_V5 */

	code_ptr = code;
	word_count = 0;
	next_addr = 1;
	executable_offset = SLJIT_EXEC_OFFSET(code);

	label = compiler->labels;
	jump = compiler->jumps;
	const_ = compiler->consts;
	put_label = compiler->put_labels;

	if (label && label->size == 0) {
		label->addr = (sljit_uw)SLJIT_ADD_EXEC_OFFSET(code, executable_offset);
		label = label->next;
	}

	do {
		buf_ptr = (sljit_uw*)buf->memory;
		buf_end = buf_ptr + (buf->used_size >> 2);
		do {
			word_count++;
#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
			if (cpool_size > 0) {
				if (cpool_skip_alignment > 0) {
					buf_ptr++;
					cpool_skip_alignment--;
				}
				else {
					if (SLJIT_UNLIKELY(resolve_const_pool_index(compiler, &first_patch, cpool_current_index, cpool_start_address, buf_ptr))) {
						SLJIT_FREE_EXEC(code, compiler->exec_allocator_data);
						compiler->error = SLJIT_ERR_ALLOC_FAILED;
						return NULL;
					}
					buf_ptr++;
					if (++cpool_current_index >= cpool_size) {
						SLJIT_ASSERT(!first_patch);
						cpool_size = 0;
						if (label && label->size == word_count) {
							/* Points after the current instruction. */
							label->addr = (sljit_uw)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset);
							label->size = (sljit_uw)(code_ptr - code);
							label = label->next;

							next_addr = compute_next_addr(label, jump, const_, put_label);
						}
					}
				}
			}
			else if ((*buf_ptr & 0xff000000) != PUSH_POOL) {
#endif /* SLJIT_CONFIG_ARM_V5 */
				*code_ptr = *buf_ptr++;
				if (next_addr == word_count) {
					SLJIT_ASSERT(!label || label->size >= word_count);
					SLJIT_ASSERT(!jump || jump->addr >= word_count);
					SLJIT_ASSERT(!const_ || const_->addr >= word_count);
					SLJIT_ASSERT(!put_label || put_label->addr >= word_count);

					/* These structures are ordered by their address. */
					if (jump && jump->addr == word_count) {
#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
						if (detect_jump_type(jump, code_ptr, code, executable_offset))
							code_ptr--;
						jump->addr = (sljit_uw)code_ptr;
#else /* !SLJIT_CONFIG_ARM_V5 */
						jump->addr = (sljit_uw)(code_ptr - 2);
						if (detect_jump_type(jump, code_ptr, code, executable_offset))
							code_ptr -= 2;
#endif /* SLJIT_CONFIG_ARM_V5 */
						jump = jump->next;
					}
					if (label && label->size == word_count) {
						/* code_ptr can be affected above. */
						label->addr = (sljit_uw)SLJIT_ADD_EXEC_OFFSET(code_ptr + 1, executable_offset);
						label->size = (sljit_uw)((code_ptr + 1) - code);
						label = label->next;
					}
					if (const_ && const_->addr == word_count) {
#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
						const_->addr = (sljit_uw)code_ptr;
#else /* !SLJIT_CONFIG_ARM_V5 */
						const_->addr = (sljit_uw)(code_ptr - 1);
#endif /* SLJIT_CONFIG_ARM_V5 */
						const_ = const_->next;
					}
					if (put_label && put_label->addr == word_count) {
						SLJIT_ASSERT(put_label->label);
						put_label->addr = (sljit_uw)code_ptr;
						put_label = put_label->next;
					}
					next_addr = compute_next_addr(label, jump, const_, put_label);
				}
				code_ptr++;
#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
			}
			else {
				/* Fortunately, no need to shift. */
				cpool_size = *buf_ptr++ & ~PUSH_POOL;
				SLJIT_ASSERT(cpool_size > 0);
				cpool_start_address = ALIGN_INSTRUCTION(code_ptr + 1);
				cpool_current_index = patch_pc_relative_loads(last_pc_patch, code_ptr, cpool_start_address, cpool_size);
				if (cpool_current_index > 0) {
					/* Unconditional branch. */
					*code_ptr = B | (((sljit_uw)(cpool_start_address - code_ptr) + cpool_current_index - 2) & ~PUSH_POOL);
					code_ptr = (sljit_uw*)(cpool_start_address + cpool_current_index);
				}
				cpool_skip_alignment = CONST_POOL_ALIGNMENT - 1;
				cpool_current_index = 0;
				last_pc_patch = code_ptr;
			}
#endif /* SLJIT_CONFIG_ARM_V5 */
		} while (buf_ptr < buf_end);
		buf = buf->next;
	} while (buf);

	SLJIT_ASSERT(!label);
	SLJIT_ASSERT(!jump);
	SLJIT_ASSERT(!const_);
	SLJIT_ASSERT(!put_label);

#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
	SLJIT_ASSERT(cpool_size == 0);
	if (compiler->cpool_fill > 0) {
		cpool_start_address = ALIGN_INSTRUCTION(code_ptr);
		cpool_current_index = patch_pc_relative_loads(last_pc_patch, code_ptr, cpool_start_address, compiler->cpool_fill);
		if (cpool_current_index > 0)
			code_ptr = (sljit_uw*)(cpool_start_address + cpool_current_index);

		buf_ptr = compiler->cpool;
		buf_end = buf_ptr + compiler->cpool_fill;
		cpool_current_index = 0;
		while (buf_ptr < buf_end) {
			if (SLJIT_UNLIKELY(resolve_const_pool_index(compiler, &first_patch, cpool_current_index, cpool_start_address, buf_ptr))) {
				SLJIT_FREE_EXEC(code, compiler->exec_allocator_data);
				compiler->error = SLJIT_ERR_ALLOC_FAILED;
				return NULL;
			}
			buf_ptr++;
			cpool_current_index++;
		}
		SLJIT_ASSERT(!first_patch);
	}
#endif

	jump = compiler->jumps;
	while (jump) {
		buf_ptr = (sljit_uw*)jump->addr;

		if (jump->flags & PATCH_B) {
			addr = (sljit_uw)SLJIT_ADD_EXEC_OFFSET(buf_ptr + 2, executable_offset);
			if (!(jump->flags & JUMP_ADDR)) {
				SLJIT_ASSERT(jump->flags & JUMP_LABEL);
				SLJIT_ASSERT((sljit_sw)(jump->u.label->addr - addr) <= 0x01ffffff && (sljit_sw)(jump->u.label->addr - addr) >= -0x02000000);
				*buf_ptr |= ((jump->u.label->addr - addr) >> 2) & 0x00ffffff;
			}
			else {
				SLJIT_ASSERT((sljit_sw)(jump->u.target - addr) <= 0x01ffffff && (sljit_sw)(jump->u.target - addr) >= -0x02000000);
				*buf_ptr |= ((jump->u.target - addr) >> 2) & 0x00ffffff;
			}
		}
		else if (jump->flags & SLJIT_REWRITABLE_JUMP) {
#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
			jump->addr = (sljit_uw)code_ptr;
			code_ptr[0] = (sljit_uw)buf_ptr;
			code_ptr[1] = *buf_ptr;
			inline_set_jump_addr((sljit_uw)code_ptr, executable_offset, (jump->flags & JUMP_LABEL) ? jump->u.label->addr : jump->u.target, 0);
			code_ptr += 2;
#else /* !SLJIT_CONFIG_ARM_V5 */
			inline_set_jump_addr((sljit_uw)buf_ptr, executable_offset, (jump->flags & JUMP_LABEL) ? jump->u.label->addr : jump->u.target, 0);
#endif /* SLJIT_CONFIG_ARM_V5 */
		}
		else {
#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
			if (jump->flags & IS_BL)
				buf_ptr--;
			if (*buf_ptr & (1 << 23))
				buf_ptr += ((*buf_ptr & 0xfff) >> 2) + 2;
			else
				buf_ptr += 1;
			*buf_ptr = (jump->flags & JUMP_LABEL) ? jump->u.label->addr : jump->u.target;
#else /* !SLJIT_CONFIG_ARM_V5 */
			inline_set_jump_addr((sljit_uw)buf_ptr, executable_offset, (jump->flags & JUMP_LABEL) ? jump->u.label->addr : jump->u.target, 0);
#endif /* SLJIT_CONFIG_ARM_V5 */
		}
		jump = jump->next;
	}

#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
	const_ = compiler->consts;
	while (const_) {
		buf_ptr = (sljit_uw*)const_->addr;
		const_->addr = (sljit_uw)code_ptr;

		code_ptr[0] = (sljit_uw)buf_ptr;
		code_ptr[1] = *buf_ptr;
		if (*buf_ptr & (1 << 23))
			buf_ptr += ((*buf_ptr & 0xfff) >> 2) + 2;
		else
			buf_ptr += 1;
		/* Set the value again (can be a simple constant). */
		inline_set_const((sljit_uw)code_ptr, executable_offset, *buf_ptr, 0);
		code_ptr += 2;

		const_ = const_->next;
	}
#endif /* SLJIT_CONFIG_ARM_V5 */

	put_label = compiler->put_labels;
	while (put_label) {
		addr = put_label->label->addr;
		buf_ptr = (sljit_uw*)put_label->addr;

#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
		SLJIT_ASSERT((buf_ptr[0] & 0xffff0000) == 0xe59f0000);
		buf_ptr[((buf_ptr[0] & 0xfff) >> 2) + 2] = addr;
#else /* !SLJIT_CONFIG_ARM_V5 */
		SLJIT_ASSERT((buf_ptr[-1] & 0xfff00000) == MOVW && (buf_ptr[0] & 0xfff00000) == MOVT);
		buf_ptr[-1] |= ((addr << 4) & 0xf0000) | (addr & 0xfff);
		buf_ptr[0] |= ((addr >> 12) & 0xf0000) | ((addr >> 16) & 0xfff);
#endif /* SLJIT_CONFIG_ARM_V5 */
		put_label = put_label->next;
	}

	SLJIT_ASSERT(code_ptr - code <= (sljit_s32)size);

	compiler->error = SLJIT_ERR_COMPILED;
	compiler->executable_offset = executable_offset;
	compiler->executable_size = (sljit_uw)(code_ptr - code) * sizeof(sljit_uw);

	code = (sljit_uw*)SLJIT_ADD_EXEC_OFFSET(code, executable_offset);
	code_ptr = (sljit_uw*)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset);

	SLJIT_CACHE_FLUSH(code, code_ptr);
	SLJIT_UPDATE_WX_FLAGS(code, code_ptr, 1);
	return code;
}

SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type)
{
	switch (feature_type) {
	case SLJIT_HAS_FPU:
#ifdef SLJIT_IS_FPU_AVAILABLE
		return SLJIT_IS_FPU_AVAILABLE;
#else
		/* Available by default. */
		return 1;
#endif

	case SLJIT_HAS_CLZ:
	case SLJIT_HAS_CMOV:
#if (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7)
	case SLJIT_HAS_PREFETCH:
#endif
		return 1;

	default:
		return 0;
	}
}

/* --------------------------------------------------------------------- */
/*  Entry, exit                                                           */
/* --------------------------------------------------------------------- */

/* Creates an index in data_transfer_insts array. */
#define WORD_SIZE	0x00
#define BYTE_SIZE	0x01
#define HALF_SIZE	0x02
#define PRELOAD		0x03
#define SIGNED		0x04
#define LOAD_DATA	0x08

/* Flag bits for emit_op. */
#define ALLOW_IMM	0x10
#define ALLOW_INV_IMM	0x20
#define ALLOW_ANY_IMM	(ALLOW_IMM | ALLOW_INV_IMM)
#define ALLOW_NEG_IMM	0x40

/* s/l - store/load (1 bit)
   u/s - signed/unsigned (1 bit)
   w/b/h/N - word/byte/half/NOT allowed (2 bit)
   Storing signed and unsigned values uses the same instructions. */

static const sljit_uw data_transfer_insts[16] = {
/* s u w */ 0xe5000000 /* str */,
/* s u b */ 0xe5400000 /* strb */,
/* s u h */ 0xe10000b0 /* strh */,
/* s u N */ 0x00000000 /* not allowed */,
/* s s w */ 0xe5000000 /* str */,
/* s s b */ 0xe5400000 /* strb */,
/* s s h */ 0xe10000b0 /* strh */,
/* s s N */ 0x00000000 /* not allowed */,

/* l u w */ 0xe5100000 /* ldr */,
/* l u b */ 0xe5500000 /* ldrb */,
/* l u h */ 0xe11000b0 /* ldrh */,
/* l u p */ 0xf5500000 /* preload */,
/* l s w */ 0xe5100000 /* ldr */,
/* l s b */ 0xe11000d0 /* ldrsb */,
/* l s h */ 0xe11000f0 /* ldrsh */,
/* l s N */ 0x00000000 /* not allowed */,
};
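
/* Example (illustrative): index BYTE_SIZE | SIGNED | LOAD_DATA == 0x0d selects
   0xe11000d0 (ldrsb), one of the type2 forms that split their immediate
   offset into two nibbles. */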

#define EMIT_DATA_TRANSFER(type, add, target_reg, base_reg, arg) \
	(data_transfer_insts[(type) & 0xf] | ((add) << 23) | RD(target_reg) | RN(base_reg) | (sljit_uw)(arg))

/* Normal ldr/str instruction.
   Type2: ldrsb, ldrh, ldrsh */
#define IS_TYPE1_TRANSFER(type) \
	(data_transfer_insts[(type) & 0xf] & 0x04000000)
#define TYPE2_TRANSFER_IMM(imm) \
	(((imm) & 0xf) | (((imm) & 0xf0) << 4) | (1 << 22))
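
/* Worked example (illustrative): TYPE2_TRANSFER_IMM(0xab) == 0x400a0b: the low
   nibble goes to bits 0-3, the high nibble to bits 8-11, and bit 22 selects
   the immediate form of the type2 (ldrh/ldrsh/ldrsb/strh) encodings. */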

#define EMIT_FPU_OPERATION(opcode, mode, dst, src1, src2) \
	((sljit_uw)(opcode) | (sljit_uw)(mode) | VD(dst) | VM(src1) | VN(src2))

/* Flags for emit_op: */
  /* Arguments are swapped. */
#define ARGS_SWAPPED	0x01
  /* Inverted immediate. */
#define INV_IMM		0x02
  /* Source and destination are registers. */
#define MOVE_REG_CONV	0x04
  /* Unused return value. */
#define UNUSED_RETURN	0x08
/* SET_FLAGS must be (1 << 20) as it is also the value of S bit (can be used for optimization). */
#define SET_FLAGS	(1 << 20)
/* dst: reg, src1: reg
   src2: reg or imm (if allowed)
   SRC2_IMM must be (1 << 25) as it is also the value of I bit (can be used for optimization). */
#define SRC2_IMM	(1 << 25)

static sljit_s32 emit_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 inp_flags,
	sljit_s32 dst, sljit_sw dstw,
	sljit_s32 src1, sljit_sw src1w,
	sljit_s32 src2, sljit_sw src2w);

SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compiler,
	sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
	sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
{
	sljit_uw imm, offset;
	sljit_s32 i, tmp, size, word_arg_count;
	sljit_s32 saved_arg_count = SLJIT_KEPT_SAVEDS_COUNT(options);
#ifdef __SOFTFP__
	sljit_u32 float_arg_count;
#else
	sljit_u32 old_offset, f32_offset;
	sljit_u32 remap[3];
	sljit_u32 *remap_ptr = remap;
#endif

	CHECK_ERROR();
	CHECK(check_sljit_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
	set_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);

	imm = 0;

	tmp = SLJIT_S0 - saveds;
	for (i = SLJIT_S0 - saved_arg_count; i > tmp; i--)
		imm |= (sljit_uw)1 << reg_map[i];

	for (i = scratches; i >= SLJIT_FIRST_SAVED_REG; i--)
		imm |= (sljit_uw)1 << reg_map[i];

	SLJIT_ASSERT(reg_map[TMP_REG2] == 14);

	/* Push saved and temporary registers
	   multiple registers: stmdb sp!, {..., lr}
	   single register: str reg, [sp, #-4]! */
	if (imm != 0)
		FAIL_IF(push_inst(compiler, PUSH | (1 << 14) | imm));
	else
		FAIL_IF(push_inst(compiler, 0xe52d0004 | RD(TMP_REG2)));

	/* Stack must be aligned to 8 bytes: */
	size = GET_SAVED_REGISTERS_SIZE(scratches, saveds - saved_arg_count, 1);

	if (fsaveds > 0 || fscratches >= SLJIT_FIRST_SAVED_FLOAT_REG) {
		if ((size & SSIZE_OF(sw)) != 0) {
			FAIL_IF(push_inst(compiler, SUB | RD(SLJIT_SP) | RN(SLJIT_SP) | SRC2_IMM | sizeof(sljit_sw)));
			size += SSIZE_OF(sw);
		}

		if (fsaveds + fscratches >= SLJIT_NUMBER_OF_FLOAT_REGISTERS) {
			FAIL_IF(push_inst(compiler, VPUSH | VD(SLJIT_FS0) | ((sljit_uw)SLJIT_NUMBER_OF_SAVED_FLOAT_REGISTERS << 1)));
		} else {
			if (fsaveds > 0)
				FAIL_IF(push_inst(compiler, VPUSH | VD(SLJIT_FS0) | ((sljit_uw)fsaveds << 1)));
			if (fscratches >= SLJIT_FIRST_SAVED_FLOAT_REG)
				FAIL_IF(push_inst(compiler, VPUSH | VD(fscratches) | ((sljit_uw)(fscratches - (SLJIT_FIRST_SAVED_FLOAT_REG - 1)) << 1)));
		}
	}

	local_size = ((size + local_size + 0x7) & ~0x7) - size;
	compiler->local_size = local_size;
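
	/* Worked example (illustrative): with three saved words (size == 12) and a
	   requested local_size of 17, ((12 + 17 + 0x7) & ~0x7) == 32, so local_size
	   becomes 32 - 12 == 20 and the stack stays 8 byte aligned. */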

	if (options & SLJIT_ENTER_REG_ARG)
		arg_types = 0;

	arg_types >>= SLJIT_ARG_SHIFT;
	word_arg_count = 0;
	saved_arg_count = 0;
#ifdef __SOFTFP__
	SLJIT_COMPILE_ASSERT(SLJIT_FR0 == 1, float_register_index_start);

	offset = 0;
	float_arg_count = 0;

	while (arg_types) {
		switch (arg_types & SLJIT_ARG_MASK) {
		case SLJIT_ARG_TYPE_F64:
			if (offset & 0x7)
				offset += sizeof(sljit_sw);

			if (offset < 4 * sizeof(sljit_sw))
				FAIL_IF(push_inst(compiler, VMOV2 | (offset << 10) | ((offset + sizeof(sljit_sw)) << 14) | float_arg_count));
			else
				FAIL_IF(push_inst(compiler, VLDR_F32 | 0x800100 | RN(SLJIT_SP)
					| (float_arg_count << 12) | ((offset + (sljit_uw)size - 4 * sizeof(sljit_sw)) >> 2)));
			float_arg_count++;
			offset += sizeof(sljit_f64) - sizeof(sljit_sw);
			break;
		case SLJIT_ARG_TYPE_F32:
			if (offset < 4 * sizeof(sljit_sw))
				FAIL_IF(push_inst(compiler, VMOV | (float_arg_count << 16) | (offset << 10)));
			else
				FAIL_IF(push_inst(compiler, VLDR_F32 | 0x800000 | RN(SLJIT_SP)
					| (float_arg_count << 12) | ((offset + (sljit_uw)size - 4 * sizeof(sljit_sw)) >> 2)));
			float_arg_count++;
			break;
		default:
			word_arg_count++;

			if (!(arg_types & SLJIT_ARG_TYPE_SCRATCH_REG)) {
				tmp = SLJIT_S0 - saved_arg_count;
				saved_arg_count++;
			} else if (word_arg_count - 1 != (sljit_s32)(offset >> 2))
				tmp = word_arg_count;
			else
				break;

			if (offset < 4 * sizeof(sljit_sw))
				FAIL_IF(push_inst(compiler, MOV | RD(tmp) | (offset >> 2)));
			else
				FAIL_IF(push_inst(compiler, LDR | 0x800000 | RN(SLJIT_SP) | RD(tmp) | (offset + (sljit_uw)size - 4 * sizeof(sljit_sw))));
			break;
		}

		offset += sizeof(sljit_sw);
		arg_types >>= SLJIT_ARG_SHIFT;
	}

	compiler->args_size = offset;
#else
	offset = SLJIT_FR0;
	old_offset = SLJIT_FR0;
	f32_offset = 0;

	while (arg_types) {
		switch (arg_types & SLJIT_ARG_MASK) {
		case SLJIT_ARG_TYPE_F64:
			if (offset != old_offset)
				*remap_ptr++ = EMIT_FPU_OPERATION(VMOV_F32, SLJIT_32, offset, old_offset, 0);
			old_offset++;
			offset++;
			break;
		case SLJIT_ARG_TYPE_F32:
			if (f32_offset != 0) {
				*remap_ptr++ = EMIT_FPU_OPERATION(VMOV_F32, 0x20, offset, f32_offset, 0);
				f32_offset = 0;
			} else {
				if (offset != old_offset)
					*remap_ptr++ = EMIT_FPU_OPERATION(VMOV_F32, 0, offset, old_offset, 0);
				f32_offset = old_offset;
				old_offset++;
			}
			offset++;
			break;
		default:
			if (!(arg_types & SLJIT_ARG_TYPE_SCRATCH_REG)) {
				FAIL_IF(push_inst(compiler, MOV | RD(SLJIT_S0 - saved_arg_count) | RM(SLJIT_R0 + word_arg_count)));
				saved_arg_count++;
			}

			word_arg_count++;
			break;
		}
		arg_types >>= SLJIT_ARG_SHIFT;
	}

	SLJIT_ASSERT((sljit_uw)(remap_ptr - remap) <= sizeof(remap));

	while (remap_ptr > remap)
		FAIL_IF(push_inst(compiler, *(--remap_ptr)));
#endif

	if (local_size > 0)
		FAIL_IF(emit_op(compiler, SLJIT_SUB, ALLOW_IMM, SLJIT_SP, 0, SLJIT_SP, 0, SLJIT_IMM, local_size));

	return SLJIT_SUCCESS;
}

SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *compiler,
	sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
	sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
{
	sljit_s32 size;

	CHECK_ERROR();
	CHECK(check_sljit_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
	set_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);

	size = GET_SAVED_REGISTERS_SIZE(scratches, saveds - SLJIT_KEPT_SAVEDS_COUNT(options), 1);

	if ((size & SSIZE_OF(sw)) != 0 && (fsaveds > 0 || fscratches >= SLJIT_FIRST_SAVED_FLOAT_REG))
		size += SSIZE_OF(sw);

	compiler->local_size = ((size + local_size + 0x7) & ~0x7) - size;
	return SLJIT_SUCCESS;
}

static sljit_s32 emit_add_sp(struct sljit_compiler *compiler, sljit_uw imm)
{
	sljit_uw imm2 = get_imm(imm);

	if (imm2 == 0) {
		FAIL_IF(load_immediate(compiler, TMP_REG2, imm));
		imm2 = RM(TMP_REG2);
	}

	return push_inst(compiler, ADD | RD(SLJIT_SP) | RN(SLJIT_SP) | imm2);
}

static sljit_s32 emit_stack_frame_release(struct sljit_compiler *compiler, sljit_s32 frame_size)
{
	sljit_s32 local_size, fscratches, fsaveds, i, tmp;
	sljit_s32 saveds_restore_start = SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options);
	sljit_s32 lr_dst = TMP_PC;
	sljit_uw reg_list;

	SLJIT_ASSERT(reg_map[TMP_REG2] == 14);

	local_size = compiler->local_size;
	fscratches = compiler->fscratches;
	fsaveds = compiler->fsaveds;

	if (fsaveds > 0 || fscratches >= SLJIT_FIRST_SAVED_FLOAT_REG) {
		if (local_size > 0)
			FAIL_IF(emit_add_sp(compiler, (sljit_uw)local_size));

		if (fsaveds + fscratches >= SLJIT_NUMBER_OF_FLOAT_REGISTERS) {
			FAIL_IF(push_inst(compiler, VPOP | VD(SLJIT_FS0) | ((sljit_uw)SLJIT_NUMBER_OF_SAVED_FLOAT_REGISTERS << 1)));
		} else {
			if (fscratches >= SLJIT_FIRST_SAVED_FLOAT_REG)
				FAIL_IF(push_inst(compiler, VPOP | VD(fscratches) | ((sljit_uw)(fscratches - (SLJIT_FIRST_SAVED_FLOAT_REG - 1)) << 1)));
			if (fsaveds > 0)
				FAIL_IF(push_inst(compiler, VPOP | VD(SLJIT_FS0) | ((sljit_uw)fsaveds << 1)));
		}

		local_size = GET_SAVED_REGISTERS_SIZE(compiler->scratches, compiler->saveds, 1) & 0x7;
	}

	if (frame_size < 0) {
		lr_dst = TMP_REG2;
		frame_size = 0;
	} else if (frame_size > 0)
		lr_dst = 0;

	reg_list = 0;
	if (lr_dst != 0)
		reg_list |= (sljit_uw)1 << reg_map[lr_dst];

	tmp = SLJIT_S0 - compiler->saveds;
	if (saveds_restore_start != tmp) {
		for (i = saveds_restore_start; i > tmp; i--)
			reg_list |= (sljit_uw)1 << reg_map[i];
	} else
		saveds_restore_start = 0;

	for (i = compiler->scratches; i >= SLJIT_FIRST_SAVED_REG; i--)
		reg_list |= (sljit_uw)1 << reg_map[i];

	if (lr_dst == 0 && (reg_list & (reg_list - 1)) == 0) {
		/* The local_size does not include the saved registers. */
		local_size += SSIZE_OF(sw);

		if (reg_list != 0)
			local_size += SSIZE_OF(sw);

		if (frame_size > local_size)
			FAIL_IF(push_inst(compiler, SUB | RD(SLJIT_SP) | RN(SLJIT_SP) | (1 << 25) | (sljit_uw)(frame_size - local_size)));
		else if (frame_size < local_size)
			FAIL_IF(emit_add_sp(compiler, (sljit_uw)(local_size - frame_size)));

		if (reg_list == 0)
			return SLJIT_SUCCESS;

		if (saveds_restore_start != 0) {
			SLJIT_ASSERT(reg_list == ((sljit_uw)1 << reg_map[saveds_restore_start]));
			lr_dst = saveds_restore_start;
		} else {
			SLJIT_ASSERT(reg_list == ((sljit_uw)1 << reg_map[SLJIT_FIRST_SAVED_REG]));
			lr_dst = SLJIT_FIRST_SAVED_REG;
		}

		return push_inst(compiler, LDR | 0x800000 | RN(SLJIT_SP) | RD(lr_dst) | (sljit_uw)(frame_size - 2 * SSIZE_OF(sw)));
	}

	if (local_size > 0)
		FAIL_IF(emit_add_sp(compiler, (sljit_uw)local_size));

	/* Pop saved and temporary registers
	   multiple registers: ldmia sp!, {...}
	   single register: ldr reg, [sp], #4 */
	if ((reg_list & (reg_list - 1)) == 0) {
		SLJIT_ASSERT(lr_dst != 0);
		SLJIT_ASSERT(reg_list == (sljit_uw)1 << reg_map[lr_dst]);

		return push_inst(compiler, 0xe49d0004 | RD(lr_dst));
	}

	FAIL_IF(push_inst(compiler, POP | reg_list));
	if (frame_size > 0)
		return push_inst(compiler, SUB | RD(SLJIT_SP) | RN(SLJIT_SP) | (1 << 25) | ((sljit_uw)frame_size - sizeof(sljit_sw)));
	return SLJIT_SUCCESS;
}

SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return_void(struct sljit_compiler *compiler)
{
	CHECK_ERROR();
	CHECK(check_sljit_emit_return_void(compiler));

	return emit_stack_frame_release(compiler, 0);
}

/* --------------------------------------------------------------------- */
/*  Operators                                                             */
/* --------------------------------------------------------------------- */

#define EMIT_SHIFT_INS_AND_RETURN(opcode) \
	SLJIT_ASSERT(!(flags & INV_IMM) && !(src2 & SRC2_IMM)); \
	if (compiler->shift_imm != 0x20) { \
		SLJIT_ASSERT(src1 == TMP_REG1); \
		SLJIT_ASSERT(!(flags & ARGS_SWAPPED)); \
		\
		if (compiler->shift_imm != 0) \
			return push_inst(compiler, MOV | (flags & SET_FLAGS) | \
				RD(dst) | (compiler->shift_imm << 7) | (opcode << 5) | RM(src2)); \
		return push_inst(compiler, MOV | (flags & SET_FLAGS) | RD(dst) | RM(src2)); \
	} \
	return push_inst(compiler, MOV | (flags & SET_FLAGS) | RD(dst) \
		| RM8((flags & ARGS_SWAPPED) ? src1 : src2) | (sljit_uw)(opcode << 5) \
		| 0x10 | RM((flags & ARGS_SWAPPED) ? src2 : src1));
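
/* Example (illustrative): for SLJIT_SHL with a constant amount the macro emits
   "mov dst, src2, lsl #shift_imm" (shift type in bits 5-6, amount in bits
   7-11); when shift_imm == 0x20 the amount is a register and the bit 4 form
   "mov dst, src1, lsl src2" is emitted instead. */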

static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 flags,
	sljit_uw dst, sljit_uw src1, sljit_uw src2)
{
	switch (GET_OPCODE(op)) {
	case SLJIT_MOV:
		SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & ARGS_SWAPPED));
		if (dst != src2) {
			if (src2 & SRC2_IMM) {
				return push_inst(compiler, ((flags & INV_IMM) ? MVN : MOV) | RD(dst) | src2);
			}
			return push_inst(compiler, MOV | RD(dst) | RM(src2));
		}
		return SLJIT_SUCCESS;

	case SLJIT_MOV_U8:
	case SLJIT_MOV_S8:
		SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & ARGS_SWAPPED));
		if (flags & MOVE_REG_CONV) {
#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
			if (op == SLJIT_MOV_U8)
				return push_inst(compiler, AND | RD(dst) | RN(src2) | SRC2_IMM | 0xff);
			FAIL_IF(push_inst(compiler, MOV | RD(dst) | (24 << 7) | RM(src2)));
			return push_inst(compiler, MOV | RD(dst) | (24 << 7) | (op == SLJIT_MOV_U8 ? 0x20 : 0x40) | RM(dst));
#else /* !SLJIT_CONFIG_ARM_V5 */
			return push_inst(compiler, (op == SLJIT_MOV_U8 ? UXTB : SXTB) | RD(dst) | RM(src2));
#endif /* SLJIT_CONFIG_ARM_V5 */
		}
		else if (dst != src2) {
			SLJIT_ASSERT(src2 & SRC2_IMM);
			return push_inst(compiler, ((flags & INV_IMM) ? MVN : MOV) | RD(dst) | src2);
		}
		return SLJIT_SUCCESS;

	case SLJIT_MOV_U16:
	case SLJIT_MOV_S16:
		SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & ARGS_SWAPPED));
		if (flags & MOVE_REG_CONV) {
#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
			FAIL_IF(push_inst(compiler, MOV | RD(dst) | (16 << 7) | RM(src2)));
			return push_inst(compiler, MOV | RD(dst) | (16 << 7) | (op == SLJIT_MOV_U16 ? 0x20 : 0x40) | RM(dst));
#else /* !SLJIT_CONFIG_ARM_V5 */
			return push_inst(compiler, (op == SLJIT_MOV_U16 ? UXTH : SXTH) | RD(dst) | RM(src2));
#endif /* SLJIT_CONFIG_ARM_V5 */
		}
		else if (dst != src2) {
			SLJIT_ASSERT(src2 & SRC2_IMM);
			return push_inst(compiler, ((flags & INV_IMM) ? MVN : MOV) | RD(dst) | src2);
		}
		return SLJIT_SUCCESS;

	case SLJIT_NOT:
		if (src2 & SRC2_IMM)
			return push_inst(compiler, ((flags & INV_IMM) ? MOV : MVN) | (flags & SET_FLAGS) | RD(dst) | src2);

		return push_inst(compiler, MVN | (flags & SET_FLAGS) | RD(dst) | RM(src2));

	case SLJIT_CLZ:
		SLJIT_ASSERT(!(flags & INV_IMM));
		SLJIT_ASSERT(!(src2 & SRC2_IMM));
		FAIL_IF(push_inst(compiler, CLZ | RD(dst) | RM(src2)));
		return SLJIT_SUCCESS;

	case SLJIT_ADD:
		SLJIT_ASSERT(!(flags & INV_IMM));

		if ((flags & (UNUSED_RETURN | ARGS_SWAPPED)) == UNUSED_RETURN)
			return push_inst(compiler, CMN | SET_FLAGS | RN(src1) | ((src2 & SRC2_IMM) ? src2 : RM(src2)));
		return push_inst(compiler, ADD | (flags & SET_FLAGS) | RD(dst) | RN(src1) | ((src2 & SRC2_IMM) ? src2 : RM(src2)));

	case SLJIT_ADDC:
		SLJIT_ASSERT(!(flags & INV_IMM));
		return push_inst(compiler, ADC | (flags & SET_FLAGS) | RD(dst) | RN(src1) | ((src2 & SRC2_IMM) ? src2 : RM(src2)));

	case SLJIT_SUB:
		SLJIT_ASSERT(!(flags & INV_IMM));

		if ((flags & (UNUSED_RETURN | ARGS_SWAPPED)) == UNUSED_RETURN)
			return push_inst(compiler, CMP | SET_FLAGS | RN(src1) | ((src2 & SRC2_IMM) ? src2 : RM(src2)));

		return push_inst(compiler, (!(flags & ARGS_SWAPPED) ? SUB : RSB) | (flags & SET_FLAGS)
			| RD(dst) | RN(src1) | ((src2 & SRC2_IMM) ? src2 : RM(src2)));

	case SLJIT_SUBC:
		SLJIT_ASSERT(!(flags & INV_IMM));
		return push_inst(compiler, (!(flags & ARGS_SWAPPED) ? SBC : RSC) | (flags & SET_FLAGS)
			| RD(dst) | RN(src1) | ((src2 & SRC2_IMM) ? src2 : RM(src2)));

	case SLJIT_MUL:
		SLJIT_ASSERT(!(flags & INV_IMM));
		SLJIT_ASSERT(!(src2 & SRC2_IMM));
		compiler->status_flags_state = 0;

		if (!HAS_FLAGS(op))
			return push_inst(compiler, MUL | RN(dst) | RM8(src2) | RM(src1));

		FAIL_IF(push_inst(compiler, SMULL | RN(TMP_REG1) | RD(dst) | RM8(src2) | RM(src1)));

		/* cmp TMP_REG1, dst asr #31. */
		return push_inst(compiler, CMP | SET_FLAGS | RN(TMP_REG1) | RM(dst) | 0xfc0);

	case SLJIT_AND:
		if ((flags & (UNUSED_RETURN | INV_IMM)) == UNUSED_RETURN)
			return push_inst(compiler, TST | SET_FLAGS | RN(src1) | ((src2 & SRC2_IMM) ? src2 : RM(src2)));
		return push_inst(compiler, (!(flags & INV_IMM) ? AND : BIC) | (flags & SET_FLAGS)
			| RD(dst) | RN(src1) | ((src2 & SRC2_IMM) ? src2 : RM(src2)));

	case SLJIT_OR:
		SLJIT_ASSERT(!(flags & INV_IMM));
		return push_inst(compiler, ORR | (flags & SET_FLAGS) | RD(dst) | RN(src1) | ((src2 & SRC2_IMM) ? src2 : RM(src2)));

	case SLJIT_XOR:
		SLJIT_ASSERT(!(flags & INV_IMM));
		return push_inst(compiler, EOR | (flags & SET_FLAGS) | RD(dst) | RN(src1) | ((src2 & SRC2_IMM) ? src2 : RM(src2)));

	case SLJIT_SHL:
		EMIT_SHIFT_INS_AND_RETURN(0);

	case SLJIT_LSHR:
		EMIT_SHIFT_INS_AND_RETURN(1);

	case SLJIT_ASHR:
		EMIT_SHIFT_INS_AND_RETURN(2);
	}

	SLJIT_UNREACHABLE();
	return SLJIT_SUCCESS;
}

#undef EMIT_SHIFT_INS_AND_RETURN

/* Tests whether the immediate can be stored in the 12 bit imm field.
   Returns 0 if not possible. */
static sljit_uw get_imm(sljit_uw imm)
{
	sljit_u32 rol;

	if (imm <= 0xff)
		return SRC2_IMM | imm;

	if (!(imm & 0xff000000)) {
		imm <<= 8;
		rol = 8;
	}
	else {
		imm = (imm << 24) | (imm >> 8);
		rol = 0;
	}

	if (!(imm & 0xff000000)) {
		imm <<= 8;
		rol += 4;
	}

	if (!(imm & 0xf0000000)) {
		imm <<= 4;
		rol += 2;
	}

	if (!(imm & 0xc0000000)) {
		imm <<= 2;
		rol += 1;
	}

	if (!(imm & 0x00ffffff))
		return SRC2_IMM | (imm >> 24) | (rol << 8);
	else
		return 0;
}
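
/* Worked example (illustrative): get_imm(0x3fc00) succeeds because 0x3fc00 is
   0xff shifted left by 10: it returns SRC2_IMM | 0xff | (11 << 8), where the
   rotation field 11 means "rotate right by 22", i.e. rotate left by 10.
   get_imm(0x101) returns 0, since 0x101 spans nine significant bits. */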

#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
static sljit_s32 generate_int(struct sljit_compiler *compiler, sljit_s32 reg, sljit_uw imm, sljit_s32 positive)
{
	sljit_uw mask;
	sljit_uw imm1;
	sljit_uw imm2;
	sljit_uw rol;

	/* Step1: Search a zero byte (8 continuous zero bits). */
	mask = 0xff000000;
	rol = 8;
	while (1) {
		if (!(imm & mask)) {
			/* Rol imm by rol. */
			imm = (imm << rol) | (imm >> (32 - rol));
			/* Calculate arm rol. */
			rol = 4 + (rol >> 1);
			break;
		}
		rol += 2;
		mask >>= 2;
		if (mask & 0x3) {
			/* rol by 8. */
			imm = (imm << 8) | (imm >> 24);
			mask = 0xff00;
			rol = 24;
			while (1) {
				if (!(imm & mask)) {
					/* Rol imm by rol. */
					imm = (imm << rol) | (imm >> (32 - rol));
					/* Calculate arm rol. */
					rol = (rol >> 1) - 8;
					break;
				}
				rol += 2;
				mask >>= 2;
				if (mask & 0x3)
					return 0;
			}
			break;
		}
	}

	/* The low 8 bit must be zero. */
	SLJIT_ASSERT(!(imm & 0xff));

	if (!(imm & 0xff000000)) {
		imm1 = SRC2_IMM | ((imm >> 16) & 0xff) | (((rol + 4) & 0xf) << 8);
		imm2 = SRC2_IMM | ((imm >> 8) & 0xff) | (((rol + 8) & 0xf) << 8);
	}
	else if (imm & 0xc0000000) {
		imm1 = SRC2_IMM | ((imm >> 24) & 0xff) | ((rol & 0xf) << 8);
		imm <<= 8;
		rol += 4;

		if (!(imm & 0xff000000)) {
			imm <<= 8;
			rol += 4;
		}

		if (!(imm & 0xf0000000)) {
			imm <<= 4;
			rol += 2;
		}

		if (!(imm & 0xc0000000)) {
			imm <<= 2;
			rol += 1;
		}

		if (!(imm & 0x00ffffff))
			imm2 = SRC2_IMM | (imm >> 24) | ((rol & 0xf) << 8);
		else
			return 0;
	}
	else {
		if (!(imm & 0xf0000000)) {
			imm <<= 4;
			rol += 2;
		}

		if (!(imm & 0xc0000000)) {
			imm <<= 2;
			rol += 1;
		}

		imm1 = SRC2_IMM | ((imm >> 24) & 0xff) | ((rol & 0xf) << 8);
		imm <<= 8;
		rol += 4;

		if (!(imm & 0xf0000000)) {
			imm <<= 4;
			rol += 2;
		}

		if (!(imm & 0xc0000000)) {
			imm <<= 2;
			rol += 1;
		}

		if (!(imm & 0x00ffffff))
			imm2 = SRC2_IMM | (imm >> 24) | ((rol & 0xf) << 8);
		else
			return 0;
	}

	FAIL_IF(push_inst(compiler, (positive ? MOV : MVN) | RD(reg) | imm1));
	FAIL_IF(push_inst(compiler, (positive ? ORR : BIC) | RD(reg) | RN(reg) | imm2));
	return 1;
}
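
/* Usage sketch (illustrative): 0xff00ff00 has no single rotated-byte encoding,
   so generate_int(compiler, reg, 0xff00ff00, 1) emits
       mov reg, #0xff00
       orr reg, reg, #0xff000000
   and load_immediate() only falls back to a literal pool load when no such
   two-instruction sequence exists either. */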

static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 reg, sljit_uw imm)
{
	sljit_uw tmp;

#if (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7)
	if (!(imm & ~(sljit_uw)0xffff))
		return push_inst(compiler, MOVW | RD(reg) | ((imm << 4) & 0xf0000) | (imm & 0xfff));
#endif /* SLJIT_CONFIG_ARM_V7 */

	/* Create imm by 1 inst. */
	tmp = get_imm(imm);
	if (tmp)
		return push_inst(compiler, MOV | RD(reg) | tmp);

	tmp = get_imm(~imm);
	if (tmp)
		return push_inst(compiler, MVN | RD(reg) | tmp);

#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
	/* Create imm by 2 inst. */
	FAIL_IF(generate_int(compiler, reg, imm, 1));
	FAIL_IF(generate_int(compiler, reg, ~imm, 0));

	/* Load integer. */
	return push_inst_with_literal(compiler, EMIT_DATA_TRANSFER(WORD_SIZE | LOAD_DATA, 1, reg, TMP_PC, 0), imm);
#else /* !SLJIT_CONFIG_ARM_V5 */
	FAIL_IF(push_inst(compiler, MOVW | RD(reg) | ((imm << 4) & 0xf0000) | (imm & 0xfff)));
	if (imm <= 0xffff)
		return SLJIT_SUCCESS;
	return push_inst(compiler, MOVT | RD(reg) | ((imm >> 12) & 0xf0000) | ((imm >> 16) & 0xfff));
#endif /* SLJIT_CONFIG_ARM_V5 */
}

static SLJIT_INLINE sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg,
	sljit_s32 arg, sljit_sw argw, sljit_s32 tmp_reg)
{
	sljit_uw imm, offset_reg;
	sljit_sw mask = IS_TYPE1_TRANSFER(flags) ? 0xfff : 0xff;

	SLJIT_ASSERT (arg & SLJIT_MEM);
	SLJIT_ASSERT((arg & REG_MASK) != tmp_reg || (arg == SLJIT_MEM1(tmp_reg) && argw >= -mask && argw <= mask));

	if (SLJIT_UNLIKELY(!(arg & REG_MASK))) {
		FAIL_IF(load_immediate(compiler, tmp_reg, (sljit_uw)(argw & ~mask)));
		argw &= mask;

		return push_inst(compiler, EMIT_DATA_TRANSFER(flags, 1, reg, tmp_reg,
			(mask == 0xff) ? TYPE2_TRANSFER_IMM(argw) : argw));
	}

	if (arg & OFFS_REG_MASK) {
		offset_reg = OFFS_REG(arg);
		arg &= REG_MASK;
		argw &= 0x3;

		if (argw != 0 && (mask == 0xff)) {
			FAIL_IF(push_inst(compiler, ADD | RD(tmp_reg) | RN(arg) | RM(offset_reg) | ((sljit_uw)argw << 7)));
			return push_inst(compiler, EMIT_DATA_TRANSFER(flags, 1, reg, tmp_reg, TYPE2_TRANSFER_IMM(0)));
		}

		/* Bit 25: RM is offset. */
		return push_inst(compiler, EMIT_DATA_TRANSFER(flags, 1, reg, arg,
			RM(offset_reg) | (mask == 0xff ? 0 : (1 << 25)) | ((sljit_uw)argw << 7)));
	}

	arg &= REG_MASK;

	if (argw > mask) {
		imm = get_imm((sljit_uw)(argw & ~mask));
		if (imm) {
			FAIL_IF(push_inst(compiler, ADD | RD(tmp_reg) | RN(arg) | imm));
			argw = argw & mask;
			arg = tmp_reg;
		}
	}
	else if (argw < -mask) {
		imm = get_imm((sljit_uw)(-argw & ~mask));
		if (imm) {
			FAIL_IF(push_inst(compiler, SUB | RD(tmp_reg) | RN(arg) | imm));
			argw = -(-argw & mask);
			arg = tmp_reg;
		}
	}

	if (argw <= mask && argw >= -mask) {
		if (argw >= 0) {
			if (mask == 0xff)
				argw = TYPE2_TRANSFER_IMM(argw);
			return push_inst(compiler, EMIT_DATA_TRANSFER(flags, 1, reg, arg, argw));
		}

		argw = -argw;

		if (mask == 0xff)
			argw = TYPE2_TRANSFER_IMM(argw);

		return push_inst(compiler, EMIT_DATA_TRANSFER(flags, 0, reg, arg, argw));
	}

	FAIL_IF(load_immediate(compiler, tmp_reg, (sljit_uw)argw));
	return push_inst(compiler, EMIT_DATA_TRANSFER(flags, 1, reg, arg,
		RM(tmp_reg) | (mask == 0xff ? 0 : (1 << 25))));
}
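
/* Usage sketch (illustrative): a word load from [r1 + 0x1004] does not fit the
   12 bit offset, so the code above emits "add tmp_reg, r1, #0x1000" (the upper
   bits satisfy get_imm) followed by "ldr reg, [tmp_reg, #4]". */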

static sljit_s32 emit_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 inp_flags,
	sljit_s32 dst, sljit_sw dstw,
	sljit_s32 src1, sljit_sw src1w,
	sljit_s32 src2, sljit_sw src2w)
{
	/* src1 is reg or TMP_REG1
	   src2 is reg, TMP_REG2, or imm
	   result goes to TMP_REG2, so put result can use TMP_REG1. */

	/* We prefer registers and simple constants. */
	sljit_s32 dst_reg;
	sljit_s32 src1_reg;
	sljit_s32 src2_reg = 0;
	sljit_s32 flags = HAS_FLAGS(op) ? SET_FLAGS : 0;
	sljit_s32 neg_op = 0;

	if (dst == TMP_REG2)
		flags |= UNUSED_RETURN;

	SLJIT_ASSERT(!(inp_flags & ALLOW_INV_IMM) || (inp_flags & ALLOW_IMM));

	if (inp_flags & ALLOW_NEG_IMM) {
		switch (GET_OPCODE(op)) {
		case SLJIT_ADD:
			compiler->status_flags_state = SLJIT_CURRENT_FLAGS_ADD;
			neg_op = SLJIT_SUB;
			break;
		case SLJIT_ADDC:
			compiler->status_flags_state = SLJIT_CURRENT_FLAGS_ADD;
			neg_op = SLJIT_SUBC;
			break;
		case SLJIT_SUB:
			compiler->status_flags_state = SLJIT_CURRENT_FLAGS_SUB;
			neg_op = SLJIT_ADD;
			break;
		case SLJIT_SUBC:
			compiler->status_flags_state = SLJIT_CURRENT_FLAGS_SUB;
			neg_op = SLJIT_ADDC;
			break;
		}
	}

	do {
		if (!(inp_flags & ALLOW_IMM))
			break;

		if (src2 & SLJIT_IMM) {
			src2_reg = (sljit_s32)get_imm((sljit_uw)src2w);
			if (src2_reg)
				break;
			if (inp_flags & ALLOW_INV_IMM) {
				src2_reg = (sljit_s32)get_imm(~(sljit_uw)src2w);
				if (src2_reg) {
					flags |= INV_IMM;
					break;
				}
			}
			if (neg_op != 0) {
				src2_reg = (sljit_s32)get_imm((sljit_uw)-src2w);
				if (src2_reg) {
					op = neg_op | GET_ALL_FLAGS(op);
					break;
				}
			}
		}

		if (src1 & SLJIT_IMM) {
			src2_reg = (sljit_s32)get_imm((sljit_uw)src1w);
			if (src2_reg) {
				flags |= ARGS_SWAPPED;
				src1 = src2;
				src1w = src2w;
				break;
			}
			if (inp_flags & ALLOW_INV_IMM) {
				src2_reg = (sljit_s32)get_imm(~(sljit_uw)src1w);
				if (src2_reg) {
					flags |= ARGS_SWAPPED | INV_IMM;
					src1 = src2;
					src1w = src2w;
					break;
				}
			}
			if (neg_op >= SLJIT_SUB) {
				/* Note: additive operation (commutative). */
				src2_reg = (sljit_s32)get_imm((sljit_uw)-src1w);
				if (src2_reg) {
					src1 = src2;
					src1w = src2w;
					op = neg_op | GET_ALL_FLAGS(op);
					break;
				}
			}
		}
	} while (0);

	/* Source 1. */
	if (FAST_IS_REG(src1))
		src1_reg = src1;
	else if (src1 & SLJIT_MEM) {
		FAIL_IF(emit_op_mem(compiler, inp_flags | LOAD_DATA, TMP_REG1, src1, src1w, TMP_REG1));
		src1_reg = TMP_REG1;
	}
	else {
		FAIL_IF(load_immediate(compiler, TMP_REG1, (sljit_uw)src1w));
		src1_reg = TMP_REG1;
	}

	/* Destination. */
	dst_reg = FAST_IS_REG(dst) ? dst : TMP_REG2;

	if (op <= SLJIT_MOV_P) {
		if (dst & SLJIT_MEM) {
			if (inp_flags & BYTE_SIZE)
				inp_flags &= ~SIGNED;

			if (FAST_IS_REG(src2))
				return emit_op_mem(compiler, inp_flags, src2, dst, dstw, TMP_REG2);
		}

		if (FAST_IS_REG(src2) && dst_reg != TMP_REG2)
			flags |= MOVE_REG_CONV;
	}

	/* Source 2. */
	if (src2_reg == 0) {
		src2_reg = (op <= SLJIT_MOV_P) ? dst_reg : TMP_REG2;

		if (FAST_IS_REG(src2))
			src2_reg = src2;
		else if (src2 & SLJIT_MEM)
			FAIL_IF(emit_op_mem(compiler, inp_flags | LOAD_DATA, src2_reg, src2, src2w, TMP_REG2));
		else
			FAIL_IF(load_immediate(compiler, src2_reg, (sljit_uw)src2w));
	}

	FAIL_IF(emit_single_op(compiler, op, flags, (sljit_uw)dst_reg, (sljit_uw)src1_reg, (sljit_uw)src2_reg));

	if (!(dst & SLJIT_MEM))
		return SLJIT_SUCCESS;

	return emit_op_mem(compiler, inp_flags, dst_reg, dst, dstw, TMP_REG1);
}

#ifdef __cplusplus
extern "C" {
#endif

#if defined(__GNUC__)
extern unsigned int __aeabi_uidivmod(unsigned int numerator, unsigned int denominator);
extern int __aeabi_idivmod(int numerator, int denominator);
#else
#error "Software divmod functions are needed"
#endif

#ifdef __cplusplus
}
#endif
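
/* Background (illustrative): both EABI helpers return the quotient in r0 and
   the remainder in r1, so SLJIT_DIVMOD_* maps onto a single call; the code
   below only preserves the scratch registers the call may clobber. */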

SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compiler, sljit_s32 op)
{
	sljit_uw saved_reg_list[3];
	sljit_sw saved_reg_count;

	CHECK_ERROR();
	CHECK(check_sljit_emit_op0(compiler, op));

	op = GET_OPCODE(op);
	switch (op) {
	case SLJIT_BREAKPOINT:
		FAIL_IF(push_inst(compiler, BKPT));
		break;
	case SLJIT_NOP:
		FAIL_IF(push_inst(compiler, NOP));
		break;
	case SLJIT_LMUL_UW:
	case SLJIT_LMUL_SW:
		return push_inst(compiler, (op == SLJIT_LMUL_UW ? UMULL : SMULL)
			| RN(SLJIT_R1) | RD(SLJIT_R0) | RM8(SLJIT_R0) | RM(SLJIT_R1));
	case SLJIT_DIVMOD_UW:
	case SLJIT_DIVMOD_SW:
	case SLJIT_DIV_UW:
	case SLJIT_DIV_SW:
		SLJIT_COMPILE_ASSERT((SLJIT_DIVMOD_UW & 0x2) == 0 && SLJIT_DIV_UW - 0x2 == SLJIT_DIVMOD_UW, bad_div_opcode_assignments);
		SLJIT_ASSERT(reg_map[2] == 1 && reg_map[3] == 2 && reg_map[4] == 3);

		saved_reg_count = 0;
		if (compiler->scratches >= 4)
			saved_reg_list[saved_reg_count++] = 3;
		if (compiler->scratches >= 3)
			saved_reg_list[saved_reg_count++] = 2;
		if (op >= SLJIT_DIV_UW)
			saved_reg_list[saved_reg_count++] = 1;

		if (saved_reg_count > 0) {
			FAIL_IF(push_inst(compiler, STR | 0x2d0000 | (saved_reg_count >= 3 ? 16 : 8)
				| (saved_reg_list[0] << 12) /* str rX, [sp, #-8/-16]! */));
			if (saved_reg_count >= 2) {
				SLJIT_ASSERT(saved_reg_list[1] < 8);
				FAIL_IF(push_inst(compiler, STR | 0x8d0004 | (saved_reg_list[1] << 12) /* str rX, [sp, #4] */));
			}
			if (saved_reg_count >= 3) {
				SLJIT_ASSERT(saved_reg_list[2] < 8);
				FAIL_IF(push_inst(compiler, STR | 0x8d0008 | (saved_reg_list[2] << 12) /* str rX, [sp, #8] */));
			}
		}

#if defined(__GNUC__)
		FAIL_IF(sljit_emit_ijump(compiler, SLJIT_FAST_CALL, SLJIT_IMM,
			((op | 0x2) == SLJIT_DIV_UW ? SLJIT_FUNC_ADDR(__aeabi_uidivmod) : SLJIT_FUNC_ADDR(__aeabi_idivmod))));
#else
#error "Software divmod functions are needed"
#endif

		if (saved_reg_count > 0) {
			if (saved_reg_count >= 3) {
				SLJIT_ASSERT(saved_reg_list[2] < 8);
				FAIL_IF(push_inst(compiler, LDR | 0x8d0008 | (saved_reg_list[2] << 12) /* ldr rX, [sp, #8] */));
			}
			if (saved_reg_count >= 2) {
				SLJIT_ASSERT(saved_reg_list[1] < 8);
				FAIL_IF(push_inst(compiler, LDR | 0x8d0004 | (saved_reg_list[1] << 12) /* ldr rX, [sp, #4] */));
			}
			return push_inst(compiler, (LDR ^ (1 << 24)) | 0x8d0000 | (sljit_uw)(saved_reg_count >= 3 ? 16 : 8)
				| (saved_reg_list[0] << 12) /* ldr rX, [sp], #8/16 */);
		}
		return SLJIT_SUCCESS;

	case SLJIT_ENDBR:
	case SLJIT_SKIP_FRAMES_BEFORE_RETURN:
		return SLJIT_SUCCESS;
	}

	return SLJIT_SUCCESS;
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compiler, sljit_s32 op,
	sljit_s32 dst, sljit_sw dstw,
	sljit_s32 src, sljit_sw srcw)
{
	CHECK_ERROR();
	CHECK(check_sljit_emit_op1(compiler, op, dst, dstw, src, srcw));
	ADJUST_LOCAL_OFFSET(dst, dstw);
	ADJUST_LOCAL_OFFSET(src, srcw);

	switch (GET_OPCODE(op)) {
	case SLJIT_MOV:
	case SLJIT_MOV_U32:
	case SLJIT_MOV_S32:
	case SLJIT_MOV32:
	case SLJIT_MOV_P:
		return emit_op(compiler, SLJIT_MOV, ALLOW_ANY_IMM, dst, dstw, TMP_REG1, 0, src, srcw);

	case SLJIT_MOV_U8:
		return emit_op(compiler, SLJIT_MOV_U8, ALLOW_ANY_IMM | BYTE_SIZE, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_u8)srcw : srcw);

	case SLJIT_MOV_S8:
		return emit_op(compiler, SLJIT_MOV_S8, ALLOW_ANY_IMM | SIGNED | BYTE_SIZE, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_s8)srcw : srcw);

	case SLJIT_MOV_U16:
		return emit_op(compiler, SLJIT_MOV_U16, ALLOW_ANY_IMM | HALF_SIZE, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_u16)srcw : srcw);

	case SLJIT_MOV_S16:
		return emit_op(compiler, SLJIT_MOV_S16, ALLOW_ANY_IMM | SIGNED | HALF_SIZE, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_s16)srcw : srcw);

	case SLJIT_NOT:
		return emit_op(compiler, op, ALLOW_ANY_IMM, dst, dstw, TMP_REG1, 0, src, srcw);

	case SLJIT_CLZ:
	case SLJIT_CTZ:
		return emit_op(compiler, op, 0, dst, dstw, TMP_REG1, 0, src, srcw);
	}

	return SLJIT_SUCCESS;
}
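/* Sketch (added; an illustration, not original code): the sized moves above
   narrow immediates at compile time, so these two calls emit the same
   instruction, because (sljit_u8)0x1ff == 0xff:

	sljit_emit_op1(compiler, SLJIT_MOV_U8, SLJIT_R0, 0, SLJIT_IMM, 0x1ff);
	sljit_emit_op1(compiler, SLJIT_MOV_U8, SLJIT_R0, 0, SLJIT_IMM, 0xff);
*/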
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compiler, sljit_s32 op,
	sljit_s32 dst, sljit_sw dstw,
	sljit_s32 src1, sljit_sw src1w,
	sljit_s32 src2, sljit_sw src2w)
{
	CHECK_ERROR();
	CHECK(check_sljit_emit_op2(compiler, op, 0, dst, dstw, src1, src1w, src2, src2w));
	ADJUST_LOCAL_OFFSET(dst, dstw);
	ADJUST_LOCAL_OFFSET(src1, src1w);
	ADJUST_LOCAL_OFFSET(src2, src2w);

	switch (GET_OPCODE(op)) {
	case SLJIT_ADD:
	case SLJIT_ADDC:
	case SLJIT_SUB:
	case SLJIT_SUBC:
		return emit_op(compiler, op, ALLOW_IMM | ALLOW_NEG_IMM, dst, dstw, src1, src1w, src2, src2w);

	case SLJIT_OR:
	case SLJIT_XOR:
		return emit_op(compiler, op, ALLOW_IMM, dst, dstw, src1, src1w, src2, src2w);

	case SLJIT_MUL:
		return emit_op(compiler, op, 0, dst, dstw, src1, src1w, src2, src2w);

	case SLJIT_AND:
		return emit_op(compiler, op, ALLOW_ANY_IMM, dst, dstw, src1, src1w, src2, src2w);

	case SLJIT_SHL:
	case SLJIT_LSHR:
	case SLJIT_ASHR:
		if (src2 & SLJIT_IMM) {
			compiler->shift_imm = src2w & 0x1f;
			return emit_op(compiler, op, 0, dst, dstw, TMP_REG1, 0, src1, src1w);
		}

		compiler->shift_imm = 0x20;
		return emit_op(compiler, op, 0, dst, dstw, src1, src1w, src2, src2w);
	}

	return SLJIT_SUCCESS;
}
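/* Sketch (added): immediate shift amounts are masked with 0x1f above, so an
   immediate of 33 behaves like a shift by 1; a register amount instead sets
   shift_imm to 0x20, selecting the register-shifted form:

	sljit_emit_op2(compiler, SLJIT_SHL, SLJIT_R0, 0, SLJIT_R0, 0, SLJIT_IMM, 33);
*/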
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2u(struct sljit_compiler *compiler, sljit_s32 op,
	sljit_s32 src1, sljit_sw src1w,
	sljit_s32 src2, sljit_sw src2w)
{
	CHECK_ERROR();
	CHECK(check_sljit_emit_op2(compiler, op, 1, 0, 0, src1, src1w, src2, src2w));

	SLJIT_SKIP_CHECKS(compiler);
	return sljit_emit_op2(compiler, op, TMP_REG2, 0, src1, src1w, src2, src2w);
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_src(struct sljit_compiler *compiler, sljit_s32 op,
	sljit_s32 src, sljit_sw srcw)
{
	CHECK_ERROR();
	CHECK(check_sljit_emit_op_src(compiler, op, src, srcw));
	ADJUST_LOCAL_OFFSET(src, srcw);

	switch (op) {
	case SLJIT_FAST_RETURN:
		SLJIT_ASSERT(reg_map[TMP_REG2] == 14);

		if (FAST_IS_REG(src))
			FAIL_IF(push_inst(compiler, MOV | RD(TMP_REG2) | RM(src)));
		else
			FAIL_IF(emit_op_mem(compiler, WORD_SIZE | LOAD_DATA, TMP_REG2, src, srcw, TMP_REG1));

		return push_inst(compiler, BX | RM(TMP_REG2));
	case SLJIT_SKIP_FRAMES_BEFORE_FAST_RETURN:
		return SLJIT_SUCCESS;
	case SLJIT_PREFETCH_L1:
	case SLJIT_PREFETCH_L2:
	case SLJIT_PREFETCH_L3:
	case SLJIT_PREFETCH_ONCE:
#if (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7)
		SLJIT_ASSERT(src & SLJIT_MEM);
		return emit_op_mem(compiler, PRELOAD | LOAD_DATA, TMP_PC, src, srcw, TMP_REG1);
#else /* !SLJIT_CONFIG_ARM_V7 */
		return SLJIT_SUCCESS;
#endif /* SLJIT_CONFIG_ARM_V7 */
	}

	return SLJIT_SUCCESS;
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_register_index(sljit_s32 reg)
{
	CHECK_REG_INDEX(check_sljit_get_register_index(reg));
	return reg_map[reg];
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_float_register_index(sljit_s32 reg)
{
	CHECK_REG_INDEX(check_sljit_get_float_register_index(reg));
	return (freg_map[reg] << 1);
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_custom(struct sljit_compiler *compiler,
	void *instruction, sljit_u32 size)
{
	SLJIT_UNUSED_ARG(size);
	CHECK_ERROR();
	CHECK(check_sljit_emit_op_custom(compiler, instruction, size));

	return push_inst(compiler, *(sljit_uw*)instruction);
}
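/* Sketch (added): sljit_emit_op_custom copies one raw 32-bit opcode into the
   instruction stream; e.g. the AL-conditioned ARM NOP hint encoding:

	sljit_uw nop = 0xe320f000;
	sljit_emit_op_custom(compiler, &nop, sizeof(nop));
*/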
/* --------------------------------------------------------------------- */
/* Floating point operators */
/* --------------------------------------------------------------------- */

#define FPU_LOAD (1 << 20)
#define EMIT_FPU_DATA_TRANSFER(inst, add, base, freg, offs) \
	((inst) | (sljit_uw)((add) << 23) | RN(base) | VD(freg) | (sljit_uw)(offs))
static sljit_s32 emit_fop_mem(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg, sljit_s32 arg, sljit_sw argw)
{
	sljit_uw imm;
	sljit_uw inst = VSTR_F32 | (flags & (SLJIT_32 | FPU_LOAD));

	SLJIT_ASSERT(arg & SLJIT_MEM);
	arg &= ~SLJIT_MEM;

	if (SLJIT_UNLIKELY(arg & OFFS_REG_MASK)) {
		FAIL_IF(push_inst(compiler, ADD | RD(TMP_REG2) | RN(arg & REG_MASK) | RM(OFFS_REG(arg)) | (((sljit_uw)argw & 0x3) << 7)));
		arg = TMP_REG2;
		argw = 0;
	}

	/* Fast loads and stores. */
	if (arg) {
		if (!(argw & ~0x3fc))
			return push_inst(compiler, EMIT_FPU_DATA_TRANSFER(inst, 1, arg & REG_MASK, reg, argw >> 2));
		if (!(-argw & ~0x3fc))
			return push_inst(compiler, EMIT_FPU_DATA_TRANSFER(inst, 0, arg & REG_MASK, reg, (-argw) >> 2));

		imm = get_imm((sljit_uw)argw & ~(sljit_uw)0x3fc);
		if (imm) {
			FAIL_IF(push_inst(compiler, ADD | RD(TMP_REG2) | RN(arg & REG_MASK) | imm));
			return push_inst(compiler, EMIT_FPU_DATA_TRANSFER(inst, 1, TMP_REG2, reg, (argw & 0x3fc) >> 2));
		}
		imm = get_imm((sljit_uw)-argw & ~(sljit_uw)0x3fc);
		if (imm) {
			argw = -argw;
			FAIL_IF(push_inst(compiler, SUB | RD(TMP_REG2) | RN(arg & REG_MASK) | imm));
			return push_inst(compiler, EMIT_FPU_DATA_TRANSFER(inst, 0, TMP_REG2, reg, (argw & 0x3fc) >> 2));
		}
	}

	if (arg) {
		FAIL_IF(load_immediate(compiler, TMP_REG2, (sljit_uw)argw));
		FAIL_IF(push_inst(compiler, ADD | RD(TMP_REG2) | RN(arg & REG_MASK) | RM(TMP_REG2)));
	}
	else
		FAIL_IF(load_immediate(compiler, TMP_REG2, (sljit_uw)argw));

	return push_inst(compiler, EMIT_FPU_DATA_TRANSFER(inst, 1, TMP_REG2, reg, 0));
}
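/* Note (added): VLDR/VSTR encode an 8-bit word offset, so the fast path above
   covers base +/- 1020 bytes in 4-byte steps (argw & ~0x3fc == 0); larger
   offsets fall back to materializing an address in TMP_REG2 first. */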
static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_sw_from_f64(struct sljit_compiler *compiler, sljit_s32 op,
	sljit_s32 dst, sljit_sw dstw,
	sljit_s32 src, sljit_sw srcw)
{
	op ^= SLJIT_32;

	if (src & SLJIT_MEM) {
		FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_32) | FPU_LOAD, TMP_FREG1, src, srcw));
		src = TMP_FREG1;
	}

	FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VCVT_S32_F32, op & SLJIT_32, TMP_FREG1, src, 0)));

	if (FAST_IS_REG(dst))
		return push_inst(compiler, VMOV | (1 << 20) | RD(dst) | VN(TMP_FREG1));

	/* Store the integer value from a VFP register. */
	return emit_fop_mem(compiler, 0, TMP_FREG1, dst, dstw);
}
static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_compiler *compiler, sljit_s32 op,
	sljit_s32 dst, sljit_sw dstw,
	sljit_s32 src, sljit_sw srcw)
{
	sljit_s32 dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;

	op ^= SLJIT_32;

	if (FAST_IS_REG(src))
		FAIL_IF(push_inst(compiler, VMOV | RD(src) | VN(TMP_FREG1)));
	else if (src & SLJIT_MEM) {
		/* Load the integer value into a VFP register. */
		FAIL_IF(emit_fop_mem(compiler, FPU_LOAD, TMP_FREG1, src, srcw));
	}
	else {
		FAIL_IF(load_immediate(compiler, TMP_REG1, (sljit_uw)srcw));
		FAIL_IF(push_inst(compiler, VMOV | RD(TMP_REG1) | VN(TMP_FREG1)));
	}

	FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VCVT_F32_S32, op & SLJIT_32, dst_r, TMP_FREG1, 0)));

	if (dst & SLJIT_MEM)
		return emit_fop_mem(compiler, (op & SLJIT_32), TMP_FREG1, dst, dstw);
	return SLJIT_SUCCESS;
}
static SLJIT_INLINE sljit_s32 sljit_emit_fop1_cmp(struct sljit_compiler *compiler, sljit_s32 op,
	sljit_s32 src1, sljit_sw src1w,
	sljit_s32 src2, sljit_sw src2w)
{
	op ^= SLJIT_32;

	if (src1 & SLJIT_MEM) {
		FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_32) | FPU_LOAD, TMP_FREG1, src1, src1w));
		src1 = TMP_FREG1;
	}

	if (src2 & SLJIT_MEM) {
		FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_32) | FPU_LOAD, TMP_FREG2, src2, src2w));
		src2 = TMP_FREG2;
	}

	FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VCMP_F32, op & SLJIT_32, src1, src2, 0)));
	return push_inst(compiler, VMRS);
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compiler, sljit_s32 op,
	sljit_s32 dst, sljit_sw dstw,
	sljit_s32 src, sljit_sw srcw)
{
	sljit_s32 dst_r;

	CHECK_ERROR();

	SLJIT_COMPILE_ASSERT((SLJIT_32 == 0x100), float_transfer_bit_error);
	SELECT_FOP1_OPERATION_WITH_CHECKS(compiler, op, dst, dstw, src, srcw);

	dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;

	if (GET_OPCODE(op) != SLJIT_CONV_F64_FROM_F32)
		op ^= SLJIT_32;

	if (src & SLJIT_MEM) {
		FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_32) | FPU_LOAD, dst_r, src, srcw));
		src = dst_r;
	}

	switch (GET_OPCODE(op)) {
	case SLJIT_MOV_F64:
		if (src != dst_r) {
			if (dst_r != TMP_FREG1)
				FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VMOV_F32, op & SLJIT_32, dst_r, src, 0)));
			else
				dst_r = src;
		}
		break;
	case SLJIT_NEG_F64:
		FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VNEG_F32, op & SLJIT_32, dst_r, src, 0)));
		break;
	case SLJIT_ABS_F64:
		FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VABS_F32, op & SLJIT_32, dst_r, src, 0)));
		break;
	case SLJIT_CONV_F64_FROM_F32:
		FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VCVT_F64_F32, op & SLJIT_32, dst_r, src, 0)));
		op ^= SLJIT_32;
		break;
	}

	if (dst & SLJIT_MEM)
		return emit_fop_mem(compiler, (op & SLJIT_32), dst_r, dst, dstw);
	return SLJIT_SUCCESS;
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2(struct sljit_compiler *compiler, sljit_s32 op,
	sljit_s32 dst, sljit_sw dstw,
	sljit_s32 src1, sljit_sw src1w,
	sljit_s32 src2, sljit_sw src2w)
{
	sljit_s32 dst_r;

	CHECK_ERROR();
	CHECK(check_sljit_emit_fop2(compiler, op, dst, dstw, src1, src1w, src2, src2w));
	ADJUST_LOCAL_OFFSET(dst, dstw);
	ADJUST_LOCAL_OFFSET(src1, src1w);
	ADJUST_LOCAL_OFFSET(src2, src2w);

	op ^= SLJIT_32;

	dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;

	if (src2 & SLJIT_MEM) {
		FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_32) | FPU_LOAD, TMP_FREG2, src2, src2w));
		src2 = TMP_FREG2;
	}

	if (src1 & SLJIT_MEM) {
		FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_32) | FPU_LOAD, TMP_FREG1, src1, src1w));
		src1 = TMP_FREG1;
	}

	switch (GET_OPCODE(op)) {
	case SLJIT_ADD_F64:
		FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VADD_F32, op & SLJIT_32, dst_r, src2, src1)));
		break;
	case SLJIT_SUB_F64:
		FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VSUB_F32, op & SLJIT_32, dst_r, src2, src1)));
		break;
	case SLJIT_MUL_F64:
		FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VMUL_F32, op & SLJIT_32, dst_r, src2, src1)));
		break;
	case SLJIT_DIV_F64:
		FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VDIV_F32, op & SLJIT_32, dst_r, src2, src1)));
		break;
	}

	if (dst_r == TMP_FREG1)
		FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_32), TMP_FREG1, dst, dstw));

	return SLJIT_SUCCESS;
}
#undef EMIT_FPU_DATA_TRANSFER

/* --------------------------------------------------------------------- */
/* Other instructions */
/* --------------------------------------------------------------------- */
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw)
{
	CHECK_ERROR();
	CHECK(check_sljit_emit_fast_enter(compiler, dst, dstw));
	ADJUST_LOCAL_OFFSET(dst, dstw);

	SLJIT_ASSERT(reg_map[TMP_REG2] == 14);

	if (FAST_IS_REG(dst))
		return push_inst(compiler, MOV | RD(dst) | RM(TMP_REG2));

	/* Memory. */
	return emit_op_mem(compiler, WORD_SIZE, TMP_REG2, dst, dstw, TMP_REG1);
}
/* --------------------------------------------------------------------- */
/* Conditional instructions */
/* --------------------------------------------------------------------- */

static sljit_uw get_cc(struct sljit_compiler *compiler, sljit_s32 type)
{
	switch (type) {
	case SLJIT_EQUAL:
	case SLJIT_F_EQUAL:
	case SLJIT_ORDERED_EQUAL:
	case SLJIT_UNORDERED_OR_EQUAL: /* Not supported. */
		return 0x00000000;

	case SLJIT_NOT_EQUAL:
	case SLJIT_F_NOT_EQUAL:
	case SLJIT_UNORDERED_OR_NOT_EQUAL:
	case SLJIT_ORDERED_NOT_EQUAL: /* Not supported. */
		return 0x10000000;

	case SLJIT_CARRY:
		if (compiler->status_flags_state & SLJIT_CURRENT_FLAGS_ADD)
			return 0x20000000;
		/* fallthrough */

	case SLJIT_LESS:
		return 0x30000000;

	case SLJIT_NOT_CARRY:
		if (compiler->status_flags_state & SLJIT_CURRENT_FLAGS_ADD)
			return 0x30000000;
		/* fallthrough */

	case SLJIT_GREATER_EQUAL:
		return 0x20000000;

	case SLJIT_GREATER:
	case SLJIT_UNORDERED_OR_GREATER:
		return 0x80000000;

	case SLJIT_LESS_EQUAL:
	case SLJIT_F_LESS_EQUAL:
	case SLJIT_ORDERED_LESS_EQUAL:
		return 0x90000000;

	case SLJIT_SIG_LESS:
	case SLJIT_UNORDERED_OR_LESS:
		return 0xb0000000;

	case SLJIT_SIG_GREATER_EQUAL:
	case SLJIT_F_GREATER_EQUAL:
	case SLJIT_ORDERED_GREATER_EQUAL:
		return 0xa0000000;

	case SLJIT_SIG_GREATER:
	case SLJIT_F_GREATER:
	case SLJIT_ORDERED_GREATER:
		return 0xc0000000;

	case SLJIT_SIG_LESS_EQUAL:
	case SLJIT_UNORDERED_OR_LESS_EQUAL:
		return 0xd0000000;

	case SLJIT_OVERFLOW:
		if (!(compiler->status_flags_state & (SLJIT_CURRENT_FLAGS_ADD | SLJIT_CURRENT_FLAGS_SUB)))
			return 0x10000000;
		/* fallthrough */

	case SLJIT_UNORDERED:
		return 0x60000000;

	case SLJIT_NOT_OVERFLOW:
		if (!(compiler->status_flags_state & (SLJIT_CURRENT_FLAGS_ADD | SLJIT_CURRENT_FLAGS_SUB)))
			return 0x00000000;
		/* fallthrough */

	case SLJIT_ORDERED:
		return 0x70000000;

	case SLJIT_F_LESS:
	case SLJIT_ORDERED_LESS:
		return 0x40000000;

	case SLJIT_UNORDERED_OR_GREATER_EQUAL:
		return 0x50000000;

	default:
		SLJIT_ASSERT(type >= SLJIT_JUMP && type <= SLJIT_CALL_REG_ARG);
		return 0xe0000000;
	}
}
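/* Note (added): the values returned above are ARM condition codes already
   shifted into bits 31-28 (0x0 EQ, 0x1 NE, 0x2 CS, 0x3 CC, ..., 0xe AL), so
   predicating an instruction is a mask and an or:

	inst = (inst & ~COND_MASK) | get_cc(compiler, type);
*/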
SLJIT_API_FUNC_ATTRIBUTE struct sljit_label* sljit_emit_label(struct sljit_compiler *compiler)
{
	struct sljit_label *label;

	CHECK_ERROR_PTR();
	CHECK_PTR(check_sljit_emit_label(compiler));

	if (compiler->last_label && compiler->last_label->size == compiler->size)
		return compiler->last_label;

	label = (struct sljit_label*)ensure_abuf(compiler, sizeof(struct sljit_label));
	PTR_FAIL_IF(!label);
	set_label(label, compiler);
	return label;
}
SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compiler *compiler, sljit_s32 type)
{
	struct sljit_jump *jump;

	CHECK_ERROR_PTR();
	CHECK_PTR(check_sljit_emit_jump(compiler, type));

	jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump));
	PTR_FAIL_IF(!jump);
	set_jump(jump, compiler, type & SLJIT_REWRITABLE_JUMP);
	type &= 0xff;

	SLJIT_ASSERT(reg_map[TMP_REG1] != 14);

#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
	if (type >= SLJIT_FAST_CALL)
		PTR_FAIL_IF(prepare_blx(compiler));
	PTR_FAIL_IF(push_inst_with_unique_literal(compiler, ((EMIT_DATA_TRANSFER(WORD_SIZE | LOAD_DATA, 1,
		type <= SLJIT_JUMP ? TMP_PC : TMP_REG1, TMP_PC, 0)) & ~COND_MASK) | get_cc(compiler, type), 0));

	if (jump->flags & SLJIT_REWRITABLE_JUMP) {
		jump->addr = compiler->size;
		compiler->patches++;
	}

	if (type >= SLJIT_FAST_CALL) {
		jump->flags |= IS_BL;
		PTR_FAIL_IF(emit_blx(compiler));
	}

	if (!(jump->flags & SLJIT_REWRITABLE_JUMP))
		jump->addr = compiler->size;
#else /* !SLJIT_CONFIG_ARM_V5 */
	if (type >= SLJIT_FAST_CALL)
		jump->flags |= IS_BL;
	PTR_FAIL_IF(emit_imm(compiler, TMP_REG1, 0));
	PTR_FAIL_IF(push_inst(compiler, (((type <= SLJIT_JUMP ? BX : BLX) | RM(TMP_REG1)) & ~COND_MASK) | get_cc(compiler, type)));
	jump->addr = compiler->size;
#endif /* SLJIT_CONFIG_ARM_V5 */
	return jump;
}
#ifdef __SOFTFP__

static sljit_s32 softfloat_call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_types, sljit_s32 *src, sljit_u32 *extra_space)
{
	sljit_u32 is_tail_call = *extra_space & SLJIT_CALL_RETURN;
	sljit_u32 offset = 0;
	sljit_u32 word_arg_offset = 0;
	sljit_u32 src_offset = 4 * sizeof(sljit_sw);
	sljit_u32 float_arg_count = 0;
	sljit_s32 types = 0;
	sljit_u8 offsets[4];
	sljit_u8 *offset_ptr = offsets;

	if (src && FAST_IS_REG(*src))
		src_offset = (sljit_uw)reg_map[*src] * sizeof(sljit_sw);

	arg_types >>= SLJIT_ARG_SHIFT;

	while (arg_types) {
		types = (types << SLJIT_ARG_SHIFT) | (arg_types & SLJIT_ARG_MASK);

		switch (arg_types & SLJIT_ARG_MASK) {
		case SLJIT_ARG_TYPE_F64:
			if (offset & 0x7)
				offset += sizeof(sljit_sw);
			*offset_ptr++ = (sljit_u8)offset;
			offset += sizeof(sljit_f64);
			float_arg_count++;
			break;
		case SLJIT_ARG_TYPE_F32:
			*offset_ptr++ = (sljit_u8)offset;
			offset += sizeof(sljit_f32);
			float_arg_count++;
			break;
		default:
			*offset_ptr++ = (sljit_u8)offset;
			offset += sizeof(sljit_sw);
			word_arg_offset += sizeof(sljit_sw);
			break;
		}

		arg_types >>= SLJIT_ARG_SHIFT;
	}

	if (offset > 4 * sizeof(sljit_sw) && (!is_tail_call || offset > compiler->args_size)) {
		/* Keep lr register on the stack. */
		if (is_tail_call)
			offset += sizeof(sljit_sw);

		offset = ((offset - 4 * sizeof(sljit_sw)) + 0x7) & ~(sljit_uw)0x7;

		*extra_space = offset;

		if (is_tail_call)
			FAIL_IF(emit_stack_frame_release(compiler, (sljit_s32)offset));
		else
			FAIL_IF(push_inst(compiler, SUB | RD(SLJIT_SP) | RN(SLJIT_SP) | SRC2_IMM | offset));
	} else {
		if (is_tail_call)
			FAIL_IF(emit_stack_frame_release(compiler, -1));
		*extra_space = 0;
	}

	/* Process arguments in reversed direction. */
	while (types) {
		switch (types & SLJIT_ARG_MASK) {
		case SLJIT_ARG_TYPE_F64:
			float_arg_count--;
			offset = *(--offset_ptr);

			SLJIT_ASSERT((offset & 0x7) == 0);

			if (offset < 4 * sizeof(sljit_sw)) {
				if (src_offset == offset || src_offset == offset + sizeof(sljit_sw)) {
					FAIL_IF(push_inst(compiler, MOV | RD(TMP_REG1) | (src_offset >> 2)));
					*src = TMP_REG1;
				}
				FAIL_IF(push_inst(compiler, VMOV2 | 0x100000 | (offset << 10) | ((offset + sizeof(sljit_sw)) << 14) | float_arg_count));
			} else
				FAIL_IF(push_inst(compiler, VSTR_F32 | 0x800100 | RN(SLJIT_SP)
					| (float_arg_count << 12) | ((offset - 4 * sizeof(sljit_sw)) >> 2)));
			break;
		case SLJIT_ARG_TYPE_F32:
			float_arg_count--;
			offset = *(--offset_ptr);

			if (offset < 4 * sizeof(sljit_sw)) {
				if (src_offset == offset) {
					FAIL_IF(push_inst(compiler, MOV | RD(TMP_REG1) | (src_offset >> 2)));
					*src = TMP_REG1;
				}
				FAIL_IF(push_inst(compiler, VMOV | 0x100000 | (float_arg_count << 16) | (offset << 10)));
			} else
				FAIL_IF(push_inst(compiler, VSTR_F32 | 0x800000 | RN(SLJIT_SP)
					| (float_arg_count << 12) | ((offset - 4 * sizeof(sljit_sw)) >> 2)));
			break;
		default:
			word_arg_offset -= sizeof(sljit_sw);
			offset = *(--offset_ptr);

			SLJIT_ASSERT(offset >= word_arg_offset);

			if (offset != word_arg_offset) {
				if (offset < 4 * sizeof(sljit_sw)) {
					if (src_offset == offset) {
						FAIL_IF(push_inst(compiler, MOV | RD(TMP_REG1) | (src_offset >> 2)));
						*src = TMP_REG1;
					}
					else if (src_offset == word_arg_offset) {
						*src = (sljit_s32)(SLJIT_R0 + (offset >> 2));
						src_offset = offset;
					}
					FAIL_IF(push_inst(compiler, MOV | (offset << 10) | (word_arg_offset >> 2)));
				} else
					FAIL_IF(push_inst(compiler, STR | 0x800000 | RN(SLJIT_SP) | (word_arg_offset << 10) | (offset - 4 * sizeof(sljit_sw))));
			}
			break;
		}

		types >>= SLJIT_ARG_SHIFT;
	}

	return SLJIT_SUCCESS;
}
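/* Note (added): the layout built above matches the standard soft-float AAPCS,
   which is an assumption spelled out here rather than in the original
   comments: the first 16 bytes of arguments travel in r0-r3, doubles are
   8-byte aligned (possibly skipping one register), and the remainder spills
   to a stack area that is kept 8-byte aligned. */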
static sljit_s32 softfloat_post_call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_types)
{
	if ((arg_types & SLJIT_ARG_MASK) == SLJIT_ARG_TYPE_F64)
		FAIL_IF(push_inst(compiler, VMOV2 | (1 << 16) | (0 << 12) | 0));
	if ((arg_types & SLJIT_ARG_MASK) == SLJIT_ARG_TYPE_F32)
		FAIL_IF(push_inst(compiler, VMOV | (0 << 16) | (0 << 12)));

	return SLJIT_SUCCESS;
}
#else /* !__SOFTFP__ */

static sljit_s32 hardfloat_call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_types)
{
	sljit_u32 offset = SLJIT_FR0;
	sljit_u32 new_offset = SLJIT_FR0;
	sljit_u32 f32_offset = 0;

	/* Remove return value. */
	arg_types >>= SLJIT_ARG_SHIFT;

	while (arg_types) {
		switch (arg_types & SLJIT_ARG_MASK) {
		case SLJIT_ARG_TYPE_F64:
			if (offset != new_offset)
				FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VMOV_F32,
					SLJIT_32, new_offset, offset, 0)));

			new_offset++;
			offset++;
			break;
		case SLJIT_ARG_TYPE_F32:
			if (f32_offset != 0) {
				FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VMOV_F32,
					0x400000, f32_offset, offset, 0)));
				f32_offset = 0;
			} else {
				if (offset != new_offset)
					FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VMOV_F32,
						0, new_offset, offset, 0)));
				f32_offset = new_offset;
				new_offset++;
			}
			offset++;
			break;
		}

		arg_types >>= SLJIT_ARG_SHIFT;
	}

	return SLJIT_SUCCESS;
}

#endif /* __SOFTFP__ */

#undef EMIT_FPU_OPERATION
SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compiler *compiler, sljit_s32 type,
	sljit_s32 arg_types)
{
#ifdef __SOFTFP__
	struct sljit_jump *jump;
	sljit_u32 extra_space = (sljit_u32)type;
#endif

	CHECK_ERROR_PTR();
	CHECK_PTR(check_sljit_emit_call(compiler, type, arg_types));

#ifdef __SOFTFP__
	if ((type & 0xff) != SLJIT_CALL_REG_ARG) {
		PTR_FAIL_IF(softfloat_call_with_args(compiler, arg_types, NULL, &extra_space));
		SLJIT_ASSERT((extra_space & 0x7) == 0);

		if ((type & SLJIT_CALL_RETURN) && extra_space == 0)
			type = SLJIT_JUMP | (type & SLJIT_REWRITABLE_JUMP);

		SLJIT_SKIP_CHECKS(compiler);
		jump = sljit_emit_jump(compiler, type);
		PTR_FAIL_IF(jump == NULL);

		if (extra_space > 0) {
			if (type & SLJIT_CALL_RETURN)
				PTR_FAIL_IF(push_inst(compiler, EMIT_DATA_TRANSFER(WORD_SIZE | LOAD_DATA, 1,
					TMP_REG2, SLJIT_SP, extra_space - sizeof(sljit_sw))));

			PTR_FAIL_IF(push_inst(compiler, ADD | RD(SLJIT_SP) | RN(SLJIT_SP) | SRC2_IMM | extra_space));

			if (type & SLJIT_CALL_RETURN) {
				PTR_FAIL_IF(push_inst(compiler, BX | RM(TMP_REG2)));
				return jump;
			}
		}

		SLJIT_ASSERT(!(type & SLJIT_CALL_RETURN));
		PTR_FAIL_IF(softfloat_post_call_with_args(compiler, arg_types));
		return jump;
	}
#endif /* __SOFTFP__ */

	if (type & SLJIT_CALL_RETURN) {
		PTR_FAIL_IF(emit_stack_frame_release(compiler, -1));
		type = SLJIT_JUMP | (type & SLJIT_REWRITABLE_JUMP);
	}

#ifndef __SOFTFP__
	if ((type & 0xff) != SLJIT_CALL_REG_ARG)
		PTR_FAIL_IF(hardfloat_call_with_args(compiler, arg_types));
#endif /* !__SOFTFP__ */

	SLJIT_SKIP_CHECKS(compiler);
	return sljit_emit_jump(compiler, type);
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 src, sljit_sw srcw)
{
	struct sljit_jump *jump;

	CHECK_ERROR();
	CHECK(check_sljit_emit_ijump(compiler, type, src, srcw));
	ADJUST_LOCAL_OFFSET(src, srcw);

	SLJIT_ASSERT(reg_map[TMP_REG1] != 14);

	if (!(src & SLJIT_IMM)) {
		if (FAST_IS_REG(src)) {
			SLJIT_ASSERT(reg_map[src] != 14);
			return push_inst(compiler, (type <= SLJIT_JUMP ? BX : BLX) | RM(src));
		}

		SLJIT_ASSERT(src & SLJIT_MEM);
		FAIL_IF(emit_op_mem(compiler, WORD_SIZE | LOAD_DATA, TMP_REG1, src, srcw, TMP_REG1));
		return push_inst(compiler, (type <= SLJIT_JUMP ? BX : BLX) | RM(TMP_REG1));
	}

	/* These jumps are converted to jump/call instructions when possible. */
	jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump));
	FAIL_IF(!jump);
	set_jump(jump, compiler, JUMP_ADDR | ((type >= SLJIT_FAST_CALL) ? IS_BL : 0));
	jump->u.target = (sljit_uw)srcw;

#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
	if (type >= SLJIT_FAST_CALL)
		FAIL_IF(prepare_blx(compiler));
	FAIL_IF(push_inst_with_unique_literal(compiler, EMIT_DATA_TRANSFER(WORD_SIZE | LOAD_DATA, 1, type <= SLJIT_JUMP ? TMP_PC : TMP_REG1, TMP_PC, 0), 0));
	if (type >= SLJIT_FAST_CALL)
		FAIL_IF(emit_blx(compiler));
#else /* !SLJIT_CONFIG_ARM_V5 */
	FAIL_IF(emit_imm(compiler, TMP_REG1, 0));
	FAIL_IF(push_inst(compiler, (type <= SLJIT_JUMP ? BX : BLX) | RM(TMP_REG1)));
#endif /* SLJIT_CONFIG_ARM_V5 */
	jump->addr = compiler->size;
	return SLJIT_SUCCESS;
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compiler, sljit_s32 type,
	sljit_s32 arg_types,
	sljit_s32 src, sljit_sw srcw)
{
#ifdef __SOFTFP__
	sljit_u32 extra_space = (sljit_u32)type;
#endif

	CHECK_ERROR();
	CHECK(check_sljit_emit_icall(compiler, type, arg_types, src, srcw));

	if (src & SLJIT_MEM) {
		FAIL_IF(emit_op_mem(compiler, WORD_SIZE | LOAD_DATA, TMP_REG1, src, srcw, TMP_REG1));
		src = TMP_REG1;
	}

	if ((type & SLJIT_CALL_RETURN) && (src >= SLJIT_FIRST_SAVED_REG && src <= SLJIT_S0)) {
		FAIL_IF(push_inst(compiler, MOV | RD(TMP_REG1) | RM(src)));
		src = TMP_REG1;
	}

#ifdef __SOFTFP__
	if ((type & 0xff) != SLJIT_CALL_REG_ARG) {
		FAIL_IF(softfloat_call_with_args(compiler, arg_types, &src, &extra_space));
		SLJIT_ASSERT((extra_space & 0x7) == 0);

		if ((type & SLJIT_CALL_RETURN) && extra_space == 0)
			type = SLJIT_JUMP;

		SLJIT_SKIP_CHECKS(compiler);
		FAIL_IF(sljit_emit_ijump(compiler, type, src, srcw));

		if (extra_space > 0) {
			if (type & SLJIT_CALL_RETURN)
				FAIL_IF(push_inst(compiler, EMIT_DATA_TRANSFER(WORD_SIZE | LOAD_DATA, 1,
					TMP_REG2, SLJIT_SP, extra_space - sizeof(sljit_sw))));

			FAIL_IF(push_inst(compiler, ADD | RD(SLJIT_SP) | RN(SLJIT_SP) | SRC2_IMM | extra_space));

			if (type & SLJIT_CALL_RETURN)
				return push_inst(compiler, BX | RM(TMP_REG2));
		}

		SLJIT_ASSERT(!(type & SLJIT_CALL_RETURN));
		return softfloat_post_call_with_args(compiler, arg_types);
	}
#endif /* __SOFTFP__ */

	if (type & SLJIT_CALL_RETURN) {
		FAIL_IF(emit_stack_frame_release(compiler, -1));
		type = SLJIT_JUMP;
	}

#ifndef __SOFTFP__
	if ((type & 0xff) != SLJIT_CALL_REG_ARG)
		FAIL_IF(hardfloat_call_with_args(compiler, arg_types));
#endif /* !__SOFTFP__ */

	SLJIT_SKIP_CHECKS(compiler);
	return sljit_emit_ijump(compiler, type, src, srcw);
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *compiler, sljit_s32 op,
	sljit_s32 dst, sljit_sw dstw,
	sljit_s32 type)
{
	sljit_s32 dst_reg, flags = GET_ALL_FLAGS(op);
	sljit_uw cc, ins;

	CHECK_ERROR();
	CHECK(check_sljit_emit_op_flags(compiler, op, dst, dstw, type));
	ADJUST_LOCAL_OFFSET(dst, dstw);

	op = GET_OPCODE(op);
	cc = get_cc(compiler, type);
	dst_reg = FAST_IS_REG(dst) ? dst : TMP_REG1;

	if (op < SLJIT_ADD) {
		FAIL_IF(push_inst(compiler, MOV | RD(dst_reg) | SRC2_IMM | 0));
		FAIL_IF(push_inst(compiler, ((MOV | RD(dst_reg) | SRC2_IMM | 1) & ~COND_MASK) | cc));
		if (dst & SLJIT_MEM)
			return emit_op_mem(compiler, WORD_SIZE, TMP_REG1, dst, dstw, TMP_REG2);
		return SLJIT_SUCCESS;
	}

	ins = (op == SLJIT_AND ? AND : (op == SLJIT_OR ? ORR : EOR));

	if (dst & SLJIT_MEM)
		FAIL_IF(emit_op_mem(compiler, WORD_SIZE | LOAD_DATA, TMP_REG1, dst, dstw, TMP_REG2));

	FAIL_IF(push_inst(compiler, ((ins | RD(dst_reg) | RN(dst_reg) | SRC2_IMM | 1) & ~COND_MASK) | cc));

	if (op == SLJIT_AND)
		FAIL_IF(push_inst(compiler, ((ins | RD(dst_reg) | RN(dst_reg) | SRC2_IMM | 0) & ~COND_MASK) | (cc ^ 0x10000000)));

	if (dst & SLJIT_MEM)
		FAIL_IF(emit_op_mem(compiler, WORD_SIZE, TMP_REG1, dst, dstw, TMP_REG2));

	if (flags & SLJIT_SET_Z)
		return push_inst(compiler, MOV | SET_FLAGS | RD(TMP_REG2) | RM(dst_reg));
	return SLJIT_SUCCESS;
}
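/* Usage sketch (added; an assumption about typical use): materializing a
   comparison result as 0/1 in a register:

	sljit_emit_op2u(compiler, SLJIT_SUB | SLJIT_SET_Z, SLJIT_R0, 0, SLJIT_R1, 0);
	sljit_emit_op_flags(compiler, SLJIT_MOV, SLJIT_R2, 0, SLJIT_EQUAL);
*/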
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compiler *compiler, sljit_s32 type,
	sljit_s32 dst_reg,
	sljit_s32 src, sljit_sw srcw)
{
	sljit_uw cc, tmp;

	CHECK_ERROR();
	CHECK(check_sljit_emit_cmov(compiler, type, dst_reg, src, srcw));

	dst_reg &= ~SLJIT_32;

	cc = get_cc(compiler, type);

	if (SLJIT_UNLIKELY(src & SLJIT_IMM)) {
		tmp = get_imm((sljit_uw)srcw);
		if (tmp)
			return push_inst(compiler, ((MOV | RD(dst_reg) | tmp) & ~COND_MASK) | cc);

		tmp = get_imm(~(sljit_uw)srcw);
		if (tmp)
			return push_inst(compiler, ((MVN | RD(dst_reg) | tmp) & ~COND_MASK) | cc);

#if (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7)
		tmp = (sljit_uw)srcw;
		FAIL_IF(push_inst(compiler, (MOVW & ~COND_MASK) | cc | RD(dst_reg) | ((tmp << 4) & 0xf0000) | (tmp & 0xfff)));
		if (tmp <= 0xffff)
			return SLJIT_SUCCESS;
		return push_inst(compiler, (MOVT & ~COND_MASK) | cc | RD(dst_reg) | ((tmp >> 12) & 0xf0000) | ((tmp >> 16) & 0xfff));
#else /* !SLJIT_CONFIG_ARM_V7 */
		FAIL_IF(load_immediate(compiler, TMP_REG1, (sljit_uw)srcw));
		src = TMP_REG1;
#endif /* SLJIT_CONFIG_ARM_V7 */
	}

	return push_inst(compiler, ((MOV | RD(dst_reg) | RM(src)) & ~COND_MASK) | cc);
}
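/* Usage sketch (added; illustrative only): a conditional move after setting
   flags; the immediate path above tries MOV, then MVN, then MOVW/MOVT on
   ARMv7:

	sljit_emit_cmov(compiler, SLJIT_LESS, SLJIT_R0, SLJIT_IMM, 0);
*/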
static sljit_s32 update_mem_addr(struct sljit_compiler *compiler, sljit_s32 *mem, sljit_sw *memw, sljit_s32 max_offset)
{
	sljit_s32 arg = *mem;
	sljit_sw argw = *memw;
	sljit_uw imm;
#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
	sljit_sw mask = max_offset >= 0x100 ? 0xfff : 0xff;
#else /* !SLJIT_CONFIG_ARM_V5 */
	sljit_sw mask = 0xfff;

	SLJIT_ASSERT(max_offset >= 0x100);
#endif /* SLJIT_CONFIG_ARM_V5 */

	*mem = TMP_REG1;

	if (SLJIT_UNLIKELY(arg & OFFS_REG_MASK)) {
		*memw = 0;
		return push_inst(compiler, ADD | RD(TMP_REG1) | RN(arg & REG_MASK) | RM(OFFS_REG(arg)) | ((sljit_uw)(argw & 0x3) << 7));
	}

	arg &= REG_MASK;

	if (arg) {
		if (argw <= max_offset && argw >= -mask) {
			*mem = arg;
			return SLJIT_SUCCESS;
		}

		if (argw < 0) {
			imm = get_imm((sljit_uw)(-argw & ~mask));

			if (imm) {
				*memw = -(-argw & mask);
				return push_inst(compiler, SUB | RD(TMP_REG1) | RN(arg) | imm);
			}
		} else if ((argw & mask) <= max_offset) {
			imm = get_imm((sljit_uw)(argw & ~mask));

			if (imm) {
				*memw = argw & mask;
				return push_inst(compiler, ADD | RD(TMP_REG1) | RN(arg) | imm);
			}
		} else {
			imm = get_imm((sljit_uw)((argw | mask) + 1));

			if (imm) {
				*memw = (argw & mask) - (mask + 1);
				return push_inst(compiler, ADD | RD(TMP_REG1) | RN(arg) | imm);
			}
		}
	}

	imm = (sljit_uw)(argw & ~mask);

	if ((argw & mask) > max_offset) {
		imm += (sljit_uw)(mask + 1);
		*memw = (argw & mask) - (mask + 1);
	} else
		*memw = argw & mask;

	FAIL_IF(load_immediate(compiler, TMP_REG1, imm));

	if (arg == 0)
		return SLJIT_SUCCESS;

	return push_inst(compiler, ADD | RD(TMP_REG1) | RN(TMP_REG1) | RM(arg));
}
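/* Worked example (added): with mask == 0xfff, a large enough max_offset and
   *memw == 0x12345, the code above emits ADD TMP_REG1, base, #0x12000 and
   reduces *memw to 0x345, which then fits the addressing mode of the
   following load or store. */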
#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)

static sljit_s32 sljit_emit_mem_unaligned(struct sljit_compiler *compiler, sljit_s32 type,
	sljit_s32 reg,
	sljit_s32 mem, sljit_sw memw)
{
	sljit_s32 flags, steps;
	sljit_uw add, shift;

	switch (type & 0xff) {
	case SLJIT_MOV_U8:
	case SLJIT_MOV_S8:
		flags = BYTE_SIZE;
		if (!(type & SLJIT_MEM_STORE))
			flags |= LOAD_DATA;
		if ((type & 0xff) == SLJIT_MOV_S8)
			flags |= SIGNED;

		return emit_op_mem(compiler, flags, reg, mem, memw, TMP_REG1);

	case SLJIT_MOV_U16:
		FAIL_IF(update_mem_addr(compiler, &mem, &memw, 0xfff - 1));
		flags = BYTE_SIZE;
		steps = 1;
		break;

	case SLJIT_MOV_S16:
		FAIL_IF(update_mem_addr(compiler, &mem, &memw, 0xff - 1));
		flags = BYTE_SIZE | SIGNED;
		steps = 1;
		break;

	default:
		if (type & SLJIT_MEM_ALIGNED_32) {
			flags = WORD_SIZE;
			if (!(type & SLJIT_MEM_STORE))
				flags |= LOAD_DATA;

			return emit_op_mem(compiler, flags, reg, mem, memw, TMP_REG1);
		}

		if (!(type & SLJIT_MEM_ALIGNED_16)) {
			FAIL_IF(update_mem_addr(compiler, &mem, &memw, 0xfff - 3));
			flags = BYTE_SIZE;
			steps = 3;
			break;
		}

		FAIL_IF(update_mem_addr(compiler, &mem, &memw, 0xff - 2));

		add = 1;
		if (memw < 0) {
			add = 0;
			memw = -memw;
		}

		if (type & SLJIT_MEM_STORE) {
			FAIL_IF(push_inst(compiler, EMIT_DATA_TRANSFER(HALF_SIZE, add, reg, mem, TYPE2_TRANSFER_IMM(memw))));
			FAIL_IF(push_inst(compiler, MOV | RD(TMP_REG2) | RM(reg) | (16 << 7) | (2 << 4)));

			if (!add) {
				memw -= 2;
				if (memw <= 0) {
					memw = -memw;
					add = 1;
				}
			} else
				memw += 2;

			return push_inst(compiler, EMIT_DATA_TRANSFER(HALF_SIZE, add, TMP_REG2, mem, TYPE2_TRANSFER_IMM(memw)));
		}

		if (reg == mem) {
			FAIL_IF(push_inst(compiler, MOV | RD(TMP_REG1) | RM(mem)));
			mem = TMP_REG1;
		}

		FAIL_IF(push_inst(compiler, EMIT_DATA_TRANSFER(HALF_SIZE | LOAD_DATA, add, reg, mem, TYPE2_TRANSFER_IMM(memw))));

		if (!add) {
			memw -= 2;
			if (memw <= 0) {
				memw = -memw;
				add = 1;
			}
		} else
			memw += 2;

		FAIL_IF(push_inst(compiler, EMIT_DATA_TRANSFER(HALF_SIZE | LOAD_DATA, add, TMP_REG2, mem, TYPE2_TRANSFER_IMM(memw))));
		return push_inst(compiler, ORR | RD(reg) | RN(reg) | RM(TMP_REG2) | (16 << 7));
	}

	SLJIT_ASSERT(steps > 0);

	add = 1;
	if (memw < 0) {
		add = 0;
		memw = -memw;
	}

	if (type & SLJIT_MEM_STORE) {
		FAIL_IF(push_inst(compiler, EMIT_DATA_TRANSFER(BYTE_SIZE, add, reg, mem, memw)));
		FAIL_IF(push_inst(compiler, MOV | RD(TMP_REG2) | RM(reg) | (8 << 7) | (2 << 4)));

		while (1) {
			if (!add) {
				memw -= 1;
				if (memw == 0)
					add = 1;
			} else
				memw += 1;

			FAIL_IF(push_inst(compiler, EMIT_DATA_TRANSFER(BYTE_SIZE, add, TMP_REG2, mem, memw)));

			if (--steps == 0)
				return SLJIT_SUCCESS;

			FAIL_IF(push_inst(compiler, MOV | RD(TMP_REG2) | RM(TMP_REG2) | (8 << 7) | (2 << 4)));
		}
	}

	if (reg == mem) {
		FAIL_IF(push_inst(compiler, MOV | RD(TMP_REG1) | RM(mem)));
		mem = TMP_REG1;
	}

	shift = 8;
	FAIL_IF(push_inst(compiler, EMIT_DATA_TRANSFER(BYTE_SIZE | LOAD_DATA, add, reg, mem, memw)));

	do {
		if (!add) {
			memw -= 1;
			if (memw == 0)
				add = 1;
		} else
			memw += 1;

		if (steps > 1) {
			FAIL_IF(push_inst(compiler, EMIT_DATA_TRANSFER(BYTE_SIZE | LOAD_DATA, add, TMP_REG2, mem, memw)));
			FAIL_IF(push_inst(compiler, ORR | RD(reg) | RN(reg) | RM(TMP_REG2) | (shift << 7)));
			shift += 8;
		}
	} while (--steps != 0);

	flags |= LOAD_DATA;

	if (flags & SIGNED)
		FAIL_IF(push_inst(compiler, EMIT_DATA_TRANSFER(flags, add, TMP_REG2, mem, TYPE2_TRANSFER_IMM(memw))));
	else
		FAIL_IF(push_inst(compiler, EMIT_DATA_TRANSFER(flags, add, TMP_REG2, mem, memw)));

	return push_inst(compiler, ORR | RD(reg) | RN(reg) | RM(TMP_REG2) | (shift << 7));
}

#endif /* SLJIT_CONFIG_ARM_V5 */
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compiler, sljit_s32 type,
	sljit_s32 reg,
	sljit_s32 mem, sljit_sw memw)
{
	sljit_s32 flags;
	sljit_uw is_type1_transfer, inst;

	CHECK_ERROR();
	CHECK(check_sljit_emit_mem(compiler, type, reg, mem, memw));

	if (type & SLJIT_MEM_UNALIGNED)
		return sljit_emit_mem_unaligned(compiler, type, reg, mem, memw);

	is_type1_transfer = 1;

	switch (type & 0xff) {
	case SLJIT_MOV:
	case SLJIT_MOV_U32:
	case SLJIT_MOV_S32:
	case SLJIT_MOV32:
	case SLJIT_MOV_P:
		flags = WORD_SIZE;
		break;
	case SLJIT_MOV_U8:
		flags = BYTE_SIZE;
		break;
	case SLJIT_MOV_S8:
		if (!(type & SLJIT_MEM_STORE))
			is_type1_transfer = 0;
		flags = BYTE_SIZE | SIGNED;
		break;
	case SLJIT_MOV_U16:
		is_type1_transfer = 0;
		flags = HALF_SIZE;
		break;
	case SLJIT_MOV_S16:
		is_type1_transfer = 0;
		flags = HALF_SIZE | SIGNED;
		break;
	default:
		SLJIT_UNREACHABLE();
		flags = WORD_SIZE;
		break;
	}

	if (!(type & SLJIT_MEM_STORE))
		flags |= LOAD_DATA;

	SLJIT_ASSERT(is_type1_transfer == !!IS_TYPE1_TRANSFER(flags));

	if (SLJIT_UNLIKELY(mem & OFFS_REG_MASK)) {
		if (!is_type1_transfer && memw != 0)
			return SLJIT_ERR_UNSUPPORTED;
	}
	else {
		if (is_type1_transfer) {
			if (memw > 4095 || memw < -4095)
				return SLJIT_ERR_UNSUPPORTED;
		}
		else if (memw > 255 || memw < -255)
			return SLJIT_ERR_UNSUPPORTED;
	}

	if (type & SLJIT_MEM_SUPP)
		return SLJIT_SUCCESS;

	if (SLJIT_UNLIKELY(mem & OFFS_REG_MASK)) {
		memw &= 0x3;

		inst = EMIT_DATA_TRANSFER(flags, 1, reg, mem & REG_MASK, RM(OFFS_REG(mem)) | ((sljit_uw)memw << 7));

		if (is_type1_transfer)
			inst |= (1 << 25);

		if (type & SLJIT_MEM_PRE)
			inst |= (1 << 21);
		else
			inst ^= (1 << 24);

		return push_inst(compiler, inst);
	}

	inst = EMIT_DATA_TRANSFER(flags, 0, reg, mem & REG_MASK, 0);

	if (type & SLJIT_MEM_PRE)
		inst |= (1 << 21);
	else
		inst ^= (1 << 24);

	if (is_type1_transfer) {
		if (memw >= 0)
			inst |= (1 << 23);
		else
			memw = -memw;

		return push_inst(compiler, inst | (sljit_uw)memw);
	}

	if (memw >= 0)
		inst |= (1 << 23);
	else
		memw = -memw;

	return push_inst(compiler, inst | TYPE2_TRANSFER_IMM((sljit_uw)memw));
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fmem(struct sljit_compiler *compiler, sljit_s32 type,
	sljit_s32 freg,
	sljit_s32 mem, sljit_sw memw)
{
#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
	sljit_s32 max_offset;
	sljit_s32 dst;
#endif /* SLJIT_CONFIG_ARM_V5 */

	CHECK_ERROR();
	CHECK(check_sljit_emit_fmem(compiler, type, freg, mem, memw));

	if (type & (SLJIT_MEM_PRE | SLJIT_MEM_POST))
		return SLJIT_ERR_UNSUPPORTED;

	if (type & SLJIT_MEM_ALIGNED_32)
		return emit_fop_mem(compiler, ((type ^ SLJIT_32) & SLJIT_32) | ((type & SLJIT_MEM_STORE) ? 0 : FPU_LOAD), freg, mem, memw);

#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
	if (type & SLJIT_MEM_STORE) {
		FAIL_IF(push_inst(compiler, VMOV | (1 << 20) | VN(freg) | RD(TMP_REG2)));

		if (type & SLJIT_32)
			return sljit_emit_mem_unaligned(compiler, SLJIT_MOV | SLJIT_MEM_STORE | (type & SLJIT_MEM_ALIGNED_16), TMP_REG2, mem, memw);

		max_offset = 0xfff - 7;
		if (type & SLJIT_MEM_ALIGNED_16)
			max_offset++;

		FAIL_IF(update_mem_addr(compiler, &mem, &memw, max_offset));
		mem |= SLJIT_MEM;

		FAIL_IF(sljit_emit_mem_unaligned(compiler, SLJIT_MOV | SLJIT_MEM_STORE | (type & SLJIT_MEM_ALIGNED_16), TMP_REG2, mem, memw));

		FAIL_IF(push_inst(compiler, VMOV | (1 << 20) | VN(freg) | 0x80 | RD(TMP_REG2)));
		return sljit_emit_mem_unaligned(compiler, SLJIT_MOV | SLJIT_MEM_STORE | (type & SLJIT_MEM_ALIGNED_16), TMP_REG2, mem, memw + 4);
	}

	max_offset = (type & SLJIT_32) ? 0xfff - 3 : 0xfff - 7;
	if (type & SLJIT_MEM_ALIGNED_16)
		max_offset++;

	FAIL_IF(update_mem_addr(compiler, &mem, &memw, max_offset));

	dst = TMP_REG1;

	/* Stack offset adjustment is not needed because dst
	   is not stored on the stack when mem is SLJIT_SP. */

	if (mem == TMP_REG1) {
		dst = SLJIT_R3;

		if (compiler->scratches >= 4)
			FAIL_IF(push_inst(compiler, STR | (1 << 21) | RN(SLJIT_SP) | RD(SLJIT_R3) | 8));
	}

	mem |= SLJIT_MEM;

	FAIL_IF(sljit_emit_mem_unaligned(compiler, SLJIT_MOV | (type & SLJIT_MEM_ALIGNED_16), dst, mem, memw));
	FAIL_IF(push_inst(compiler, VMOV | VN(freg) | RD(dst)));

	if (!(type & SLJIT_32)) {
		FAIL_IF(sljit_emit_mem_unaligned(compiler, SLJIT_MOV | (type & SLJIT_MEM_ALIGNED_16), dst, mem, memw + 4));
		FAIL_IF(push_inst(compiler, VMOV | VN(freg) | 0x80 | RD(dst)));
	}

	if (dst == SLJIT_R3 && compiler->scratches >= 4)
		FAIL_IF(push_inst(compiler, (LDR ^ (0x1 << 24)) | (0x1 << 23) | RN(SLJIT_SP) | RD(SLJIT_R3) | 8));
	return SLJIT_SUCCESS;
#else /* !SLJIT_CONFIG_ARM_V5 */
	if (type & SLJIT_MEM_STORE) {
		FAIL_IF(push_inst(compiler, VMOV | (1 << 20) | VN(freg) | RD(TMP_REG2)));

		if (type & SLJIT_32)
			return emit_op_mem(compiler, WORD_SIZE, TMP_REG2, mem, memw, TMP_REG1);

		FAIL_IF(update_mem_addr(compiler, &mem, &memw, 0xfff - 4));
		mem |= SLJIT_MEM;

		FAIL_IF(emit_op_mem(compiler, WORD_SIZE, TMP_REG2, mem, memw, TMP_REG1));
		FAIL_IF(push_inst(compiler, VMOV | (1 << 20) | VN(freg) | 0x80 | RD(TMP_REG2)));
		return emit_op_mem(compiler, WORD_SIZE, TMP_REG2, mem, memw + 4, TMP_REG1);
	}

	if (type & SLJIT_32) {
		FAIL_IF(emit_op_mem(compiler, WORD_SIZE | LOAD_DATA, TMP_REG2, mem, memw, TMP_REG1));
		return push_inst(compiler, VMOV | VN(freg) | RD(TMP_REG2));
	}

	FAIL_IF(update_mem_addr(compiler, &mem, &memw, 0xfff - 4));
	mem |= SLJIT_MEM;

	FAIL_IF(emit_op_mem(compiler, WORD_SIZE | LOAD_DATA, TMP_REG2, mem, memw, TMP_REG1));
	FAIL_IF(emit_op_mem(compiler, WORD_SIZE | LOAD_DATA, TMP_REG1, mem, memw + 4, TMP_REG1));
	return push_inst(compiler, VMOV2 | VM(freg) | RD(TMP_REG2) | RN(TMP_REG1));
#endif /* SLJIT_CONFIG_ARM_V5 */
}
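/* Usage sketch (added; flag names are an assumption about this sljit
   revision): loading a double from a possibly unaligned address:

	sljit_emit_fmem(compiler, SLJIT_MOV_F64 | SLJIT_MEM_UNALIGNED,
		SLJIT_FR0, SLJIT_MEM1(SLJIT_R0), 0);
*/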
SLJIT_API_FUNC_ATTRIBUTE struct sljit_const* sljit_emit_const(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw, sljit_sw init_value)
{
	struct sljit_const *const_;
	sljit_s32 dst_r;

	CHECK_ERROR_PTR();
	CHECK_PTR(check_sljit_emit_const(compiler, dst, dstw, init_value));
	ADJUST_LOCAL_OFFSET(dst, dstw);

	dst_r = FAST_IS_REG(dst) ? dst : TMP_REG2;

#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
	PTR_FAIL_IF(push_inst_with_unique_literal(compiler,
		EMIT_DATA_TRANSFER(WORD_SIZE | LOAD_DATA, 1, dst_r, TMP_PC, 0), (sljit_uw)init_value));
	compiler->patches++;
#else /* !SLJIT_CONFIG_ARM_V5 */
	PTR_FAIL_IF(emit_imm(compiler, dst_r, init_value));
#endif /* SLJIT_CONFIG_ARM_V5 */

	const_ = (struct sljit_const*)ensure_abuf(compiler, sizeof(struct sljit_const));
	PTR_FAIL_IF(!const_);
	set_const(const_, compiler);

	if (dst & SLJIT_MEM)
		PTR_FAIL_IF(emit_op_mem(compiler, WORD_SIZE, TMP_REG2, dst, dstw, TMP_REG1));
	return const_;
}
SLJIT_API_FUNC_ATTRIBUTE struct sljit_put_label* sljit_emit_put_label(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw)
{
	struct sljit_put_label *put_label;
	sljit_s32 dst_r;

	CHECK_ERROR_PTR();
	CHECK_PTR(check_sljit_emit_put_label(compiler, dst, dstw));
	ADJUST_LOCAL_OFFSET(dst, dstw);

	dst_r = FAST_IS_REG(dst) ? dst : TMP_REG2;

#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
	PTR_FAIL_IF(push_inst_with_unique_literal(compiler, EMIT_DATA_TRANSFER(WORD_SIZE | LOAD_DATA, 1, dst_r, TMP_PC, 0), 0));
	compiler->patches++;
#else /* !SLJIT_CONFIG_ARM_V5 */
	PTR_FAIL_IF(emit_imm(compiler, dst_r, 0));
#endif /* SLJIT_CONFIG_ARM_V5 */

	put_label = (struct sljit_put_label*)ensure_abuf(compiler, sizeof(struct sljit_put_label));
	PTR_FAIL_IF(!put_label);
	set_put_label(put_label, compiler, 0);

	if (dst & SLJIT_MEM)
		PTR_FAIL_IF(emit_op_mem(compiler, WORD_SIZE, TMP_REG2, dst, dstw, TMP_REG1));
	return put_label;
}
SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_target, sljit_sw executable_offset)
{
	inline_set_jump_addr(addr, executable_offset, new_target, 1);
}
SLJIT_API_FUNC_ATTRIBUTE void sljit_set_const(sljit_uw addr, sljit_sw new_constant, sljit_sw executable_offset)
{
	inline_set_const(addr, executable_offset, (sljit_uw)new_constant, 1);
}