/*
 *    Stack-less Just-In-Time compiler
 *
 *    Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification, are
 * permitted provided that the following conditions are met:
 *
 *   1. Redistributions of source code must retain the above copyright notice, this list of
 *      conditions and the following disclaimer.
 *
 *   2. Redistributions in binary form must reproduce the above copyright notice, this list
 *      of conditions and the following disclaimer in the documentation and/or other materials
 *      provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
 * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
SLJIT_API_FUNC_ATTRIBUTE const char* sljit_get_platform_name(void)
{
	return "SPARC" SLJIT_CPUINFO;
}
/* Length of an instruction word
   Both for sparc-32 and sparc-64 */
typedef sljit_u32 sljit_ins;
#if (defined SLJIT_CACHE_FLUSH_OWN_IMPL && SLJIT_CACHE_FLUSH_OWN_IMPL)

static void sparc_cache_flush(sljit_ins *from, sljit_ins *to)
{
#if defined(__SUNPRO_C) && __SUNPRO_C < 0x590
	/* if (from == to) return */
	/* loop until from >= to */
	/* The comparison was done above. */
	/* nop is not necessary here, since the
	   sub operation has no side effect. */
#else
	if (SLJIT_UNLIKELY(from == to))
		return;

	/* Operates at least on doubleword. */
	/* Flush the last word. */
#endif
}

#endif /* (defined SLJIT_CACHE_FLUSH_OWN_IMPL && SLJIT_CACHE_FLUSH_OWN_IMPL) */
/* TMP_REG2 is not used by getput_arg */
#define TMP_REG1	(SLJIT_NUMBER_OF_REGISTERS + 2)
#define TMP_REG2	(SLJIT_NUMBER_OF_REGISTERS + 3)
#define TMP_REG3	(SLJIT_NUMBER_OF_REGISTERS + 4)
/* This register is modified by calls, which affects the instruction
   in the delay slot if it is used as a source register. */
#define TMP_LINK	(SLJIT_NUMBER_OF_REGISTERS + 5)

#define TMP_FREG1	(SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1)
#define TMP_FREG2	(SLJIT_NUMBER_OF_FLOAT_REGISTERS + 2)
static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 6] = {
	0, 8, 9, 10, 11, 23, 22, 21, 20, 19, 18, 17, 16, 29, 28, 27, 26, 25, 24, 14, 1, 12, 13, 15
};

static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = {
	0, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
};
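
/* For reference: the reg_map values are native SPARC integer register numbers,
   e.g. reg_map[SLJIT_R0] == 8 is %o0, reg_map[SLJIT_SP] == 14 is %sp, and the
   last entry, reg_map[TMP_LINK] == 15, is %o7, the register that CALL
   overwrites with the return address (hence the delay-slot warning above).
   The freg_map values are even so that each virtual float register names an
   even/odd %f pair and can hold a double. */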
/* --------------------------------------------------------------------- */
/*  Instruction forms                                                     */
/* --------------------------------------------------------------------- */
#define D(d)		((sljit_ins)reg_map[d] << 25)
#define FD(d)		((sljit_ins)freg_map[d] << 25)
#define FDN(d)		(((sljit_ins)freg_map[d] | 0x1) << 25)
#define DA(d)		((sljit_ins)(d) << 25)
#define S1(s1)		((sljit_ins)reg_map[s1] << 14)
#define FS1(s1)		((sljit_ins)freg_map[s1] << 14)
#define S1A(s1)		((sljit_ins)(s1) << 14)
#define S2(s2)		((sljit_ins)reg_map[s2])
#define FS2(s2)		((sljit_ins)freg_map[s2])
#define FS2N(s2)	((sljit_ins)freg_map[s2] | 0x1)
#define S2A(s2)		((sljit_ins)(s2))
#define IMM_ARG		0x2000
#define DOP(op)		((sljit_ins)(op) << 5)
#define IMM(imm)	(((sljit_ins)(imm) & 0x1fff) | IMM_ARG)

#define DR(dr)		(reg_map[dr])
#define DRF(dr, flags)	((sljit_s32)(reg_map[dr] | ((flags) & SET_FLAGS)))
#define OPC1(opcode)	((sljit_ins)(opcode) << 30)
#define OPC2(opcode)	((sljit_ins)(opcode) << 22)
#define OPC3(opcode)	((sljit_ins)(opcode) << 19)
#define SET_FLAGS	OPC3(0x10)
#define ADD		(OPC1(0x2) | OPC3(0x00))
#define ADDC		(OPC1(0x2) | OPC3(0x08))
#define AND		(OPC1(0x2) | OPC3(0x01))
#define ANDN		(OPC1(0x2) | OPC3(0x05))
#define CALL		(OPC1(0x1))
#define FABSS		(OPC1(0x2) | OPC3(0x34) | DOP(0x09))
#define FADDD		(OPC1(0x2) | OPC3(0x34) | DOP(0x42))
#define FADDS		(OPC1(0x2) | OPC3(0x34) | DOP(0x41))
#define FCMPD		(OPC1(0x2) | OPC3(0x35) | DOP(0x52))
#define FCMPS		(OPC1(0x2) | OPC3(0x35) | DOP(0x51))
#define FDIVD		(OPC1(0x2) | OPC3(0x34) | DOP(0x4e))
#define FDIVS		(OPC1(0x2) | OPC3(0x34) | DOP(0x4d))
#define FDTOI		(OPC1(0x2) | OPC3(0x34) | DOP(0xd2))
#define FDTOS		(OPC1(0x2) | OPC3(0x34) | DOP(0xc6))
#define FITOD		(OPC1(0x2) | OPC3(0x34) | DOP(0xc8))
#define FITOS		(OPC1(0x2) | OPC3(0x34) | DOP(0xc4))
#define FMOVS		(OPC1(0x2) | OPC3(0x34) | DOP(0x01))
#define FMULD		(OPC1(0x2) | OPC3(0x34) | DOP(0x4a))
#define FMULS		(OPC1(0x2) | OPC3(0x34) | DOP(0x49))
#define FNEGS		(OPC1(0x2) | OPC3(0x34) | DOP(0x05))
#define FSTOD		(OPC1(0x2) | OPC3(0x34) | DOP(0xc9))
#define FSTOI		(OPC1(0x2) | OPC3(0x34) | DOP(0xd1))
#define FSUBD		(OPC1(0x2) | OPC3(0x34) | DOP(0x46))
#define FSUBS		(OPC1(0x2) | OPC3(0x34) | DOP(0x45))
#define JMPL		(OPC1(0x2) | OPC3(0x38))
#define LDD		(OPC1(0x3) | OPC3(0x03))
#define LDDF		(OPC1(0x3) | OPC3(0x23))
#define LDF		(OPC1(0x3) | OPC3(0x20))
#define LDUW		(OPC1(0x3) | OPC3(0x00))
#define NOP		(OPC1(0x0) | OPC2(0x04))
#define OR		(OPC1(0x2) | OPC3(0x02))
#define ORN		(OPC1(0x2) | OPC3(0x06))
#define RDY		(OPC1(0x2) | OPC3(0x28) | S1A(0))
#define RESTORE		(OPC1(0x2) | OPC3(0x3d))
#define SAVE		(OPC1(0x2) | OPC3(0x3c))
#define SETHI		(OPC1(0x0) | OPC2(0x04))
#define SLL		(OPC1(0x2) | OPC3(0x25))
#define SLLX		(OPC1(0x2) | OPC3(0x25) | (1 << 12))
#define SRA		(OPC1(0x2) | OPC3(0x27))
#define SRAX		(OPC1(0x2) | OPC3(0x27) | (1 << 12))
#define SRL		(OPC1(0x2) | OPC3(0x26))
#define SRLX		(OPC1(0x2) | OPC3(0x26) | (1 << 12))
#define STD		(OPC1(0x3) | OPC3(0x07))
#define STDF		(OPC1(0x3) | OPC3(0x27))
#define STF		(OPC1(0x3) | OPC3(0x24))
#define STW		(OPC1(0x3) | OPC3(0x04))
#define SUB		(OPC1(0x2) | OPC3(0x04))
#define SUBC		(OPC1(0x2) | OPC3(0x0c))
#define TA		(OPC1(0x2) | OPC3(0x3a) | (8 << 25))
#define WRY		(OPC1(0x2) | OPC3(0x30) | DA(0))
#define XOR		(OPC1(0x2) | OPC3(0x03))
#define XNOR		(OPC1(0x2) | OPC3(0x07))
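
/* Illustration: a register-to-register add such as "add %o0, %o1, %o2" is
   built by OR-ing an opcode with the field macros above, e.g.

	ADD | D(SLJIT_R2) | S1(SLJIT_R0) | S2(SLJIT_R1)

   (reg_map turns the virtual indices into %o2, %o0 and %o1), while the
   immediate form replaces S2() with IMM(), whose IMM_ARG bit selects the
   simm13 encoding:

	ADD | D(SLJIT_R0) | S1(SLJIT_R0) | IMM(1)
*/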
#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
#define MAX_DISP	(0x1fffff)
#define MIN_DISP	(-0x200000)
#define DISP_MASK	((sljit_ins)0x3fffff)

#define BICC		(OPC1(0x0) | OPC2(0x2))
#define FBFCC		(OPC1(0x0) | OPC2(0x6))

#define SDIV		(OPC1(0x2) | OPC3(0x0f))
#define SMUL		(OPC1(0x2) | OPC3(0x0b))
#define UDIV		(OPC1(0x2) | OPC3(0x0e))
#define UMUL		(OPC1(0x2) | OPC3(0x0a))
#endif /* SLJIT_CONFIG_SPARC_32 */

#define SIMM_MAX	(0x0fff)
#define SIMM_MIN	(-0x1000)
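
/* Values outside this signed 13-bit range cannot go through IMM(); callers
   first build them in a temporary with load_immediate() and use the register
   form instead, as sljit_emit_enter() does below for large stack frames:

	FAIL_IF(load_immediate(compiler, TMP_REG1, -local_size));
	FAIL_IF(push_inst(compiler, SAVE | D(SLJIT_SP) | S1(SLJIT_SP) | S2(TMP_REG1), UNMOVABLE_INS));
*/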
/* dest_reg is the absolute name of the register
   Useful for reordering instructions in the delay slot. */
static sljit_s32 push_inst(struct sljit_compiler *compiler, sljit_ins ins, sljit_s32 delay_slot)
{
	sljit_ins *ptr;

	SLJIT_ASSERT((delay_slot & DST_INS_MASK) == UNMOVABLE_INS
		|| (delay_slot & DST_INS_MASK) == MOVABLE_INS
		|| (delay_slot & DST_INS_MASK) == ((ins >> 25) & 0x1f));
	ptr = (sljit_ins*)ensure_buf(compiler, sizeof(sljit_ins));
	FAIL_IF(!ptr);
	*ptr = ins;
	compiler->size++;
	compiler->delay_slot = delay_slot;
	return SLJIT_SUCCESS;
}
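
/* Illustration of the delay_slot argument: a load that defines general
   register reg passes DR(reg), instructions that must stay in place pass
   UNMOVABLE_INS, and other relocatable instructions (stores, most FP ops)
   pass MOVABLE_INS. getput_arg_fast() below, for example, tags its memory
   accesses with

	((flags & MEM_MASK) <= GPR_REG && (flags & LOAD_DATA)) ? DR(reg) : MOVABLE_INS

   so that detect_jump_type() can tell whether the preceding instruction is
   safe to move into a branch delay slot. */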
static SLJIT_INLINE sljit_ins* detect_jump_type(struct sljit_jump *jump, sljit_ins *code_ptr, sljit_ins *code, sljit_sw executable_offset)
{
	sljit_uw target_addr;
	sljit_ins *inst;
	sljit_ins saved_inst;
	sljit_sw diff;

	if (jump->flags & SLJIT_REWRITABLE_JUMP)
		return code_ptr;

	if (jump->flags & JUMP_ADDR)
		target_addr = jump->u.target;
	else {
		SLJIT_ASSERT(jump->flags & JUMP_LABEL);
		target_addr = (sljit_uw)(code + jump->u.label->size) + (sljit_uw)executable_offset;
	}

	inst = (sljit_ins *)jump->addr;

#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
	if (jump->flags & IS_CALL) {
		/* Call is always patchable on sparc 32. */
		jump->flags |= PATCH_CALL;
		if (jump->flags & IS_MOVABLE)
			jump->addr -= sizeof(sljit_ins);
	}
#else
	/* Both calls and BPr instructions shall not pass this point. */
#error "Implementation required"
#endif

	if (jump->flags & IS_COND)
		inst--;

	diff = ((sljit_sw)target_addr - (sljit_sw)(inst - 1) - executable_offset) >> 2;

	if (jump->flags & IS_MOVABLE) {
		if (diff <= MAX_DISP && diff >= MIN_DISP) {
			jump->flags |= PATCH_B;
			if (jump->flags & IS_COND) {
				saved_inst = inst[0];
				inst[0] = inst[1] ^ (1 << 28);
				inst[1] = saved_inst;
			}
			else
				inst[0] = BICC | DA(0x8);
			jump->addr = (sljit_uw)inst;
		}
	}

	diff += SSIZE_OF(ins);

	if (diff <= MAX_DISP && diff >= MIN_DISP) {
		jump->flags |= PATCH_B;
		if (jump->flags & IS_COND)
			inst[0] ^= (1 << 28);
		else
			inst[0] = BICC | DA(0x8);
		jump->addr = (sljit_uw)inst;
	}

	return code_ptr;
}
SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compiler)
{
	struct sljit_memory_fragment *buf;
	sljit_ins *code;
	sljit_ins *code_ptr;
	sljit_ins *buf_ptr;
	sljit_ins *buf_end;
	sljit_uw word_count;
	sljit_uw next_addr;
	sljit_sw executable_offset;
	sljit_sw addr;

	struct sljit_label *label;
	struct sljit_jump *jump;
	struct sljit_const *const_;
	struct sljit_put_label *put_label;

	CHECK_PTR(check_sljit_generate_code(compiler));
	reverse_buf(compiler);

	code = (sljit_ins*)SLJIT_MALLOC_EXEC(compiler->size * sizeof(sljit_ins), compiler->exec_allocator_data);
	PTR_FAIL_WITH_EXEC_IF(code);
	buf = compiler->buf;

	code_ptr = code;
	word_count = 0;
	next_addr = 0;
	executable_offset = SLJIT_EXEC_OFFSET(code);

	label = compiler->labels;
	jump = compiler->jumps;
	const_ = compiler->consts;
	put_label = compiler->put_labels;

	do {
		buf_ptr = (sljit_ins*)buf->memory;
		buf_end = buf_ptr + (buf->used_size >> 2);
		do {
			*code_ptr = *buf_ptr++;
			if (next_addr == word_count) {
				SLJIT_ASSERT(!label || label->size >= word_count);
				SLJIT_ASSERT(!jump || jump->addr >= word_count);
				SLJIT_ASSERT(!const_ || const_->addr >= word_count);
				SLJIT_ASSERT(!put_label || put_label->addr >= word_count);

				/* These structures are ordered by their address. */
				if (label && label->size == word_count) {
					/* Just recording the address. */
					label->addr = (sljit_uw)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset);
					label->size = (sljit_uw)(code_ptr - code);
					label = label->next;
				}
				if (jump && jump->addr == word_count) {
#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
					jump->addr = (sljit_uw)(code_ptr - 3);
#else
					jump->addr = (sljit_uw)(code_ptr - 6);
#endif
					code_ptr = detect_jump_type(jump, code_ptr, code, executable_offset);
					jump = jump->next;
				}
				if (const_ && const_->addr == word_count) {
					/* Just recording the address. */
					const_->addr = (sljit_uw)code_ptr;
					const_ = const_->next;
				}
				if (put_label && put_label->addr == word_count) {
					SLJIT_ASSERT(put_label->label);
					put_label->addr = (sljit_uw)code_ptr;
					put_label = put_label->next;
				}
				next_addr = compute_next_addr(label, jump, const_, put_label);
			}
			code_ptr++;
			word_count++;
		} while (buf_ptr < buf_end);

		buf = buf->next;
	} while (buf);

	if (label && label->size == word_count) {
		label->addr = (sljit_uw)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset);
		label->size = (sljit_uw)(code_ptr - code);
		label = label->next;
	}

	SLJIT_ASSERT(!label);
	SLJIT_ASSERT(!const_);
	SLJIT_ASSERT(!put_label);
	SLJIT_ASSERT(code_ptr - code <= (sljit_s32)compiler->size);

	jump = compiler->jumps;
	while (jump) {
		do {
			addr = (sljit_sw)((jump->flags & JUMP_LABEL) ? jump->u.label->addr : jump->u.target);
			buf_ptr = (sljit_ins *)jump->addr;

			if (jump->flags & PATCH_CALL) {
				addr = (addr - (sljit_sw)SLJIT_ADD_EXEC_OFFSET(buf_ptr, executable_offset)) >> 2;
				SLJIT_ASSERT(addr <= 0x1fffffff && addr >= -0x20000000);
				buf_ptr[0] = CALL | ((sljit_ins)addr & 0x3fffffff);
				break;
			}
			if (jump->flags & PATCH_B) {
				addr = (addr - (sljit_sw)SLJIT_ADD_EXEC_OFFSET(buf_ptr, executable_offset)) >> 2;
				SLJIT_ASSERT(addr <= MAX_DISP && addr >= MIN_DISP);
				buf_ptr[0] = (buf_ptr[0] & ~DISP_MASK) | ((sljit_ins)addr & DISP_MASK);
				break;
			}

			/* Set the fields of immediate loads. */
#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
			SLJIT_ASSERT(((buf_ptr[0] & 0xc1cfffff) == 0x01000000) && ((buf_ptr[1] & 0xc1f83fff) == 0x80102000));
			buf_ptr[0] |= (sljit_ins)(addr >> 10) & 0x3fffff;
			buf_ptr[1] |= (sljit_ins)addr & 0x3ff;
#else
#error "Implementation required"
#endif
		} while (0);
		jump = jump->next;
	}

	put_label = compiler->put_labels;
	while (put_label) {
		addr = (sljit_sw)put_label->label->addr;
		buf_ptr = (sljit_ins *)put_label->addr;

#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
		SLJIT_ASSERT(((buf_ptr[0] & 0xc1cfffff) == 0x01000000) && ((buf_ptr[1] & 0xc1f83fff) == 0x80102000));
		buf_ptr[0] |= (addr >> 10) & 0x3fffff;
		buf_ptr[1] |= addr & 0x3ff;
#else
#error "Implementation required"
#endif
		put_label = put_label->next;
	}

	compiler->error = SLJIT_ERR_COMPILED;
	compiler->executable_offset = executable_offset;
	compiler->executable_size = (sljit_uw)(code_ptr - code) * sizeof(sljit_ins);

	code = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(code, executable_offset);
	code_ptr = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset);

	SLJIT_CACHE_FLUSH(code, code_ptr);
	SLJIT_UPDATE_WX_FLAGS(code, code_ptr, 1);
	return code;
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type)
{
	switch (feature_type) {
	case SLJIT_HAS_FPU:
#ifdef SLJIT_IS_FPU_AVAILABLE
		return SLJIT_IS_FPU_AVAILABLE;
#else
		/* Available by default. */
		return 1;
#endif

	case SLJIT_HAS_ZERO_REGISTER:
		return 1;

#if (defined SLJIT_CONFIG_SPARC_64 && SLJIT_CONFIG_SPARC_64)
#endif

	default:
		return 0;
	}
}
/* --------------------------------------------------------------------- */
/*  Entry, exit                                                           */
/* --------------------------------------------------------------------- */

/* Creates an index in data_transfer_insts array. */
#define LOAD_DATA	0x01
#define WORD_DATA	0x00
#define BYTE_DATA	0x02
#define HALF_DATA	0x04
#define INT_DATA	0x06
#define SIGNED_DATA	0x08
/* Separates integer and floating point registers */
#define GPR_REG		0x0f
#define DOUBLE_DATA	0x10
#define SINGLE_DATA	0x12

#define MEM_MASK	0x1f

#define ARG_TEST	0x00020
#define ALT_KEEP_CACHE	0x00040
#define CUMULATIVE_OP	0x00080
#define IMM_OP		0x00100
#define MOVE_OP		0x00200
#define SRC2_IMM	0x00400

#define REG_DEST	0x00800
#define REG2_SOURCE	0x01000
#define SLOW_SRC1	0x02000
#define SLOW_SRC2	0x04000
#define SLOW_DEST	0x08000

/* SET_FLAGS (0x10 << 19) also belongs here! */
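
/* Illustration: the low five bits of these flags select a row of
   data_transfer_insts[] below (WORD_DATA | LOAD_DATA is "lduw" on sparc-32,
   BYTE_DATA | SIGNED_DATA | LOAD_DATA is "ldsb"), while the higher bits such
   as IMM_OP or CUMULATIVE_OP only steer emit_op() and never reach the emitted
   instruction word. */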
#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
#include "sljitNativeSPARC_32.c"
#else
#include "sljitNativeSPARC_64.c"
#endif
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compiler,
	sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
	sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
{
	sljit_s32 reg_index, types, tmp;
	sljit_u32 float_offset, args_offset;
	sljit_s32 saved_arg_index, scratch_arg_index, float_arg_index;

	CHECK(check_sljit_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
	set_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);

	local_size = (local_size + SLJIT_LOCALS_OFFSET + 7) & ~0x7;
	compiler->local_size = local_size;

	if (local_size <= -SIMM_MIN) {
		FAIL_IF(push_inst(compiler, SAVE | D(SLJIT_SP) | S1(SLJIT_SP) | IMM(-local_size), UNMOVABLE_INS));
	}
	else {
		FAIL_IF(load_immediate(compiler, TMP_REG1, -local_size));
		FAIL_IF(push_inst(compiler, SAVE | D(SLJIT_SP) | S1(SLJIT_SP) | S2(TMP_REG1), UNMOVABLE_INS));
	}

	arg_types >>= SLJIT_ARG_SHIFT;
	types = arg_types;
	reg_index = 24;
	float_offset = 16 * sizeof(sljit_sw);

	while (types && reg_index < 24 + 6) {
		switch (types & SLJIT_ARG_MASK) {
		case SLJIT_ARG_TYPE_F64:
			if (reg_index & 0x1) {
				FAIL_IF(push_inst(compiler, STW | DA(reg_index) | S1(SLJIT_SP) | IMM(float_offset), MOVABLE_INS));
				if (reg_index >= 24 + 6 - 1)
					break;
				FAIL_IF(push_inst(compiler, STW | DA(reg_index + 1) | S1(SLJIT_SP) | IMM(float_offset + sizeof(sljit_sw)), MOVABLE_INS));
			}
			else
				FAIL_IF(push_inst(compiler, STD | DA(reg_index) | S1(SLJIT_SP) | IMM(float_offset), MOVABLE_INS));

			float_offset += sizeof(sljit_f64);
			reg_index++;
			break;
		case SLJIT_ARG_TYPE_F32:
			FAIL_IF(push_inst(compiler, STW | DA(reg_index) | S1(SLJIT_SP) | IMM(float_offset), MOVABLE_INS));
			float_offset += sizeof(sljit_f64);
			break;
		}

		reg_index++;
		types >>= SLJIT_ARG_SHIFT;
	}

	args_offset = (16 + 1 + 6) * sizeof(sljit_sw);
	float_offset = 16 * sizeof(sljit_sw);
	reg_index = 24;
	saved_arg_index = 24;
	scratch_arg_index = 8 - 1;
	float_arg_index = 1;

	while (arg_types) {
		switch (arg_types & SLJIT_ARG_MASK) {
		case SLJIT_ARG_TYPE_F64:
			if (reg_index < 24 + 6 - 1) {
				FAIL_IF(push_inst(compiler, LDDF | FD(float_arg_index) | S1(SLJIT_SP) | IMM(float_offset), MOVABLE_INS));
			} else if (reg_index < 24 + 6) {
				FAIL_IF(push_inst(compiler, LDF | FD(float_arg_index) | S1(SLJIT_SP) | IMM(float_offset), MOVABLE_INS));
				FAIL_IF(push_inst(compiler, LDF | FD(float_arg_index) | (1 << 25) | S1A(30) | IMM(args_offset), MOVABLE_INS));
			} else {
				FAIL_IF(push_inst(compiler, LDF | FD(float_arg_index) | S1A(30) | IMM(args_offset), MOVABLE_INS));
				FAIL_IF(push_inst(compiler, LDF | FD(float_arg_index) | (1 << 25) | S1A(30) | IMM(args_offset + sizeof(sljit_sw)), MOVABLE_INS));
			}

			float_arg_index++;
			float_offset += sizeof(sljit_f64);
			reg_index++;
			args_offset += sizeof(sljit_sw);
			break;
		case SLJIT_ARG_TYPE_F32:
			if (reg_index < 24 + 6)
				FAIL_IF(push_inst(compiler, LDF | FD(float_arg_index) | S1(SLJIT_SP) | IMM(float_offset), MOVABLE_INS));
			else
				FAIL_IF(push_inst(compiler, LDF | FD(float_arg_index) | S1A(30) | IMM(args_offset), MOVABLE_INS));

			float_arg_index++;
			float_offset += sizeof(sljit_f64);
			break;
		default:
			scratch_arg_index++;

			if (!(arg_types & SLJIT_ARG_TYPE_SCRATCH_REG)) {
				tmp = saved_arg_index++;
				if (tmp == reg_index)
					break;
			}
			else
				tmp = scratch_arg_index;

			if (reg_index < 24 + 6)
				FAIL_IF(push_inst(compiler, OR | DA(tmp) | S1(0) | S2A(reg_index), tmp));
			else
				FAIL_IF(push_inst(compiler, LDUW | DA(tmp) | S1A(30) | IMM(args_offset), tmp));
			break;
		}

		reg_index++;
		args_offset += sizeof(sljit_sw);
		arg_types >>= SLJIT_ARG_SHIFT;
	}

	return SLJIT_SUCCESS;
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *compiler,
	sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
	sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
{
	CHECK(check_sljit_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
	set_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);

	compiler->local_size = (local_size + SLJIT_LOCALS_OFFSET + 7) & ~0x7;
	return SLJIT_SUCCESS;
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return_void(struct sljit_compiler *compiler)
{
	CHECK(check_sljit_emit_return_void(compiler));

	FAIL_IF(push_inst(compiler, JMPL | D(0) | S1A(31) | IMM(8), UNMOVABLE_INS));
	return push_inst(compiler, RESTORE | D(SLJIT_R0) | S1(SLJIT_R0) | S2(0), UNMOVABLE_INS);
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 src, sljit_sw srcw)
{
	CHECK(check_sljit_emit_return(compiler, op, src, srcw));

	if (TYPE_CAST_NEEDED(op) || !FAST_IS_REG(src)) {
		FAIL_IF(emit_mov_before_return(compiler, op, src, srcw));
		src = SLJIT_R0;
	}

	FAIL_IF(push_inst(compiler, JMPL | D(0) | S1A(31) | IMM(8), UNMOVABLE_INS));
	return push_inst(compiler, RESTORE | D(SLJIT_R0) | S1(src) | S2(0), UNMOVABLE_INS);
}
/* --------------------------------------------------------------------- */
/*  Operators                                                             */
/* --------------------------------------------------------------------- */

#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
#define ARCH_32_64(a, b)	a
#else
#define ARCH_32_64(a, b)	b
#endif
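
/* For example, on sparc-32 ARCH_32_64(OPC1(3) | OPC3(0x04), OPC1(3) | OPC3(0x0e))
   expands to the 32-bit "stw" form used below, while a 64-bit build would pick "stx". */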
static const sljit_ins data_transfer_insts[16 + 4] = {
/* u w s */ ARCH_32_64(OPC1(3) | OPC3(0x04) /* stw */, OPC1(3) | OPC3(0x0e) /* stx */),
/* u w l */ ARCH_32_64(OPC1(3) | OPC3(0x00) /* lduw */, OPC1(3) | OPC3(0x0b) /* ldx */),
/* u b s */ OPC1(3) | OPC3(0x05) /* stb */,
/* u b l */ OPC1(3) | OPC3(0x01) /* ldub */,
/* u h s */ OPC1(3) | OPC3(0x06) /* sth */,
/* u h l */ OPC1(3) | OPC3(0x02) /* lduh */,
/* u i s */ OPC1(3) | OPC3(0x04) /* stw */,
/* u i l */ OPC1(3) | OPC3(0x00) /* lduw */,

/* s w s */ ARCH_32_64(OPC1(3) | OPC3(0x04) /* stw */, OPC1(3) | OPC3(0x0e) /* stx */),
/* s w l */ ARCH_32_64(OPC1(3) | OPC3(0x00) /* lduw */, OPC1(3) | OPC3(0x0b) /* ldx */),
/* s b s */ OPC1(3) | OPC3(0x05) /* stb */,
/* s b l */ OPC1(3) | OPC3(0x09) /* ldsb */,
/* s h s */ OPC1(3) | OPC3(0x06) /* sth */,
/* s h l */ OPC1(3) | OPC3(0x0a) /* ldsh */,
/* s i s */ OPC1(3) | OPC3(0x04) /* stw */,
/* s i l */ ARCH_32_64(OPC1(3) | OPC3(0x00) /* lduw */, OPC1(3) | OPC3(0x08) /* ldsw */),

/* d   s */ OPC1(3) | OPC3(0x27),
/* d   l */ OPC1(3) | OPC3(0x23),
/* s   s */ OPC1(3) | OPC3(0x24),
/* s   l */ OPC1(3) | OPC3(0x20),
};
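
/* Illustration: a word load such as "lduw [%fp + 92], %o0" is produced by
   OR-ing a row of this table with the operand fields, roughly

	data_transfer_insts[WORD_DATA | LOAD_DATA] | D(SLJIT_R0) | S1A(30) | IMM(92)

   which is how getput_arg_fast() and getput_arg() below assemble their
   memory accesses. */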
/* Can perform an operation using at most 1 instruction. */
static sljit_s32 getput_arg_fast(struct sljit_compiler *compiler, sljit_u32 flags, sljit_s32 reg, sljit_s32 arg, sljit_sw argw)
{
	SLJIT_ASSERT(arg & SLJIT_MEM);

	if ((!(arg & OFFS_REG_MASK) && argw <= SIMM_MAX && argw >= SIMM_MIN)
			|| ((arg & OFFS_REG_MASK) && (argw & 0x3) == 0)) {
		/* Works for both absolute and relative addresses (immediate case). */
		if (SLJIT_UNLIKELY(flags & ARG_TEST))
			return 1;
		FAIL_IF(push_inst(compiler, data_transfer_insts[flags & MEM_MASK]
			| ((flags & MEM_MASK) <= GPR_REG ? D(reg) : FD(reg))
			| S1(arg & REG_MASK) | ((arg & OFFS_REG_MASK) ? S2(OFFS_REG(arg)) : IMM(argw)),
			((flags & MEM_MASK) <= GPR_REG && (flags & LOAD_DATA)) ? DR(reg) : MOVABLE_INS));
		return -1;
	}
	return 0;
}
/* See getput_arg below.
   Note: can_cache is called only for binary operators. Those
   operators always use word arguments without write back. */
static sljit_s32 can_cache(sljit_s32 arg, sljit_sw argw, sljit_s32 next_arg, sljit_sw next_argw)
{
	SLJIT_ASSERT((arg & SLJIT_MEM) && (next_arg & SLJIT_MEM));

	/* Simple operation except for updates. */
	if (arg & OFFS_REG_MASK) {
		if ((arg & OFFS_REG_MASK) == (next_arg & OFFS_REG_MASK) && argw == next_argw)
			return 1;
		return 0;
	}

	if (((next_argw - argw) <= SIMM_MAX && (next_argw - argw) >= SIMM_MIN))
		return 1;
	return 0;
}
/* Emit the necessary instructions. See can_cache above. */
static sljit_s32 getput_arg(struct sljit_compiler *compiler, sljit_u32 flags, sljit_s32 reg, sljit_s32 arg, sljit_sw argw, sljit_s32 next_arg, sljit_sw next_argw)
{
	sljit_s32 base, arg2, delay_slot;
	sljit_ins dest;

	SLJIT_ASSERT(arg & SLJIT_MEM);
	if (!(next_arg & SLJIT_MEM)) {
		next_arg = 0;
		next_argw = 0;
	}

	base = arg & REG_MASK;
	if (SLJIT_UNLIKELY(arg & OFFS_REG_MASK)) {
		argw &= 0x3;

		/* Using the cache. */
		if (((SLJIT_MEM | (arg & OFFS_REG_MASK)) == compiler->cache_arg) && (argw == compiler->cache_argw))
			arg2 = TMP_REG3;
		else {
			if ((arg & OFFS_REG_MASK) == (next_arg & OFFS_REG_MASK) && argw == (next_argw & 0x3)) {
				compiler->cache_arg = SLJIT_MEM | (arg & OFFS_REG_MASK);
				compiler->cache_argw = argw;
				arg2 = TMP_REG3;
			}
			else if ((flags & LOAD_DATA) && ((flags & MEM_MASK) <= GPR_REG) && reg != base && reg != OFFS_REG(arg))
				arg2 = reg;
			else /* It must be a mov operation, so tmp1 must be free to use. */
				arg2 = TMP_REG1;
			FAIL_IF(push_inst(compiler, SLL_W | D(arg2) | S1(OFFS_REG(arg)) | IMM_ARG | (sljit_ins)argw, DR(arg2)));
		}
	}
	else {
		/* Using the cache. */
		if ((compiler->cache_arg == SLJIT_MEM) && (argw - compiler->cache_argw) <= SIMM_MAX && (argw - compiler->cache_argw) >= SIMM_MIN) {
			if (argw != compiler->cache_argw) {
				FAIL_IF(push_inst(compiler, ADD | D(TMP_REG3) | S1(TMP_REG3) | IMM(argw - compiler->cache_argw), DR(TMP_REG3)));
				compiler->cache_argw = argw;
			}
			arg2 = TMP_REG3;
		}
		else {
			if ((next_argw - argw) <= SIMM_MAX && (next_argw - argw) >= SIMM_MIN) {
				compiler->cache_arg = SLJIT_MEM;
				compiler->cache_argw = argw;
				arg2 = TMP_REG3;
			}
			else if ((flags & LOAD_DATA) && ((flags & MEM_MASK) <= GPR_REG) && reg != base)
				arg2 = reg;
			else /* It must be a mov operation, so tmp1 must be free to use. */
				arg2 = TMP_REG1;
			FAIL_IF(load_immediate(compiler, arg2, argw));
		}
	}

	dest = ((flags & MEM_MASK) <= GPR_REG ? D(reg) : FD(reg));
	delay_slot = ((flags & MEM_MASK) <= GPR_REG && (flags & LOAD_DATA)) ? DR(reg) : MOVABLE_INS;
	if (!base)
		return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | dest | S1(arg2) | IMM(0), delay_slot);
	return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | dest | S1(base) | S2(arg2), delay_slot);
}
static SLJIT_INLINE sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit_u32 flags, sljit_s32 reg, sljit_s32 arg, sljit_sw argw)
{
	if (getput_arg_fast(compiler, flags, reg, arg, argw))
		return compiler->error;
	compiler->cache_arg = 0;
	compiler->cache_argw = 0;
	return getput_arg(compiler, flags, reg, arg, argw, 0, 0);
}
static SLJIT_INLINE sljit_s32 emit_op_mem2(struct sljit_compiler *compiler, sljit_u32 flags, sljit_s32 reg, sljit_s32 arg1, sljit_sw arg1w, sljit_s32 arg2, sljit_sw arg2w)
{
	if (getput_arg_fast(compiler, flags, reg, arg1, arg1w))
		return compiler->error;
	return getput_arg(compiler, flags, reg, arg1, arg1w, arg2, arg2w);
}
static sljit_s32 emit_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_u32 flags,
	sljit_s32 dst, sljit_sw dstw,
	sljit_s32 src1, sljit_sw src1w,
	sljit_s32 src2, sljit_sw src2w)
{
	/* arg1 goes to TMP_REG1 or src reg
	   arg2 goes to TMP_REG2, imm or src reg
	   TMP_REG3 can be used for caching
	   result goes to TMP_REG2, so put result can use TMP_REG1 and TMP_REG3. */
	sljit_s32 dst_r = TMP_REG2;
	sljit_s32 src1_r;
	sljit_ins src2_r = 0;
	sljit_s32 sugg_src2_r = TMP_REG2;

	if (!(flags & ALT_KEEP_CACHE)) {
		compiler->cache_arg = 0;
		compiler->cache_argw = 0;
	}

	if (dst != TMP_REG2) {
		if (FAST_IS_REG(dst)) {
			dst_r = dst;
			flags |= REG_DEST;
			if (flags & MOVE_OP)
				sugg_src2_r = dst_r;
		}
		else if ((dst & SLJIT_MEM) && !getput_arg_fast(compiler, flags | ARG_TEST, TMP_REG1, dst, dstw))
			flags |= SLOW_DEST;
	}

	if (flags & IMM_OP) {
		if ((src2 & SLJIT_IMM) && src2w) {
			if (src2w <= SIMM_MAX && src2w >= SIMM_MIN) {
				flags |= SRC2_IMM;
				src2_r = (sljit_ins)src2w;
			}
		}
		if (!(flags & SRC2_IMM) && (flags & CUMULATIVE_OP) && (src1 & SLJIT_IMM) && src1w) {
			if (src1w <= SIMM_MAX && src1w >= SIMM_MIN) {
				flags |= SRC2_IMM;
				src2_r = (sljit_ins)src1w;

				/* And swap arguments. */
				src1 = src2;
				src1w = src2w;
				src2 = SLJIT_IMM;
				/* src2w = src2_r unneeded. */
			}
		}
	}

	/* Source 1. */
	if (FAST_IS_REG(src1))
		src1_r = src1;
	else if (src1 & SLJIT_IMM) {
		if (src1w) {
			FAIL_IF(load_immediate(compiler, TMP_REG1, src1w));
			src1_r = TMP_REG1;
		}
		else
			src1_r = 0;
	}
	else {
		if (getput_arg_fast(compiler, flags | LOAD_DATA, TMP_REG1, src1, src1w))
			FAIL_IF(compiler->error);
		else
			flags |= SLOW_SRC1;
		src1_r = TMP_REG1;
	}

	/* Source 2. */
	if (FAST_IS_REG(src2)) {
		src2_r = (sljit_ins)src2;
		flags |= REG2_SOURCE;
		if ((flags & (REG_DEST | MOVE_OP)) == MOVE_OP)
			dst_r = (sljit_s32)src2_r;
	}
	else if (src2 & SLJIT_IMM) {
		if (!(flags & SRC2_IMM)) {
			if (src2w) {
				FAIL_IF(load_immediate(compiler, sugg_src2_r, src2w));
				src2_r = sugg_src2_r;
			}
			else {
				src2_r = 0;
				if (flags & MOVE_OP) {
					if (dst & SLJIT_MEM)
						dst_r = 0;
					else
						op = SLJIT_MOV;
				}
			}
		}
	}
	else {
		if (getput_arg_fast(compiler, flags | LOAD_DATA, sugg_src2_r, src2, src2w))
			FAIL_IF(compiler->error);
		else
			flags |= SLOW_SRC2;
		src2_r = sugg_src2_r;
	}

	if ((flags & (SLOW_SRC1 | SLOW_SRC2)) == (SLOW_SRC1 | SLOW_SRC2)) {
		SLJIT_ASSERT(src2_r == TMP_REG2);
		if (!can_cache(src1, src1w, src2, src2w) && can_cache(src1, src1w, dst, dstw)) {
			FAIL_IF(getput_arg(compiler, flags | LOAD_DATA, TMP_REG2, src2, src2w, src1, src1w));
			FAIL_IF(getput_arg(compiler, flags | LOAD_DATA, TMP_REG1, src1, src1w, dst, dstw));
		}
		else {
			FAIL_IF(getput_arg(compiler, flags | LOAD_DATA, TMP_REG1, src1, src1w, src2, src2w));
			FAIL_IF(getput_arg(compiler, flags | LOAD_DATA, TMP_REG2, src2, src2w, dst, dstw));
		}
	}
	else if (flags & SLOW_SRC1)
		FAIL_IF(getput_arg(compiler, flags | LOAD_DATA, TMP_REG1, src1, src1w, dst, dstw));
	else if (flags & SLOW_SRC2)
		FAIL_IF(getput_arg(compiler, flags | LOAD_DATA, sugg_src2_r, src2, src2w, dst, dstw));

	FAIL_IF(emit_single_op(compiler, op, flags, dst_r, src1_r, src2_r));

	if (dst & SLJIT_MEM) {
		if (!(flags & SLOW_DEST)) {
			getput_arg_fast(compiler, flags, dst_r, dst, dstw);
			return compiler->error;
		}
		return getput_arg(compiler, flags, dst_r, dst, dstw, 0, 0);
	}

	return SLJIT_SUCCESS;
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compiler, sljit_s32 op)
{
	CHECK(check_sljit_emit_op0(compiler, op));

	op = GET_OPCODE(op);
	switch (op) {
	case SLJIT_BREAKPOINT:
		return push_inst(compiler, TA, UNMOVABLE_INS);
	case SLJIT_NOP:
		return push_inst(compiler, NOP, UNMOVABLE_INS);
	case SLJIT_LMUL_UW:
	case SLJIT_LMUL_SW:
#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
		FAIL_IF(push_inst(compiler, (op == SLJIT_LMUL_UW ? UMUL : SMUL) | D(SLJIT_R0) | S1(SLJIT_R0) | S2(SLJIT_R1), DR(SLJIT_R0)));
		return push_inst(compiler, RDY | D(SLJIT_R1), DR(SLJIT_R1));
#else
#error "Implementation required"
#endif
	case SLJIT_DIVMOD_UW:
	case SLJIT_DIVMOD_SW:
	case SLJIT_DIV_UW:
	case SLJIT_DIV_SW:
		SLJIT_COMPILE_ASSERT((SLJIT_DIVMOD_UW & 0x2) == 0 && SLJIT_DIV_UW - 0x2 == SLJIT_DIVMOD_UW, bad_div_opcode_assignments);
#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
		if ((op | 0x2) == SLJIT_DIV_UW)
			FAIL_IF(push_inst(compiler, WRY | S1(0), MOVABLE_INS));
		else {
			FAIL_IF(push_inst(compiler, SRA | D(TMP_REG1) | S1(SLJIT_R0) | IMM(31), DR(TMP_REG1)));
			FAIL_IF(push_inst(compiler, WRY | S1(TMP_REG1), MOVABLE_INS));
		}
		if (op <= SLJIT_DIVMOD_SW)
			FAIL_IF(push_inst(compiler, OR | D(TMP_REG2) | S1(0) | S2(SLJIT_R0), DR(TMP_REG2)));
		FAIL_IF(push_inst(compiler, ((op | 0x2) == SLJIT_DIV_UW ? UDIV : SDIV) | D(SLJIT_R0) | S1(SLJIT_R0) | S2(SLJIT_R1), DR(SLJIT_R0)));
		if (op >= SLJIT_DIV_UW)
			return SLJIT_SUCCESS;
		FAIL_IF(push_inst(compiler, SMUL | D(SLJIT_R1) | S1(SLJIT_R0) | S2(SLJIT_R1), DR(SLJIT_R1)));
		return push_inst(compiler, SUB | D(SLJIT_R1) | S1(TMP_REG2) | S2(SLJIT_R1), DR(SLJIT_R1));
#else
#error "Implementation required"
#endif
	case SLJIT_SKIP_FRAMES_BEFORE_RETURN:
		return SLJIT_SUCCESS;
	}

	return SLJIT_SUCCESS;
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compiler, sljit_s32 op,
	sljit_s32 dst, sljit_sw dstw,
	sljit_s32 src, sljit_sw srcw)
{
	sljit_u32 flags = HAS_FLAGS(op) ? SET_FLAGS : 0;

	CHECK(check_sljit_emit_op1(compiler, op, dst, dstw, src, srcw));
	ADJUST_LOCAL_OFFSET(dst, dstw);
	ADJUST_LOCAL_OFFSET(src, srcw);

	op = GET_OPCODE(op);
	switch (op) {
	case SLJIT_MOV:
#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
	case SLJIT_MOV_U32:
	case SLJIT_MOV_S32:
	case SLJIT_MOV32:
#endif
	case SLJIT_MOV_P:
		return emit_op(compiler, SLJIT_MOV, flags | WORD_DATA | MOVE_OP, dst, dstw, TMP_REG1, 0, src, srcw);

	case SLJIT_MOV_U8:
		return emit_op(compiler, SLJIT_MOV_U8, flags | BYTE_DATA | MOVE_OP, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_u8)srcw : srcw);

	case SLJIT_MOV_S8:
		return emit_op(compiler, SLJIT_MOV_S8, flags | BYTE_DATA | SIGNED_DATA | MOVE_OP, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_s8)srcw : srcw);

	case SLJIT_MOV_U16:
		return emit_op(compiler, SLJIT_MOV_U16, flags | HALF_DATA | MOVE_OP, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_u16)srcw : srcw);

	case SLJIT_MOV_S16:
		return emit_op(compiler, SLJIT_MOV_S16, flags | HALF_DATA | SIGNED_DATA | MOVE_OP, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_s16)srcw : srcw);

	case SLJIT_NOT:
	case SLJIT_CLZ:
		return emit_op(compiler, op, flags, dst, dstw, TMP_REG1, 0, src, srcw);

	case SLJIT_NEG:
		return emit_op(compiler, SLJIT_SUB, flags | IMM_OP, dst, dstw, SLJIT_IMM, 0, src, srcw);
	}

	return SLJIT_SUCCESS;
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compiler, sljit_s32 op,
	sljit_s32 dst, sljit_sw dstw,
	sljit_s32 src1, sljit_sw src1w,
	sljit_s32 src2, sljit_sw src2w)
{
	sljit_u32 flags = HAS_FLAGS(op) ? SET_FLAGS : 0;

	CHECK(check_sljit_emit_op2(compiler, op, 0, dst, dstw, src1, src1w, src2, src2w));
	ADJUST_LOCAL_OFFSET(dst, dstw);
	ADJUST_LOCAL_OFFSET(src1, src1w);
	ADJUST_LOCAL_OFFSET(src2, src2w);

	op = GET_OPCODE(op);
	switch (op) {
	case SLJIT_ADD:
	case SLJIT_ADDC:
	case SLJIT_MUL:
	case SLJIT_AND:
	case SLJIT_OR:
	case SLJIT_XOR:
		return emit_op(compiler, op, flags | CUMULATIVE_OP | IMM_OP, dst, dstw, src1, src1w, src2, src2w);

	case SLJIT_SUB:
	case SLJIT_SUBC:
		return emit_op(compiler, op, flags | IMM_OP, dst, dstw, src1, src1w, src2, src2w);

	case SLJIT_SHL:
	case SLJIT_LSHR:
	case SLJIT_ASHR:
#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
		if (src2 & SLJIT_IMM)
			src2w &= 0x1f;
#else
		SLJIT_UNREACHABLE();
#endif
		return emit_op(compiler, op, flags | IMM_OP, dst, dstw, src1, src1w, src2, src2w);
	}

	return SLJIT_SUCCESS;
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2u(struct sljit_compiler *compiler, sljit_s32 op,
	sljit_s32 src1, sljit_sw src1w,
	sljit_s32 src2, sljit_sw src2w)
{
	CHECK(check_sljit_emit_op2(compiler, op, 1, 0, 0, src1, src1w, src2, src2w));

#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
		|| (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
	compiler->skip_checks = 1;
#endif
	return sljit_emit_op2(compiler, op, TMP_REG2, 0, src1, src1w, src2, src2w);
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_src(struct sljit_compiler *compiler, sljit_s32 op,
	sljit_s32 src, sljit_sw srcw)
{
	CHECK(check_sljit_emit_op_src(compiler, op, src, srcw));
	ADJUST_LOCAL_OFFSET(src, srcw);

	switch (op) {
	case SLJIT_FAST_RETURN:
		if (FAST_IS_REG(src))
			FAIL_IF(push_inst(compiler, OR | D(TMP_LINK) | S1(0) | S2(src), DR(TMP_LINK)));
		else
			FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, TMP_LINK, src, srcw));

		FAIL_IF(push_inst(compiler, JMPL | D(0) | S1(TMP_LINK) | IMM(8), UNMOVABLE_INS));
		return push_inst(compiler, NOP, UNMOVABLE_INS);
	case SLJIT_SKIP_FRAMES_BEFORE_FAST_RETURN:
	case SLJIT_PREFETCH_L1:
	case SLJIT_PREFETCH_L2:
	case SLJIT_PREFETCH_L3:
	case SLJIT_PREFETCH_ONCE:
		return SLJIT_SUCCESS;
	}

	return SLJIT_SUCCESS;
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_register_index(sljit_s32 reg)
{
	CHECK_REG_INDEX(check_sljit_get_register_index(reg));
	return reg_map[reg];
}

SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_float_register_index(sljit_s32 reg)
{
	CHECK_REG_INDEX(check_sljit_get_float_register_index(reg));
	return freg_map[reg];
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_custom(struct sljit_compiler *compiler,
	void *instruction, sljit_u32 size)
{
	CHECK(check_sljit_emit_op_custom(compiler, instruction, size));

	return push_inst(compiler, *(sljit_ins*)instruction, UNMOVABLE_INS);
}
/* --------------------------------------------------------------------- */
/*  Floating point operators                                              */
/* --------------------------------------------------------------------- */

#define FLOAT_DATA(op)		((sljit_ins)DOUBLE_DATA | (((sljit_ins)(op) & SLJIT_32) >> 7))
#define SELECT_FOP(op, single, double)	((op & SLJIT_32) ? single : double)
#define FLOAT_TMP_MEM_OFFSET	(22 * sizeof(sljit_sw))
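
/* Illustration: for a double-precision add, op has SLJIT_32 clear, so
   SELECT_FOP(op, FADDS, FADDD) picks FADDD and FLOAT_DATA(op) yields
   DOUBLE_DATA; with SLJIT_32 set the same expressions give FADDS and
   SINGLE_DATA, because SLJIT_32 (0x100) shifted right by 7 is exactly the
   0x2 bit that separates the two float rows in data_transfer_insts[]. */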
static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_sw_from_f64(struct sljit_compiler *compiler, sljit_s32 op,
	sljit_s32 dst, sljit_sw dstw,
	sljit_s32 src, sljit_sw srcw)
{
	if (src & SLJIT_MEM) {
		FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src, srcw, dst, dstw));
		src = TMP_FREG1;
	}

	FAIL_IF(push_inst(compiler, SELECT_FOP(op, FSTOI, FDTOI) | FD(TMP_FREG1) | FS2(src), MOVABLE_INS));

	if (FAST_IS_REG(dst)) {
		FAIL_IF(emit_op_mem2(compiler, SINGLE_DATA, TMP_FREG1, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET));
		return emit_op_mem2(compiler, WORD_DATA | LOAD_DATA, dst, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET);
	}

	/* Store the integer value from a floating point register. */
	return emit_op_mem2(compiler, SINGLE_DATA, TMP_FREG1, dst, dstw, 0, 0);
}
static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_compiler *compiler, sljit_s32 op,
	sljit_s32 dst, sljit_sw dstw,
	sljit_s32 src, sljit_sw srcw)
{
	sljit_s32 dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;

	if (src & SLJIT_IMM) {
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
		if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32)
			srcw = (sljit_s32)srcw;
#endif
		FAIL_IF(load_immediate(compiler, TMP_REG1, srcw));
		src = TMP_REG1;
		srcw = 0;
	}

	if (FAST_IS_REG(src)) {
		FAIL_IF(emit_op_mem2(compiler, WORD_DATA, src, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET));
		src = SLJIT_MEM1(SLJIT_SP);
		srcw = FLOAT_TMP_MEM_OFFSET;
	}

	FAIL_IF(emit_op_mem2(compiler, SINGLE_DATA | LOAD_DATA, TMP_FREG1, src, srcw, dst, dstw));
	FAIL_IF(push_inst(compiler, SELECT_FOP(op, FITOS, FITOD) | FD(dst_r) | FS2(TMP_FREG1), MOVABLE_INS));

	if (dst & SLJIT_MEM)
		return emit_op_mem2(compiler, FLOAT_DATA(op), TMP_FREG1, dst, dstw, 0, 0);
	return SLJIT_SUCCESS;
}
static SLJIT_INLINE sljit_s32 sljit_emit_fop1_cmp(struct sljit_compiler *compiler, sljit_s32 op,
	sljit_s32 src1, sljit_sw src1w,
	sljit_s32 src2, sljit_sw src2w)
{
	if (src1 & SLJIT_MEM) {
		FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w, src2, src2w));
		src1 = TMP_FREG1;
	}

	if (src2 & SLJIT_MEM) {
		FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w, 0, 0));
		src2 = TMP_FREG2;
	}

	return push_inst(compiler, SELECT_FOP(op, FCMPS, FCMPD) | FS1(src1) | FS2(src2), FCC_IS_SET | MOVABLE_INS);
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compiler, sljit_s32 op,
	sljit_s32 dst, sljit_sw dstw,
	sljit_s32 src, sljit_sw srcw)
{
	sljit_s32 dst_r;

	compiler->cache_arg = 0;
	compiler->cache_argw = 0;

	SLJIT_COMPILE_ASSERT((SLJIT_32 == 0x100) && !(DOUBLE_DATA & 0x2), float_transfer_bit_error);
	SELECT_FOP1_OPERATION_WITH_CHECKS(compiler, op, dst, dstw, src, srcw);

	if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_F32)
		op ^= SLJIT_32;

	dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;

	if (src & SLJIT_MEM) {
		FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op) | LOAD_DATA, dst_r, src, srcw, dst, dstw));
		src = dst_r;
	}

	switch (GET_OPCODE(op)) {
	case SLJIT_MOV_F64:
		if (dst_r != TMP_FREG1) {
			FAIL_IF(push_inst(compiler, FMOVS | FD(dst_r) | FS2(src), MOVABLE_INS));
			if (!(op & SLJIT_32))
				FAIL_IF(push_inst(compiler, FMOVS | FDN(dst_r) | FS2N(src), MOVABLE_INS));
		}
		else
			dst_r = src;
		break;
	case SLJIT_NEG_F64:
		FAIL_IF(push_inst(compiler, FNEGS | FD(dst_r) | FS2(src), MOVABLE_INS));
		if (dst_r != src && !(op & SLJIT_32))
			FAIL_IF(push_inst(compiler, FMOVS | FDN(dst_r) | FS2N(src), MOVABLE_INS));
		break;
	case SLJIT_ABS_F64:
		FAIL_IF(push_inst(compiler, FABSS | FD(dst_r) | FS2(src), MOVABLE_INS));
		if (dst_r != src && !(op & SLJIT_32))
			FAIL_IF(push_inst(compiler, FMOVS | FDN(dst_r) | FS2N(src), MOVABLE_INS));
		break;
	case SLJIT_CONV_F64_FROM_F32:
		FAIL_IF(push_inst(compiler, SELECT_FOP(op, FSTOD, FDTOS) | FD(dst_r) | FS2(src), MOVABLE_INS));
		op ^= SLJIT_32;
		break;
	}

	if (dst & SLJIT_MEM)
		FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op), dst_r, dst, dstw, 0, 0));
	return SLJIT_SUCCESS;
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2(struct sljit_compiler *compiler, sljit_s32 op,
	sljit_s32 dst, sljit_sw dstw,
	sljit_s32 src1, sljit_sw src1w,
	sljit_s32 src2, sljit_sw src2w)
{
	sljit_s32 dst_r, flags = 0;

	CHECK(check_sljit_emit_fop2(compiler, op, dst, dstw, src1, src1w, src2, src2w));
	ADJUST_LOCAL_OFFSET(dst, dstw);
	ADJUST_LOCAL_OFFSET(src1, src1w);
	ADJUST_LOCAL_OFFSET(src2, src2w);

	compiler->cache_arg = 0;
	compiler->cache_argw = 0;

	dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG2;

	if (src1 & SLJIT_MEM) {
		if (getput_arg_fast(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w)) {
			FAIL_IF(compiler->error);
			src1 = TMP_FREG1;
		}
		else
			flags |= SLOW_SRC1;
	}

	if (src2 & SLJIT_MEM) {
		if (getput_arg_fast(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w)) {
			FAIL_IF(compiler->error);
			src2 = TMP_FREG2;
		}
		else
			flags |= SLOW_SRC2;
	}

	if ((flags & (SLOW_SRC1 | SLOW_SRC2)) == (SLOW_SRC1 | SLOW_SRC2)) {
		if (!can_cache(src1, src1w, src2, src2w) && can_cache(src1, src1w, dst, dstw)) {
			FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w, src1, src1w));
			FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w, dst, dstw));
		}
		else {
			FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w, src2, src2w));
			FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w, dst, dstw));
		}
	}
	else if (flags & SLOW_SRC1)
		FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w, dst, dstw));
	else if (flags & SLOW_SRC2)
		FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w, dst, dstw));

	if (flags & SLOW_SRC1)
		src1 = TMP_FREG1;
	if (flags & SLOW_SRC2)
		src2 = TMP_FREG2;

	switch (GET_OPCODE(op)) {
	case SLJIT_ADD_F64:
		FAIL_IF(push_inst(compiler, SELECT_FOP(op, FADDS, FADDD) | FD(dst_r) | FS1(src1) | FS2(src2), MOVABLE_INS));
		break;

	case SLJIT_SUB_F64:
		FAIL_IF(push_inst(compiler, SELECT_FOP(op, FSUBS, FSUBD) | FD(dst_r) | FS1(src1) | FS2(src2), MOVABLE_INS));
		break;

	case SLJIT_MUL_F64:
		FAIL_IF(push_inst(compiler, SELECT_FOP(op, FMULS, FMULD) | FD(dst_r) | FS1(src1) | FS2(src2), MOVABLE_INS));
		break;

	case SLJIT_DIV_F64:
		FAIL_IF(push_inst(compiler, SELECT_FOP(op, FDIVS, FDIVD) | FD(dst_r) | FS1(src1) | FS2(src2), MOVABLE_INS));
		break;
	}

	if (dst_r == TMP_FREG2)
		FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op), TMP_FREG2, dst, dstw, 0, 0));

	return SLJIT_SUCCESS;
}
/* --------------------------------------------------------------------- */
/*  Other instructions                                                    */
/* --------------------------------------------------------------------- */

SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw)
{
	CHECK(check_sljit_emit_fast_enter(compiler, dst, dstw));
	ADJUST_LOCAL_OFFSET(dst, dstw);

	if (FAST_IS_REG(dst))
		return push_inst(compiler, OR | D(dst) | S1(0) | S2(TMP_LINK), UNMOVABLE_INS);

	FAIL_IF(emit_op_mem(compiler, WORD_DATA, TMP_LINK, dst, dstw));
	compiler->delay_slot = UNMOVABLE_INS;
	return SLJIT_SUCCESS;
}
/* --------------------------------------------------------------------- */
/*  Conditional instructions                                              */
/* --------------------------------------------------------------------- */

SLJIT_API_FUNC_ATTRIBUTE struct sljit_label* sljit_emit_label(struct sljit_compiler *compiler)
{
	struct sljit_label *label;

	CHECK_PTR(check_sljit_emit_label(compiler));

	if (compiler->last_label && compiler->last_label->size == compiler->size)
		return compiler->last_label;

	label = (struct sljit_label*)ensure_abuf(compiler, sizeof(struct sljit_label));
	PTR_FAIL_IF(!label);
	set_label(label, compiler);
	compiler->delay_slot = UNMOVABLE_INS;
	return label;
}
static sljit_ins get_cc(struct sljit_compiler *compiler, sljit_s32 type)
{
	switch (type) {
	case SLJIT_EQUAL:
	case SLJIT_NOT_EQUAL_F64: /* Unordered. */
		return DA(0x1);

	case SLJIT_NOT_EQUAL:
	case SLJIT_EQUAL_F64:
		return DA(0x9);

	case SLJIT_LESS:
	case SLJIT_GREATER_F64: /* Unordered. */
		return DA(0x5);

	case SLJIT_GREATER_EQUAL:
	case SLJIT_LESS_EQUAL_F64:
		return DA(0xd);

	case SLJIT_GREATER:
	case SLJIT_GREATER_EQUAL_F64: /* Unordered. */
		return DA(0xc);

	case SLJIT_LESS_EQUAL:
	case SLJIT_LESS_F64:
		return DA(0x4);

	case SLJIT_SIG_LESS:
		return DA(0x3);

	case SLJIT_SIG_GREATER_EQUAL:
		return DA(0xb);

	case SLJIT_SIG_GREATER:
		return DA(0xa);

	case SLJIT_SIG_LESS_EQUAL:
		return DA(0x2);

	case SLJIT_OVERFLOW:
		if (!(compiler->status_flags_state & SLJIT_CURRENT_FLAGS_ADD_SUB))
			return DA(0x9);
		/* fallthrough */

	case SLJIT_UNORDERED_F64:
		return DA(0x7);

	case SLJIT_NOT_OVERFLOW:
		if (!(compiler->status_flags_state & SLJIT_CURRENT_FLAGS_ADD_SUB))
			return DA(0x1);
		/* fallthrough */

	case SLJIT_ORDERED_F64:
		return DA(0xf);

	default:
		SLJIT_UNREACHABLE();
		return DA(0x8);
	}
}
SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compiler *compiler, sljit_s32 type)
{
	struct sljit_jump *jump;

	CHECK_PTR(check_sljit_emit_jump(compiler, type));

	jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump));
	PTR_FAIL_IF(!jump);
	set_jump(jump, compiler, type & SLJIT_REWRITABLE_JUMP);
	type &= 0xff;

	if (type < SLJIT_EQUAL_F64) {
		jump->flags |= IS_COND;
		if (((compiler->delay_slot & DST_INS_MASK) != UNMOVABLE_INS) && !(compiler->delay_slot & ICC_IS_SET))
			jump->flags |= IS_MOVABLE;
#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
		PTR_FAIL_IF(push_inst(compiler, BICC | get_cc(compiler, type ^ 1) | 5, UNMOVABLE_INS));
#else
#error "Implementation required"
#endif
	}
	else if (type < SLJIT_JUMP) {
		jump->flags |= IS_COND;
		if (((compiler->delay_slot & DST_INS_MASK) != UNMOVABLE_INS) && !(compiler->delay_slot & FCC_IS_SET))
			jump->flags |= IS_MOVABLE;
#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
		PTR_FAIL_IF(push_inst(compiler, FBFCC | get_cc(compiler, type ^ 1) | 5, UNMOVABLE_INS));
#else
#error "Implementation required"
#endif
	}
	else {
		if ((compiler->delay_slot & DST_INS_MASK) != UNMOVABLE_INS)
			jump->flags |= IS_MOVABLE;
		if (type >= SLJIT_FAST_CALL)
			jump->flags |= IS_CALL;
	}

	PTR_FAIL_IF(emit_const(compiler, TMP_REG1, 0));
	PTR_FAIL_IF(push_inst(compiler, JMPL | D(type >= SLJIT_FAST_CALL ? TMP_LINK : 0) | S1(TMP_REG1) | IMM(0), UNMOVABLE_INS));
	jump->addr = compiler->size;
	PTR_FAIL_IF(push_inst(compiler, NOP, UNMOVABLE_INS));

	return jump;
}
SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compiler *compiler, sljit_s32 type,
	sljit_s32 arg_types)
{
	CHECK_PTR(check_sljit_emit_call(compiler, type, arg_types));

	PTR_FAIL_IF(call_with_args(compiler, arg_types, NULL));

#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
		|| (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
	compiler->skip_checks = 1;
#endif

	return sljit_emit_jump(compiler, type);
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 src, sljit_sw srcw)
{
	struct sljit_jump *jump = NULL;
	sljit_s32 src_r;

	CHECK(check_sljit_emit_ijump(compiler, type, src, srcw));
	ADJUST_LOCAL_OFFSET(src, srcw);

	if (FAST_IS_REG(src))
		src_r = src;
	else if (src & SLJIT_IMM) {
		jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump));
		FAIL_IF(!jump);
		set_jump(jump, compiler, JUMP_ADDR);
		jump->u.target = (sljit_uw)srcw;

		if ((compiler->delay_slot & DST_INS_MASK) != UNMOVABLE_INS)
			jump->flags |= IS_MOVABLE;
		if (type >= SLJIT_FAST_CALL)
			jump->flags |= IS_CALL;

		FAIL_IF(emit_const(compiler, TMP_REG1, 0));
		src_r = TMP_REG1;
	}
	else {
		FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, TMP_REG1, src, srcw));
		src_r = TMP_REG1;
	}

	FAIL_IF(push_inst(compiler, JMPL | D(type >= SLJIT_FAST_CALL ? TMP_LINK : 0) | S1(src_r) | IMM(0), UNMOVABLE_INS));
	if (jump)
		jump->addr = compiler->size;
	return push_inst(compiler, NOP, UNMOVABLE_INS);
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compiler, sljit_s32 type,
	sljit_s32 arg_types,
	sljit_s32 src, sljit_sw srcw)
{
	CHECK(check_sljit_emit_icall(compiler, type, arg_types, src, srcw));

	if (src & SLJIT_MEM) {
		ADJUST_LOCAL_OFFSET(src, srcw);
		FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, TMP_REG1, src, srcw));
		src = TMP_REG1;
	}

	FAIL_IF(call_with_args(compiler, arg_types, &src));

#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
		|| (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
	compiler->skip_checks = 1;
#endif

	return sljit_emit_ijump(compiler, type, src, srcw);
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *compiler, sljit_s32 op,
	sljit_s32 dst, sljit_sw dstw,
	sljit_s32 type)
{
	sljit_s32 reg;
	sljit_u32 flags = HAS_FLAGS(op) ? SET_FLAGS : 0;

	CHECK(check_sljit_emit_op_flags(compiler, op, dst, dstw, type));
	ADJUST_LOCAL_OFFSET(dst, dstw);

#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
	op = GET_OPCODE(op);
	reg = (op < SLJIT_ADD && FAST_IS_REG(dst)) ? dst : TMP_REG2;

	compiler->cache_arg = 0;
	compiler->cache_argw = 0;

	if (op >= SLJIT_ADD && (dst & SLJIT_MEM))
		FAIL_IF(emit_op_mem2(compiler, WORD_DATA | LOAD_DATA, TMP_REG1, dst, dstw, dst, dstw));

	if (type < SLJIT_EQUAL_F64)
		FAIL_IF(push_inst(compiler, BICC | get_cc(compiler, type) | 3, UNMOVABLE_INS));
	else
		FAIL_IF(push_inst(compiler, FBFCC | get_cc(compiler, type) | 3, UNMOVABLE_INS));

	FAIL_IF(push_inst(compiler, OR | D(reg) | S1(0) | IMM(1), UNMOVABLE_INS));
	FAIL_IF(push_inst(compiler, OR | D(reg) | S1(0) | IMM(0), UNMOVABLE_INS));

	if (op >= SLJIT_ADD) {
		flags |= CUMULATIVE_OP | IMM_OP | ALT_KEEP_CACHE;
		if (dst & SLJIT_MEM)
			return emit_op(compiler, op, flags, dst, dstw, TMP_REG1, 0, TMP_REG2, 0);
		return emit_op(compiler, op, flags, dst, 0, dst, 0, TMP_REG2, 0);
	}

	if (!(dst & SLJIT_MEM))
		return SLJIT_SUCCESS;

	return emit_op_mem(compiler, WORD_DATA, TMP_REG2, dst, dstw);
#else
#error "Implementation required"
#endif
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compiler *compiler, sljit_s32 type,
	sljit_s32 dst_reg,
	sljit_s32 src, sljit_sw srcw)
{
	CHECK(check_sljit_emit_cmov(compiler, type, dst_reg, src, srcw));

#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
	return sljit_emit_cmov_generic(compiler, type, dst_reg, src, srcw);
#else
#error "Implementation required"
#endif
}
SLJIT_API_FUNC_ATTRIBUTE struct sljit_const* sljit_emit_const(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw, sljit_sw init_value)
{
	struct sljit_const *const_;
	sljit_s32 dst_r;

	CHECK_PTR(check_sljit_emit_const(compiler, dst, dstw, init_value));
	ADJUST_LOCAL_OFFSET(dst, dstw);

	const_ = (struct sljit_const*)ensure_abuf(compiler, sizeof(struct sljit_const));
	PTR_FAIL_IF(!const_);
	set_const(const_, compiler);

	dst_r = FAST_IS_REG(dst) ? dst : TMP_REG2;
	PTR_FAIL_IF(emit_const(compiler, dst_r, init_value));

	if (dst & SLJIT_MEM)
		PTR_FAIL_IF(emit_op_mem(compiler, WORD_DATA, TMP_REG2, dst, dstw));
	return const_;
}
SLJIT_API_FUNC_ATTRIBUTE struct sljit_put_label* sljit_emit_put_label(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw)
{
	struct sljit_put_label *put_label;
	sljit_s32 dst_r;

	CHECK_PTR(check_sljit_emit_put_label(compiler, dst, dstw));
	ADJUST_LOCAL_OFFSET(dst, dstw);

	put_label = (struct sljit_put_label*)ensure_abuf(compiler, sizeof(struct sljit_put_label));
	PTR_FAIL_IF(!put_label);
	set_put_label(put_label, compiler, 0);

	dst_r = FAST_IS_REG(dst) ? dst : TMP_REG2;
	PTR_FAIL_IF(emit_const(compiler, dst_r, 0));

	if (dst & SLJIT_MEM)
		PTR_FAIL_IF(emit_op_mem(compiler, WORD_DATA, TMP_REG2, dst, dstw));