sljit_src / sljitNativeSPARC_common.c
1 /*
2 * Stack-less Just-In-Time compiler
4 * Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved.
6 * Redistribution and use in source and binary forms, with or without modification, are
7 * permitted provided that the following conditions are met:
9 * 1. Redistributions of source code must retain the above copyright notice, this list of
10 * conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright notice, this list
13 * of conditions and the following disclaimer in the documentation and/or other materials
14 * provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY
17 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
19 * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
21 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
22 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
24 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 SLJIT_API_FUNC_ATTRIBUTE const char* sljit_get_platform_name(void)
29 return "SPARC" SLJIT_CPUINFO;
32 /* Length of an instruction word
33 Both for sparc-32 and sparc-64 */
34 typedef sljit_u32 sljit_ins;
36 #if (defined SLJIT_CACHE_FLUSH_OWN_IMPL && SLJIT_CACHE_FLUSH_OWN_IMPL)
38 static void sparc_cache_flush(sljit_ins *from, sljit_ins *to)
40 #if defined(__SUNPRO_C) && __SUNPRO_C < 0x590
41 __asm (
42 /* if (from == to) return */
43 "cmp %i0, %i1\n"
44 "be .leave\n"
45 "nop\n"
47 /* loop until from >= to */
48 ".mainloop:\n"
49 "flush %i0\n"
50 "add %i0, 8, %i0\n"
51 "cmp %i0, %i1\n"
52 "bcs .mainloop\n"
53 "nop\n"
55 /* The comparison was done above. */
56 "bne .leave\n"
57 /* nop is not necessary here, since the
58 sub operation has no side effect. */
59 "sub %i0, 4, %i0\n"
60 "flush %i0\n"
61 ".leave:"
63 #else
64 if (SLJIT_UNLIKELY(from == to))
65 return;
67 do {
68 __asm__ volatile (
69 "flush %0\n"
70 : : "r"(from)
72 /* flush operates on at least a doubleword. */
73 from += 2;
74 } while (from < to);
76 if (from == to) {
77 /* Flush the last word. */
78 from --;
79 __asm__ volatile (
80 "flush %0\n"
81 : : "r"(from)
84 #endif
87 #endif /* (defined SLJIT_CACHE_FLUSH_OWN_IMPL && SLJIT_CACHE_FLUSH_OWN_IMPL) */
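/* Note on the fallback loop above: the SPARC flush instruction covers at
   least the doubleword containing its address, which is why the C variant
   advances two 32-bit words per iteration and issues one extra flush for a
   possible leftover word at the end of the range.  The Sun Pro assembly
   variant follows the same scheme with an 8-byte stride. */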
89 /* TMP_REG2 is not used by getput_arg */
90 #define TMP_REG1 (SLJIT_NUMBER_OF_REGISTERS + 2)
91 #define TMP_REG2 (SLJIT_NUMBER_OF_REGISTERS + 3)
92 #define TMP_REG3 (SLJIT_NUMBER_OF_REGISTERS + 4)
93 /* This register is modified by calls, which affects the instruction
94 in the delay slot if it is used as a source register. */
95 #define TMP_LINK (SLJIT_NUMBER_OF_REGISTERS + 5)
97 #define TMP_FREG1 (SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1)
98 #define TMP_FREG2 (SLJIT_NUMBER_OF_FLOAT_REGISTERS + 2)
100 static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 6] = {
101 0, 8, 9, 10, 11, 23, 22, 21, 20, 19, 18, 17, 16, 29, 28, 27, 26, 25, 24, 14, 1, 12, 13, 15
104 static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = {
105 0, 0, 2, 4, 6, 8, 10, 12, 14
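/* freg_map intentionally holds even numbers only: each SLJIT float register
   is backed by a SPARC double-precision pair (%f0/%f1, %f2/%f3, ...).  The
   FDN()/FS2N() helpers below OR in 1 to address the odd half of a pair when
   the second word of an f64 value has to be moved with a single-precision
   instruction. */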
108 /* --------------------------------------------------------------------- */
109 /* Instruction forms */
110 /* --------------------------------------------------------------------- */
112 #define D(d) (reg_map[d] << 25)
113 #define FD(d) (freg_map[d] << 25)
114 #define FDN(d) ((freg_map[d] | 0x1) << 25)
115 #define DA(d) ((d) << 25)
116 #define S1(s1) (reg_map[s1] << 14)
117 #define FS1(s1) (freg_map[s1] << 14)
118 #define S1A(s1) ((s1) << 14)
119 #define S2(s2) (reg_map[s2])
120 #define FS2(s2) (freg_map[s2])
121 #define FS2N(s2) (freg_map[s2] | 0x1)
122 #define S2A(s2) (s2)
123 #define IMM_ARG 0x2000
124 #define DOP(op) ((op) << 5)
125 #define IMM(imm) (((imm) & 0x1fff) | IMM_ARG)
127 #define DR(dr) (reg_map[dr])
128 #define OPC1(opcode) ((opcode) << 30)
129 #define OPC2(opcode) ((opcode) << 22)
130 #define OPC3(opcode) ((opcode) << 19)
131 #define SET_FLAGS OPC3(0x10)
133 #define ADD (OPC1(0x2) | OPC3(0x00))
134 #define ADDC (OPC1(0x2) | OPC3(0x08))
135 #define AND (OPC1(0x2) | OPC3(0x01))
136 #define ANDN (OPC1(0x2) | OPC3(0x05))
137 #define CALL (OPC1(0x1))
138 #define FABSS (OPC1(0x2) | OPC3(0x34) | DOP(0x09))
139 #define FADDD (OPC1(0x2) | OPC3(0x34) | DOP(0x42))
140 #define FADDS (OPC1(0x2) | OPC3(0x34) | DOP(0x41))
141 #define FCMPD (OPC1(0x2) | OPC3(0x35) | DOP(0x52))
142 #define FCMPS (OPC1(0x2) | OPC3(0x35) | DOP(0x51))
143 #define FDIVD (OPC1(0x2) | OPC3(0x34) | DOP(0x4e))
144 #define FDIVS (OPC1(0x2) | OPC3(0x34) | DOP(0x4d))
145 #define FDTOI (OPC1(0x2) | OPC3(0x34) | DOP(0xd2))
146 #define FDTOS (OPC1(0x2) | OPC3(0x34) | DOP(0xc6))
147 #define FITOD (OPC1(0x2) | OPC3(0x34) | DOP(0xc8))
148 #define FITOS (OPC1(0x2) | OPC3(0x34) | DOP(0xc4))
149 #define FMOVS (OPC1(0x2) | OPC3(0x34) | DOP(0x01))
150 #define FMULD (OPC1(0x2) | OPC3(0x34) | DOP(0x4a))
151 #define FMULS (OPC1(0x2) | OPC3(0x34) | DOP(0x49))
152 #define FNEGS (OPC1(0x2) | OPC3(0x34) | DOP(0x05))
153 #define FSTOD (OPC1(0x2) | OPC3(0x34) | DOP(0xc9))
154 #define FSTOI (OPC1(0x2) | OPC3(0x34) | DOP(0xd1))
155 #define FSUBD (OPC1(0x2) | OPC3(0x34) | DOP(0x46))
156 #define FSUBS (OPC1(0x2) | OPC3(0x34) | DOP(0x45))
157 #define JMPL (OPC1(0x2) | OPC3(0x38))
158 #define LDD (OPC1(0x3) | OPC3(0x03))
159 #define LDDF (OPC1(0x3) | OPC3(0x23))
160 #define LDF (OPC1(0x3) | OPC3(0x20))
161 #define LDUW (OPC1(0x3) | OPC3(0x00))
162 #define NOP (OPC1(0x0) | OPC2(0x04))
163 #define OR (OPC1(0x2) | OPC3(0x02))
164 #define ORN (OPC1(0x2) | OPC3(0x06))
165 #define RDY (OPC1(0x2) | OPC3(0x28) | S1A(0))
166 #define RESTORE (OPC1(0x2) | OPC3(0x3d))
167 #define SAVE (OPC1(0x2) | OPC3(0x3c))
168 #define SETHI (OPC1(0x0) | OPC2(0x04))
169 #define SLL (OPC1(0x2) | OPC3(0x25))
170 #define SLLX (OPC1(0x2) | OPC3(0x25) | (1 << 12))
171 #define SRA (OPC1(0x2) | OPC3(0x27))
172 #define SRAX (OPC1(0x2) | OPC3(0x27) | (1 << 12))
173 #define SRL (OPC1(0x2) | OPC3(0x26))
174 #define SRLX (OPC1(0x2) | OPC3(0x26) | (1 << 12))
175 #define STD (OPC1(0x3) | OPC3(0x07))
176 #define STDF (OPC1(0x3) | OPC3(0x27))
177 #define STF (OPC1(0x3) | OPC3(0x24))
178 #define STW (OPC1(0x3) | OPC3(0x04))
179 #define SUB (OPC1(0x2) | OPC3(0x04))
180 #define SUBC (OPC1(0x2) | OPC3(0x0c))
181 #define TA (OPC1(0x2) | OPC3(0x3a) | (8 << 25))
182 #define WRY (OPC1(0x2) | OPC3(0x30) | DA(0))
183 #define XOR (OPC1(0x2) | OPC3(0x03))
184 #define XNOR (OPC1(0x2) | OPC3(0x07))
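/* Illustration of how these macros compose into one instruction word
   (format-3 layout: op in bits 31-30, rd 29-25, op3 24-19, rs1 18-14,
   the i bit 13, rs2 4-0 or simm13 12-0).  For example, with the raw field
   helpers, ADD | DA(10) | S1A(9) | S2A(8) is
   0x80000000 | (10 << 25) | (9 << 14) | 8 == 0x94024008,
   i.e. "add %o1, %o0, %o2"; the register forms D()/S1()/S2() go through
   reg_map first.  The immediate form sets IMM_ARG (the i bit) together with
   a signed 13-bit value, hence the SIMM_MAX/SIMM_MIN limits below. */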
186 #if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
187 #define MAX_DISP (0x1fffff)
188 #define MIN_DISP (-0x200000)
189 #define DISP_MASK (0x3fffff)
191 #define BICC (OPC1(0x0) | OPC2(0x2))
192 #define FBFCC (OPC1(0x0) | OPC2(0x6))
193 #define SLL_W SLL
194 #define SDIV (OPC1(0x2) | OPC3(0x0f))
195 #define SMUL (OPC1(0x2) | OPC3(0x0b))
196 #define UDIV (OPC1(0x2) | OPC3(0x0e))
197 #define UMUL (OPC1(0x2) | OPC3(0x0a))
198 #else
199 #define SLL_W SLLX
200 #endif
202 #define SIMM_MAX (0x0fff)
203 #define SIMM_MIN (-0x1000)
205 /* dest_reg is the absolute name of the register
206 Useful for reordering instructions in the delay slot. */
207 static sljit_s32 push_inst(struct sljit_compiler *compiler, sljit_ins ins, sljit_s32 delay_slot)
209 sljit_ins *ptr;
210 SLJIT_ASSERT((delay_slot & DST_INS_MASK) == UNMOVABLE_INS
211 || (delay_slot & DST_INS_MASK) == MOVABLE_INS
212 || (delay_slot & DST_INS_MASK) == ((ins >> 25) & 0x1f));
213 ptr = (sljit_ins*)ensure_buf(compiler, sizeof(sljit_ins));
214 FAIL_IF(!ptr);
215 *ptr = ins;
216 compiler->size++;
217 compiler->delay_slot = delay_slot;
218 return SLJIT_SUCCESS;
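/* How the delay_slot value recorded above is used, roughly: later emitters
   inspect compiler->delay_slot to decide whether the most recently pushed
   instruction can be pulled into a branch or call delay slot (the IS_MOVABLE
   handling in detect_jump_type and sljit_emit_jump).  MOVABLE_INS marks an
   instruction that is safe to move, UNMOVABLE_INS one that must stay in
   place, and any other value names its destination register so dependencies
   can be checked against it. */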
221 static SLJIT_INLINE sljit_ins* detect_jump_type(struct sljit_jump *jump, sljit_ins *code_ptr, sljit_ins *code, sljit_sw executable_offset)
223 sljit_sw diff;
224 sljit_uw target_addr;
225 sljit_ins *inst;
226 sljit_ins saved_inst;
228 if (jump->flags & SLJIT_REWRITABLE_JUMP)
229 return code_ptr;
231 if (jump->flags & JUMP_ADDR)
232 target_addr = jump->u.target;
233 else {
234 SLJIT_ASSERT(jump->flags & JUMP_LABEL);
235 target_addr = (sljit_uw)(code + jump->u.label->size) + (sljit_uw)executable_offset;
237 inst = (sljit_ins*)jump->addr;
239 #if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
240 if (jump->flags & IS_CALL) {
241 /* Call is always patchable on sparc 32. */
242 jump->flags |= PATCH_CALL;
243 if (jump->flags & IS_MOVABLE) {
244 inst[0] = inst[-1];
245 inst[-1] = CALL;
246 jump->addr -= sizeof(sljit_ins);
247 return inst;
249 inst[0] = CALL;
250 inst[1] = NOP;
251 return inst + 1;
253 #else
254 /* Neither calls nor BPr instructions should reach this point. */
255 #error "Implementation required"
256 #endif
258 if (jump->flags & IS_COND)
259 inst--;
261 diff = ((sljit_sw)target_addr - (sljit_sw)(inst - 1) - executable_offset) >> 2;
263 if (jump->flags & IS_MOVABLE) {
264 if (diff <= MAX_DISP && diff >= MIN_DISP) {
265 jump->flags |= PATCH_B;
266 inst--;
267 if (jump->flags & IS_COND) {
268 saved_inst = inst[0];
269 inst[0] = inst[1] ^ (1 << 28);
270 inst[1] = saved_inst;
271 } else {
272 inst[1] = inst[0];
273 inst[0] = BICC | DA(0x8);
275 jump->addr = (sljit_uw)inst;
276 return inst + 1;
280 diff += sizeof(sljit_ins);
282 if (diff <= MAX_DISP && diff >= MIN_DISP) {
283 jump->flags |= PATCH_B;
284 if (jump->flags & IS_COND)
285 inst[0] ^= (1 << 28);
286 else
287 inst[0] = BICC | DA(0x8);
288 inst[1] = NOP;
289 jump->addr = (sljit_uw)inst;
290 return inst + 1;
293 return code_ptr;
296 SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compiler)
298 struct sljit_memory_fragment *buf;
299 sljit_ins *code;
300 sljit_ins *code_ptr;
301 sljit_ins *buf_ptr;
302 sljit_ins *buf_end;
303 sljit_uw word_count;
304 sljit_uw next_addr;
305 sljit_sw executable_offset;
306 sljit_uw addr;
308 struct sljit_label *label;
309 struct sljit_jump *jump;
310 struct sljit_const *const_;
311 struct sljit_put_label *put_label;
313 CHECK_ERROR_PTR();
314 CHECK_PTR(check_sljit_generate_code(compiler));
315 reverse_buf(compiler);
317 code = (sljit_ins*)SLJIT_MALLOC_EXEC(compiler->size * sizeof(sljit_ins), compiler->exec_allocator_data);
318 PTR_FAIL_WITH_EXEC_IF(code);
319 buf = compiler->buf;
321 code_ptr = code;
322 word_count = 0;
323 next_addr = 0;
324 executable_offset = SLJIT_EXEC_OFFSET(code);
326 label = compiler->labels;
327 jump = compiler->jumps;
328 const_ = compiler->consts;
329 put_label = compiler->put_labels;
331 do {
332 buf_ptr = (sljit_ins*)buf->memory;
333 buf_end = buf_ptr + (buf->used_size >> 2);
334 do {
335 *code_ptr = *buf_ptr++;
336 if (next_addr == word_count) {
337 SLJIT_ASSERT(!label || label->size >= word_count);
338 SLJIT_ASSERT(!jump || jump->addr >= word_count);
339 SLJIT_ASSERT(!const_ || const_->addr >= word_count);
340 SLJIT_ASSERT(!put_label || put_label->addr >= word_count);
342 /* These structures are ordered by their address. */
343 if (label && label->size == word_count) {
344 /* Just recording the address. */
345 label->addr = (sljit_uw)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset);
346 label->size = code_ptr - code;
347 label = label->next;
349 if (jump && jump->addr == word_count) {
350 #if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
351 jump->addr = (sljit_uw)(code_ptr - 3);
352 #else
353 jump->addr = (sljit_uw)(code_ptr - 6);
354 #endif
355 code_ptr = detect_jump_type(jump, code_ptr, code, executable_offset);
356 jump = jump->next;
358 if (const_ && const_->addr == word_count) {
359 /* Just recording the address. */
360 const_->addr = (sljit_uw)code_ptr;
361 const_ = const_->next;
363 if (put_label && put_label->addr == word_count) {
364 SLJIT_ASSERT(put_label->label);
365 put_label->addr = (sljit_uw)code_ptr;
366 put_label = put_label->next;
368 next_addr = compute_next_addr(label, jump, const_, put_label);
370 code_ptr ++;
371 word_count ++;
372 } while (buf_ptr < buf_end);
374 buf = buf->next;
375 } while (buf);
377 if (label && label->size == word_count) {
378 label->addr = (sljit_uw)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset);
379 label->size = code_ptr - code;
380 label = label->next;
383 SLJIT_ASSERT(!label);
384 SLJIT_ASSERT(!jump);
385 SLJIT_ASSERT(!const_);
386 SLJIT_ASSERT(!put_label);
387 SLJIT_ASSERT(code_ptr - code <= (sljit_s32)compiler->size);
389 jump = compiler->jumps;
390 while (jump) {
391 do {
392 addr = (jump->flags & JUMP_LABEL) ? jump->u.label->addr : jump->u.target;
393 buf_ptr = (sljit_ins *)jump->addr;
395 if (jump->flags & PATCH_CALL) {
396 addr = (sljit_sw)(addr - (sljit_uw)SLJIT_ADD_EXEC_OFFSET(buf_ptr, executable_offset)) >> 2;
397 SLJIT_ASSERT((sljit_sw)addr <= 0x1fffffff && (sljit_sw)addr >= -0x20000000);
398 buf_ptr[0] = CALL | (addr & 0x3fffffff);
399 break;
401 if (jump->flags & PATCH_B) {
402 addr = (sljit_sw)(addr - (sljit_uw)SLJIT_ADD_EXEC_OFFSET(buf_ptr, executable_offset)) >> 2;
403 SLJIT_ASSERT((sljit_sw)addr <= MAX_DISP && (sljit_sw)addr >= MIN_DISP);
404 buf_ptr[0] = (buf_ptr[0] & ~DISP_MASK) | (addr & DISP_MASK);
405 break;
408 /* Set the fields of immediate loads. */
409 #if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
410 SLJIT_ASSERT(((buf_ptr[0] & 0xc1cfffff) == 0x01000000) && ((buf_ptr[1] & 0xc1f83fff) == 0x80102000));
411 buf_ptr[0] |= (addr >> 10) & 0x3fffff;
412 buf_ptr[1] |= addr & 0x3ff;
413 #else
414 #error "Implementation required"
415 #endif
416 } while (0);
417 jump = jump->next;
420 put_label = compiler->put_labels;
421 while (put_label) {
422 addr = put_label->label->addr;
423 buf_ptr = (sljit_ins *)put_label->addr;
425 #if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
426 SLJIT_ASSERT(((buf_ptr[0] & 0xc1cfffff) == 0x01000000) && ((buf_ptr[1] & 0xc1f83fff) == 0x80102000));
427 buf_ptr[0] |= (addr >> 10) & 0x3fffff;
428 buf_ptr[1] |= addr & 0x3ff;
429 #else
430 #error "Implementation required"
431 #endif
432 put_label = put_label->next;
435 compiler->error = SLJIT_ERR_COMPILED;
436 compiler->executable_offset = executable_offset;
437 compiler->executable_size = (code_ptr - code) * sizeof(sljit_ins);
439 code = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(code, executable_offset);
440 code_ptr = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset);
442 SLJIT_CACHE_FLUSH(code, code_ptr);
443 SLJIT_UPDATE_WX_FLAGS(code, code_ptr, 1);
444 return code;
447 SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type)
449 switch (feature_type) {
450 case SLJIT_HAS_FPU:
451 #ifdef SLJIT_IS_FPU_AVAILABLE
452 return SLJIT_IS_FPU_AVAILABLE;
453 #else
454 /* Available by default. */
455 return 1;
456 #endif
458 case SLJIT_HAS_ZERO_REGISTER:
459 return 1;
461 #if (defined SLJIT_CONFIG_SPARC_64 && SLJIT_CONFIG_SPARC_64)
462 case SLJIT_HAS_CMOV:
463 return 1;
464 #endif
466 default:
467 return 0;
471 /* --------------------------------------------------------------------- */
472 /* Entry, exit */
473 /* --------------------------------------------------------------------- */
475 /* Creates an index in data_transfer_insts array. */
476 #define LOAD_DATA 0x01
477 #define WORD_DATA 0x00
478 #define BYTE_DATA 0x02
479 #define HALF_DATA 0x04
480 #define INT_DATA 0x06
481 #define SIGNED_DATA 0x08
482 /* Separates integer and floating point registers */
483 #define GPR_REG 0x0f
484 #define DOUBLE_DATA 0x10
485 #define SINGLE_DATA 0x12
487 #define MEM_MASK 0x1f
489 #define ARG_TEST 0x00020
490 #define ALT_KEEP_CACHE 0x00040
491 #define CUMULATIVE_OP 0x00080
492 #define IMM_OP 0x00100
493 #define MOVE_OP 0x00200
494 #define SRC2_IMM 0x00400
496 #define REG_DEST 0x00800
497 #define REG2_SOURCE 0x01000
498 #define SLOW_SRC1 0x02000
499 #define SLOW_SRC2 0x04000
500 #define SLOW_DEST 0x08000
502 /* SET_FLAGS (0x10 << 19) also belongs here! */
504 #if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
505 #include "sljitNativeSPARC_32.c"
506 #else
507 #include "sljitNativeSPARC_64.c"
508 #endif
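/* A short sketch of the argument set-up done by sljit_emit_enter below,
   assuming the SPARC 32-bit ABI: SAVE opens a new register window and
   reserves local_size bytes, incoming arguments sit in %i0-%i5 (hardware
   registers 24-29), and any further arguments live in the caller's frame at
   %fp + (16 + 1 + 6) * sizeof(sljit_sw).  Floating point arguments arrive in
   those same integer registers, so the first loop stores them to the stack
   just above the window save area and the second loop reloads them into
   %f registers with LDF/LDDF. */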
510 SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compiler,
511 sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
512 sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
514 sljit_s32 reg_index, float_offset, args_offset, types;
515 sljit_s32 word_arg_index, float_arg_index;
517 CHECK_ERROR();
518 CHECK(check_sljit_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
519 set_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);
521 local_size = (local_size + SLJIT_LOCALS_OFFSET + 7) & ~0x7;
522 compiler->local_size = local_size;
524 if (local_size <= -SIMM_MIN) {
525 FAIL_IF(push_inst(compiler, SAVE | D(SLJIT_SP) | S1(SLJIT_SP) | IMM(-local_size), UNMOVABLE_INS));
527 else {
528 FAIL_IF(load_immediate(compiler, TMP_REG1, -local_size));
529 FAIL_IF(push_inst(compiler, SAVE | D(SLJIT_SP) | S1(SLJIT_SP) | S2(TMP_REG1), UNMOVABLE_INS));
532 arg_types >>= SLJIT_ARG_SHIFT;
534 types = arg_types;
535 float_offset = 16 * sizeof(sljit_sw);
536 reg_index = 24;
538 while (types && reg_index < 24 + 6) {
539 switch (types & SLJIT_ARG_MASK) {
540 case SLJIT_ARG_TYPE_F64:
541 if (reg_index & 0x1) {
542 FAIL_IF(push_inst(compiler, STW | DA(reg_index) | S1(SLJIT_SP) | IMM(float_offset), MOVABLE_INS));
543 if (reg_index >= 24 + 6 - 1)
544 break;
545 FAIL_IF(push_inst(compiler, STW | DA(reg_index + 1) | S1(SLJIT_SP) | IMM(float_offset + sizeof(sljit_sw)), MOVABLE_INS));
546 } else
547 FAIL_IF(push_inst(compiler, STD | DA(reg_index) | S1(SLJIT_SP) | IMM(float_offset), MOVABLE_INS));
549 float_offset += sizeof(sljit_f64);
550 reg_index++;
551 break;
552 case SLJIT_ARG_TYPE_F32:
553 FAIL_IF(push_inst(compiler, STW | DA(reg_index) | S1(SLJIT_SP) | IMM(float_offset), MOVABLE_INS));
554 float_offset += sizeof(sljit_f64);
555 break;
558 reg_index++;
559 types >>= SLJIT_ARG_SHIFT;
562 args_offset = (16 + 1 + 6) * sizeof(sljit_sw);
563 float_offset = 16 * sizeof(sljit_sw);
564 reg_index = 24;
565 word_arg_index = 24;
566 float_arg_index = 1;
568 while (arg_types) {
569 switch (arg_types & SLJIT_ARG_MASK) {
570 case SLJIT_ARG_TYPE_F64:
571 if (reg_index < 24 + 6 - 1) {
572 FAIL_IF(push_inst(compiler, LDDF | FD(float_arg_index) | S1(SLJIT_SP) | IMM(float_offset), MOVABLE_INS));
573 } else if (reg_index < 24 + 6) {
574 FAIL_IF(push_inst(compiler, LDF | FD(float_arg_index) | S1(SLJIT_SP) | IMM(float_offset), MOVABLE_INS));
575 FAIL_IF(push_inst(compiler, LDF | FD(float_arg_index) | (1 << 25) | S1A(30) | IMM(args_offset), MOVABLE_INS));
576 } else {
577 FAIL_IF(push_inst(compiler, LDF | FD(float_arg_index) | S1A(30) | IMM(args_offset), MOVABLE_INS));
578 FAIL_IF(push_inst(compiler, LDF | FD(float_arg_index) | (1 << 25) | S1A(30) | IMM(args_offset + sizeof(sljit_sw)), MOVABLE_INS));
581 float_arg_index++;
582 float_offset += sizeof(sljit_f64);
583 reg_index++;
584 break;
585 case SLJIT_ARG_TYPE_F32:
586 if (reg_index < 24 + 6)
587 FAIL_IF(push_inst(compiler, LDF | FD(float_arg_index) | S1(SLJIT_SP) | IMM(float_offset), MOVABLE_INS));
588 else
589 FAIL_IF(push_inst(compiler, LDF | FD(float_arg_index) | S1A(30) | IMM(args_offset), MOVABLE_INS));
590 float_arg_index++;
591 float_offset += sizeof(sljit_f64);
592 break;
593 default:
594 if (reg_index != word_arg_index) {
595 if (reg_index < 24 + 6)
596 FAIL_IF(push_inst(compiler, OR | DA(word_arg_index) | S1(0) | S2A(reg_index), word_arg_index));
597 else
598 FAIL_IF(push_inst(compiler, LDUW | DA(word_arg_index) | S1A(30) | IMM(args_offset), word_arg_index));
601 word_arg_index++;
602 break;
605 reg_index++;
606 arg_types >>= SLJIT_ARG_SHIFT;
609 return SLJIT_SUCCESS;
612 SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *compiler,
613 sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
614 sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
616 CHECK_ERROR();
617 CHECK(check_sljit_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
618 set_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);
620 compiler->local_size = (local_size + SLJIT_LOCALS_OFFSET + 7) & ~0x7;
621 return SLJIT_SUCCESS;
624 SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return_void(struct sljit_compiler *compiler)
626 CHECK_ERROR();
627 CHECK(check_sljit_emit_return_void(compiler));
629 FAIL_IF(push_inst(compiler, JMPL | D(0) | S1A(31) | IMM(8), UNMOVABLE_INS));
630 return push_inst(compiler, RESTORE | D(SLJIT_R0) | S1(SLJIT_R0) | S2(0), UNMOVABLE_INS);
633 SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 src, sljit_sw srcw)
635 CHECK_ERROR();
636 CHECK(check_sljit_emit_return(compiler, op, src, srcw));
638 if (TYPE_CAST_NEEDED(op) || !FAST_IS_REG(src)) {
639 FAIL_IF(emit_mov_before_return(compiler, op, src, srcw));
640 src = SLJIT_R0;
643 FAIL_IF(push_inst(compiler, JMPL | D(0) | S1A(31) | IMM(8), UNMOVABLE_INS));
644 return push_inst(compiler, RESTORE | D(SLJIT_R0) | S1(src) | S2(0), UNMOVABLE_INS);
647 /* --------------------------------------------------------------------- */
648 /* Operators */
649 /* --------------------------------------------------------------------- */
651 #if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
652 #define ARCH_32_64(a, b) a
653 #else
654 #define ARCH_32_64(a, b) b
655 #endif
657 static const sljit_ins data_transfer_insts[16 + 4] = {
658 /* u w s */ ARCH_32_64(OPC1(3) | OPC3(0x04) /* stw */, OPC1(3) | OPC3(0x0e) /* stx */),
659 /* u w l */ ARCH_32_64(OPC1(3) | OPC3(0x00) /* lduw */, OPC1(3) | OPC3(0x0b) /* ldx */),
660 /* u b s */ OPC1(3) | OPC3(0x05) /* stb */,
661 /* u b l */ OPC1(3) | OPC3(0x01) /* ldub */,
662 /* u h s */ OPC1(3) | OPC3(0x06) /* sth */,
663 /* u h l */ OPC1(3) | OPC3(0x02) /* lduh */,
664 /* u i s */ OPC1(3) | OPC3(0x04) /* stw */,
665 /* u i l */ OPC1(3) | OPC3(0x00) /* lduw */,
667 /* s w s */ ARCH_32_64(OPC1(3) | OPC3(0x04) /* stw */, OPC1(3) | OPC3(0x0e) /* stx */),
668 /* s w l */ ARCH_32_64(OPC1(3) | OPC3(0x00) /* lduw */, OPC1(3) | OPC3(0x0b) /* ldx */),
669 /* s b s */ OPC1(3) | OPC3(0x05) /* stb */,
670 /* s b l */ OPC1(3) | OPC3(0x09) /* ldsb */,
671 /* s h s */ OPC1(3) | OPC3(0x06) /* sth */,
672 /* s h l */ OPC1(3) | OPC3(0x0a) /* ldsh */,
673 /* s i s */ OPC1(3) | OPC3(0x04) /* stw */,
674 /* s i l */ ARCH_32_64(OPC1(3) | OPC3(0x00) /* lduw */, OPC1(3) | OPC3(0x08) /* ldsw */),
676 /* d s */ OPC1(3) | OPC3(0x27),
677 /* d l */ OPC1(3) | OPC3(0x23),
678 /* s s */ OPC1(3) | OPC3(0x24),
679 /* s l */ OPC1(3) | OPC3(0x20),
682 #undef ARCH_32_64
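/* The transfer opcode is picked by indexing this table with the low MEM_MASK
   bits of the flags, e.g. BYTE_DATA | SIGNED_DATA | LOAD_DATA (0x0b) selects
   the "s b l" entry (ldsb) and DOUBLE_DATA | LOAD_DATA (0x11) selects the
   "d l" entry (lddf); entries at or below GPR_REG are integer transfers and
   the last four are the floating point ones. */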
684 /* Emits the load/store if it can be encoded as a single instruction. */
685 static sljit_s32 getput_arg_fast(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg, sljit_s32 arg, sljit_sw argw)
687 SLJIT_ASSERT(arg & SLJIT_MEM);
689 if ((!(arg & OFFS_REG_MASK) && argw <= SIMM_MAX && argw >= SIMM_MIN)
690 || ((arg & OFFS_REG_MASK) && (argw & 0x3) == 0)) {
691 /* Works for both absolute and relative addresses (immediate case). */
692 if (SLJIT_UNLIKELY(flags & ARG_TEST))
693 return 1;
694 FAIL_IF(push_inst(compiler, data_transfer_insts[flags & MEM_MASK]
695 | ((flags & MEM_MASK) <= GPR_REG ? D(reg) : FD(reg))
696 | S1(arg & REG_MASK) | ((arg & OFFS_REG_MASK) ? S2(OFFS_REG(arg)) : IMM(argw)),
697 ((flags & MEM_MASK) <= GPR_REG && (flags & LOAD_DATA)) ? DR(reg) : MOVABLE_INS));
698 return -1;
700 return 0;
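/* Return value convention above, as the callers appear to rely on it:
   -1 when the access has been emitted as a single instruction, 1 when only
   ARG_TEST was requested and it would fit, and 0 when the slower getput_arg
   path is required. */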
703 /* See getput_arg below.
704 Note: can_cache is called only for binary operators. Those
705 operators always use word arguments without write-back. */
706 static sljit_s32 can_cache(sljit_s32 arg, sljit_sw argw, sljit_s32 next_arg, sljit_sw next_argw)
708 SLJIT_ASSERT((arg & SLJIT_MEM) && (next_arg & SLJIT_MEM));
710 /* Simple operation except for updates. */
711 if (arg & OFFS_REG_MASK) {
712 argw &= 0x3;
713 SLJIT_ASSERT(argw);
714 next_argw &= 0x3;
715 if ((arg & OFFS_REG_MASK) == (next_arg & OFFS_REG_MASK) && argw == next_argw)
716 return 1;
717 return 0;
720 if (((next_argw - argw) <= SIMM_MAX && (next_argw - argw) >= SIMM_MIN))
721 return 1;
722 return 0;
725 /* Emit the necessary instructions. See can_cache above. */
726 static sljit_s32 getput_arg(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg, sljit_s32 arg, sljit_sw argw, sljit_s32 next_arg, sljit_sw next_argw)
728 sljit_s32 base, arg2, delay_slot;
729 sljit_ins dest;
731 SLJIT_ASSERT(arg & SLJIT_MEM);
732 if (!(next_arg & SLJIT_MEM)) {
733 next_arg = 0;
734 next_argw = 0;
737 base = arg & REG_MASK;
738 if (SLJIT_UNLIKELY(arg & OFFS_REG_MASK)) {
739 argw &= 0x3;
741 /* Using the cache. */
742 if (((SLJIT_MEM | (arg & OFFS_REG_MASK)) == compiler->cache_arg) && (argw == compiler->cache_argw))
743 arg2 = TMP_REG3;
744 else {
745 if ((arg & OFFS_REG_MASK) == (next_arg & OFFS_REG_MASK) && argw == (next_argw & 0x3)) {
746 compiler->cache_arg = SLJIT_MEM | (arg & OFFS_REG_MASK);
747 compiler->cache_argw = argw;
748 arg2 = TMP_REG3;
750 else if ((flags & LOAD_DATA) && ((flags & MEM_MASK) <= GPR_REG) && reg != base && reg != OFFS_REG(arg))
751 arg2 = reg;
752 else /* It must be a mov operation, so tmp1 must be free to use. */
753 arg2 = TMP_REG1;
754 FAIL_IF(push_inst(compiler, SLL_W | D(arg2) | S1(OFFS_REG(arg)) | IMM_ARG | argw, DR(arg2)));
757 else {
758 /* Using the cache. */
759 if ((compiler->cache_arg == SLJIT_MEM) && (argw - compiler->cache_argw) <= SIMM_MAX && (argw - compiler->cache_argw) >= SIMM_MIN) {
760 if (argw != compiler->cache_argw) {
761 FAIL_IF(push_inst(compiler, ADD | D(TMP_REG3) | S1(TMP_REG3) | IMM(argw - compiler->cache_argw), DR(TMP_REG3)));
762 compiler->cache_argw = argw;
764 arg2 = TMP_REG3;
765 } else {
766 if ((next_argw - argw) <= SIMM_MAX && (next_argw - argw) >= SIMM_MIN) {
767 compiler->cache_arg = SLJIT_MEM;
768 compiler->cache_argw = argw;
769 arg2 = TMP_REG3;
771 else if ((flags & LOAD_DATA) && ((flags & MEM_MASK) <= GPR_REG) && reg != base)
772 arg2 = reg;
773 else /* It must be a mov operation, so tmp1 must be free to use. */
774 arg2 = TMP_REG1;
775 FAIL_IF(load_immediate(compiler, arg2, argw));
779 dest = ((flags & MEM_MASK) <= GPR_REG ? D(reg) : FD(reg));
780 delay_slot = ((flags & MEM_MASK) <= GPR_REG && (flags & LOAD_DATA)) ? DR(reg) : MOVABLE_INS;
781 if (!base)
782 return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | dest | S1(arg2) | IMM(0), delay_slot);
783 return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | dest | S1(base) | S2(arg2), delay_slot);
786 static SLJIT_INLINE sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg, sljit_s32 arg, sljit_sw argw)
788 if (getput_arg_fast(compiler, flags, reg, arg, argw))
789 return compiler->error;
790 compiler->cache_arg = 0;
791 compiler->cache_argw = 0;
792 return getput_arg(compiler, flags, reg, arg, argw, 0, 0);
795 static SLJIT_INLINE sljit_s32 emit_op_mem2(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg, sljit_s32 arg1, sljit_sw arg1w, sljit_s32 arg2, sljit_sw arg2w)
797 if (getput_arg_fast(compiler, flags, reg, arg1, arg1w))
798 return compiler->error;
799 return getput_arg(compiler, flags, reg, arg1, arg1w, arg2, arg2w);
802 static sljit_s32 emit_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 flags,
803 sljit_s32 dst, sljit_sw dstw,
804 sljit_s32 src1, sljit_sw src1w,
805 sljit_s32 src2, sljit_sw src2w)
807 /* arg1 goes to TMP_REG1 or src reg
808 arg2 goes to TMP_REG2, imm or src reg
809 TMP_REG3 can be used for caching
810 result goes to TMP_REG2, so storing the result can use TMP_REG1 and TMP_REG3. */
811 sljit_s32 dst_r = TMP_REG2;
812 sljit_s32 src1_r;
813 sljit_sw src2_r = 0;
814 sljit_s32 sugg_src2_r = TMP_REG2;
816 if (!(flags & ALT_KEEP_CACHE)) {
817 compiler->cache_arg = 0;
818 compiler->cache_argw = 0;
821 if (dst != TMP_REG2) {
822 if (FAST_IS_REG(dst)) {
823 dst_r = dst;
824 flags |= REG_DEST;
825 if (flags & MOVE_OP)
826 sugg_src2_r = dst_r;
828 else if ((dst & SLJIT_MEM) && !getput_arg_fast(compiler, flags | ARG_TEST, TMP_REG1, dst, dstw))
829 flags |= SLOW_DEST;
832 if (flags & IMM_OP) {
833 if ((src2 & SLJIT_IMM) && src2w) {
834 if (src2w <= SIMM_MAX && src2w >= SIMM_MIN) {
835 flags |= SRC2_IMM;
836 src2_r = src2w;
839 if (!(flags & SRC2_IMM) && (flags & CUMULATIVE_OP) && (src1 & SLJIT_IMM) && src1w) {
840 if (src1w <= SIMM_MAX && src1w >= SIMM_MIN) {
841 flags |= SRC2_IMM;
842 src2_r = src1w;
844 /* And swap arguments. */
845 src1 = src2;
846 src1w = src2w;
847 src2 = SLJIT_IMM;
848 /* src2w = src2_r unneeded. */
853 /* Source 1. */
854 if (FAST_IS_REG(src1))
855 src1_r = src1;
856 else if (src1 & SLJIT_IMM) {
857 if (src1w) {
858 FAIL_IF(load_immediate(compiler, TMP_REG1, src1w));
859 src1_r = TMP_REG1;
861 else
862 src1_r = 0;
864 else {
865 if (getput_arg_fast(compiler, flags | LOAD_DATA, TMP_REG1, src1, src1w))
866 FAIL_IF(compiler->error);
867 else
868 flags |= SLOW_SRC1;
869 src1_r = TMP_REG1;
872 /* Source 2. */
873 if (FAST_IS_REG(src2)) {
874 src2_r = src2;
875 flags |= REG2_SOURCE;
876 if ((flags & (REG_DEST | MOVE_OP)) == MOVE_OP)
877 dst_r = src2_r;
879 else if (src2 & SLJIT_IMM) {
880 if (!(flags & SRC2_IMM)) {
881 if (src2w) {
882 FAIL_IF(load_immediate(compiler, sugg_src2_r, src2w));
883 src2_r = sugg_src2_r;
885 else {
886 src2_r = 0;
887 if (flags & MOVE_OP) {
888 if (dst & SLJIT_MEM)
889 dst_r = 0;
890 else
891 op = SLJIT_MOV;
896 else {
897 if (getput_arg_fast(compiler, flags | LOAD_DATA, sugg_src2_r, src2, src2w))
898 FAIL_IF(compiler->error);
899 else
900 flags |= SLOW_SRC2;
901 src2_r = sugg_src2_r;
904 if ((flags & (SLOW_SRC1 | SLOW_SRC2)) == (SLOW_SRC1 | SLOW_SRC2)) {
905 SLJIT_ASSERT(src2_r == TMP_REG2);
906 if (!can_cache(src1, src1w, src2, src2w) && can_cache(src1, src1w, dst, dstw)) {
907 FAIL_IF(getput_arg(compiler, flags | LOAD_DATA, TMP_REG2, src2, src2w, src1, src1w));
908 FAIL_IF(getput_arg(compiler, flags | LOAD_DATA, TMP_REG1, src1, src1w, dst, dstw));
910 else {
911 FAIL_IF(getput_arg(compiler, flags | LOAD_DATA, TMP_REG1, src1, src1w, src2, src2w));
912 FAIL_IF(getput_arg(compiler, flags | LOAD_DATA, TMP_REG2, src2, src2w, dst, dstw));
915 else if (flags & SLOW_SRC1)
916 FAIL_IF(getput_arg(compiler, flags | LOAD_DATA, TMP_REG1, src1, src1w, dst, dstw));
917 else if (flags & SLOW_SRC2)
918 FAIL_IF(getput_arg(compiler, flags | LOAD_DATA, sugg_src2_r, src2, src2w, dst, dstw));
920 FAIL_IF(emit_single_op(compiler, op, flags, dst_r, src1_r, src2_r));
922 if (dst & SLJIT_MEM) {
923 if (!(flags & SLOW_DEST)) {
924 getput_arg_fast(compiler, flags, dst_r, dst, dstw);
925 return compiler->error;
927 return getput_arg(compiler, flags, dst_r, dst, dstw, 0, 0);
930 return SLJIT_SUCCESS;
933 SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compiler, sljit_s32 op)
935 CHECK_ERROR();
936 CHECK(check_sljit_emit_op0(compiler, op));
938 op = GET_OPCODE(op);
939 switch (op) {
940 case SLJIT_BREAKPOINT:
941 return push_inst(compiler, TA, UNMOVABLE_INS);
942 case SLJIT_NOP:
943 return push_inst(compiler, NOP, UNMOVABLE_INS);
944 case SLJIT_LMUL_UW:
945 case SLJIT_LMUL_SW:
946 #if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
947 FAIL_IF(push_inst(compiler, (op == SLJIT_LMUL_UW ? UMUL : SMUL) | D(SLJIT_R0) | S1(SLJIT_R0) | S2(SLJIT_R1), DR(SLJIT_R0)));
948 return push_inst(compiler, RDY | D(SLJIT_R1), DR(SLJIT_R1));
949 #else
950 #error "Implementation required"
951 #endif
952 case SLJIT_DIVMOD_UW:
953 case SLJIT_DIVMOD_SW:
954 case SLJIT_DIV_UW:
955 case SLJIT_DIV_SW:
956 SLJIT_COMPILE_ASSERT((SLJIT_DIVMOD_UW & 0x2) == 0 && SLJIT_DIV_UW - 0x2 == SLJIT_DIVMOD_UW, bad_div_opcode_assignments);
957 #if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
958 if ((op | 0x2) == SLJIT_DIV_UW)
959 FAIL_IF(push_inst(compiler, WRY | S1(0), MOVABLE_INS));
960 else {
961 FAIL_IF(push_inst(compiler, SRA | D(TMP_REG1) | S1(SLJIT_R0) | IMM(31), DR(TMP_REG1)));
962 FAIL_IF(push_inst(compiler, WRY | S1(TMP_REG1), MOVABLE_INS));
964 if (op <= SLJIT_DIVMOD_SW)
965 FAIL_IF(push_inst(compiler, OR | D(TMP_REG2) | S1(0) | S2(SLJIT_R0), DR(TMP_REG2)));
966 FAIL_IF(push_inst(compiler, ((op | 0x2) == SLJIT_DIV_UW ? UDIV : SDIV) | D(SLJIT_R0) | S1(SLJIT_R0) | S2(SLJIT_R1), DR(SLJIT_R0)));
967 if (op >= SLJIT_DIV_UW)
968 return SLJIT_SUCCESS;
969 FAIL_IF(push_inst(compiler, SMUL | D(SLJIT_R1) | S1(SLJIT_R0) | S2(SLJIT_R1), DR(SLJIT_R1)));
970 return push_inst(compiler, SUB | D(SLJIT_R1) | S1(TMP_REG2) | S2(SLJIT_R1), DR(SLJIT_R1));
971 #else
972 #error "Implementation required"
973 #endif
974 case SLJIT_ENDBR:
975 case SLJIT_SKIP_FRAMES_BEFORE_RETURN:
976 return SLJIT_SUCCESS;
979 return SLJIT_SUCCESS;
982 SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compiler, sljit_s32 op,
983 sljit_s32 dst, sljit_sw dstw,
984 sljit_s32 src, sljit_sw srcw)
986 sljit_s32 flags = HAS_FLAGS(op) ? SET_FLAGS : 0;
988 CHECK_ERROR();
989 CHECK(check_sljit_emit_op1(compiler, op, dst, dstw, src, srcw));
990 ADJUST_LOCAL_OFFSET(dst, dstw);
991 ADJUST_LOCAL_OFFSET(src, srcw);
993 op = GET_OPCODE(op);
994 switch (op) {
995 case SLJIT_MOV:
996 #if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
997 case SLJIT_MOV_U32:
998 case SLJIT_MOV_S32:
999 case SLJIT_MOV32:
1000 #endif
1001 case SLJIT_MOV_P:
1002 return emit_op(compiler, SLJIT_MOV, flags | WORD_DATA | MOVE_OP, dst, dstw, TMP_REG1, 0, src, srcw);
1004 case SLJIT_MOV_U8:
1005 return emit_op(compiler, SLJIT_MOV_U8, flags | BYTE_DATA | MOVE_OP, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_u8)srcw : srcw);
1007 case SLJIT_MOV_S8:
1008 return emit_op(compiler, SLJIT_MOV_S8, flags | BYTE_DATA | SIGNED_DATA | MOVE_OP, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_s8)srcw : srcw);
1010 case SLJIT_MOV_U16:
1011 return emit_op(compiler, SLJIT_MOV_U16, flags | HALF_DATA | MOVE_OP, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_u16)srcw : srcw);
1013 case SLJIT_MOV_S16:
1014 return emit_op(compiler, SLJIT_MOV_S16, flags | HALF_DATA | SIGNED_DATA | MOVE_OP, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_s16)srcw : srcw);
1016 case SLJIT_NOT:
1017 case SLJIT_CLZ:
1018 return emit_op(compiler, op, flags, dst, dstw, TMP_REG1, 0, src, srcw);
1020 case SLJIT_NEG:
1021 return emit_op(compiler, SLJIT_SUB, flags | IMM_OP, dst, dstw, SLJIT_IMM, 0, src, srcw);
1024 return SLJIT_SUCCESS;
1027 SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compiler, sljit_s32 op,
1028 sljit_s32 dst, sljit_sw dstw,
1029 sljit_s32 src1, sljit_sw src1w,
1030 sljit_s32 src2, sljit_sw src2w)
1032 sljit_s32 flags = HAS_FLAGS(op) ? SET_FLAGS : 0;
1034 CHECK_ERROR();
1035 CHECK(check_sljit_emit_op2(compiler, op, 0, dst, dstw, src1, src1w, src2, src2w));
1036 ADJUST_LOCAL_OFFSET(dst, dstw);
1037 ADJUST_LOCAL_OFFSET(src1, src1w);
1038 ADJUST_LOCAL_OFFSET(src2, src2w);
1040 op = GET_OPCODE(op);
1041 switch (op) {
1042 case SLJIT_ADD:
1043 case SLJIT_ADDC:
1044 case SLJIT_MUL:
1045 case SLJIT_AND:
1046 case SLJIT_OR:
1047 case SLJIT_XOR:
1048 return emit_op(compiler, op, flags | CUMULATIVE_OP | IMM_OP, dst, dstw, src1, src1w, src2, src2w);
1050 case SLJIT_SUB:
1051 case SLJIT_SUBC:
1052 return emit_op(compiler, op, flags | IMM_OP, dst, dstw, src1, src1w, src2, src2w);
1054 case SLJIT_SHL:
1055 case SLJIT_LSHR:
1056 case SLJIT_ASHR:
1057 #if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
1058 if (src2 & SLJIT_IMM)
1059 src2w &= 0x1f;
1060 #else
1061 SLJIT_UNREACHABLE();
1062 #endif
1063 return emit_op(compiler, op, flags | IMM_OP, dst, dstw, src1, src1w, src2, src2w);
1066 return SLJIT_SUCCESS;
1069 SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2u(struct sljit_compiler *compiler, sljit_s32 op,
1070 sljit_s32 src1, sljit_sw src1w,
1071 sljit_s32 src2, sljit_sw src2w)
1073 CHECK_ERROR();
1074 CHECK(check_sljit_emit_op2(compiler, op, 1, 0, 0, src1, src1w, src2, src2w));
1076 #if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
1077 || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
1078 compiler->skip_checks = 1;
1079 #endif
1080 return sljit_emit_op2(compiler, op, TMP_REG2, 0, src1, src1w, src2, src2w);
1083 SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_src(struct sljit_compiler *compiler, sljit_s32 op,
1084 sljit_s32 src, sljit_sw srcw)
1086 CHECK_ERROR();
1087 CHECK(check_sljit_emit_op_src(compiler, op, src, srcw));
1088 ADJUST_LOCAL_OFFSET(src, srcw);
1090 switch (op) {
1091 case SLJIT_FAST_RETURN:
1092 if (FAST_IS_REG(src))
1093 FAIL_IF(push_inst(compiler, OR | D(TMP_LINK) | S1(0) | S2(src), DR(TMP_LINK)));
1094 else
1095 FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, TMP_LINK, src, srcw));
1097 FAIL_IF(push_inst(compiler, JMPL | D(0) | S1(TMP_LINK) | IMM(8), UNMOVABLE_INS));
1098 return push_inst(compiler, NOP, UNMOVABLE_INS);
1099 case SLJIT_SKIP_FRAMES_BEFORE_FAST_RETURN:
1100 case SLJIT_PREFETCH_L1:
1101 case SLJIT_PREFETCH_L2:
1102 case SLJIT_PREFETCH_L3:
1103 case SLJIT_PREFETCH_ONCE:
1104 return SLJIT_SUCCESS;
1107 return SLJIT_SUCCESS;
1110 SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_register_index(sljit_s32 reg)
1112 CHECK_REG_INDEX(check_sljit_get_register_index(reg));
1113 return reg_map[reg];
1116 SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_float_register_index(sljit_s32 reg)
1118 CHECK_REG_INDEX(check_sljit_get_float_register_index(reg));
1119 return freg_map[reg];
1122 SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_custom(struct sljit_compiler *compiler,
1123 void *instruction, sljit_s32 size)
1125 CHECK_ERROR();
1126 CHECK(check_sljit_emit_op_custom(compiler, instruction, size));
1128 return push_inst(compiler, *(sljit_ins*)instruction, UNMOVABLE_INS);
1131 /* --------------------------------------------------------------------- */
1132 /* Floating point operators */
1133 /* --------------------------------------------------------------------- */
1135 #define FLOAT_DATA(op) (DOUBLE_DATA | ((op & SLJIT_32) >> 7))
1136 #define SELECT_FOP(op, single, double) ((op & SLJIT_32) ? single : double)
1137 #define FLOAT_TMP_MEM_OFFSET (22 * sizeof(sljit_sw))
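/* Example of the flag arithmetic: SLJIT_32 is 0x100, so (op & SLJIT_32) >> 7
   is 0x2 and FLOAT_DATA(op) yields SINGLE_DATA (0x12) for 32-bit operations
   and DOUBLE_DATA (0x10) otherwise, while SELECT_FOP(op, FADDS, FADDD) picks
   the matching FPop.  FLOAT_TMP_MEM_OFFSET names a scratch stack slot that
   the code below uses to move values between the integer and floating point
   register files through memory. */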
1139 static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_sw_from_f64(struct sljit_compiler *compiler, sljit_s32 op,
1140 sljit_s32 dst, sljit_sw dstw,
1141 sljit_s32 src, sljit_sw srcw)
1143 if (src & SLJIT_MEM) {
1144 FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src, srcw, dst, dstw));
1145 src = TMP_FREG1;
1148 FAIL_IF(push_inst(compiler, SELECT_FOP(op, FSTOI, FDTOI) | FD(TMP_FREG1) | FS2(src), MOVABLE_INS));
1150 if (FAST_IS_REG(dst)) {
1151 FAIL_IF(emit_op_mem2(compiler, SINGLE_DATA, TMP_FREG1, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET));
1152 return emit_op_mem2(compiler, WORD_DATA | LOAD_DATA, dst, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET);
1155 /* Store the integer value from an FPU register. */
1156 return emit_op_mem2(compiler, SINGLE_DATA, TMP_FREG1, dst, dstw, 0, 0);
1159 static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_compiler *compiler, sljit_s32 op,
1160 sljit_s32 dst, sljit_sw dstw,
1161 sljit_s32 src, sljit_sw srcw)
1163 sljit_s32 dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;
1165 if (src & SLJIT_IMM) {
1166 #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
1167 if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32)
1168 srcw = (sljit_s32)srcw;
1169 #endif
1170 FAIL_IF(load_immediate(compiler, TMP_REG1, srcw));
1171 src = TMP_REG1;
1172 srcw = 0;
1175 if (FAST_IS_REG(src)) {
1176 FAIL_IF(emit_op_mem2(compiler, WORD_DATA, src, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET));
1177 src = SLJIT_MEM1(SLJIT_SP);
1178 srcw = FLOAT_TMP_MEM_OFFSET;
1181 FAIL_IF(emit_op_mem2(compiler, SINGLE_DATA | LOAD_DATA, TMP_FREG1, src, srcw, dst, dstw));
1182 FAIL_IF(push_inst(compiler, SELECT_FOP(op, FITOS, FITOD) | FD(dst_r) | FS2(TMP_FREG1), MOVABLE_INS));
1184 if (dst & SLJIT_MEM)
1185 return emit_op_mem2(compiler, FLOAT_DATA(op), TMP_FREG1, dst, dstw, 0, 0);
1186 return SLJIT_SUCCESS;
1189 static SLJIT_INLINE sljit_s32 sljit_emit_fop1_cmp(struct sljit_compiler *compiler, sljit_s32 op,
1190 sljit_s32 src1, sljit_sw src1w,
1191 sljit_s32 src2, sljit_sw src2w)
1193 if (src1 & SLJIT_MEM) {
1194 FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w, src2, src2w));
1195 src1 = TMP_FREG1;
1198 if (src2 & SLJIT_MEM) {
1199 FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w, 0, 0));
1200 src2 = TMP_FREG2;
1203 return push_inst(compiler, SELECT_FOP(op, FCMPS, FCMPD) | FS1(src1) | FS2(src2), FCC_IS_SET | MOVABLE_INS);
1206 SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compiler, sljit_s32 op,
1207 sljit_s32 dst, sljit_sw dstw,
1208 sljit_s32 src, sljit_sw srcw)
1210 sljit_s32 dst_r;
1212 CHECK_ERROR();
1213 compiler->cache_arg = 0;
1214 compiler->cache_argw = 0;
1216 SLJIT_COMPILE_ASSERT((SLJIT_32 == 0x100) && !(DOUBLE_DATA & 0x2), float_transfer_bit_error);
1217 SELECT_FOP1_OPERATION_WITH_CHECKS(compiler, op, dst, dstw, src, srcw);
1219 if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_F32)
1220 op ^= SLJIT_32;
1222 dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;
1224 if (src & SLJIT_MEM) {
1225 FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op) | LOAD_DATA, dst_r, src, srcw, dst, dstw));
1226 src = dst_r;
1229 switch (GET_OPCODE(op)) {
1230 case SLJIT_MOV_F64:
1231 if (src != dst_r) {
1232 if (dst_r != TMP_FREG1) {
1233 FAIL_IF(push_inst(compiler, FMOVS | FD(dst_r) | FS2(src), MOVABLE_INS));
1234 if (!(op & SLJIT_32))
1235 FAIL_IF(push_inst(compiler, FMOVS | FDN(dst_r) | FS2N(src), MOVABLE_INS));
1237 else
1238 dst_r = src;
1240 break;
1241 case SLJIT_NEG_F64:
1242 FAIL_IF(push_inst(compiler, FNEGS | FD(dst_r) | FS2(src), MOVABLE_INS));
1243 if (dst_r != src && !(op & SLJIT_32))
1244 FAIL_IF(push_inst(compiler, FMOVS | FDN(dst_r) | FS2N(src), MOVABLE_INS));
1245 break;
1246 case SLJIT_ABS_F64:
1247 FAIL_IF(push_inst(compiler, FABSS | FD(dst_r) | FS2(src), MOVABLE_INS));
1248 if (dst_r != src && !(op & SLJIT_32))
1249 FAIL_IF(push_inst(compiler, FMOVS | FDN(dst_r) | FS2N(src), MOVABLE_INS));
1250 break;
1251 case SLJIT_CONV_F64_FROM_F32:
1252 FAIL_IF(push_inst(compiler, SELECT_FOP(op, FSTOD, FDTOS) | FD(dst_r) | FS2(src), MOVABLE_INS));
1253 op ^= SLJIT_32;
1254 break;
1257 if (dst & SLJIT_MEM)
1258 FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op), dst_r, dst, dstw, 0, 0));
1259 return SLJIT_SUCCESS;
1262 SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2(struct sljit_compiler *compiler, sljit_s32 op,
1263 sljit_s32 dst, sljit_sw dstw,
1264 sljit_s32 src1, sljit_sw src1w,
1265 sljit_s32 src2, sljit_sw src2w)
1267 sljit_s32 dst_r, flags = 0;
1269 CHECK_ERROR();
1270 CHECK(check_sljit_emit_fop2(compiler, op, dst, dstw, src1, src1w, src2, src2w));
1271 ADJUST_LOCAL_OFFSET(dst, dstw);
1272 ADJUST_LOCAL_OFFSET(src1, src1w);
1273 ADJUST_LOCAL_OFFSET(src2, src2w);
1275 compiler->cache_arg = 0;
1276 compiler->cache_argw = 0;
1278 dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG2;
1280 if (src1 & SLJIT_MEM) {
1281 if (getput_arg_fast(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w)) {
1282 FAIL_IF(compiler->error);
1283 src1 = TMP_FREG1;
1284 } else
1285 flags |= SLOW_SRC1;
1288 if (src2 & SLJIT_MEM) {
1289 if (getput_arg_fast(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w)) {
1290 FAIL_IF(compiler->error);
1291 src2 = TMP_FREG2;
1292 } else
1293 flags |= SLOW_SRC2;
1296 if ((flags & (SLOW_SRC1 | SLOW_SRC2)) == (SLOW_SRC1 | SLOW_SRC2)) {
1297 if (!can_cache(src1, src1w, src2, src2w) && can_cache(src1, src1w, dst, dstw)) {
1298 FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w, src1, src1w));
1299 FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w, dst, dstw));
1301 else {
1302 FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w, src2, src2w));
1303 FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w, dst, dstw));
1306 else if (flags & SLOW_SRC1)
1307 FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w, dst, dstw));
1308 else if (flags & SLOW_SRC2)
1309 FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w, dst, dstw));
1311 if (flags & SLOW_SRC1)
1312 src1 = TMP_FREG1;
1313 if (flags & SLOW_SRC2)
1314 src2 = TMP_FREG2;
1316 switch (GET_OPCODE(op)) {
1317 case SLJIT_ADD_F64:
1318 FAIL_IF(push_inst(compiler, SELECT_FOP(op, FADDS, FADDD) | FD(dst_r) | FS1(src1) | FS2(src2), MOVABLE_INS));
1319 break;
1321 case SLJIT_SUB_F64:
1322 FAIL_IF(push_inst(compiler, SELECT_FOP(op, FSUBS, FSUBD) | FD(dst_r) | FS1(src1) | FS2(src2), MOVABLE_INS));
1323 break;
1325 case SLJIT_MUL_F64:
1326 FAIL_IF(push_inst(compiler, SELECT_FOP(op, FMULS, FMULD) | FD(dst_r) | FS1(src1) | FS2(src2), MOVABLE_INS));
1327 break;
1329 case SLJIT_DIV_F64:
1330 FAIL_IF(push_inst(compiler, SELECT_FOP(op, FDIVS, FDIVD) | FD(dst_r) | FS1(src1) | FS2(src2), MOVABLE_INS));
1331 break;
1334 if (dst_r == TMP_FREG2)
1335 FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op), TMP_FREG2, dst, dstw, 0, 0));
1337 return SLJIT_SUCCESS;
1340 #undef FLOAT_DATA
1341 #undef SELECT_FOP
1343 /* --------------------------------------------------------------------- */
1344 /* Other instructions */
1345 /* --------------------------------------------------------------------- */
1347 SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw)
1349 CHECK_ERROR();
1350 CHECK(check_sljit_emit_fast_enter(compiler, dst, dstw));
1351 ADJUST_LOCAL_OFFSET(dst, dstw);
1353 if (FAST_IS_REG(dst))
1354 return push_inst(compiler, OR | D(dst) | S1(0) | S2(TMP_LINK), UNMOVABLE_INS);
1356 /* Memory. */
1357 FAIL_IF(emit_op_mem(compiler, WORD_DATA, TMP_LINK, dst, dstw));
1358 compiler->delay_slot = UNMOVABLE_INS;
1359 return SLJIT_SUCCESS;
1362 /* --------------------------------------------------------------------- */
1363 /* Conditional instructions */
1364 /* --------------------------------------------------------------------- */
1366 SLJIT_API_FUNC_ATTRIBUTE struct sljit_label* sljit_emit_label(struct sljit_compiler *compiler)
1368 struct sljit_label *label;
1370 CHECK_ERROR_PTR();
1371 CHECK_PTR(check_sljit_emit_label(compiler));
1373 if (compiler->last_label && compiler->last_label->size == compiler->size)
1374 return compiler->last_label;
1376 label = (struct sljit_label*)ensure_abuf(compiler, sizeof(struct sljit_label));
1377 PTR_FAIL_IF(!label);
1378 set_label(label, compiler);
1379 compiler->delay_slot = UNMOVABLE_INS;
1380 return label;
1383 static sljit_ins get_cc(struct sljit_compiler *compiler, sljit_s32 type)
1385 switch (type) {
1386 case SLJIT_EQUAL:
1387 case SLJIT_NOT_EQUAL_F64: /* Unordered. */
1388 return DA(0x1);
1390 case SLJIT_NOT_EQUAL:
1391 case SLJIT_EQUAL_F64:
1392 return DA(0x9);
1394 case SLJIT_LESS:
1395 case SLJIT_GREATER_F64: /* Unordered. */
1396 return DA(0x5);
1398 case SLJIT_GREATER_EQUAL:
1399 case SLJIT_LESS_EQUAL_F64:
1400 return DA(0xd);
1402 case SLJIT_GREATER:
1403 case SLJIT_GREATER_EQUAL_F64: /* Unordered. */
1404 return DA(0xc);
1406 case SLJIT_LESS_EQUAL:
1407 case SLJIT_LESS_F64:
1408 return DA(0x4);
1410 case SLJIT_SIG_LESS:
1411 return DA(0x3);
1413 case SLJIT_SIG_GREATER_EQUAL:
1414 return DA(0xb);
1416 case SLJIT_SIG_GREATER:
1417 return DA(0xa);
1419 case SLJIT_SIG_LESS_EQUAL:
1420 return DA(0x2);
1422 case SLJIT_OVERFLOW:
1423 if (!(compiler->status_flags_state & SLJIT_CURRENT_FLAGS_ADD_SUB))
1424 return DA(0x9);
1426 case SLJIT_UNORDERED_F64:
1427 return DA(0x7);
1429 case SLJIT_NOT_OVERFLOW:
1430 if (!(compiler->status_flags_state & SLJIT_CURRENT_FLAGS_ADD_SUB))
1431 return DA(0x1);
1433 case SLJIT_ORDERED_F64:
1434 return DA(0xf);
1436 default:
1437 SLJIT_UNREACHABLE();
1438 return DA(0x8);
1442 SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compiler *compiler, sljit_s32 type)
1444 struct sljit_jump *jump;
1446 CHECK_ERROR_PTR();
1447 CHECK_PTR(check_sljit_emit_jump(compiler, type));
1449 jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump));
1450 PTR_FAIL_IF(!jump);
1451 set_jump(jump, compiler, type & SLJIT_REWRITABLE_JUMP);
1452 type &= 0xff;
1454 if (type < SLJIT_EQUAL_F64) {
1455 jump->flags |= IS_COND;
1456 if (((compiler->delay_slot & DST_INS_MASK) != UNMOVABLE_INS) && !(compiler->delay_slot & ICC_IS_SET))
1457 jump->flags |= IS_MOVABLE;
1458 #if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
1459 PTR_FAIL_IF(push_inst(compiler, BICC | get_cc(compiler, type ^ 1) | 5, UNMOVABLE_INS));
1460 #else
1461 #error "Implementation required"
1462 #endif
1464 else if (type < SLJIT_JUMP) {
1465 jump->flags |= IS_COND;
1466 if (((compiler->delay_slot & DST_INS_MASK) != UNMOVABLE_INS) && !(compiler->delay_slot & FCC_IS_SET))
1467 jump->flags |= IS_MOVABLE;
1468 #if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
1469 PTR_FAIL_IF(push_inst(compiler, FBFCC | get_cc(compiler, type ^ 1) | 5, UNMOVABLE_INS));
1470 #else
1471 #error "Implementation required"
1472 #endif
1474 else {
1475 if ((compiler->delay_slot & DST_INS_MASK) != UNMOVABLE_INS)
1476 jump->flags |= IS_MOVABLE;
1477 if (type >= SLJIT_FAST_CALL)
1478 jump->flags |= IS_CALL;
1481 PTR_FAIL_IF(emit_const(compiler, TMP_REG1, 0));
1482 PTR_FAIL_IF(push_inst(compiler, JMPL | D(type >= SLJIT_FAST_CALL ? TMP_LINK : 0) | S1(TMP_REG1) | IMM(0), UNMOVABLE_INS));
1483 jump->addr = compiler->size;
1484 PTR_FAIL_IF(push_inst(compiler, NOP, UNMOVABLE_INS));
1486 return jump;
1489 SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compiler *compiler, sljit_s32 type,
1490 sljit_s32 arg_types)
1492 CHECK_ERROR_PTR();
1493 CHECK_PTR(check_sljit_emit_call(compiler, type, arg_types));
1495 PTR_FAIL_IF(call_with_args(compiler, arg_types, NULL));
1497 #if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
1498 || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
1499 compiler->skip_checks = 1;
1500 #endif
1502 return sljit_emit_jump(compiler, type);
1505 SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 src, sljit_sw srcw)
1507 struct sljit_jump *jump = NULL;
1508 sljit_s32 src_r;
1510 CHECK_ERROR();
1511 CHECK(check_sljit_emit_ijump(compiler, type, src, srcw));
1512 ADJUST_LOCAL_OFFSET(src, srcw);
1514 if (FAST_IS_REG(src))
1515 src_r = src;
1516 else if (src & SLJIT_IMM) {
1517 jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump));
1518 FAIL_IF(!jump);
1519 set_jump(jump, compiler, JUMP_ADDR);
1520 jump->u.target = srcw;
1522 if ((compiler->delay_slot & DST_INS_MASK) != UNMOVABLE_INS)
1523 jump->flags |= IS_MOVABLE;
1524 if (type >= SLJIT_FAST_CALL)
1525 jump->flags |= IS_CALL;
1527 FAIL_IF(emit_const(compiler, TMP_REG1, 0));
1528 src_r = TMP_REG1;
1530 else {
1531 FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, TMP_REG1, src, srcw));
1532 src_r = TMP_REG1;
1535 FAIL_IF(push_inst(compiler, JMPL | D(type >= SLJIT_FAST_CALL ? TMP_LINK : 0) | S1(src_r) | IMM(0), UNMOVABLE_INS));
1536 if (jump)
1537 jump->addr = compiler->size;
1538 return push_inst(compiler, NOP, UNMOVABLE_INS);
1541 SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compiler, sljit_s32 type,
1542 sljit_s32 arg_types,
1543 sljit_s32 src, sljit_sw srcw)
1545 CHECK_ERROR();
1546 CHECK(check_sljit_emit_icall(compiler, type, arg_types, src, srcw));
1548 if (src & SLJIT_MEM) {
1549 ADJUST_LOCAL_OFFSET(src, srcw);
1550 FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, TMP_REG1, src, srcw));
1551 src = TMP_REG1;
1554 FAIL_IF(call_with_args(compiler, arg_types, &src));
1556 #if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
1557 || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
1558 compiler->skip_checks = 1;
1559 #endif
1561 return sljit_emit_ijump(compiler, type, src, srcw);
1564 SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *compiler, sljit_s32 op,
1565 sljit_s32 dst, sljit_sw dstw,
1566 sljit_s32 type)
1568 sljit_s32 reg, flags = HAS_FLAGS(op) ? SET_FLAGS : 0;
1570 CHECK_ERROR();
1571 CHECK(check_sljit_emit_op_flags(compiler, op, dst, dstw, type));
1572 ADJUST_LOCAL_OFFSET(dst, dstw);
1574 #if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
1575 op = GET_OPCODE(op);
1576 reg = (op < SLJIT_ADD && FAST_IS_REG(dst)) ? dst : TMP_REG2;
1578 compiler->cache_arg = 0;
1579 compiler->cache_argw = 0;
1581 if (op >= SLJIT_ADD && (dst & SLJIT_MEM))
1582 FAIL_IF(emit_op_mem2(compiler, WORD_DATA | LOAD_DATA, TMP_REG1, dst, dstw, dst, dstw));
1584 type &= 0xff;
1585 if (type < SLJIT_EQUAL_F64)
1586 FAIL_IF(push_inst(compiler, BICC | get_cc(compiler, type) | 3, UNMOVABLE_INS));
1587 else
1588 FAIL_IF(push_inst(compiler, FBFCC | get_cc(compiler, type) | 3, UNMOVABLE_INS));
1590 FAIL_IF(push_inst(compiler, OR | D(reg) | S1(0) | IMM(1), UNMOVABLE_INS));
1591 FAIL_IF(push_inst(compiler, OR | D(reg) | S1(0) | IMM(0), UNMOVABLE_INS));
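/* The three instructions above materialize the flag bit: the conditional
   branch targets the instruction after the second OR (displacement 3), its
   delay slot always executes the OR that writes 1, and only the fall-through
   path reaches the OR that writes 0, so reg becomes 1 when the condition
   holds and 0 otherwise. */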
1593 if (op >= SLJIT_ADD) {
1594 flags |= CUMULATIVE_OP | IMM_OP | ALT_KEEP_CACHE;
1595 if (dst & SLJIT_MEM)
1596 return emit_op(compiler, op, flags, dst, dstw, TMP_REG1, 0, TMP_REG2, 0);
1597 return emit_op(compiler, op, flags, dst, 0, dst, 0, TMP_REG2, 0);
1600 if (!(dst & SLJIT_MEM))
1601 return SLJIT_SUCCESS;
1603 return emit_op_mem(compiler, WORD_DATA, TMP_REG2, dst, dstw);
1604 #else
1605 #error "Implementation required"
1606 #endif
1609 SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compiler *compiler, sljit_s32 type,
1610 sljit_s32 dst_reg,
1611 sljit_s32 src, sljit_sw srcw)
1613 CHECK_ERROR();
1614 CHECK(check_sljit_emit_cmov(compiler, type, dst_reg, src, srcw));
1616 #if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
1617 return sljit_emit_cmov_generic(compiler, type, dst_reg, src, srcw);
1618 #else
1619 #error "Implementation required"
1620 #endif
1623 SLJIT_API_FUNC_ATTRIBUTE struct sljit_const* sljit_emit_const(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw, sljit_sw init_value)
1625 struct sljit_const *const_;
1626 sljit_s32 dst_r;
1628 CHECK_ERROR_PTR();
1629 CHECK_PTR(check_sljit_emit_const(compiler, dst, dstw, init_value));
1630 ADJUST_LOCAL_OFFSET(dst, dstw);
1632 const_ = (struct sljit_const*)ensure_abuf(compiler, sizeof(struct sljit_const));
1633 PTR_FAIL_IF(!const_);
1634 set_const(const_, compiler);
1636 dst_r = FAST_IS_REG(dst) ? dst : TMP_REG2;
1637 PTR_FAIL_IF(emit_const(compiler, dst_r, init_value));
1639 if (dst & SLJIT_MEM)
1640 PTR_FAIL_IF(emit_op_mem(compiler, WORD_DATA, TMP_REG2, dst, dstw));
1641 return const_;
1644 SLJIT_API_FUNC_ATTRIBUTE struct sljit_put_label* sljit_emit_put_label(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw)
1646 struct sljit_put_label *put_label;
1647 sljit_s32 dst_r;
1649 CHECK_ERROR_PTR();
1650 CHECK_PTR(check_sljit_emit_put_label(compiler, dst, dstw));
1651 ADJUST_LOCAL_OFFSET(dst, dstw);
1653 put_label = (struct sljit_put_label*)ensure_abuf(compiler, sizeof(struct sljit_put_label));
1654 PTR_FAIL_IF(!put_label);
1655 set_put_label(put_label, compiler, 0);
1657 dst_r = FAST_IS_REG(dst) ? dst : TMP_REG2;
1658 PTR_FAIL_IF(emit_const(compiler, dst_r, 0));
1660 if (dst & SLJIT_MEM)
1661 PTR_FAIL_IF(emit_op_mem(compiler, WORD_DATA, TMP_REG2, dst, dstw));
1662 return put_label;