 * mini-sparc.c: Sparc backend for the Mono code generator
 *
 * Paolo Molaro (lupus@ximian.com)
 * Dietmar Maurer (dietmar@ximian.com)
 * Christopher Taylor (ct@gentoo.org)
 * Mark Crichton (crichton@gimp.org)
 * Zoltan Varga (vargaz@freemail.hu)
 *
 * (C) 2003 Ximian, Inc.
 */

#include <sys/systeminfo.h>

#include <mono/metadata/appdomain.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/tokentype.h>
#include <mono/utils/mono-math.h>

#include "mini-sparc.h"
#include "cpu-sparc.h"
#include "jit-icalls.h"
/*
 * Sparc V9 means two things:
 * - the instruction set
 * - the ABI
 *
 * V9 instructions are only usable if the underlying processor is 64 bit. Most Sparc
 * processors in use are 64 bit processors. The V9 ABI is only usable if the
 * mono executable is a 64 bit executable. So it would make sense to use the 64 bit
 * instructions without using the 64 bit ABI.
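 *
 * Added illustration (sketch, not from the original source): instruction selection can be
 * gated on the runtime sparcv9 flag while the ABI is fixed by how mono was built (the v64
 * flag below), e.g.
 *
 *   if (sparcv9)
 *       sparc_branchp (code, 0, cond, sparc_icc_short, 0, disp);  /+ V9 branch with prediction +/
 *   else
 *       sparc_branch (code, 0, cond, disp);                       /+ V8 branch +/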
 *
 * Register usage:
 * - %i0..%i<n> hold the incoming arguments; these are never written by JITted
 *   code. Unused input registers are used for global register allocation.
 * - %o0..%o5 and %l7 are used for local register allocation and for passing arguments.
 * - %l0..%l6 are used for global register allocation.
 * - %o7 and %g1 are used as scratch registers in opcodes.
 * - all floating point registers are used for local register allocation except %f0.
 *   Only double precision registers are used.
 * - on V9, fp registers %d0..%d30 are used for parameter passing, and %d32..%d62 are
 *   used for local allocation.
 *
 * - doubles and longs must be stored in dword aligned locations.
 *
 * The following things are not implemented or do not work:
 * - some fp arithmetic corner cases.
 * The following tests in mono/mini are expected to fail:
 * - test_0_simple_double_casts
 *   This test casts (guint64)-1 to double and then back to guint64 again.
 *   Under x86, it returns 0, while under sparc it returns -1.
 *
 * In addition to this, the runtime requires the trunc function, or its
 * solaris counterpart, aintl, to do some double->int conversions. If this
 * function is not available, it is emulated somewhat, but the results can be
 * inaccurate.
 *
 * TODO:
 * - optimize sparc_set according to the memory model.
 * - when non-AOT compiling, compute patch targets immediately so we don't
 *   have to emit the 6 instruction template.
 * - struct arguments/returns.
 *
 * - sparc_call_simple can't be used in a lot of places since the displacement
 *   might not fit into an imm30.
 * - g1 can't be used in a lot of places since it is used as a scratch reg in
 *   some opcodes.
 * - sparc_f0 can't be used as a scratch register on V9.
 * - the %d34..%d62 fp registers are encoded as: %dx = %f(x - 32 + 1).
 * - ldind.i4/u4 needs to sign extend/clear out the upper word -> slows things down.
 * - ins->dreg can't be used as a scratch register in r4 opcodes since it might
 *   be a double precision register which has no single precision part.
 * - passing/returning structs is hard to implement, because:
 *   - the spec is very hard to understand;
 *   - it requires knowledge about the fields of the structure, and needs to handle
 *     nested structures etc.
 *
 * Possible optimizations:
 * - delay slot scheduling
 * - allocate large constants to registers
 * - add more mul/div/rem optimizations
#define MONO_SPARC_THR_TLS 1

/*
 * There was a 64 bit bug in glib-2.2: g_bit_nth_msf (0, -1) would return 32,
 * causing infinite loops in dominator computation. So glib-2.4 is required.
 */
#if GLIB_MAJOR_VERSION == 2 && GLIB_MINOR_VERSION < 4
#error "glib 2.4 or later is required for 64 bit mode."
#endif

#define ALIGN_TO(val,align) (((val) + ((align) - 1)) & ~((align) - 1))

#define SIGNAL_STACK_SIZE (64 * 1024)
#define STACK_BIAS MONO_SPARC_STACK_BIAS

#ifdef SPARCV9

/* %g1 is used by sparc_set */
#define GP_SCRATCH_REG sparc_g4
/* %f0 is used for parameter passing */
#define FP_SCRATCH_REG sparc_f30
#define ARGS_OFFSET (STACK_BIAS + 128)

#else

#define FP_SCRATCH_REG sparc_f0
#define ARGS_OFFSET 68
#define GP_SCRATCH_REG sparc_g1

#endif
/* Whether the CPU supports V9 instructions */
static gboolean sparcv9 = FALSE;

/* Whether this is a 64 bit executable */
#ifdef SPARCV9
static gboolean v64 = TRUE;
#else
static gboolean v64 = FALSE;
#endif
static gpointer mono_arch_get_lmf_addr (void);

mono_arch_regname (int reg) {
	static const char * rnames [] = {
		"sparc_g0", "sparc_g1", "sparc_g2", "sparc_g3", "sparc_g4",
		"sparc_g5", "sparc_g6", "sparc_g7", "sparc_o0", "sparc_o1",
		"sparc_o2", "sparc_o3", "sparc_o4", "sparc_o5", "sparc_sp",
		"sparc_call", "sparc_l0", "sparc_l1", "sparc_l2", "sparc_l3",
		"sparc_l4", "sparc_l5", "sparc_l6", "sparc_l7", "sparc_i0",
		"sparc_i1", "sparc_i2", "sparc_i3", "sparc_i4", "sparc_i5",
		"sparc_fp", "sparc_retadr"
	};

	if (reg >= 0 && reg < 32)
		return rnames [reg];

mono_arch_fregname (int reg) {
	static const char *rnames [] = {
		"sparc_f0", "sparc_f1", "sparc_f2", "sparc_f3", "sparc_f4",
		"sparc_f5", "sparc_f6", "sparc_f7", "sparc_f8", "sparc_f9",
		"sparc_f10", "sparc_f11", "sparc_f12", "sparc_f13", "sparc_f14",
		"sparc_f15", "sparc_f16", "sparc_f17", "sparc_f18", "sparc_f19",
		"sparc_f20", "sparc_f21", "sparc_f22", "sparc_f23", "sparc_f24",
		"sparc_f25", "sparc_f26", "sparc_f27", "sparc_f28", "sparc_f29",
		"sparc_f30", "sparc_f31"
	};

	if (reg >= 0 && reg < 32)
		return rnames [reg];
/*
 * Initialize the cpu to execute managed code.
 */
mono_arch_cpu_init (void)
	/* make sure sparcv9 is initialized for embedded use */
	mono_arch_cpu_optimizazions(&dummy);

/*
 * Initialize architecture specific code.
 */
mono_arch_init (void)

/*
 * Cleanup architecture specific code.
 */
mono_arch_cleanup (void)

/*
 * This function returns the optimizations supported on this cpu.
 */
mono_arch_cpu_optimizazions (guint32 *exclude_mask)

	if (!sysinfo (SI_ISALIST, buf, 1024))
		g_assert_not_reached ();
	/* From glibc. If the getpagesize is 8192, we're on sparc64, which
	 * (in)directly implies that we're a v9 or better.
	 * Improvements to this are gratefully accepted...
	 * Also, we don't differentiate between v7 and v8. I sense SIGILL
	 * sniffing in my future.
	 */
	if (getpagesize() == 8192)
		strcpy (buf, "sparcv9");
	else
		strcpy (buf, "sparcv8");

	/*
	 * On some processors, the cmov instructions are even slower than the
	 * normal ones...
	 */
	if (strstr (buf, "sparcv9")) {
		opts |= MONO_OPT_CMOV | MONO_OPT_FCMOV;
	} else {
		*exclude_mask |= MONO_OPT_CMOV | MONO_OPT_FCMOV;
	}
#define flushi(addr) __asm__ __volatile__ ("iflush %0"::"r"(addr):"memory")
#else /* assume Sun's compiler */
static void flushi(void *addr)

void sync_instruction_memory(caddr_t addr, int len);

mono_arch_flush_icache (guint8 *code, gint size)
	/* Hopefully this is optimized based on the actual CPU */
	sync_instruction_memory (code, size);

	gulong start = (gulong) code;
	gulong end = start + size;
	/* Sparcv9 chips only need flushes on 32 byte
	 * cacheline boundaries.
	 *
	 * Sparcv8 needs a flush every 8 bytes.
	 */
	align = (sparcv9 ? 32 : 8);

	start &= ~(align - 1);
	end = (end + (align - 1)) & ~(align - 1);

	while (start < end) {
		__asm__ __volatile__ ("iflush %0"::"r"(start));
/*
 * Flush all register windows to memory. Every register window is saved to
 * a 16 word area on the stack pointed to by its %sp register.
 */
mono_sparc_flushw (void)
	static guint32 start [64];
	static int inited = 0;
	static void (*flushw) (void);

	sparc_save_imm (code, sparc_sp, -160, sparc_sp);
	sparc_restore_simple (code);

	g_assert ((code - start) < 64);

	mono_arch_flush_icache ((guint8*)start, (guint8*)code - (guint8*)start);

	flushw = (gpointer)start;

mono_arch_flush_register_windows (void)
	mono_sparc_flushw ();
mono_arch_is_inst_imm (gint64 imm)
	return sparc_is_imm13 (imm);
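	/*
	 * Added note (illustration, not from the original source): sparc_is_imm13 () tests
	 * whether a value fits the signed 13 bit immediate field of SPARC ALU instructions,
	 * i.e. roughly the hypothetical
	 *
	 *   #define fits_imm13(v) ((v) >= -4096 && (v) <= 4095)
	 */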
mono_sparc_is_v9 (void) {

mono_sparc_is_sparc64 (void) {

	ArgInFloatReg,   /* V9 only */
	ArgInDoubleReg   /* V9 only */

	/* This needs to be offset by %i0 or %o0 depending on caller/callee */
	guint32 vt_offset; /* for valuetypes */
add_general (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo, gboolean pair)
	ainfo->offset = *stack_size;

	if (*gr >= PARAM_REGS) {
		ainfo->storage = ArgOnStack;
	} else {
		ainfo->storage = ArgInIReg;
	}

	/* Always reserve stack space for parameters passed in registers */
	(*stack_size) += sizeof (gpointer);

	if (*gr < PARAM_REGS - 1) {
		/* A pair of registers */
		ainfo->storage = ArgInIRegPair;
	} else if (*gr >= PARAM_REGS) {
		/* A pair of stack locations */
		ainfo->storage = ArgOnStackPair;
	} else {
		ainfo->storage = ArgInSplitRegStack;
	}

	(*stack_size) += 2 * sizeof (gpointer);
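/*
 * Added example (illustration based on the V8 convention described above, not code from
 * this file): for a signature like f (int a, long long b, int c), add_general () would
 * assign roughly
 *
 *   a -> %o0              ArgInIReg,     offset 0
 *   b -> %o1/%o2 pair     ArgInIRegPair, offset 4
 *   c -> %o3              ArgInIReg,     offset 12
 *
 * with stack space reserved for every slot even when the value travels in a register.
 */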
#define FLOAT_PARAM_REGS 32

add_float (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo, gboolean single)
	ainfo->offset = *stack_size;

	if (*gr >= FLOAT_PARAM_REGS) {
		ainfo->storage = ArgOnStack;
	} else {
		/* A single is passed in an even numbered fp register */
		ainfo->storage = ArgInFloatReg;
		ainfo->reg = *gr + 1;
	}

	if (*gr < FLOAT_PARAM_REGS) {
		/* A double register */
		ainfo->storage = ArgInDoubleReg;
	} else {
		ainfo->storage = ArgOnStack;
	}

	(*stack_size) += sizeof (gpointer);
/*
 * Obtain information about a call according to the calling convention.
 * For V8, see the "System V ABI, Sparc Processor Supplement" Sparc V8 version
 * document for more information.
 * For V9, see the "Low Level System Information (64-bit psABI)" chapter in
 * the 'Sparc Compliance Definition 2.4' document.
 */
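/*
 * Added usage sketch (assumption, not part of the original file): callers query the
 * returned CallInfo and free it when done, along the lines of
 *
 *   CallInfo *cinfo = get_call_info (cfg, sig, sig->pinvoke);
 *   guint32 outarg_size = cinfo->stack_usage;   (stack space needed for outgoing args)
 *   ArgInfo *first = &cinfo->args [0];          (where the first argument lives)
 *   g_free (cinfo);
 */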
488 get_call_info (MonoCompile
*cfg
, MonoMethodSignature
*sig
, gboolean is_pinvoke
)
491 int n
= sig
->hasthis
+ sig
->param_count
;
492 guint32 stack_size
= 0;
495 MonoGenericSharingContext
*gsctx
= cfg
? cfg
->generic_sharing_context
: NULL
;
497 cinfo
= g_malloc0 (sizeof (CallInfo
) + (sizeof (ArgInfo
) * n
));
503 if (MONO_TYPE_ISSTRUCT ((sig
->ret
))) {
504 /* The address of the return value is passed in %o0 */
505 add_general (&gr
, &stack_size
, &cinfo
->ret
, FALSE
);
506 cinfo
->ret
.reg
+= sparc_i0
;
512 add_general (&gr
, &stack_size
, cinfo
->args
+ 0, FALSE
);
514 if ((sig
->call_convention
== MONO_CALL_VARARG
) && (n
== 0)) {
517 /* Emit the signature cookie just before the implicit arguments */
518 add_general (&gr
, &stack_size
, &cinfo
->sig_cookie
, FALSE
);
521 for (i
= 0; i
< sig
->param_count
; ++i
) {
522 ArgInfo
*ainfo
= &cinfo
->args
[sig
->hasthis
+ i
];
525 if ((sig
->call_convention
== MONO_CALL_VARARG
) && (i
== sig
->sentinelpos
)) {
528 /* Emit the signature cookie just before the implicit arguments */
529 add_general (&gr
, &stack_size
, &cinfo
->sig_cookie
, FALSE
);
532 DEBUG(printf("param %d: ", i
));
533 if (sig
->params
[i
]->byref
) {
534 DEBUG(printf("byref\n"));
536 add_general (&gr
, &stack_size
, ainfo
, FALSE
);
539 ptype
= mono_type_get_underlying_type (sig
->params
[i
]);
540 ptype
= mini_get_basic_type_from_generic (gsctx
, ptype
);
541 switch (ptype
->type
) {
542 case MONO_TYPE_BOOLEAN
:
545 add_general (&gr
, &stack_size
, ainfo
, FALSE
);
546 /* the value is in the ls byte */
547 ainfo
->offset
+= sizeof (gpointer
) - 1;
552 add_general (&gr
, &stack_size
, ainfo
, FALSE
);
553 /* the value is in the ls word */
554 ainfo
->offset
+= sizeof (gpointer
) - 2;
558 add_general (&gr
, &stack_size
, ainfo
, FALSE
);
559 /* the value is in the ls dword */
560 ainfo
->offset
+= sizeof (gpointer
) - 4;
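			/*
			 * Added clarification (illustration): SPARC is big-endian, so a value narrower
			 * than a register sits at the high-address end of its gpointer sized stack slot.
			 * For example, on a 64 bit build an int32 argument whose slot starts at offset 16
			 * is actually read from offset 16 + 4 = 20, which is what the adjustments above
			 * compute.
			 */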
565 case MONO_TYPE_FNPTR
:
566 case MONO_TYPE_CLASS
:
567 case MONO_TYPE_OBJECT
:
568 case MONO_TYPE_STRING
:
569 case MONO_TYPE_SZARRAY
:
570 case MONO_TYPE_ARRAY
:
571 add_general (&gr
, &stack_size
, ainfo
, FALSE
);
573 case MONO_TYPE_GENERICINST
:
574 if (!mono_type_generic_inst_is_valuetype (sig
->params
[i
])) {
575 add_general (&gr
, &stack_size
, ainfo
, FALSE
);
579 case MONO_TYPE_VALUETYPE
:
584 add_general (&gr
, &stack_size
, ainfo
, FALSE
);
586 case MONO_TYPE_TYPEDBYREF
:
587 add_general (&gr
, &stack_size
, ainfo
, FALSE
);
592 add_general (&gr
, &stack_size
, ainfo
, FALSE
);
594 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
599 add_float (&fr
, &stack_size
, ainfo
, TRUE
);
602 /* single precision values are passed in integer registers */
603 add_general (&gr
, &stack_size
, ainfo
, FALSE
);
608 add_float (&fr
, &stack_size
, ainfo
, FALSE
);
611 /* double precision values are passed in a pair of registers */
612 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
616 g_assert_not_reached ();
620 if (!sig
->pinvoke
&& (sig
->call_convention
== MONO_CALL_VARARG
) && (n
> 0) && (sig
->sentinelpos
== sig
->param_count
)) {
623 /* Emit the signature cookie just before the implicit arguments */
624 add_general (&gr
, &stack_size
, &cinfo
->sig_cookie
, FALSE
);
628 ret_type
= mono_type_get_underlying_type (sig
->ret
);
629 ret_type
= mini_get_basic_type_from_generic (gsctx
, ret_type
);
630 switch (ret_type
->type
) {
631 case MONO_TYPE_BOOLEAN
:
642 case MONO_TYPE_FNPTR
:
643 case MONO_TYPE_CLASS
:
644 case MONO_TYPE_OBJECT
:
645 case MONO_TYPE_SZARRAY
:
646 case MONO_TYPE_ARRAY
:
647 case MONO_TYPE_STRING
:
648 cinfo
->ret
.storage
= ArgInIReg
;
649 cinfo
->ret
.reg
= sparc_i0
;
656 cinfo
->ret
.storage
= ArgInIReg
;
657 cinfo
->ret
.reg
= sparc_i0
;
661 cinfo
->ret
.storage
= ArgInIRegPair
;
662 cinfo
->ret
.reg
= sparc_i0
;
669 cinfo
->ret
.storage
= ArgInFReg
;
670 cinfo
->ret
.reg
= sparc_f0
;
672 case MONO_TYPE_GENERICINST
:
673 if (!mono_type_generic_inst_is_valuetype (sig
->ret
)) {
674 cinfo
->ret
.storage
= ArgInIReg
;
675 cinfo
->ret
.reg
= sparc_i0
;
681 case MONO_TYPE_VALUETYPE
:
690 cinfo
->ret
.storage
= ArgOnStack
;
692 case MONO_TYPE_TYPEDBYREF
:
695 /* Same as a valuetype with size 24 */
702 cinfo
->ret
.storage
= ArgOnStack
;
707 g_error ("Can't handle as return value 0x%x", sig
->ret
->type
);
710 cinfo
->stack_usage
= stack_size
;
711 cinfo
->reg_usage
= gr
;
716 mono_arch_get_allocatable_int_vars (MonoCompile
*cfg
)
722 * FIXME: If an argument is allocated to a register, then load it from the
723 * stack in the prolog.
726 for (i
= 0; i
< cfg
->num_varinfo
; i
++) {
727 MonoInst
*ins
= cfg
->varinfo
[i
];
728 MonoMethodVar
*vmv
= MONO_VARINFO (cfg
, i
);
731 if (vmv
->range
.first_use
.abs_pos
>= vmv
->range
.last_use
.abs_pos
)
734 /* FIXME: Make arguments on stack allocatable to registers */
735 if (ins
->flags
& (MONO_INST_VOLATILE
|MONO_INST_INDIRECT
) || (ins
->opcode
== OP_REGVAR
) || (ins
->opcode
== OP_ARG
))
738 if (mono_is_regsize_var (ins
->inst_vtype
)) {
739 g_assert (MONO_VARINFO (cfg
, i
)->reg
== -1);
740 g_assert (i
== vmv
->idx
);
742 vars
= mono_varlist_insert_sorted (cfg
, vars
, vmv
, FALSE
);
750 mono_arch_get_global_int_regs (MonoCompile
*cfg
)
754 MonoMethodSignature
*sig
;
757 sig
= mono_method_signature (cfg
->method
);
759 cinfo
= get_call_info (cfg
, sig
, FALSE
);
761 /* Use unused input registers */
762 for (i
= cinfo
->reg_usage
; i
< 6; ++i
)
763 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (sparc_i0
+ i
));
765 /* Use %l0..%l6 as global registers */
766 for (i
= sparc_l0
; i
< sparc_l7
; ++i
)
767 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (i
));
775 * mono_arch_regalloc_cost:
777 * Return the cost, in number of memory references, of the action of
778 * allocating the variable VMV into a register during global register
782 mono_arch_regalloc_cost (MonoCompile
*cfg
, MonoMethodVar
*vmv
)
788 * Set var information according to the calling convention. sparc version.
789 * The locals var stuff should most likely be split in another method.
793 mono_arch_allocate_vars (MonoCompile
*cfg
)
795 MonoMethodSignature
*sig
;
796 MonoMethodHeader
*header
;
798 int i
, offset
, size
, align
, curinst
;
801 header
= mono_method_get_header (cfg
->method
);
803 sig
= mono_method_signature (cfg
->method
);
805 cinfo
= get_call_info (cfg
, sig
, FALSE
);
807 if (sig
->ret
->type
!= MONO_TYPE_VOID
) {
808 switch (cinfo
->ret
.storage
) {
811 cfg
->ret
->opcode
= OP_REGVAR
;
812 cfg
->ret
->inst_c0
= cinfo
->ret
.reg
;
815 if (((sig
->ret
->type
== MONO_TYPE_I8
) || (sig
->ret
->type
== MONO_TYPE_U8
))) {
816 MonoInst
*low
= get_vreg_to_inst (cfg
, cfg
->ret
->dreg
+ 1);
817 MonoInst
*high
= get_vreg_to_inst (cfg
, cfg
->ret
->dreg
+ 2);
819 low
->opcode
= OP_REGVAR
;
820 low
->dreg
= cinfo
->ret
.reg
+ 1;
821 high
->opcode
= OP_REGVAR
;
822 high
->dreg
= cinfo
->ret
.reg
;
824 cfg
->ret
->opcode
= OP_REGVAR
;
825 cfg
->ret
->inst_c0
= cinfo
->ret
.reg
;
829 g_assert_not_reached ();
832 cfg
->vret_addr
->opcode
= OP_REGOFFSET
;
833 cfg
->vret_addr
->inst_basereg
= sparc_fp
;
834 cfg
->vret_addr
->inst_offset
= 64;
840 cfg
->ret
->dreg
= cfg
->ret
->inst_c0
;
844 * We use the ABI calling conventions for managed code as well.
845 * Exception: valuetypes are never returned in registers on V9.
846 * FIXME: Use something more optimized.
849 /* Locals are allocated backwards from %fp */
850 cfg
->frame_reg
= sparc_fp
;
854 * Reserve a stack slot for holding information used during exception
857 if (header
->num_clauses
)
858 offset
+= sizeof (gpointer
) * 2;
860 if (cfg
->method
->save_lmf
) {
861 offset
+= sizeof (MonoLMF
);
862 cfg
->arch
.lmf_offset
= offset
;
865 curinst
= cfg
->locals_start
;
866 for (i
= curinst
; i
< cfg
->num_varinfo
; ++i
) {
867 inst
= cfg
->varinfo
[i
];
869 if ((inst
->opcode
== OP_REGVAR
) || (inst
->opcode
== OP_REGOFFSET
)) {
870 //g_print ("allocating local %d to %s\n", i, mono_arch_regname (inst->dreg));
874 if (inst
->flags
& MONO_INST_IS_DEAD
)
877 /* inst->backend.is_pinvoke indicates native sized value types; this is used by the
878 * pinvoke wrappers when they call functions returning structures */
879 if (inst
->backend
.is_pinvoke
&& MONO_TYPE_ISSTRUCT (inst
->inst_vtype
) && inst
->inst_vtype
->type
!= MONO_TYPE_TYPEDBYREF
)
880 size
= mono_class_native_size (inst
->inst_vtype
->data
.klass
, &align
);
882 size
= mini_type_stack_size (cfg
->generic_sharing_context
, inst
->inst_vtype
, &align
);
885 * This is needed since structures containing doubles must be doubleword
887 * FIXME: Do this only if needed.
889 if (MONO_TYPE_ISSTRUCT (inst
->inst_vtype
))
893 * variables are accessed as negative offsets from %fp, so increase
894 * the offset before assigning it to a variable
899 offset
&= ~(align
- 1);
900 inst
->opcode
= OP_REGOFFSET
;
901 inst
->inst_basereg
= sparc_fp
;
902 inst
->inst_offset
= STACK_BIAS
+ -offset
;
904 //g_print ("allocating local %d to [%s - %d]\n", i, mono_arch_regname (inst->inst_basereg), - inst->inst_offset);
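			/*
			 * Worked example (added, assuming the usual advance-then-align pattern for the
			 * elided lines above): an 8 byte local with align == 8 gets a doubleword aligned
			 * slot below the frame pointer, e.g. inst->inst_offset == STACK_BIAS + -24,
			 * addressed as [%fp + STACK_BIAS - 24] (the bias is 0 on 32 bit builds).
			 */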
907 if (sig
->call_convention
== MONO_CALL_VARARG
) {
908 cfg
->sig_cookie
= cinfo
->sig_cookie
.offset
+ ARGS_OFFSET
;
911 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
912 inst
= cfg
->args
[i
];
913 if (inst
->opcode
!= OP_REGVAR
) {
914 ArgInfo
*ainfo
= &cinfo
->args
[i
];
915 gboolean inreg
= TRUE
;
919 if (sig
->hasthis
&& (i
== 0))
920 arg_type
= &mono_defaults
.object_class
->byval_arg
;
922 arg_type
= sig
->params
[i
- sig
->hasthis
];
925 if (!arg_type
->byref
&& ((arg_type
->type
== MONO_TYPE_R4
)
926 || (arg_type
->type
== MONO_TYPE_R8
)))
928 * Since float arguments are passed in integer registers, we need to
929 * save them to the stack in the prolog.
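				/*
				 * Added illustration (prolog sketch, not the actual emitted code): a V8 double
				 * argument arrives in an integer register pair and can only reach an fp
				 * register through memory, e.g.
				 *
				 *   st  %i0, [%fp + off]
				 *   st  %i1, [%fp + off + 4]
				 *   ldd [%fp + off], %f2
				 *
				 * so the prolog stores the pair to the stack slot assigned here.
				 */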
934 /* FIXME: Allocate volatile arguments to registers */
935 /* FIXME: This makes the argument holding a vtype address volatile */
936 if (inst
->flags
& (MONO_INST_VOLATILE
|MONO_INST_INDIRECT
))
939 if (MONO_TYPE_ISSTRUCT (arg_type
))
940 /* FIXME: this isn't needed */
943 inst
->opcode
= OP_REGOFFSET
;
946 storage
= ArgOnStack
;
948 storage
= ainfo
->storage
;
952 inst
->opcode
= OP_REGVAR
;
953 inst
->dreg
= sparc_i0
+ ainfo
->reg
;
956 if (inst
->type
== STACK_I8
) {
957 MonoInst
*low
= get_vreg_to_inst (cfg
, inst
->dreg
+ 1);
958 MonoInst
*high
= get_vreg_to_inst (cfg
, inst
->dreg
+ 2);
960 low
->opcode
= OP_REGVAR
;
961 low
->dreg
= sparc_i0
+ ainfo
->reg
+ 1;
962 high
->opcode
= OP_REGVAR
;
963 high
->dreg
= sparc_i0
+ ainfo
->reg
;
965 inst
->opcode
= OP_REGVAR
;
966 inst
->dreg
= sparc_i0
+ ainfo
->reg
;
971 * Since float regs are volatile, we save the arguments to
972 * the stack in the prolog.
973 * FIXME: Avoid this if the method contains no calls.
977 case ArgInSplitRegStack
:
978 /* Split arguments are saved to the stack in the prolog */
979 inst
->opcode
= OP_REGOFFSET
;
980 /* in parent frame */
981 inst
->inst_basereg
= sparc_fp
;
982 inst
->inst_offset
= ainfo
->offset
+ ARGS_OFFSET
;
984 if (!arg_type
->byref
&& (arg_type
->type
== MONO_TYPE_R8
)) {
986 * It is very hard to load doubles from non-doubleword aligned
987 * memory locations. So if the offset is misaligned, we copy the
988 * argument to a stack location in the prolog.
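			/*
			 * Added note (sketch, not the actual emitted code): ldd/std trap unless the
			 * effective address is 8 byte aligned, so a misaligned argument slot would force
			 * something like
			 *
			 *   ld  [%fp + unaligned_off],     %o7
			 *   ld  [%fp + unaligned_off + 4], %g1
			 *   st  %o7, [%fp + aligned_off]
			 *   st  %g1, [%fp + aligned_off + 4]
			 *   ldd [%fp + aligned_off], %f2
			 *
			 * which is exactly the one-time copy the prolog performs here.
			 */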
990 if ((inst
->inst_offset
- STACK_BIAS
) % 8) {
991 inst
->inst_basereg
= sparc_fp
;
995 offset
&= ~(align
- 1);
996 inst
->inst_offset
= STACK_BIAS
+ -offset
;
1005 if (MONO_TYPE_ISSTRUCT (arg_type
)) {
1006 /* Add a level of indirection */
1008 * It would be easier to add OP_LDIND_I here, but ldind_i instructions
1009 * are destructively modified in a lot of places in inssel.brg.
1012 MONO_INST_NEW (cfg
, indir
, 0);
1014 inst
->opcode
= OP_VTARG_ADDR
;
1015 inst
->inst_left
= indir
;
1020 /* Add a properly aligned dword for use by int<->float conversion opcodes */
1022 offset
= ALIGN_TO (offset
, 8);
1023 cfg
->arch
.float_spill_slot_offset
= offset
;
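	/*
	 * Added illustration (sketch of the pattern, not the actual code): SPARC has no direct
	 * move between integer and fp registers, so an int -> double conversion goes through
	 * this spill slot, roughly
	 *
	 *   st    %o1, [%fp + spill_off]    ! store the integer
	 *   ld    [%fp + spill_off], %f30   ! reload it into an fp register
	 *   fitod %f30, %f2                 ! convert
	 */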
1026 * spillvars are stored between the normal locals and the storage reserved
1030 cfg
->stack_offset
= offset
;
1036 mono_arch_create_vars (MonoCompile
*cfg
)
1038 MonoMethodSignature
*sig
;
1040 sig
= mono_method_signature (cfg
->method
);
1042 if (MONO_TYPE_ISSTRUCT ((sig
->ret
))) {
1043 cfg
->vret_addr
= mono_compile_create_var (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_ARG
);
1044 if (G_UNLIKELY (cfg
->verbose_level
> 1)) {
1045 printf ("vret_addr = ");
1046 mono_print_ins (cfg
->vret_addr
);
1052 add_outarg_reg (MonoCompile
*cfg
, MonoCallInst
*call
, ArgStorage storage
, int reg
, guint32 sreg
)
1056 MONO_INST_NEW (cfg
, arg
, 0);
1062 arg
->opcode
= OP_MOVE
;
1063 arg
->dreg
= mono_alloc_ireg (cfg
);
1065 mono_call_inst_add_outarg_reg (cfg
, call
, arg
->dreg
, reg
, FALSE
);
1068 arg
->opcode
= OP_FMOVE
;
1069 arg
->dreg
= mono_alloc_freg (cfg
);
1071 mono_call_inst_add_outarg_reg (cfg
, call
, arg
->dreg
, reg
, TRUE
);
1074 g_assert_not_reached ();
1077 MONO_ADD_INS (cfg
->cbb
, arg
);
1081 add_outarg_load (MonoCompile
*cfg
, MonoCallInst
*call
, int opcode
, int basereg
, int offset
, int reg
)
1084 int dreg
= mono_alloc_ireg (cfg
);
1086 EMIT_NEW_LOAD_MEMBASE (cfg
, arg
, OP_LOAD_MEMBASE
, dreg
, sparc_sp
, offset
);
1087 MONO_ADD_INS (cfg
->cbb
, arg
);
1089 mono_call_inst_add_outarg_reg (cfg
, call
, dreg
, reg
, FALSE
);
1093 emit_pass_long (MonoCompile
*cfg
, MonoCallInst
*call
, ArgInfo
*ainfo
, MonoInst
*in
)
1095 int offset
= ARGS_OFFSET
+ ainfo
->offset
;
1097 switch (ainfo
->storage
) {
1099 add_outarg_reg (cfg
, call
, ArgInIReg
, sparc_o0
+ ainfo
->reg
+ 1, in
->dreg
+ 1);
1100 add_outarg_reg (cfg
, call
, ArgInIReg
, sparc_o0
+ ainfo
->reg
, in
->dreg
+ 2);
1102 case ArgOnStackPair
:
1103 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI4_MEMBASE_REG
, sparc_sp
, offset
, in
->dreg
+ 2);
1104 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI4_MEMBASE_REG
, sparc_sp
, offset
+ 4, in
->dreg
+ 1);
1106 case ArgInSplitRegStack
:
1107 add_outarg_reg (cfg
, call
, ArgInIReg
, sparc_o0
+ ainfo
->reg
, in
->dreg
+ 2);
1108 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI4_MEMBASE_REG
, sparc_sp
, offset
+ 4, in
->dreg
+ 1);
1111 g_assert_not_reached ();
1116 emit_pass_double (MonoCompile
*cfg
, MonoCallInst
*call
, ArgInfo
*ainfo
, MonoInst
*in
)
1118 int offset
= ARGS_OFFSET
+ ainfo
->offset
;
1120 switch (ainfo
->storage
) {
1122 /* floating-point <-> integer transfer must go through memory */
1123 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER8_MEMBASE_REG
, sparc_sp
, offset
, in
->dreg
);
1125 /* Load into a register pair */
1126 add_outarg_load (cfg
, call
, OP_LOADI4_MEMBASE
, sparc_sp
, offset
, sparc_o0
+ ainfo
->reg
);
1127 add_outarg_load (cfg
, call
, OP_LOADI4_MEMBASE
, sparc_sp
, offset
+ 4, sparc_o0
+ ainfo
->reg
+ 1);
1129 case ArgOnStackPair
:
1130 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER8_MEMBASE_REG
, sparc_sp
, offset
, in
->dreg
);
1132 case ArgInSplitRegStack
:
1133 /* floating-point <-> integer transfer must go through memory */
1134 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER8_MEMBASE_REG
, sparc_sp
, offset
, in
->dreg
);
1135 /* Load most significant word into register */
1136 add_outarg_load (cfg
, call
, OP_LOADI4_MEMBASE
, sparc_sp
, offset
, sparc_o0
+ ainfo
->reg
);
1139 g_assert_not_reached ();
1144 emit_pass_float (MonoCompile
*cfg
, MonoCallInst
*call
, ArgInfo
*ainfo
, MonoInst
*in
)
1146 int offset
= ARGS_OFFSET
+ ainfo
->offset
;
1148 switch (ainfo
->storage
) {
1150 /* floating-point <-> integer transfer must go through memory */
1151 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER4_MEMBASE_REG
, sparc_sp
, offset
, in
->dreg
);
1152 add_outarg_load (cfg
, call
, OP_LOADI4_MEMBASE
, sparc_sp
, offset
, sparc_o0
+ ainfo
->reg
);
1155 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER4_MEMBASE_REG
, sparc_sp
, offset
, in
->dreg
);
1158 g_assert_not_reached ();
1163 emit_pass_other (MonoCompile
*cfg
, MonoCallInst
*call
, ArgInfo
*ainfo
, MonoType
*arg_type
, MonoInst
*in
);
1166 emit_pass_vtype (MonoCompile
*cfg
, MonoCallInst
*call
, CallInfo
*cinfo
, ArgInfo
*ainfo
, MonoType
*arg_type
, MonoInst
*in
, gboolean pinvoke
)
1169 guint32 align
, offset
, pad
, size
;
1171 if (arg_type
->type
== MONO_TYPE_TYPEDBYREF
) {
1172 size
= sizeof (MonoTypedRef
);
1173 align
= sizeof (gpointer
);
1176 size
= mono_type_native_stack_size (&in
->klass
->byval_arg
, &align
);
1179 * Other backends use mono_type_stack_size (), but that
1180 * aligns the size to 8, which is larger than the size of
1181 * the source, leading to reads of invalid memory if the
1182 * source is at the end of address space.
1184 size
= mono_class_value_size (in
->klass
, &align
);
1187 /* The first 6 argument locations are reserved */
1188 if (cinfo
->stack_usage
< 6 * sizeof (gpointer
))
1189 cinfo
->stack_usage
= 6 * sizeof (gpointer
);
1191 offset
= ALIGN_TO ((ARGS_OFFSET
- STACK_BIAS
) + cinfo
->stack_usage
, align
);
1192 pad
= offset
- ((ARGS_OFFSET
- STACK_BIAS
) + cinfo
->stack_usage
);
1194 cinfo
->stack_usage
+= size
;
1195 cinfo
->stack_usage
+= pad
;
1198 * We use OP_OUTARG_VT to copy the valuetype to a stack location, then
1199 * use the normal OUTARG opcodes to pass the address of the location to
1203 MONO_INST_NEW (cfg
, arg
, OP_OUTARG_VT
);
1204 arg
->sreg1
= in
->dreg
;
1205 arg
->klass
= in
->klass
;
1206 arg
->backend
.size
= size
;
1207 arg
->inst_p0
= call
;
1208 arg
->inst_p1
= mono_mempool_alloc (cfg
->mempool
, sizeof (ArgInfo
));
1209 memcpy (arg
->inst_p1
, ainfo
, sizeof (ArgInfo
));
1210 ((ArgInfo
*)(arg
->inst_p1
))->offset
= STACK_BIAS
+ offset
;
1211 MONO_ADD_INS (cfg
->cbb
, arg
);
1213 MONO_INST_NEW (cfg
, arg
, OP_ADD_IMM
);
1214 arg
->dreg
= mono_alloc_preg (cfg
);
1215 arg
->sreg1
= sparc_sp
;
1216 arg
->inst_imm
= STACK_BIAS
+ offset
;
1217 MONO_ADD_INS (cfg
->cbb
, arg
);
1219 emit_pass_other (cfg
, call
, ainfo
, NULL
, arg
);
1224 emit_pass_other (MonoCompile
*cfg
, MonoCallInst
*call
, ArgInfo
*ainfo
, MonoType
*arg_type
, MonoInst
*in
)
1226 int offset
= ARGS_OFFSET
+ ainfo
->offset
;
1229 switch (ainfo
->storage
) {
1231 add_outarg_reg (cfg
, call
, ArgInIReg
, sparc_o0
+ ainfo
->reg
, in
->dreg
);
1238 opcode
= OP_STOREI1_MEMBASE_REG
;
1239 else if (offset
& 0x2)
1240 opcode
= OP_STOREI2_MEMBASE_REG
;
1242 opcode
= OP_STOREI4_MEMBASE_REG
;
1243 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, opcode
, sparc_sp
, offset
, in
->dreg
);
1247 g_assert_not_reached ();
1252 emit_sig_cookie (MonoCompile
*cfg
, MonoCallInst
*call
, CallInfo
*cinfo
)
1254 MonoMethodSignature
*tmp_sig
;
1257 * mono_ArgIterator_Setup assumes the signature cookie is
1258 * passed first and all the arguments which were before it are
1259 * passed on the stack after the signature. So compensate by
1260 * passing a different signature.
1262 tmp_sig
= mono_metadata_signature_dup (call
->signature
);
1263 tmp_sig
->param_count
-= call
->signature
->sentinelpos
;
1264 tmp_sig
->sentinelpos
= 0;
1265 memcpy (tmp_sig
->params
, call
->signature
->params
+ call
->signature
->sentinelpos
, tmp_sig
->param_count
* sizeof (MonoType
*));
1267 /* FIXME: Add support for signature tokens to AOT */
1268 cfg
->disable_aot
= TRUE
;
1269 /* We always pass the signature on the stack for simplicity */
1270 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STORE_MEMBASE_IMM
, sparc_sp
, ARGS_OFFSET
+ cinfo
->sig_cookie
.offset
, tmp_sig
);
1274 mono_arch_emit_call (MonoCompile
*cfg
, MonoCallInst
*call
)
1277 MonoMethodSignature
*sig
;
1281 guint32 extra_space
= 0;
1283 sig
= call
->signature
;
1284 n
= sig
->param_count
+ sig
->hasthis
;
1286 cinfo
= get_call_info (cfg
, sig
, sig
->pinvoke
);
1288 if (sig
->ret
&& MONO_TYPE_ISSTRUCT (sig
->ret
)) {
1289 /* Set the 'struct/union return pointer' location on the stack */
1290 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI4_MEMBASE_REG
, sparc_sp
, 64, call
->vret_var
->dreg
);
1293 for (i
= 0; i
< n
; ++i
) {
1296 ainfo
= cinfo
->args
+ i
;
1298 if ((sig
->call_convention
== MONO_CALL_VARARG
) && (i
== sig
->sentinelpos
)) {
1299 /* Emit the signature cookie just before the first implicit argument */
1300 emit_sig_cookie (cfg
, call
, cinfo
);
1303 in
= call
->args
[i
];
1305 if (sig
->hasthis
&& (i
== 0))
1306 arg_type
= &mono_defaults
.object_class
->byval_arg
;
1308 arg_type
= sig
->params
[i
- sig
->hasthis
];
1310 if ((i
>= sig
->hasthis
) && (MONO_TYPE_ISSTRUCT(sig
->params
[i
- sig
->hasthis
])))
1311 emit_pass_vtype (cfg
, call
, cinfo
, ainfo
, arg_type
, in
, sig
->pinvoke
);
1312 else if (!arg_type
->byref
&& ((arg_type
->type
== MONO_TYPE_I8
) || (arg_type
->type
== MONO_TYPE_U8
)))
1313 emit_pass_long (cfg
, call
, ainfo
, in
);
1314 else if (!arg_type
->byref
&& (arg_type
->type
== MONO_TYPE_R8
))
1315 emit_pass_double (cfg
, call
, ainfo
, in
);
1316 else if (!arg_type
->byref
&& (arg_type
->type
== MONO_TYPE_R4
))
1317 emit_pass_float (cfg
, call
, ainfo
, in
);
1319 emit_pass_other (cfg
, call
, ainfo
, arg_type
, in
);
1322 /* Handle the case where there are no implicit arguments */
1323 if (!sig
->pinvoke
&& (sig
->call_convention
== MONO_CALL_VARARG
) && (n
== sig
->sentinelpos
)) {
1324 emit_sig_cookie (cfg
, call
, cinfo
);
1327 call
->stack_usage
= cinfo
->stack_usage
+ extra_space
;
1333 mono_arch_emit_outarg_vt (MonoCompile
*cfg
, MonoInst
*ins
, MonoInst
*src
)
1335 ArgInfo
*ainfo
= (ArgInfo
*)ins
->inst_p1
;
1336 int size
= ins
->backend
.size
;
1338 mini_emit_memcpy (cfg
, sparc_sp
, ainfo
->offset
, src
->dreg
, 0, size
, 0);
1342 mono_arch_emit_setret (MonoCompile
*cfg
, MonoMethod
*method
, MonoInst
*val
)
1344 CallInfo
*cinfo
= get_call_info (cfg
, mono_method_signature (method
), FALSE
);
1346 switch (cinfo
->ret
.storage
) {
1348 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, cfg
->ret
->dreg
, val
->dreg
);
1351 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, cfg
->ret
->dreg
, val
->dreg
+ 2);
1352 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, cfg
->ret
->dreg
+ 1, val
->dreg
+ 1);
1355 if (mono_method_signature (method
)->ret
->type
== MONO_TYPE_R4
)
1356 MONO_EMIT_NEW_UNALU (cfg
, OP_SETFRET
, cfg
->ret
->dreg
, val
->dreg
);
1358 MONO_EMIT_NEW_UNALU (cfg
, OP_FMOVE
, cfg
->ret
->dreg
, val
->dreg
);
1361 g_assert_not_reached ();
1367 int cond_to_sparc_cond
[][3] = {
1368 {sparc_be
, sparc_be
, sparc_fbe
},
1369 {sparc_bne
, sparc_bne
, 0},
1370 {sparc_ble
, sparc_ble
, sparc_fble
},
1371 {sparc_bge
, sparc_bge
, sparc_fbge
},
1372 {sparc_bl
, sparc_bl
, sparc_fbl
},
1373 {sparc_bg
, sparc_bg
, sparc_fbg
},
1374 {sparc_bleu
, sparc_bleu
, 0},
1375 {sparc_beu
, sparc_beu
, 0},
1376 {sparc_blu
, sparc_blu
, sparc_fbl
},
1377 {sparc_bgu
, sparc_bgu
, sparc_fbg
}
1380 /* Map opcode to the sparc condition codes */
1381 static inline SparcCond
1382 opcode_to_sparc_cond (int opcode
)
1388 case OP_COND_EXC_OV
:
1389 case OP_COND_EXC_IOV
:
1392 case OP_COND_EXC_IC
:
1394 case OP_COND_EXC_NO
:
1395 case OP_COND_EXC_NC
:
1398 rel
= mono_opcode_to_cond (opcode
);
1399 t
= mono_opcode_to_type (opcode
, -1);
1401 return cond_to_sparc_cond
[rel
][t
];
1408 #define COMPUTE_DISP(ins) \
1409 if (ins->flags & MONO_INST_BRLABEL) { \
1410 if (ins->inst_i0->inst_c0) \
1411 disp = (ins->inst_i0->inst_c0 - ((guint8*)code - cfg->native_code)) >> 2; \
1414 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_LABEL, ins->inst_i0); \
1417 if (ins->inst_true_bb->native_offset) \
1418 disp = (ins->inst_true_bb->native_offset - ((guint8*)code - cfg->native_code)) >> 2; \
1421 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
1426 #define DEFAULT_ICC sparc_xcc_short
1428 #define DEFAULT_ICC sparc_icc_short
1432 #define EMIT_COND_BRANCH_ICC(ins,cond,annul,filldelay,icc) \
1436 COMPUTE_DISP(ins); \
1437 predict = (disp != 0) ? 1 : 0; \
1438 g_assert (sparc_is_imm19 (disp)); \
1439 sparc_branchp (code, (annul), cond, icc, (predict), disp); \
1440 if (filldelay) sparc_nop (code); \
1442 #define EMIT_COND_BRANCH(ins,cond,annul,filldelay) EMIT_COND_BRANCH_ICC ((ins), (cond), (annul), (filldelay), (sparc_xcc_short))
1443 #define EMIT_FLOAT_COND_BRANCH(ins,cond,annul,filldelay) \
1447 COMPUTE_DISP(ins); \
1448 predict = (disp != 0) ? 1 : 0; \
1449 g_assert (sparc_is_imm19 (disp)); \
1450 sparc_fbranch (code, (annul), cond, disp); \
1451 if (filldelay) sparc_nop (code); \
1454 #define EMIT_COND_BRANCH_ICC(ins,cond,annul,filldelay,icc) g_assert_not_reached ()
1455 #define EMIT_COND_BRANCH_GENERAL(ins,bop,cond,annul,filldelay) \
1458 COMPUTE_DISP(ins); \
1459 g_assert (sparc_is_imm22 (disp)); \
1460 sparc_ ## bop (code, (annul), cond, disp); \
1461 if (filldelay) sparc_nop (code); \
1463 #define EMIT_COND_BRANCH(ins,cond,annul,filldelay) EMIT_COND_BRANCH_GENERAL((ins),branch,(cond),annul,filldelay)
1464 #define EMIT_FLOAT_COND_BRANCH(ins,cond,annul,filldelay) EMIT_COND_BRANCH_GENERAL((ins),fbranch,(cond),annul,filldelay)
1467 #define EMIT_COND_BRANCH_PREDICTED(ins,cond,annul,filldelay) \
1471 COMPUTE_DISP(ins); \
1472 predict = (disp != 0) ? 1 : 0; \
1473 g_assert (sparc_is_imm19 (disp)); \
1474 sparc_branchp (code, (annul), (cond), DEFAULT_ICC, (predict), disp); \
1475 if (filldelay) sparc_nop (code); \
1478 #define EMIT_COND_BRANCH_BPR(ins,bop,predict,annul,filldelay) \
1481 COMPUTE_DISP(ins); \
1482 g_assert (sparc_is_imm22 (disp)); \
1483 sparc_ ## bop (code, (annul), (predict), ins->sreg1, disp); \
1484 if (filldelay) sparc_nop (code); \
1487 /* emit an exception if the condition fails */
1489 * We put the exception throwing code out-of-line, at the end of the method
1491 #define EMIT_COND_SYSTEM_EXCEPTION_GENERAL(ins,cond,sexc_name,filldelay,icc) do { \
1492 mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code, \
1493 MONO_PATCH_INFO_EXC, sexc_name); \
1494 if (sparcv9 && ((icc) != sparc_icc_short)) { \
1495 sparc_branchp (code, 0, (cond), (icc), 0, 0); \
1498 sparc_branch (code, 0, cond, 0); \
1500 if (filldelay) sparc_nop (code); \
1503 #define EMIT_COND_SYSTEM_EXCEPTION(ins,cond,sexc_name) EMIT_COND_SYSTEM_EXCEPTION_GENERAL(ins,cond,sexc_name,TRUE,DEFAULT_ICC)
1505 #define EMIT_COND_SYSTEM_EXCEPTION_BPR(ins,bop,sexc_name) do { \
1506 mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code, \
1507 MONO_PATCH_INFO_EXC, sexc_name); \
1508 sparc_ ## bop (code, FALSE, FALSE, ins->sreg1, 0); \
1512 #define EMIT_ALU_IMM(ins,op,setcc) do { \
1513 if (sparc_is_imm13 ((ins)->inst_imm)) \
1514 sparc_ ## op ## _imm (code, (setcc), (ins)->sreg1, ins->inst_imm, (ins)->dreg); \
1516 sparc_set (code, ins->inst_imm, sparc_o7); \
1517 sparc_ ## op (code, (setcc), (ins)->sreg1, sparc_o7, (ins)->dreg); \
1521 #define EMIT_LOAD_MEMBASE(ins,op) do { \
1522 if (sparc_is_imm13 (ins->inst_offset)) \
1523 sparc_ ## op ## _imm (code, ins->inst_basereg, ins->inst_offset, ins->dreg); \
1525 sparc_set (code, ins->inst_offset, sparc_o7); \
1526 sparc_ ## op (code, ins->inst_basereg, sparc_o7, ins->dreg); \
1531 #define EMIT_STORE_MEMBASE_IMM(ins,op) do { \
1533 if (ins->inst_imm == 0) \
1536 sparc_set (code, ins->inst_imm, sparc_o7); \
1539 if (!sparc_is_imm13 (ins->inst_offset)) { \
1540 sparc_set (code, ins->inst_offset, GP_SCRATCH_REG); \
1541 sparc_ ## op (code, sreg, ins->inst_destbasereg, GP_SCRATCH_REG); \
1544 sparc_ ## op ## _imm (code, sreg, ins->inst_destbasereg, ins->inst_offset); \
1547 #define EMIT_STORE_MEMBASE_REG(ins,op) do { \
1548 if (!sparc_is_imm13 (ins->inst_offset)) { \
1549 sparc_set (code, ins->inst_offset, sparc_o7); \
1550 sparc_ ## op (code, ins->sreg1, ins->inst_destbasereg, sparc_o7); \
1553 sparc_ ## op ## _imm (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset); \
1556 #define EMIT_CALL() do { \
1558 sparc_set_template (code, sparc_o7); \
1559 sparc_jmpl (code, sparc_o7, sparc_g0, sparc_o7); \
1562 sparc_call_simple (code, 0); \
1568 * A call template is 7 instructions long, so we want to avoid it if possible.
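 *
 * Added illustration (assumption about the emitted shapes): on 32 bit a direct call is a
 * single instruction whose 30 bit word displacement reaches any address,
 *
 *   call  target
 *    nop             ! delay slot
 *
 * while the v64 path above (sparc_set_template () + jmpl) must first materialize a full
 * 64 bit address, which is what makes the template several instructions long.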
1571 emit_call (MonoCompile
*cfg
, guint32
*code
, guint32 patch_type
, gconstpointer data
)
1575 /* FIXME: This only works if the target method is already compiled */
1576 if (0 && v64
&& !cfg
->compile_aot
) {
1577 MonoJumpInfo patch_info
;
1579 patch_info
.type
= patch_type
;
1580 patch_info
.data
.target
= data
;
1582 target
= mono_resolve_patch_target (cfg
->method
, cfg
->domain
, NULL
, &patch_info
, FALSE
);
1584 /* FIXME: Add optimizations if the target is close enough */
1585 sparc_set (code
, target
, sparc_o7
);
1586 sparc_jmpl (code
, sparc_o7
, sparc_g0
, sparc_o7
);
1590 mono_add_patch_info (cfg
, (guint8
*)code
- cfg
->native_code
, patch_type
, data
);
1598 mono_arch_peephole_pass_1 (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
1603 mono_arch_peephole_pass_2 (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
1605 MonoInst
*ins
, *n
, *last_ins
= NULL
;
1608 MONO_BB_FOR_EACH_INS_SAFE (bb
, n
, ins
) {
1609 switch (ins
->opcode
) {
1611 /* remove unnecessary multiplication with 1 */
1612 if (ins
->inst_imm
== 1) {
1613 if (ins
->dreg
!= ins
->sreg1
) {
1614 ins
->opcode
= OP_MOVE
;
1616 MONO_DELETE_INS (bb
, ins
);
1622 case OP_LOAD_MEMBASE
:
1623 case OP_LOADI4_MEMBASE
:
1625 * OP_STORE_MEMBASE_REG reg, offset(basereg)
1626 * OP_LOAD_MEMBASE offset(basereg), reg
1628 if (last_ins
&& (last_ins
->opcode
== OP_STOREI4_MEMBASE_REG
1629 || last_ins
->opcode
== OP_STORE_MEMBASE_REG
) &&
1630 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
1631 ins
->inst_offset
== last_ins
->inst_offset
) {
1632 if (ins
->dreg
== last_ins
->sreg1
) {
1633 MONO_DELETE_INS (bb
, ins
);
1636 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1637 ins
->opcode
= OP_MOVE
;
1638 ins
->sreg1
= last_ins
->sreg1
;
1642 * Note: reg1 must be different from the basereg in the second load
1643 * OP_LOAD_MEMBASE offset(basereg), reg1
1644 * OP_LOAD_MEMBASE offset(basereg), reg2
1646 * OP_LOAD_MEMBASE offset(basereg), reg1
1647 * OP_MOVE reg1, reg2
1649 } if (last_ins
&& (last_ins
->opcode
== OP_LOADI4_MEMBASE
1650 || last_ins
->opcode
== OP_LOAD_MEMBASE
) &&
1651 ins
->inst_basereg
!= last_ins
->dreg
&&
1652 ins
->inst_basereg
== last_ins
->inst_basereg
&&
1653 ins
->inst_offset
== last_ins
->inst_offset
) {
1655 if (ins
->dreg
== last_ins
->dreg
) {
1656 MONO_DELETE_INS (bb
, ins
);
1659 ins
->opcode
= OP_MOVE
;
1660 ins
->sreg1
= last_ins
->dreg
;
1663 //g_assert_not_reached ();
1667 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
1668 * OP_LOAD_MEMBASE offset(basereg), reg
1670 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
1671 * OP_ICONST reg, imm
1673 } else if (last_ins
&& (last_ins
->opcode
== OP_STOREI4_MEMBASE_IMM
1674 || last_ins
->opcode
== OP_STORE_MEMBASE_IMM
) &&
1675 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
1676 ins
->inst_offset
== last_ins
->inst_offset
) {
1677 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1678 ins
->opcode
= OP_ICONST
;
1679 ins
->inst_c0
= last_ins
->inst_imm
;
1680 g_assert_not_reached (); // check this rule
1685 case OP_LOADI1_MEMBASE
:
1686 if (last_ins
&& (last_ins
->opcode
== OP_STOREI1_MEMBASE_REG
) &&
1687 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
1688 ins
->inst_offset
== last_ins
->inst_offset
) {
1689 if (ins
->dreg
== last_ins
->sreg1
) {
1690 MONO_DELETE_INS (bb
, ins
);
1693 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1694 ins
->opcode
= OP_MOVE
;
1695 ins
->sreg1
= last_ins
->sreg1
;
1699 case OP_LOADI2_MEMBASE
:
1700 if (last_ins
&& (last_ins
->opcode
== OP_STOREI2_MEMBASE_REG
) &&
1701 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
1702 ins
->inst_offset
== last_ins
->inst_offset
) {
1703 if (ins
->dreg
== last_ins
->sreg1
) {
1704 MONO_DELETE_INS (bb
, ins
);
1707 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1708 ins
->opcode
= OP_MOVE
;
1709 ins
->sreg1
= last_ins
->sreg1
;
1713 case OP_STOREI4_MEMBASE_IMM
:
1714 /* Convert pairs of 0 stores to a dword 0 store */
1715 /* Used when initializing temporaries */
1716 /* We know sparc_fp is dword aligned */
1717 if (last_ins
&& (last_ins
->opcode
== OP_STOREI4_MEMBASE_IMM
) &&
1718 (ins
->inst_destbasereg
== last_ins
->inst_destbasereg
) &&
1719 (ins
->inst_destbasereg
== sparc_fp
) &&
1720 (ins
->inst_offset
< 0) &&
1721 ((ins
->inst_offset
% 8) == 0) &&
1722 ((ins
->inst_offset
== last_ins
->inst_offset
- 4)) &&
1723 (ins
->inst_imm
== 0) &&
1724 (last_ins
->inst_imm
== 0)) {
1726 last_ins
->opcode
= OP_STOREI8_MEMBASE_IMM
;
1727 last_ins
->inst_offset
= ins
->inst_offset
;
1728 MONO_DELETE_INS (bb
, ins
);
1739 case OP_COND_EXC_EQ
:
1740 case OP_COND_EXC_GE
:
1741 case OP_COND_EXC_GT
:
1742 case OP_COND_EXC_LE
:
1743 case OP_COND_EXC_LT
:
1744 case OP_COND_EXC_NE_UN
:
1746 * Convert compare with zero+branch to BRcc
1749 * This only works in 64 bit mode, since it examines all 64
1750 * bits of the register.
1751 * Only do this if the method is small since BPr only has a 16bit
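			 *
			 * Added illustration (sketch): for an equality branch the rewrite turns
			 *
			 *   cmp  %reg, 0        ! OP_COMPARE_IMM with inst_imm == 0
			 *   be   target
			 *
			 * into the single V9 register branch
			 *
			 *   brz  %reg, target   ! OP_SPARC_BRZ
			 *
			 * so the separate compare can be deleted, as done below.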
1754 if (v64
&& (mono_method_get_header (cfg
->method
)->code_size
< 10000) && last_ins
&&
1755 (last_ins
->opcode
== OP_COMPARE_IMM
) &&
1756 (last_ins
->inst_imm
== 0)) {
1757 switch (ins
->opcode
) {
1759 ins
->opcode
= OP_SPARC_BRZ
;
1762 ins
->opcode
= OP_SPARC_BRNZ
;
1765 ins
->opcode
= OP_SPARC_BRLZ
;
1768 ins
->opcode
= OP_SPARC_BRGZ
;
1771 ins
->opcode
= OP_SPARC_BRGEZ
;
1774 ins
->opcode
= OP_SPARC_BRLEZ
;
1776 case OP_COND_EXC_EQ
:
1777 ins
->opcode
= OP_SPARC_COND_EXC_EQZ
;
1779 case OP_COND_EXC_GE
:
1780 ins
->opcode
= OP_SPARC_COND_EXC_GEZ
;
1782 case OP_COND_EXC_GT
:
1783 ins
->opcode
= OP_SPARC_COND_EXC_GTZ
;
1785 case OP_COND_EXC_LE
:
1786 ins
->opcode
= OP_SPARC_COND_EXC_LEZ
;
1788 case OP_COND_EXC_LT
:
1789 ins
->opcode
= OP_SPARC_COND_EXC_LTZ
;
1791 case OP_COND_EXC_NE_UN
:
1792 ins
->opcode
= OP_SPARC_COND_EXC_NEZ
;
1795 g_assert_not_reached ();
1797 ins
->sreg1
= last_ins
->sreg1
;
1799 MONO_DELETE_INS (bb
, ins
);
1807 if (ins
->dreg
== ins
->sreg1
) {
1808 MONO_DELETE_INS (bb
, ins
);
1812 * OP_MOVE sreg, dreg
1813 * OP_MOVE dreg, sreg
1815 if (last_ins
&& last_ins
->opcode
== OP_MOVE
&&
1816 ins
->sreg1
== last_ins
->dreg
&&
1817 ins
->dreg
== last_ins
->sreg1
) {
1818 MONO_DELETE_INS (bb
, ins
);
1826 bb
->last_ins
= last_ins
;
1830 mono_arch_lowering_pass (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
1834 /* FIXME: Strange loads from the stack in basic-float.cs:test_2_rem */
1837 sparc_patch (guint32
*code
, const gpointer target
)
1840 guint32 ins
= *code
;
1841 guint32 op
= ins
>> 30;
1842 guint32 op2
= (ins
>> 22) & 0x7;
1843 guint32 rd
= (ins
>> 25) & 0x1f;
1844 guint8
* target8
= (guint8
*)target
;
1845 gint64 disp
= (target8
- (guint8
*)code
) >> 2;
1848 // g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
1850 if ((op
== 0) && (op2
== 2)) {
1851 if (!sparc_is_imm22 (disp
))
1854 *code
= ((ins
>> 22) << 22) | (disp
& 0x3fffff);
1856 else if ((op
== 0) && (op2
== 1)) {
1857 if (!sparc_is_imm19 (disp
))
1860 *code
= ((ins
>> 19) << 19) | (disp
& 0x7ffff);
1862 else if ((op
== 0) && (op2
== 3)) {
1863 if (!sparc_is_imm16 (disp
))
1866 *code
&= ~(0x180000 | 0x3fff);
1867 *code
|= ((disp
<< 21) & (0x180000)) | (disp
& 0x3fff);
1869 else if ((op
== 0) && (op2
== 6)) {
1870 if (!sparc_is_imm22 (disp
))
1873 *code
= ((ins
>> 22) << 22) | (disp
& 0x3fffff);
1875 else if ((op
== 0) && (op2
== 4)) {
1876 guint32 ins2
= code
[1];
1878 if (((ins2
>> 30) == 2) && (((ins2
>> 19) & 0x3f) == 2)) {
1879 /* sethi followed by or */
1881 sparc_set (p
, target8
, rd
);
1882 while (p
<= (code
+ 1))
1885 else if (ins2
== 0x01000000) {
1886 /* sethi followed by nop */
1888 sparc_set (p
, target8
, rd
);
1889 while (p
<= (code
+ 1))
1892 else if ((sparc_inst_op (ins2
) == 3) && (sparc_inst_imm (ins2
))) {
1893 /* sethi followed by load/store */
1895 guint32 t
= (guint32
)target8
;
1896 *code
&= ~(0x3fffff);
1898 *(code
+ 1) &= ~(0x3ff);
1899 *(code
+ 1) |= (t
& 0x3ff);
1903 (sparc_inst_rd (ins
) == sparc_g1
) &&
1904 (sparc_inst_op (c
[1]) == 0) && (sparc_inst_op2 (c
[1]) == 4) &&
1905 (sparc_inst_op (c
[2]) == 2) && (sparc_inst_op3 (c
[2]) == 2) &&
1906 (sparc_inst_op (c
[3]) == 2) && (sparc_inst_op3 (c
[3]) == 2))
1910 reg
= sparc_inst_rd (c
[1]);
1911 sparc_set (p
, target8
, reg
);
1915 else if ((sparc_inst_op (ins2
) == 2) && (sparc_inst_op3 (ins2
) == 0x38) &&
1916 (sparc_inst_imm (ins2
))) {
1917 /* sethi followed by jmpl */
1919 guint32 t
= (guint32
)target8
;
1920 *code
&= ~(0x3fffff);
1922 *(code
+ 1) &= ~(0x3ff);
1923 *(code
+ 1) |= (t
& 0x3ff);
1929 else if (op
== 01) {
1930 gint64 disp
= (target8
- (guint8
*)code
) >> 2;
1932 if (!sparc_is_imm30 (disp
))
1934 sparc_call_simple (code
, target8
- (guint8
*)code
);
1936 else if ((op
== 2) && (sparc_inst_op3 (ins
) == 0x2) && sparc_inst_imm (ins
)) {
1938 g_assert (sparc_is_imm13 (target8
));
1940 *code
|= (guint32
)target8
;
1942 else if ((sparc_inst_op (ins
) == 2) && (sparc_inst_op3 (ins
) == 0x7)) {
1943 /* sparc_set case 5. */
1947 reg
= sparc_inst_rd (c
[3]);
1948 sparc_set (p
, target
, reg
);
1955 // g_print ("patched with 0x%08x\n", ins);
1959 * mono_sparc_emit_save_lmf:
1961 * Emit the code necessary to push a new entry onto the lmf stack. Used by
1962 * trampolines as well.
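 *
 * Added C level sketch of what the emitted code does (illustration only; %o0 is assumed
 * to hold the lmf_addr on entry, as in the code below):
 *
 *   lmf->lmf_addr = lmf_addr;
 *   lmf->previous_lmf = *lmf_addr;
 *   *lmf_addr = lmf;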
1965 mono_sparc_emit_save_lmf (guint32
*code
, guint32 lmf_offset
)
1968 sparc_sti_imm (code
, sparc_o0
, sparc_fp
, lmf_offset
+ G_STRUCT_OFFSET (MonoLMF
, lmf_addr
));
1969 /* Save previous_lmf */
1970 sparc_ldi (code
, sparc_o0
, sparc_g0
, sparc_o7
);
1971 sparc_sti_imm (code
, sparc_o7
, sparc_fp
, lmf_offset
+ G_STRUCT_OFFSET (MonoLMF
, previous_lmf
));
1973 sparc_add_imm (code
, FALSE
, sparc_fp
, lmf_offset
, sparc_o7
);
1974 sparc_sti (code
, sparc_o7
, sparc_o0
, sparc_g0
);
1980 mono_sparc_emit_restore_lmf (guint32
*code
, guint32 lmf_offset
)
1982 /* Load previous_lmf */
1983 sparc_ldi_imm (code
, sparc_fp
, lmf_offset
+ G_STRUCT_OFFSET (MonoLMF
, previous_lmf
), sparc_l0
);
1985 sparc_ldi_imm (code
, sparc_fp
, lmf_offset
+ G_STRUCT_OFFSET (MonoLMF
, lmf_addr
), sparc_l1
);
1986 /* *(lmf) = previous_lmf */
1987 sparc_sti (code
, sparc_l0
, sparc_l1
, sparc_g0
);
1992 emit_save_sp_to_lmf (MonoCompile
*cfg
, guint32
*code
)
1995 * Since register windows are saved to the current value of %sp, we need to
1996 * set the sp field in the lmf before the call, not in the prolog.
1998 if (cfg
->method
->save_lmf
) {
1999 gint32 lmf_offset
= MONO_SPARC_STACK_BIAS
- cfg
->arch
.lmf_offset
;
2002 sparc_sti_imm (code
, sparc_sp
, sparc_fp
, lmf_offset
+ G_STRUCT_OFFSET (MonoLMF
, sp
));
2009 emit_vret_token (MonoGenericSharingContext
*gsctx
, MonoInst
*ins
, guint32
*code
)
2011 MonoCallInst
*call
= (MonoCallInst
*)ins
;
2015 * The sparc ABI requires that calls to functions which return a structure
2016 * contain an additional unimp instruction which is checked by the callee.
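 *
 * Added illustration (call-site shape under the V8 ABI, not code from this file):
 *
 *   call  func
 *    nop              ! delay slot
 *   unimp 12          ! low 12 bits = size of the returned struct
 *
 * A callee that really returns a struct skips over the unimp word; one that does not
 * would execute it and trap, which is what this check is for.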
2018 if (call
->signature
->pinvoke
&& MONO_TYPE_ISSTRUCT(call
->signature
->ret
)) {
2019 if (call
->signature
->ret
->type
== MONO_TYPE_TYPEDBYREF
)
2020 size
= mini_type_stack_size (gsctx
, call
->signature
->ret
, NULL
);
2022 size
= mono_class_native_size (call
->signature
->ret
->data
.klass
, NULL
);
2023 sparc_unimp (code
, size
& 0xfff);
2030 emit_move_return_value (MonoInst
*ins
, guint32
*code
)
2032 /* Move return value to the target register */
2033 /* FIXME: do more things in the local reg allocator */
2034 switch (ins
->opcode
) {
2036 case OP_VOIDCALL_REG
:
2037 case OP_VOIDCALL_MEMBASE
:
2041 case OP_CALL_MEMBASE
:
2042 g_assert (ins
->dreg
== sparc_o0
);
2046 case OP_LCALL_MEMBASE
:
2048 * ins->dreg is the least significant reg due to the lreg: LCALL rule
2049 * in inssel-long32.brg.
2052 sparc_mov_reg_reg (code
, sparc_o0
, ins
->dreg
);
2054 g_assert (ins
->dreg
== sparc_o1
);
2059 case OP_FCALL_MEMBASE
:
2061 if (((MonoCallInst
*)ins
)->signature
->ret
->type
== MONO_TYPE_R4
) {
2062 sparc_fmovs (code
, sparc_f0
, ins
->dreg
);
2063 sparc_fstod (code
, ins
->dreg
, ins
->dreg
);
2066 sparc_fmovd (code
, sparc_f0
, ins
->dreg
);
2068 sparc_fmovs (code
, sparc_f0
, ins
->dreg
);
2069 if (((MonoCallInst
*)ins
)->signature
->ret
->type
== MONO_TYPE_R4
)
2070 sparc_fstod (code
, ins
->dreg
, ins
->dreg
);
2072 sparc_fmovs (code
, sparc_f1
, ins
->dreg
+ 1);
2077 case OP_VCALL_MEMBASE
:
2080 case OP_VCALL2_MEMBASE
:
2090 * emit_load_volatile_arguments:
2092 * Load volatile arguments from the stack to the original input registers.
2093 * Required before a tail call.
2096 emit_load_volatile_arguments (MonoCompile
*cfg
, guint32
*code
)
2098 MonoMethod
*method
= cfg
->method
;
2099 MonoMethodSignature
*sig
;
2104 /* FIXME: Generate intermediate code instead */
2106 sig
= mono_method_signature (method
);
2108 cinfo
= get_call_info (cfg
, sig
, FALSE
);
2110 /* This is the opposite of the code in emit_prolog */
2112 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
2113 ArgInfo
*ainfo
= cinfo
->args
+ i
;
2114 gint32 stack_offset
;
2117 inst
= cfg
->args
[i
];
2119 if (sig
->hasthis
&& (i
== 0))
2120 arg_type
= &mono_defaults
.object_class
->byval_arg
;
2122 arg_type
= sig
->params
[i
- sig
->hasthis
];
2124 stack_offset
= ainfo
->offset
+ ARGS_OFFSET
;
2125 ireg
= sparc_i0
+ ainfo
->reg
;
2127 if (ainfo
->storage
== ArgInSplitRegStack
) {
2128 g_assert (inst
->opcode
== OP_REGOFFSET
);
2130 if (!sparc_is_imm13 (stack_offset
))
2132 sparc_st_imm (code
, inst
->inst_basereg
, stack_offset
, sparc_i5
);
2135 if (!v64
&& !arg_type
->byref
&& (arg_type
->type
== MONO_TYPE_R8
)) {
2136 if (ainfo
->storage
== ArgInIRegPair
) {
2137 if (!sparc_is_imm13 (inst
->inst_offset
+ 4))
2139 sparc_ld_imm (code
, inst
->inst_basereg
, inst
->inst_offset
, ireg
);
2140 sparc_ld_imm (code
, inst
->inst_basereg
, inst
->inst_offset
+ 4, ireg
+ 1);
2143 if (ainfo
->storage
== ArgInSplitRegStack
) {
2144 if (stack_offset
!= inst
->inst_offset
) {
2145 sparc_ld_imm (code
, inst
->inst_basereg
, inst
->inst_offset
, sparc_i5
);
2146 sparc_ld_imm (code
, inst
->inst_basereg
, inst
->inst_offset
+ 4, sparc_o7
);
2147 sparc_st_imm (code
, sparc_o7
, sparc_fp
, stack_offset
+ 4);
2152 if (ainfo
->storage
== ArgOnStackPair
) {
2153 if (stack_offset
!= inst
->inst_offset
) {
2154 /* stack_offset is not dword aligned, so we need to make a copy */
2155 sparc_ld_imm (code
, inst
->inst_basereg
, inst
->inst_offset
, sparc_o7
);
2156 sparc_st_imm (code
, sparc_o7
, sparc_fp
, stack_offset
);
2158 sparc_ld_imm (code
, inst
->inst_basereg
, inst
->inst_offset
+ 4, sparc_o7
);
2159 sparc_st_imm (code
, sparc_o7
, sparc_fp
, stack_offset
+ 4);
2164 g_assert_not_reached ();
2167 if ((ainfo
->storage
== ArgInIReg
) && (inst
->opcode
!= OP_REGVAR
)) {
2168 /* Argument in register, but needs to be saved to the stack */
2169 if (!sparc_is_imm13 (stack_offset
))
2171 if ((stack_offset
- ARGS_OFFSET
) & 0x1)
2172 /* FIXME: Is this ldsb or ldub ? */
2173 sparc_ldsb_imm (code
, inst
->inst_basereg
, stack_offset
, ireg
);
2175 if ((stack_offset
- ARGS_OFFSET
) & 0x2)
2176 sparc_ldsh_imm (code
, inst
->inst_basereg
, stack_offset
, ireg
);
2178 if ((stack_offset
- ARGS_OFFSET
) & 0x4)
2179 sparc_ld_imm (code
, inst
->inst_basereg
, stack_offset
, ireg
);
2182 sparc_ldx_imm (code
, inst
->inst_basereg
, stack_offset
, ireg
);
2184 sparc_ld_imm (code
, inst
->inst_basereg
, stack_offset
, ireg
);
2187 else if ((ainfo
->storage
== ArgInIRegPair
) && (inst
->opcode
!= OP_REGVAR
)) {
2188 /* Argument in regpair, but needs to be saved to the stack */
2189 if (!sparc_is_imm13 (inst
->inst_offset
+ 4))
2191 sparc_ld_imm (code
, inst
->inst_basereg
, inst
->inst_offset
, ireg
);
2192 sparc_st_imm (code
, inst
->inst_basereg
, inst
->inst_offset
+ 4, ireg
+ 1);
2194 else if ((ainfo
->storage
== ArgInFloatReg
) && (inst
->opcode
!= OP_REGVAR
)) {
2197 else if ((ainfo
->storage
== ArgInDoubleReg
) && (inst
->opcode
!= OP_REGVAR
)) {
2201 if ((ainfo
->storage
== ArgInSplitRegStack
) || (ainfo
->storage
== ArgOnStack
))
2202 if (inst
->opcode
== OP_REGVAR
)
2203 /* FIXME: Load the argument into memory */
2213 * mono_sparc_is_virtual_call:
2215 * Determine whether the instruction at CODE is a virtual call.
2218 mono_sparc_is_virtual_call (guint32
*code
)
2225 if ((sparc_inst_op (*code
) == 0x2) && (sparc_inst_op3 (*code
) == 0x38)) {
2227 * Register indirect call. If it is a virtual call, then the
2228 * instruction in the delay slot is a special kind of nop.
2231 /* Construct special nop */
2232 sparc_or_imm (p
, FALSE
, sparc_g0
, 0xca, sparc_g0
);
2235 if (code
[1] == p
[0])
2243 * mono_arch_get_vcall_slot:
2245 * Determine the vtable slot used by a virtual call.
2248 mono_arch_get_vcall_slot (guint8
*code8
, gpointer
*regs
, int *displacement
)
2250 guint32
*code
= (guint32
*)(gpointer
)code8
;
2251 guint32 ins
= code
[0];
2252 guint32 prev_ins
= code
[-1];
2254 mono_sparc_flushw ();
2258 if (!mono_sparc_is_virtual_call (code
))
2261 if ((sparc_inst_op (ins
) == 0x2) && (sparc_inst_op3 (ins
) == 0x38)) {
2262 if ((sparc_inst_op (prev_ins
) == 0x3) && (sparc_inst_i (prev_ins
) == 1) && (sparc_inst_op3 (prev_ins
) == 0 || sparc_inst_op3 (prev_ins
) == 0xb)) {
2263 /* ld [r1 + CONST ], r2; call r2 */
2264 guint32 base
= sparc_inst_rs1 (prev_ins
);
2265 gint32 disp
= (((gint32
)(sparc_inst_imm13 (prev_ins
))) << 19) >> 19;
2268 g_assert (sparc_inst_rd (prev_ins
) == sparc_inst_rs1 (ins
));
2270 g_assert ((base
>= sparc_o0
) && (base
<= sparc_i7
));
2272 base_val
= regs
[base
];
2274 *displacement
= disp
;
2276 return (gpointer
)base_val
;
2278 else if ((sparc_inst_op (prev_ins
) == 0x3) && (sparc_inst_i (prev_ins
) == 0) && (sparc_inst_op3 (prev_ins
) == 0)) {
2279 /* set r1, ICONST; ld [r1 + r2], r2; call r2 */
2280 /* Decode a sparc_set32 */
2281 guint32 base
= sparc_inst_rs1 (prev_ins
);
2284 guint32 s1
= code
[-3];
2285 guint32 s2
= code
[-2];
2292 g_assert (sparc_inst_op (s1
) == 0);
2293 g_assert (sparc_inst_op2 (s1
) == 4);
2296 g_assert (sparc_inst_op (s2
) == 2);
2297 g_assert (sparc_inst_op3 (s2
) == 2);
2298 g_assert (sparc_inst_i (s2
) == 1);
2299 g_assert (sparc_inst_rs1 (s2
) == sparc_inst_rd (s2
));
2300 g_assert (sparc_inst_rd (s1
) == sparc_inst_rs1 (s2
));
2302 disp
= ((s1
& 0x3fffff) << 10) | sparc_inst_imm13 (s2
);
2304 g_assert ((base
>= sparc_o0
) && (base
<= sparc_i7
));
2306 base_val
= regs
[base
];
2308 *displacement
= disp
;
2310 return (gpointer
)base_val
;
2312 g_assert_not_reached ();
2315 g_assert_not_reached ();
2321 mono_arch_get_vcall_slot_addr (guint8
*code
, gpointer
*regs
)
2325 vt
= mono_arch_get_vcall_slot (code
, regs
, &displacement
);
2328 return (gpointer
*)((char*)vt
+ displacement
);
#define BR_SMALL_SIZE 2
#define BR_LARGE_SIZE 2
#define JUMP_IMM_SIZE 5
#define ENABLE_WRONG_METHOD_CHECK 0

/*
 * LOCKING: called with the domain lock held
 */
gpointer
mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
	gpointer fail_tramp)
{
	int i;
	int size = 0;
	guint32 *code, *start;

	g_assert (!fail_tramp);

	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		if (item->is_equals) {
			if (item->check_target_idx) {
				if (!item->compare_done)
					item->chunk_size += CMP_SIZE;
				item->chunk_size += BR_SMALL_SIZE + JUMP_IMM_SIZE;
			} else {
				item->chunk_size += JUMP_IMM_SIZE;
#if ENABLE_WRONG_METHOD_CHECK
				item->chunk_size += CMP_SIZE + BR_SMALL_SIZE + 1;
#endif
			}
		} else {
			item->chunk_size += CMP_SIZE + BR_LARGE_SIZE;
			imt_entries [item->check_target_idx]->compare_done = TRUE;
		}
		size += item->chunk_size;
	}
	code = mono_domain_code_reserve (domain, size * 4);
	start = code;

	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		item->code_target = (guint8*)code;
		if (item->is_equals) {
			if (item->check_target_idx) {
				if (!item->compare_done) {
					sparc_set (code, (guint32)item->key, sparc_g5);
					sparc_cmp (code, MONO_ARCH_IMT_REG, sparc_g5);
				}
				item->jmp_code = (guint8*)code;
				sparc_branch (code, 0, sparc_bne, 0);
				sparc_set (code, ((guint32)(&(vtable->vtable [item->value.vtable_slot]))), sparc_g5);
				sparc_ld (code, sparc_g5, 0, sparc_g5);
				sparc_jmpl (code, sparc_g5, sparc_g0, sparc_g0);
			} else {
				/* enable the commented code to assert on wrong method */
#if ENABLE_WRONG_METHOD_CHECK
				g_assert_not_reached ();
#endif
				sparc_set (code, ((guint32)(&(vtable->vtable [item->value.vtable_slot]))), sparc_g5);
				sparc_ld (code, sparc_g5, 0, sparc_g5);
				sparc_jmpl (code, sparc_g5, sparc_g0, sparc_g0);
#if ENABLE_WRONG_METHOD_CHECK
				g_assert_not_reached ();
#endif
			}
		} else {
			sparc_set (code, (guint32)item->key, sparc_g5);
			sparc_cmp (code, MONO_ARCH_IMT_REG, sparc_g5);
			item->jmp_code = (guint8*)code;
			sparc_branch (code, 0, sparc_beu, 0);
		}
	}
	/* patch the branches to get to the target items */
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		if (item->jmp_code) {
			if (item->check_target_idx) {
				sparc_patch ((guint32*)item->jmp_code, imt_entries [item->check_target_idx]->code_target);
			}
		}
	}

	mono_arch_flush_icache ((guint8*)start, (code - start) * 4);

	mono_stats.imt_thunks_size += (code - start) * 4;
	g_assert (code - start <= size);

	return start;
}

MonoMethod*
mono_arch_find_imt_method (gpointer *regs, guint8 *code)
{
#ifdef SPARCV9
	g_assert_not_reached ();
#endif

	return (MonoMethod*)regs [sparc_g1];
}

gpointer
mono_arch_find_this_argument (gpointer *regs, MonoMethod *method, MonoGenericSharingContext *gsctx)
{
	mono_sparc_flushw ();

	return (gpointer)regs [sparc_o0];
}
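/*
 * Note: the thunk above is built in passes: the first loop only accumulates
 * per-item chunk sizes (in instruction words, hence the "size * 4" when
 * reserving code memory), the second loop emits the compare/branch/jump
 * sequences into the reserved buffer, and the final loop patches the
 * forward branches once every item's code_target is known.
 */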
/*
 * Some conventions used in the following code.
 * 2) The only scratch registers we have are o7 and g1.  We try to
 *    stick to o7 when we can, and use g1 when necessary.
 */

void
mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
{
	MonoInst *ins;
	MonoCallInst *call;
	guint offset;
	guint32 *code = (guint32*)(cfg->native_code + cfg->code_len);
	MonoInst *last_ins = NULL;
	int max_len, cpos;
	const char *spec;

	if (cfg->verbose_level > 2)
		g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);

	cpos = bb->max_offset;

	if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
	}

	MONO_BB_FOR_EACH_INS (bb, ins) {
		guint8 *code_start;

		offset = (guint8*)code - cfg->native_code;

		spec = ins_get_spec (ins->opcode);

		max_len = ((guint8 *)spec)[MONO_INST_LEN];

		if (offset > (cfg->code_size - max_len - 16)) {
			cfg->code_size *= 2;
			cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
			code = (guint32*)(cfg->native_code + offset);
		}
		code_start = (guint8*)code;
		//	if (ins->cil_code)
		//		g_print ("cil code\n");
		mono_debug_record_line_number (cfg, ins, offset);

		switch (ins->opcode) {
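		/*
		 * Note: max_len above is the per-opcode worst-case size; the buffer
		 * is doubled whenever fewer than max_len + 16 bytes remain, so the
		 * emitters in the cases below do not re-check the buffer bounds.
		 */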
		case OP_STOREI1_MEMBASE_IMM:
			EMIT_STORE_MEMBASE_IMM (ins, stb);
			break;
		case OP_STOREI2_MEMBASE_IMM:
			EMIT_STORE_MEMBASE_IMM (ins, sth);
			break;
		case OP_STORE_MEMBASE_IMM:
			EMIT_STORE_MEMBASE_IMM (ins, sti);
			break;
		case OP_STOREI4_MEMBASE_IMM:
			EMIT_STORE_MEMBASE_IMM (ins, st);
			break;
		case OP_STOREI8_MEMBASE_IMM:
#ifdef SPARCV9
			EMIT_STORE_MEMBASE_IMM (ins, stx);
#else
			/* Only generated by peephole opts */
			g_assert ((ins->inst_offset % 8) == 0);
			g_assert (ins->inst_imm == 0);
			EMIT_STORE_MEMBASE_IMM (ins, stx);
#endif
			break;
		case OP_STOREI1_MEMBASE_REG:
			EMIT_STORE_MEMBASE_REG (ins, stb);
			break;
		case OP_STOREI2_MEMBASE_REG:
			EMIT_STORE_MEMBASE_REG (ins, sth);
			break;
		case OP_STOREI4_MEMBASE_REG:
			EMIT_STORE_MEMBASE_REG (ins, st);
			break;
		case OP_STOREI8_MEMBASE_REG:
#ifdef SPARCV9
			EMIT_STORE_MEMBASE_REG (ins, stx);
#else
			/* Only used by OP_MEMSET */
			EMIT_STORE_MEMBASE_REG (ins, std);
#endif
			break;
		case OP_STORE_MEMBASE_REG:
			EMIT_STORE_MEMBASE_REG (ins, sti);
			break;
		case OP_LOADU4_MEM:
			sparc_set (code, ins->inst_c0, ins->dreg);
			sparc_ld (code, ins->dreg, sparc_g0, ins->dreg);
			break;
		case OP_LOADI4_MEMBASE:
#ifdef SPARCV9
			EMIT_LOAD_MEMBASE (ins, ldsw);
#else
			EMIT_LOAD_MEMBASE (ins, ld);
#endif
			break;
		case OP_LOADU4_MEMBASE:
			EMIT_LOAD_MEMBASE (ins, ld);
			break;
		case OP_LOADU1_MEMBASE:
			EMIT_LOAD_MEMBASE (ins, ldub);
			break;
		case OP_LOADI1_MEMBASE:
			EMIT_LOAD_MEMBASE (ins, ldsb);
			break;
		case OP_LOADU2_MEMBASE:
			EMIT_LOAD_MEMBASE (ins, lduh);
			break;
		case OP_LOADI2_MEMBASE:
			EMIT_LOAD_MEMBASE (ins, ldsh);
			break;
		case OP_LOAD_MEMBASE:
#ifdef SPARCV9
			EMIT_LOAD_MEMBASE (ins, ldx);
#else
			EMIT_LOAD_MEMBASE (ins, ld);
#endif
			break;
		case OP_LOADI8_MEMBASE:
			EMIT_LOAD_MEMBASE (ins, ldx);
			break;
		case OP_ICONV_TO_I1:
			sparc_sll_imm (code, ins->sreg1, 24, sparc_o7);
			sparc_sra_imm (code, sparc_o7, 24, ins->dreg);
			break;
		case OP_ICONV_TO_I2:
			sparc_sll_imm (code, ins->sreg1, 16, sparc_o7);
			sparc_sra_imm (code, sparc_o7, 16, ins->dreg);
			break;
		case OP_ICONV_TO_U1:
			sparc_and_imm (code, FALSE, ins->sreg1, 0xff, ins->dreg);
			break;
		case OP_ICONV_TO_U2:
			sparc_sll_imm (code, ins->sreg1, 16, sparc_o7);
			sparc_srl_imm (code, sparc_o7, 16, ins->dreg);
			break;
		case OP_LCONV_TO_OVF_U4:
		case OP_ICONV_TO_OVF_U4:
			/* Only used on V9 */
			sparc_cmp_imm (code, ins->sreg1, 0);
			mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code,
					     MONO_PATCH_INFO_EXC, "OverflowException");
			sparc_branchp (code, 0, sparc_bl, sparc_xcc_short, 0, 0);
			sparc_set (code, 1, sparc_o7);
			sparc_sllx_imm (code, sparc_o7, 32, sparc_o7);
			sparc_cmp (code, ins->sreg1, sparc_o7);
			mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code,
					     MONO_PATCH_INFO_EXC, "OverflowException");
			sparc_branchp (code, 0, sparc_bge, sparc_xcc_short, 0, 0);
			sparc_mov_reg_reg (code, ins->sreg1, ins->dreg);
			break;
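		/*
		 * Note: the ICONV_TO_* cases above use the usual shift idiom:
		 * shifting left by 32 - n and then arithmetic-shifting right (sra)
		 * by the same amount sign-extends the low n bits, while a logical
		 * shift right (srl) zero-extends them; %o7 serves as the scratch
		 * register per the convention described before this function.
		 */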
2602 case OP_LCONV_TO_OVF_I4_UN
:
2603 case OP_ICONV_TO_OVF_I4_UN
:
2604 /* Only used on V9 */
2610 sparc_cmp (code
, ins
->sreg1
, ins
->sreg2
);
2612 case OP_COMPARE_IMM
:
2613 case OP_ICOMPARE_IMM
:
2614 if (sparc_is_imm13 (ins
->inst_imm
))
2615 sparc_cmp_imm (code
, ins
->sreg1
, ins
->inst_imm
);
2617 sparc_set (code
, ins
->inst_imm
, sparc_o7
);
2618 sparc_cmp (code
, ins
->sreg1
, sparc_o7
);
2623 * gdb does not like encountering 'ta 1' in the debugged code. So
2624 * instead of emitting a trap, we emit a call a C function and place a
2627 //sparc_ta (code, 1);
2628 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_ABS
, mono_break
);
2633 sparc_add (code
, TRUE
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
2636 sparc_add (code
, FALSE
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
2641 /* according to inssel-long32.brg, this should set cc */
2642 EMIT_ALU_IMM (ins
, add
, TRUE
);
2646 /* according to inssel-long32.brg, this should set cc */
2647 sparc_addx (code
, TRUE
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
2651 EMIT_ALU_IMM (ins
, addx
, TRUE
);
2655 sparc_sub (code
, TRUE
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
2658 sparc_sub (code
, FALSE
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
2663 /* according to inssel-long32.brg, this should set cc */
2664 EMIT_ALU_IMM (ins
, sub
, TRUE
);
2668 /* according to inssel-long32.brg, this should set cc */
2669 sparc_subx (code
, TRUE
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
2673 EMIT_ALU_IMM (ins
, subx
, TRUE
);
2676 sparc_and (code
, FALSE
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
2680 EMIT_ALU_IMM (ins
, and, FALSE
);
2683 /* Sign extend sreg1 into %y */
2684 sparc_sra_imm (code
, ins
->sreg1
, 31, sparc_o7
);
2685 sparc_wry (code
, sparc_o7
, sparc_g0
);
2686 sparc_sdiv (code
, TRUE
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
2687 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code
, sparc_boverflow
, "ArithmeticException", TRUE
, sparc_icc_short
);
2690 sparc_wry (code
, sparc_g0
, sparc_g0
);
2691 sparc_udiv (code
, FALSE
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
2697 /* Transform division into a shift */
2698 for (i
= 1; i
< 30; ++i
) {
2700 if (ins
->inst_imm
== imm
)
2706 sparc_srl_imm (code
, ins
->sreg1
, 31, sparc_o7
);
2707 sparc_add (code
, FALSE
, ins
->sreg1
, sparc_o7
, ins
->dreg
);
2708 sparc_sra_imm (code
, ins
->dreg
, 1, ins
->dreg
);
2711 /* http://compilers.iecc.com/comparch/article/93-04-079 */
2712 sparc_sra_imm (code
, ins
->sreg1
, 31, sparc_o7
);
2713 sparc_srl_imm (code
, sparc_o7
, 32 - i
, sparc_o7
);
2714 sparc_add (code
, FALSE
, ins
->sreg1
, sparc_o7
, ins
->dreg
);
2715 sparc_sra_imm (code
, ins
->dreg
, i
, ins
->dreg
);
2719 /* Sign extend sreg1 into %y */
2720 sparc_sra_imm (code
, ins
->sreg1
, 31, sparc_o7
);
2721 sparc_wry (code
, sparc_o7
, sparc_g0
);
2722 EMIT_ALU_IMM (ins
, sdiv
, TRUE
);
2723 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code
, sparc_boverflow
, "ArithmeticException", TRUE
, sparc_icc_short
);
2727 case OP_IDIV_UN_IMM
:
2728 sparc_wry (code
, sparc_g0
, sparc_g0
);
2729 EMIT_ALU_IMM (ins
, udiv
, FALSE
);
2732 /* Sign extend sreg1 into %y */
2733 sparc_sra_imm (code
, ins
->sreg1
, 31, sparc_o7
);
2734 sparc_wry (code
, sparc_o7
, sparc_g0
);
2735 sparc_sdiv (code
, TRUE
, ins
->sreg1
, ins
->sreg2
, sparc_o7
);
2736 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code
, sparc_boverflow
, "ArithmeticException", TRUE
, sparc_icc_short
);
2737 sparc_smul (code
, FALSE
, ins
->sreg2
, sparc_o7
, sparc_o7
);
2738 sparc_sub (code
, FALSE
, ins
->sreg1
, sparc_o7
, ins
->dreg
);
2741 sparc_wry (code
, sparc_g0
, sparc_g0
);
2742 sparc_udiv (code
, FALSE
, ins
->sreg1
, ins
->sreg2
, sparc_o7
);
2743 sparc_umul (code
, FALSE
, ins
->sreg2
, sparc_o7
, sparc_o7
);
2744 sparc_sub (code
, FALSE
, ins
->sreg1
, sparc_o7
, ins
->dreg
);
2748 /* Sign extend sreg1 into %y */
2749 sparc_sra_imm (code
, ins
->sreg1
, 31, sparc_o7
);
2750 sparc_wry (code
, sparc_o7
, sparc_g0
);
2751 if (!sparc_is_imm13 (ins
->inst_imm
)) {
2752 sparc_set (code
, ins
->inst_imm
, GP_SCRATCH_REG
);
2753 sparc_sdiv (code
, TRUE
, ins
->sreg1
, GP_SCRATCH_REG
, sparc_o7
);
2754 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code
, sparc_boverflow
, "ArithmeticException", TRUE
, sparc_icc_short
);
2755 sparc_smul (code
, FALSE
, sparc_o7
, GP_SCRATCH_REG
, sparc_o7
);
2758 sparc_sdiv_imm (code
, TRUE
, ins
->sreg1
, ins
->inst_imm
, sparc_o7
);
2759 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code
, sparc_boverflow
, "ArithmeticException", TRUE
, sparc_icc_short
);
2760 sparc_smul_imm (code
, FALSE
, sparc_o7
, ins
->inst_imm
, sparc_o7
);
2762 sparc_sub (code
, FALSE
, ins
->sreg1
, sparc_o7
, ins
->dreg
);
2765 sparc_or (code
, FALSE
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
2769 EMIT_ALU_IMM (ins
, or, FALSE
);
2772 sparc_xor (code
, FALSE
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
2776 EMIT_ALU_IMM (ins
, xor, FALSE
);
2779 sparc_sll (code
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
2783 if (ins
->inst_imm
< (1 << 5))
2784 sparc_sll_imm (code
, ins
->sreg1
, ins
->inst_imm
, ins
->dreg
);
2786 sparc_set (code
, ins
->inst_imm
, sparc_o7
);
2787 sparc_sll (code
, ins
->sreg1
, sparc_o7
, ins
->dreg
);
2791 sparc_sra (code
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
2795 if (ins
->inst_imm
< (1 << 5))
2796 sparc_sra_imm (code
, ins
->sreg1
, ins
->inst_imm
, ins
->dreg
);
2798 sparc_set (code
, ins
->inst_imm
, sparc_o7
);
2799 sparc_sra (code
, ins
->sreg1
, sparc_o7
, ins
->dreg
);
2803 case OP_ISHR_UN_IMM
:
2804 if (ins
->inst_imm
< (1 << 5))
2805 sparc_srl_imm (code
, ins
->sreg1
, ins
->inst_imm
, ins
->dreg
);
2807 sparc_set (code
, ins
->inst_imm
, sparc_o7
);
2808 sparc_srl (code
, ins
->sreg1
, sparc_o7
, ins
->dreg
);
2812 sparc_srl (code
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
2815 sparc_sllx (code
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
2818 if (ins
->inst_imm
< (1 << 6))
2819 sparc_sllx_imm (code
, ins
->sreg1
, ins
->inst_imm
, ins
->dreg
);
2821 sparc_set (code
, ins
->inst_imm
, sparc_o7
);
2822 sparc_sllx (code
, ins
->sreg1
, sparc_o7
, ins
->dreg
);
2826 sparc_srax (code
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
2829 if (ins
->inst_imm
< (1 << 6))
2830 sparc_srax_imm (code
, ins
->sreg1
, ins
->inst_imm
, ins
->dreg
);
2832 sparc_set (code
, ins
->inst_imm
, sparc_o7
);
2833 sparc_srax (code
, ins
->sreg1
, sparc_o7
, ins
->dreg
);
2837 sparc_srlx (code
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
2839 case OP_LSHR_UN_IMM
:
2840 if (ins
->inst_imm
< (1 << 6))
2841 sparc_srlx_imm (code
, ins
->sreg1
, ins
->inst_imm
, ins
->dreg
);
2843 sparc_set (code
, ins
->inst_imm
, sparc_o7
);
2844 sparc_srlx (code
, ins
->sreg1
, sparc_o7
, ins
->dreg
);
2848 /* can't use sparc_not */
2849 sparc_xnor (code
, FALSE
, ins
->sreg1
, sparc_g0
, ins
->dreg
);
2852 /* can't use sparc_neg */
2853 sparc_sub (code
, FALSE
, sparc_g0
, ins
->sreg1
, ins
->dreg
);
2856 sparc_smul (code
, FALSE
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
2862 if ((ins
->inst_imm
== 1) && (ins
->sreg1
== ins
->dreg
))
2865 /* Transform multiplication into a shift */
2866 for (i
= 0; i
< 30; ++i
) {
2868 if (ins
->inst_imm
== imm
)
2872 sparc_sll_imm (code
, ins
->sreg1
, i
, ins
->dreg
);
2874 EMIT_ALU_IMM (ins
, smul
, FALSE
);
2878 sparc_smul (code
, TRUE
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
2879 sparc_rdy (code
, sparc_g1
);
2880 sparc_sra_imm (code
, ins
->dreg
, 31, sparc_o7
);
2881 sparc_cmp (code
, sparc_g1
, sparc_o7
);
2882 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (ins
, sparc_bne
, "OverflowException", TRUE
, sparc_icc_short
);
2884 case OP_IMUL_OVF_UN
:
2885 sparc_umul (code
, TRUE
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
2886 sparc_rdy (code
, sparc_o7
);
2887 sparc_cmp (code
, sparc_o7
, sparc_g0
);
2888 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (ins
, sparc_bne
, "OverflowException", TRUE
, sparc_icc_short
);
2891 sparc_set (code
, ins
->inst_c0
, ins
->dreg
);
2894 sparc_set (code
, ins
->inst_l
, ins
->dreg
);
2897 mono_add_patch_info (cfg
, offset
, (MonoJumpInfoType
)ins
->inst_i1
, ins
->inst_p0
);
2898 sparc_set_template (code
, ins
->dreg
);
2901 mono_add_patch_info (cfg
, offset
, (MonoJumpInfoType
)ins
->inst_i1
, ins
->inst_p0
);
2902 sparc_set_template (code
, ins
->dreg
);
2904 case OP_ICONV_TO_I4
:
2905 case OP_ICONV_TO_U4
:
2907 if (ins
->sreg1
!= ins
->dreg
)
2908 sparc_mov_reg_reg (code
, ins
->sreg1
, ins
->dreg
);
2912 if (ins
->sreg1
!= ins
->dreg
)
2913 sparc_fmovd (code
, ins
->sreg1
, ins
->dreg
);
2915 sparc_fmovs (code
, ins
->sreg1
, ins
->dreg
);
2916 sparc_fmovs (code
, ins
->sreg1
+ 1, ins
->dreg
+ 1);
2920 if (cfg
->method
->save_lmf
)
2923 code
= emit_load_volatile_arguments (cfg
, code
);
2924 mono_add_patch_info (cfg
, (guint8
*)code
- cfg
->native_code
, MONO_PATCH_INFO_METHOD_JUMP
, ins
->inst_p0
);
2925 sparc_set_template (code
, sparc_o7
);
2926 sparc_jmpl (code
, sparc_o7
, sparc_g0
, sparc_g0
);
2927 /* Restore parent frame in delay slot */
2928 sparc_restore_imm (code
, sparc_g0
, 0, sparc_g0
);
2931 /* ensure ins->sreg1 is not NULL */
2932 /* Might be misaligned in case of vtypes so use a byte load */
2933 sparc_ldsb_imm (code
, ins
->sreg1
, 0, sparc_g0
);
2936 sparc_add_imm (code
, FALSE
, sparc_fp
, cfg
->sig_cookie
, sparc_o7
);
2937 sparc_sti_imm (code
, sparc_o7
, ins
->sreg1
, 0);
		case OP_FCALL:
		case OP_LCALL:
		case OP_VCALL:
		case OP_VCALL2:
		case OP_VOIDCALL:
		case OP_CALL:
			call = (MonoCallInst*)ins;
			g_assert (!call->virtual);
			code = emit_save_sp_to_lmf (cfg, code);
			if (ins->flags & MONO_INST_HAS_METHOD)
				code = emit_call (cfg, code, MONO_PATCH_INFO_METHOD, call->method);
			else
				code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, call->fptr);

			code = emit_vret_token (cfg->generic_sharing_context, ins, code);
			code = emit_move_return_value (ins, code);
			break;
		case OP_FCALL_REG:
		case OP_LCALL_REG:
		case OP_VCALL_REG:
		case OP_VCALL2_REG:
		case OP_VOIDCALL_REG:
		case OP_CALL_REG:
			call = (MonoCallInst*)ins;
			code = emit_save_sp_to_lmf (cfg, code);
			sparc_jmpl (code, ins->sreg1, sparc_g0, sparc_callsite);
			/*
			 * We emit a special kind of nop in the delay slot to tell the 
			 * trampoline code that this is a virtual call, thus an unbox
			 * trampoline might need to be called.
			 */
			sparc_or_imm (code, FALSE, sparc_g0, 0xca, sparc_g0);

			code = emit_vret_token (cfg->generic_sharing_context, ins, code);
			code = emit_move_return_value (ins, code);
			break;
		case OP_FCALL_MEMBASE:
		case OP_LCALL_MEMBASE:
		case OP_VCALL_MEMBASE:
		case OP_VCALL2_MEMBASE:
		case OP_VOIDCALL_MEMBASE:
		case OP_CALL_MEMBASE:
			call = (MonoCallInst*)ins;
			code = emit_save_sp_to_lmf (cfg, code);
			if (sparc_is_imm13 (ins->inst_offset)) {
				sparc_ldi_imm (code, ins->inst_basereg, ins->inst_offset, sparc_o7);
			} else {
				sparc_set (code, ins->inst_offset, sparc_o7);
				sparc_ldi (code, ins->inst_basereg, sparc_o7, sparc_o7);
			}
			sparc_jmpl (code, sparc_o7, sparc_g0, sparc_callsite);
			sparc_or_imm (code, FALSE, sparc_g0, 0xca, sparc_g0);

			code = emit_vret_token (cfg->generic_sharing_context, ins, code);
			code = emit_move_return_value (ins, code);
			break;
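		/*
		 * Note: the "or %g0, 0xca, %g0" emitted after the register and
		 * membase calls above fills the call's delay slot with the special
		 * nop that mono_sparc_is_virtual_call () looks for, so trampoline
		 * code can tell these calls apart from direct calls.
		 */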
3002 if (mono_method_signature (cfg
->method
)->ret
->type
== MONO_TYPE_R4
)
3003 sparc_fdtos (code
, ins
->sreg1
, sparc_f0
);
3006 sparc_fmovd (code
, ins
->sreg1
, ins
->dreg
);
3008 /* FIXME: Why not use fmovd ? */
3009 sparc_fmovs (code
, ins
->sreg1
, ins
->dreg
);
3010 sparc_fmovs (code
, ins
->sreg1
+ 1, ins
->dreg
+ 1);
3018 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
3019 /* Perform stack touching */
3023 /* Keep alignment */
3024 /* Add 4 to compensate for the rounding of localloc_offset */
3025 sparc_add_imm (code
, FALSE
, ins
->sreg1
, 4 + MONO_ARCH_LOCALLOC_ALIGNMENT
- 1, ins
->dreg
);
3026 sparc_set (code
, ~(MONO_ARCH_LOCALLOC_ALIGNMENT
- 1), sparc_o7
);
3027 sparc_and (code
, FALSE
, ins
->dreg
, sparc_o7
, ins
->dreg
);
3029 if ((ins
->flags
& MONO_INST_INIT
) && (ins
->sreg1
== ins
->dreg
)) {
3031 size_reg
= sparc_g4
;
3033 size_reg
= sparc_g1
;
3035 sparc_mov_reg_reg (code
, ins
->dreg
, size_reg
);
3038 size_reg
= ins
->sreg1
;
3040 sparc_sub (code
, FALSE
, sparc_sp
, ins
->dreg
, ins
->dreg
);
3041 /* Keep %sp valid at all times */
3042 sparc_mov_reg_reg (code
, ins
->dreg
, sparc_sp
);
3043 /* Round localloc_offset too so the result is at least 8 aligned */
3044 offset2
= ALIGN_TO (cfg
->arch
.localloc_offset
, 8);
3045 g_assert (sparc_is_imm13 (MONO_SPARC_STACK_BIAS
+ offset2
));
3046 sparc_add_imm (code
, FALSE
, ins
->dreg
, MONO_SPARC_STACK_BIAS
+ offset2
, ins
->dreg
);
3048 if (ins
->flags
& MONO_INST_INIT
) {
3050 /* Initialize memory region */
3051 sparc_cmp_imm (code
, size_reg
, 0);
3053 sparc_branch (code
, 0, sparc_be
, 0);
3055 sparc_set (code
, 0, sparc_o7
);
3056 sparc_sub_imm (code
, 0, size_reg
, sparcv9
? 8 : 4, size_reg
);
3060 sparc_stx (code
, sparc_g0
, ins
->dreg
, sparc_o7
);
3062 sparc_st (code
, sparc_g0
, ins
->dreg
, sparc_o7
);
3063 sparc_cmp (code
, sparc_o7
, size_reg
);
3065 sparc_branch (code
, 0, sparc_bl
, 0);
3066 sparc_patch (br
[2], br
[1]);
3068 sparc_add_imm (code
, 0, sparc_o7
, sparcv9
? 8 : 4, sparc_o7
);
3069 sparc_patch (br
[0], code
);
3073 case OP_LOCALLOC_IMM
: {
3074 gint32 offset
= ins
->inst_imm
;
3077 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
3078 /* Perform stack touching */
3082 /* To compensate for the rounding of localloc_offset */
3083 offset
+= sizeof (gpointer
);
3084 offset
= ALIGN_TO (offset
, MONO_ARCH_FRAME_ALIGNMENT
);
3085 if (sparc_is_imm13 (offset
))
3086 sparc_sub_imm (code
, FALSE
, sparc_sp
, offset
, sparc_sp
);
3088 sparc_set (code
, offset
, sparc_o7
);
3089 sparc_sub (code
, FALSE
, sparc_sp
, sparc_o7
, sparc_sp
);
3091 /* Round localloc_offset too so the result is at least 8 aligned */
3092 offset2
= ALIGN_TO (cfg
->arch
.localloc_offset
, 8);
3093 g_assert (sparc_is_imm13 (MONO_SPARC_STACK_BIAS
+ offset2
));
3094 sparc_add_imm (code
, FALSE
, sparc_sp
, MONO_SPARC_STACK_BIAS
+ offset2
, ins
->dreg
);
3095 if ((ins
->flags
& MONO_INST_INIT
) && (offset
> 0)) {
3101 while (i
< offset
) {
3103 sparc_stx_imm (code
, sparc_g0
, ins
->dreg
, i
);
3107 sparc_st_imm (code
, sparc_g0
, ins
->dreg
, i
);
3113 sparc_set (code
, offset
, sparc_o7
);
3114 sparc_sub_imm (code
, 0, sparc_o7
, sparcv9
? 8 : 4, sparc_o7
);
3115 /* beginning of loop */
3118 sparc_stx (code
, sparc_g0
, ins
->dreg
, sparc_o7
);
3120 sparc_st (code
, sparc_g0
, ins
->dreg
, sparc_o7
);
3121 sparc_cmp_imm (code
, sparc_o7
, 0);
3123 sparc_branch (code
, 0, sparc_bne
, 0);
3125 sparc_sub_imm (code
, 0, sparc_o7
, sparcv9
? 8 : 4, sparc_o7
);
3126 sparc_patch (br
[1], br
[0]);
3132 sparc_mov_reg_reg (code
, ins
->sreg1
, sparc_o0
);
3133 mono_add_patch_info (cfg
, (guint8
*)code
- cfg
->native_code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
3134 (gpointer
)"mono_arch_throw_exception");
3138 sparc_mov_reg_reg (code
, ins
->sreg1
, sparc_o0
);
3139 mono_add_patch_info (cfg
, (guint8
*)code
- cfg
->native_code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
3140 (gpointer
)"mono_arch_rethrow_exception");
		case OP_START_HANDLER: {
			/*
			 * The START_HANDLER instruction marks the beginning of a handler 
			 * block. It is called using a call instruction, so %o7 contains 
			 * the return address. Since the handler executes in the same stack
			 * frame as the method itself, we can't use save/restore to save 
			 * the return address. Instead, we save it into a dedicated 
			 * variable.
			 */
			MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
			if (!sparc_is_imm13 (spvar->inst_offset)) {
				sparc_set (code, spvar->inst_offset, GP_SCRATCH_REG);
				sparc_sti (code, sparc_o7, spvar->inst_basereg, GP_SCRATCH_REG);
			}
			else
				sparc_sti_imm (code, sparc_o7, spvar->inst_basereg, spvar->inst_offset);
			break;
		}
		case OP_ENDFILTER: {
			MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
			if (!sparc_is_imm13 (spvar->inst_offset)) {
				sparc_set (code, spvar->inst_offset, GP_SCRATCH_REG);
				sparc_ldi (code, spvar->inst_basereg, GP_SCRATCH_REG, sparc_o7);
			}
			else
				sparc_ldi_imm (code, spvar->inst_basereg, spvar->inst_offset, sparc_o7);
			sparc_jmpl_imm (code, sparc_o7, 8, sparc_g0);
			sparc_mov_reg_reg (code, ins->sreg1, sparc_o0);
			break;
		}
		case OP_ENDFINALLY: {
			MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
			if (!sparc_is_imm13 (spvar->inst_offset)) {
				sparc_set (code, spvar->inst_offset, GP_SCRATCH_REG);
				sparc_ldi (code, spvar->inst_basereg, GP_SCRATCH_REG, sparc_o7);
			}
			else
				sparc_ldi_imm (code, spvar->inst_basereg, spvar->inst_offset, sparc_o7);
			sparc_jmpl_imm (code, sparc_o7, 8, sparc_g0);
			break;
		}
		case OP_CALL_HANDLER:
			mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
			/* This is a jump inside the method, so call_simple works even on V9 */
			sparc_call_simple (code, 0);
			break;
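		/*
		 * Note: ENDFILTER/ENDFINALLY above return with "jmpl %o7 + 8"
		 * because the saved %o7 points at the call instruction itself;
		 * adding 8 skips both the call and its delay slot, which is the
		 * standard SPARC return sequence.
		 */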
3193 ins
->inst_c0
= (guint8
*)code
- cfg
->native_code
;
3195 case OP_RELAXED_NOP
:
3198 case OP_DUMMY_STORE
:
3199 case OP_NOT_REACHED
:
3203 //g_print ("target: %p, next: %p, curr: %p, last: %p\n", ins->inst_target_bb, bb->next_bb, ins, bb->last_ins);
3204 if ((ins
->inst_target_bb
== bb
->next_bb
) && ins
== bb
->last_ins
)
3206 if (ins
->flags
& MONO_INST_BRLABEL
) {
3207 if (ins
->inst_i0
->inst_c0
) {
3208 gint32 disp
= (ins
->inst_i0
->inst_c0
- ((guint8
*)code
- cfg
->native_code
)) >> 2;
3209 g_assert (sparc_is_imm22 (disp
));
3210 sparc_branch (code
, 1, sparc_ba
, disp
);
3212 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_LABEL
, ins
->inst_i0
);
3213 sparc_branch (code
, 1, sparc_ba
, 0);
3216 if (ins
->inst_target_bb
->native_offset
) {
3217 gint32 disp
= (ins
->inst_target_bb
->native_offset
- ((guint8
*)code
- cfg
->native_code
)) >> 2;
3218 g_assert (sparc_is_imm22 (disp
));
3219 sparc_branch (code
, 1, sparc_ba
, disp
);
3221 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_BB
, ins
->inst_target_bb
);
3222 sparc_branch (code
, 1, sparc_ba
, 0);
3228 sparc_jmp (code
, ins
->sreg1
, sparc_g0
);
3236 if (v64
&& (cfg
->opt
& MONO_OPT_CMOV
)) {
3237 sparc_clr_reg (code
, ins
->dreg
);
3238 sparc_movcc_imm (code
, sparc_xcc
, opcode_to_sparc_cond (ins
->opcode
), 1, ins
->dreg
);
3241 sparc_clr_reg (code
, ins
->dreg
);
3243 sparc_branchp (code
, 1, opcode_to_sparc_cond (ins
->opcode
), DEFAULT_ICC
, 0, 2);
3245 sparc_branch (code
, 1, opcode_to_sparc_cond (ins
->opcode
), 2);
3248 sparc_set (code
, 1, ins
->dreg
);
3256 if (v64
&& (cfg
->opt
& MONO_OPT_CMOV
)) {
3257 sparc_clr_reg (code
, ins
->dreg
);
3258 sparc_movcc_imm (code
, sparc_icc
, opcode_to_sparc_cond (ins
->opcode
), 1, ins
->dreg
);
3261 sparc_clr_reg (code
, ins
->dreg
);
3262 sparc_branchp (code
, 1, opcode_to_sparc_cond (ins
->opcode
), sparc_icc_short
, 0, 2);
3264 sparc_set (code
, 1, ins
->dreg
);
3267 case OP_COND_EXC_EQ
:
3268 case OP_COND_EXC_NE_UN
:
3269 case OP_COND_EXC_LT
:
3270 case OP_COND_EXC_LT_UN
:
3271 case OP_COND_EXC_GT
:
3272 case OP_COND_EXC_GT_UN
:
3273 case OP_COND_EXC_GE
:
3274 case OP_COND_EXC_GE_UN
:
3275 case OP_COND_EXC_LE
:
3276 case OP_COND_EXC_LE_UN
:
3277 case OP_COND_EXC_OV
:
3278 case OP_COND_EXC_NO
:
3280 case OP_COND_EXC_NC
:
3281 case OP_COND_EXC_IEQ
:
3282 case OP_COND_EXC_INE_UN
:
3283 case OP_COND_EXC_ILT
:
3284 case OP_COND_EXC_ILT_UN
:
3285 case OP_COND_EXC_IGT
:
3286 case OP_COND_EXC_IGT_UN
:
3287 case OP_COND_EXC_IGE
:
3288 case OP_COND_EXC_IGE_UN
:
3289 case OP_COND_EXC_ILE
:
3290 case OP_COND_EXC_ILE_UN
:
3291 case OP_COND_EXC_IOV
:
3292 case OP_COND_EXC_INO
:
3293 case OP_COND_EXC_IC
:
3294 case OP_COND_EXC_INC
:
3298 EMIT_COND_SYSTEM_EXCEPTION (ins
, opcode_to_sparc_cond (ins
->opcode
), ins
->inst_p1
);
3301 case OP_SPARC_COND_EXC_EQZ
:
3302 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins
, brz
, ins
->inst_p1
);
3304 case OP_SPARC_COND_EXC_GEZ
:
3305 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins
, brgez
, ins
->inst_p1
);
3307 case OP_SPARC_COND_EXC_GTZ
:
3308 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins
, brgz
, ins
->inst_p1
);
3310 case OP_SPARC_COND_EXC_LEZ
:
3311 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins
, brlez
, ins
->inst_p1
);
3313 case OP_SPARC_COND_EXC_LTZ
:
3314 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins
, brlz
, ins
->inst_p1
);
3316 case OP_SPARC_COND_EXC_NEZ
:
3317 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins
, brnz
, ins
->inst_p1
);
3331 EMIT_COND_BRANCH_PREDICTED (ins
, opcode_to_sparc_cond (ins
->opcode
), 1, 1);
3333 EMIT_COND_BRANCH (ins
, opcode_to_sparc_cond (ins
->opcode
), 1, 1);
3338 EMIT_COND_BRANCH_BPR (ins
, brz
, 1, 1, 1);
3340 case OP_SPARC_BRLEZ
:
3341 EMIT_COND_BRANCH_BPR (ins
, brlez
, 1, 1, 1);
3344 EMIT_COND_BRANCH_BPR (ins
, brlz
, 1, 1, 1);
3347 EMIT_COND_BRANCH_BPR (ins
, brnz
, 1, 1, 1);
3350 EMIT_COND_BRANCH_BPR (ins
, brgz
, 1, 1, 1);
3352 case OP_SPARC_BRGEZ
:
3353 EMIT_COND_BRANCH_BPR (ins
, brgez
, 1, 1, 1);
3356 /* floating point opcodes */
3358 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_R8
, ins
->inst_p0
);
3360 sparc_set_template (code
, sparc_o7
);
3362 sparc_sethi (code
, 0, sparc_o7
);
3364 sparc_lddf_imm (code
, sparc_o7
, 0, ins
->dreg
);
3367 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_R4
, ins
->inst_p0
);
3369 sparc_set_template (code
, sparc_o7
);
3371 sparc_sethi (code
, 0, sparc_o7
);
3373 sparc_ldf_imm (code
, sparc_o7
, 0, FP_SCRATCH_REG
);
3375 /* Extend to double */
3376 sparc_fstod (code
, FP_SCRATCH_REG
, ins
->dreg
);
3378 case OP_STORER8_MEMBASE_REG
:
3379 if (!sparc_is_imm13 (ins
->inst_offset
+ 4)) {
3380 sparc_set (code
, ins
->inst_offset
, sparc_o7
);
3381 /* SPARCV9 handles misaligned fp loads/stores */
3382 if (!v64
&& (ins
->inst_offset
% 8)) {
3384 sparc_add (code
, FALSE
, ins
->inst_destbasereg
, sparc_o7
, sparc_o7
);
3385 sparc_stf (code
, ins
->sreg1
, sparc_o7
, sparc_g0
);
3386 sparc_stf_imm (code
, ins
->sreg1
+ 1, sparc_o7
, 4);
3388 sparc_stdf (code
, ins
->sreg1
, ins
->inst_destbasereg
, sparc_o7
);
3391 if (!v64
&& (ins
->inst_offset
% 8)) {
3393 sparc_stf_imm (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
3394 sparc_stf_imm (code
, ins
->sreg1
+ 1, ins
->inst_destbasereg
, ins
->inst_offset
+ 4);
3396 sparc_stdf_imm (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
3399 case OP_LOADR8_MEMBASE
:
3400 EMIT_LOAD_MEMBASE (ins
, lddf
);
3402 case OP_STORER4_MEMBASE_REG
:
3403 /* This requires a double->single conversion */
3404 sparc_fdtos (code
, ins
->sreg1
, FP_SCRATCH_REG
);
3405 if (!sparc_is_imm13 (ins
->inst_offset
)) {
3406 sparc_set (code
, ins
->inst_offset
, sparc_o7
);
3407 sparc_stf (code
, FP_SCRATCH_REG
, ins
->inst_destbasereg
, sparc_o7
);
3410 sparc_stf_imm (code
, FP_SCRATCH_REG
, ins
->inst_destbasereg
, ins
->inst_offset
);
3412 case OP_LOADR4_MEMBASE
: {
3413 /* ldf needs a single precision register */
3414 int dreg
= ins
->dreg
;
3415 ins
->dreg
= FP_SCRATCH_REG
;
3416 EMIT_LOAD_MEMBASE (ins
, ldf
);
3418 /* Extend to double */
3419 sparc_fstod (code
, FP_SCRATCH_REG
, ins
->dreg
);
3422 case OP_ICONV_TO_R4
: {
3423 gint32 offset
= cfg
->arch
.float_spill_slot_offset
;
3425 if (!sparc_is_imm13 (offset
)) {
3426 sparc_set (code
, offset
, sparc_o7
);
3427 sparc_stx (code
, ins
->sreg1
, sparc_sp
, offset
);
3428 sparc_lddf (code
, sparc_sp
, offset
, FP_SCRATCH_REG
);
3430 sparc_stx_imm (code
, ins
->sreg1
, sparc_sp
, offset
);
3431 sparc_lddf_imm (code
, sparc_sp
, offset
, FP_SCRATCH_REG
);
3433 sparc_fxtos (code
, FP_SCRATCH_REG
, FP_SCRATCH_REG
);
3435 if (!sparc_is_imm13 (offset
)) {
3436 sparc_set (code
, offset
, sparc_o7
);
3437 sparc_st (code
, ins
->sreg1
, sparc_sp
, sparc_o7
);
3438 sparc_ldf (code
, sparc_sp
, sparc_o7
, FP_SCRATCH_REG
);
3440 sparc_st_imm (code
, ins
->sreg1
, sparc_sp
, offset
);
3441 sparc_ldf_imm (code
, sparc_sp
, offset
, FP_SCRATCH_REG
);
3443 sparc_fitos (code
, FP_SCRATCH_REG
, FP_SCRATCH_REG
);
3445 sparc_fstod (code
, FP_SCRATCH_REG
, ins
->dreg
);
3448 case OP_ICONV_TO_R8
: {
3449 gint32 offset
= cfg
->arch
.float_spill_slot_offset
;
3451 if (!sparc_is_imm13 (offset
)) {
3452 sparc_set (code
, offset
, sparc_o7
);
3453 sparc_stx (code
, ins
->sreg1
, sparc_sp
, sparc_o7
);
3454 sparc_lddf (code
, sparc_sp
, sparc_o7
, FP_SCRATCH_REG
);
3456 sparc_stx_imm (code
, ins
->sreg1
, sparc_sp
, offset
);
3457 sparc_lddf_imm (code
, sparc_sp
, offset
, FP_SCRATCH_REG
);
3459 sparc_fxtod (code
, FP_SCRATCH_REG
, ins
->dreg
);
3461 if (!sparc_is_imm13 (offset
)) {
3462 sparc_set (code
, offset
, sparc_o7
);
3463 sparc_st (code
, ins
->sreg1
, sparc_sp
, sparc_o7
);
3464 sparc_ldf (code
, sparc_sp
, sparc_o7
, FP_SCRATCH_REG
);
3466 sparc_st_imm (code
, ins
->sreg1
, sparc_sp
, offset
);
3467 sparc_ldf_imm (code
, sparc_sp
, offset
, FP_SCRATCH_REG
);
3469 sparc_fitod (code
, FP_SCRATCH_REG
, ins
->dreg
);
3473 case OP_FCONV_TO_I1
:
3474 case OP_FCONV_TO_U1
:
3475 case OP_FCONV_TO_I2
:
3476 case OP_FCONV_TO_U2
:
3481 case OP_FCONV_TO_I4
:
3482 case OP_FCONV_TO_U4
: {
3483 gint32 offset
= cfg
->arch
.float_spill_slot_offset
;
3484 sparc_fdtoi (code
, ins
->sreg1
, FP_SCRATCH_REG
);
3485 if (!sparc_is_imm13 (offset
)) {
3486 sparc_set (code
, offset
, sparc_o7
);
3487 sparc_stdf (code
, FP_SCRATCH_REG
, sparc_sp
, sparc_o7
);
3488 sparc_ld (code
, sparc_sp
, sparc_o7
, ins
->dreg
);
3490 sparc_stdf_imm (code
, FP_SCRATCH_REG
, sparc_sp
, offset
);
3491 sparc_ld_imm (code
, sparc_sp
, offset
, ins
->dreg
);
3494 switch (ins
->opcode
) {
3495 case OP_FCONV_TO_I1
:
3496 case OP_FCONV_TO_U1
:
3497 sparc_and_imm (code
, 0, ins
->dreg
, 0xff, ins
->dreg
);
3499 case OP_FCONV_TO_I2
:
3500 case OP_FCONV_TO_U2
:
3501 sparc_set (code
, 0xffff, sparc_o7
);
3502 sparc_and (code
, 0, ins
->dreg
, sparc_o7
, ins
->dreg
);
3509 case OP_FCONV_TO_I8
:
3510 case OP_FCONV_TO_U8
:
3512 g_assert_not_reached ();
3514 case OP_FCONV_TO_R4
:
3515 /* FIXME: Change precision ? */
3517 sparc_fmovd (code
, ins
->sreg1
, ins
->dreg
);
3519 sparc_fmovs (code
, ins
->sreg1
, ins
->dreg
);
3520 sparc_fmovs (code
, ins
->sreg1
+ 1, ins
->dreg
+ 1);
3523 case OP_LCONV_TO_R_UN
: {
3525 g_assert_not_reached ();
3528 case OP_LCONV_TO_OVF_I
:
3529 case OP_LCONV_TO_OVF_I4_2
: {
3530 guint32
*br
[3], *label
[1];
3533 * Valid ints: 0xffffffff:8000000 to 00000000:0x7f000000
3535 sparc_cmp_imm (code
, ins
->sreg1
, 0);
3537 sparc_branch (code
, 1, sparc_bneg
, 0);
3541 /* ms word must be 0 */
3542 sparc_cmp_imm (code
, ins
->sreg2
, 0);
3544 sparc_branch (code
, 1, sparc_be
, 0);
3549 EMIT_COND_SYSTEM_EXCEPTION (ins
, sparc_ba
, "OverflowException");
3552 sparc_patch (br
[0], code
);
3554 /* ms word must 0xfffffff */
3555 sparc_cmp_imm (code
, ins
->sreg2
, -1);
3557 sparc_branch (code
, 1, sparc_bne
, 0);
3559 sparc_patch (br
[2], label
[0]);
3562 sparc_patch (br
[1], code
);
3563 if (ins
->sreg1
!= ins
->dreg
)
3564 sparc_mov_reg_reg (code
, ins
->sreg1
, ins
->dreg
);
3568 sparc_faddd (code
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
3571 sparc_fsubd (code
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
3574 sparc_fmuld (code
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
3577 sparc_fdivd (code
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
3581 sparc_fnegd (code
, ins
->sreg1
, ins
->dreg
);
3583 /* FIXME: why don't use fnegd ? */
3584 sparc_fnegs (code
, ins
->sreg1
, ins
->dreg
);
3588 sparc_fdivd (code
, ins
->sreg1
, ins
->sreg2
, FP_SCRATCH_REG
);
3589 sparc_fmuld (code
, ins
->sreg2
, FP_SCRATCH_REG
, FP_SCRATCH_REG
);
3590 sparc_fsubd (code
, ins
->sreg1
, FP_SCRATCH_REG
, ins
->dreg
);
3593 sparc_fcmpd (code
, ins
->sreg1
, ins
->sreg2
);
3600 sparc_fcmpd (code
, ins
->sreg1
, ins
->sreg2
);
3601 sparc_clr_reg (code
, ins
->dreg
);
3602 switch (ins
->opcode
) {
3605 sparc_fbranch (code
, 1, opcode_to_sparc_cond (ins
->opcode
), 4);
3607 sparc_set (code
, 1, ins
->dreg
);
3608 sparc_fbranch (code
, 1, sparc_fbu
, 2);
3610 sparc_set (code
, 1, ins
->dreg
);
3613 sparc_fbranch (code
, 1, opcode_to_sparc_cond (ins
->opcode
), 2);
3615 sparc_set (code
, 1, ins
->dreg
);
3621 EMIT_FLOAT_COND_BRANCH (ins
, opcode_to_sparc_cond (ins
->opcode
), 1, 1);
3624 /* clt.un + brfalse */
3626 sparc_fbranch (code
, 1, sparc_fbul
, 0);
3629 EMIT_FLOAT_COND_BRANCH (ins
, sparc_fba
, 1, 1);
3630 sparc_patch (p
, (guint8
*)code
);
3634 /* cgt.un + brfalse */
3636 sparc_fbranch (code
, 1, sparc_fbug
, 0);
3639 EMIT_FLOAT_COND_BRANCH (ins
, sparc_fba
, 1, 1);
3640 sparc_patch (p
, (guint8
*)code
);
3644 EMIT_FLOAT_COND_BRANCH (ins
, sparc_fbne
, 1, 1);
3645 EMIT_FLOAT_COND_BRANCH (ins
, sparc_fbu
, 1, 1);
3648 EMIT_FLOAT_COND_BRANCH (ins
, sparc_fbl
, 1, 1);
3649 EMIT_FLOAT_COND_BRANCH (ins
, sparc_fbu
, 1, 1);
3652 EMIT_FLOAT_COND_BRANCH (ins
, sparc_fbg
, 1, 1);
3653 EMIT_FLOAT_COND_BRANCH (ins
, sparc_fbu
, 1, 1);
3656 EMIT_FLOAT_COND_BRANCH (ins
, sparc_fbge
, 1, 1);
3657 EMIT_FLOAT_COND_BRANCH (ins
, sparc_fbu
, 1, 1);
3660 EMIT_FLOAT_COND_BRANCH (ins
, sparc_fble
, 1, 1);
3661 EMIT_FLOAT_COND_BRANCH (ins
, sparc_fbu
, 1, 1);
3664 gint32 offset
= cfg
->arch
.float_spill_slot_offset
;
3665 if (!sparc_is_imm13 (offset
)) {
3666 sparc_set (code
, offset
, sparc_o7
);
3667 sparc_stdf (code
, ins
->sreg1
, sparc_sp
, sparc_o7
);
3668 sparc_lduh (code
, sparc_sp
, sparc_o7
, sparc_o7
);
3670 sparc_stdf_imm (code
, ins
->sreg1
, sparc_sp
, offset
);
3671 sparc_lduh_imm (code
, sparc_sp
, offset
, sparc_o7
);
3673 sparc_srl_imm (code
, sparc_o7
, 4, sparc_o7
);
3674 sparc_and_imm (code
, FALSE
, sparc_o7
, 2047, sparc_o7
);
3675 sparc_cmp_imm (code
, sparc_o7
, 2047);
3676 EMIT_COND_SYSTEM_EXCEPTION (ins
, sparc_be
, "ArithmeticException");
3678 sparc_fmovd (code
, ins
->sreg1
, ins
->dreg
);
3680 sparc_fmovs (code
, ins
->sreg1
, ins
->dreg
);
3681 sparc_fmovs (code
, ins
->sreg1
+ 1, ins
->dreg
+ 1);
		case OP_MEMORY_BARRIER:
			sparc_membar (code, sparc_membar_all);
			break;

		default:
#ifdef __GNUC__
			g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
#else
			g_warning ("%s:%d: unknown opcode %s\n", __FILE__, __LINE__, mono_inst_name (ins->opcode));
#endif
			g_assert_not_reached ();
		}

		if ((((guint8*)code) - code_start) > max_len) {
			g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
				   mono_inst_name (ins->opcode), max_len, ((guint8*)code) - code_start);
			g_assert_not_reached ();
		}

		cpos += max_len;

		last_ins = ins;
	}

	cfg->code_len = (guint8*)code - cfg->native_code;
}
void
mono_arch_register_lowlevel_calls (void)
{
	mono_register_jit_icall (mono_arch_get_lmf_addr, "mono_arch_get_lmf_addr", NULL, TRUE);
}

void
mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors)
{
	MonoJumpInfo *patch_info;

	/* FIXME: Move part of this to arch independent code */
	for (patch_info = ji; patch_info; patch_info = patch_info->next) {
		unsigned char *ip = patch_info->ip.i + code;
		gpointer target;

		target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);

		switch (patch_info->type) {
		case MONO_PATCH_INFO_NONE:
			continue;
		case MONO_PATCH_INFO_CLASS_INIT: {
			guint32 *ip2 = (guint32*)ip;
			/* Might already have been changed to a nop */
#ifdef SPARCV9
			sparc_set_template (ip2, sparc_o7);
			sparc_jmpl (ip2, sparc_o7, sparc_g0, sparc_o7);
#else
			sparc_call_simple (ip2, 0);
#endif
			break;
		}
		case MONO_PATCH_INFO_METHOD_JUMP: {
			guint32 *ip2 = (guint32*)ip;
			/* Might already have been patched */
			sparc_set_template (ip2, sparc_o7);
			break;
		}
		default:
			break;
		}
		sparc_patch ((guint32*)ip, target);
	}
}
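/*
 * Note: for most patch types the switch above only massages the code at ip
 * (or skips it entirely via MONO_PATCH_INFO_NONE); the actual redirection to
 * the resolved target is done by the shared sparc_patch () call at the end
 * of the loop body.
 */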
3759 mono_arch_instrument_prolog (MonoCompile
*cfg
, void *func
, void *p
, gboolean enable_arguments
)
3762 guint32
*code
= (guint32
*)p
;
3763 MonoMethodSignature
*sig
= mono_method_signature (cfg
->method
);
3766 /* Save registers to stack */
3767 for (i
= 0; i
< 6; ++i
)
3768 sparc_sti_imm (code
, sparc_i0
+ i
, sparc_fp
, ARGS_OFFSET
+ (i
* sizeof (gpointer
)));
3770 cinfo
= get_call_info (cfg
, sig
, FALSE
);
3772 /* Save float regs on V9, since they are caller saved */
3773 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
3774 ArgInfo
*ainfo
= cinfo
->args
+ i
;
3775 gint32 stack_offset
;
3777 stack_offset
= ainfo
->offset
+ ARGS_OFFSET
;
3779 if (ainfo
->storage
== ArgInFloatReg
) {
3780 if (!sparc_is_imm13 (stack_offset
))
3782 sparc_stf_imm (code
, ainfo
->reg
, sparc_fp
, stack_offset
);
3784 else if (ainfo
->storage
== ArgInDoubleReg
) {
3785 /* The offset is guaranteed to be aligned by the ABI rules */
3786 sparc_stdf_imm (code
, ainfo
->reg
, sparc_fp
, stack_offset
);
3790 sparc_set (code
, cfg
->method
, sparc_o0
);
3791 sparc_add_imm (code
, FALSE
, sparc_fp
, MONO_SPARC_STACK_BIAS
, sparc_o1
);
3793 mono_add_patch_info (cfg
, (guint8
*)code
-cfg
->native_code
, MONO_PATCH_INFO_ABS
, func
);
3796 /* Restore float regs on V9 */
3797 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
3798 ArgInfo
*ainfo
= cinfo
->args
+ i
;
3799 gint32 stack_offset
;
3801 stack_offset
= ainfo
->offset
+ ARGS_OFFSET
;
3803 if (ainfo
->storage
== ArgInFloatReg
) {
3804 if (!sparc_is_imm13 (stack_offset
))
3806 sparc_ldf_imm (code
, sparc_fp
, stack_offset
, ainfo
->reg
);
3808 else if (ainfo
->storage
== ArgInDoubleReg
) {
3809 /* The offset is guaranteed to be aligned by the ABI rules */
3810 sparc_lddf_imm (code
, sparc_fp
, stack_offset
, ainfo
->reg
);
3828 mono_arch_instrument_epilog (MonoCompile
*cfg
, void *func
, void *p
, gboolean enable_arguments
)
3830 guint32
*code
= (guint32
*)p
;
3831 int save_mode
= SAVE_NONE
;
3832 MonoMethod
*method
= cfg
->method
;
3834 switch (mono_type_get_underlying_type (mono_method_signature (method
)->ret
)->type
) {
3835 case MONO_TYPE_VOID
:
3836 /* special case string .ctor icall */
3837 if (strcmp (".ctor", method
->name
) && method
->klass
== mono_defaults
.string_class
)
3838 save_mode
= SAVE_ONE
;
3840 save_mode
= SAVE_NONE
;
3845 save_mode
= SAVE_ONE
;
3847 save_mode
= SAVE_TWO
;
3852 save_mode
= SAVE_FP
;
3854 case MONO_TYPE_VALUETYPE
:
3855 save_mode
= SAVE_STRUCT
;
3858 save_mode
= SAVE_ONE
;
3862 /* Save the result to the stack and also put it into the output registers */
3864 switch (save_mode
) {
3867 sparc_st_imm (code
, sparc_i0
, sparc_fp
, 68);
3868 sparc_st_imm (code
, sparc_i0
, sparc_fp
, 72);
3869 sparc_mov_reg_reg (code
, sparc_i0
, sparc_o1
);
3870 sparc_mov_reg_reg (code
, sparc_i1
, sparc_o2
);
3873 sparc_sti_imm (code
, sparc_i0
, sparc_fp
, ARGS_OFFSET
);
3874 sparc_mov_reg_reg (code
, sparc_i0
, sparc_o1
);
3878 sparc_stdf_imm (code
, sparc_f0
, sparc_fp
, ARGS_OFFSET
);
3880 sparc_stdf_imm (code
, sparc_f0
, sparc_fp
, 72);
3881 sparc_ld_imm (code
, sparc_fp
, 72, sparc_o1
);
3882 sparc_ld_imm (code
, sparc_fp
, 72 + 4, sparc_o2
);
3887 sparc_mov_reg_reg (code
, sparc_i0
, sparc_o1
);
3889 sparc_ld_imm (code
, sparc_fp
, 64, sparc_o1
);
3897 sparc_set (code
, cfg
->method
, sparc_o0
);
3899 mono_add_patch_info (cfg
, (guint8
*)code
- cfg
->native_code
, MONO_PATCH_INFO_ABS
, func
);
3902 /* Restore result */
3904 switch (save_mode
) {
3906 sparc_ld_imm (code
, sparc_fp
, 68, sparc_i0
);
3907 sparc_ld_imm (code
, sparc_fp
, 72, sparc_i0
);
3910 sparc_ldi_imm (code
, sparc_fp
, ARGS_OFFSET
, sparc_i0
);
3913 sparc_lddf_imm (code
, sparc_fp
, ARGS_OFFSET
, sparc_f0
);
3924 mono_arch_emit_prolog (MonoCompile
*cfg
)
3926 MonoMethod
*method
= cfg
->method
;
3927 MonoMethodSignature
*sig
;
3933 cfg
->code_size
= 256;
3934 cfg
->native_code
= g_malloc (cfg
->code_size
);
3935 code
= (guint32
*)cfg
->native_code
;
3937 /* FIXME: Generate intermediate code instead */
3939 offset
= cfg
->stack_offset
;
3940 offset
+= (16 * sizeof (gpointer
)); /* register save area */
3942 offset
+= 4; /* struct/union return pointer */
3945 /* add parameter area size for called functions */
3946 if (cfg
->param_area
< (6 * sizeof (gpointer
)))
3947 /* Reserve space for the first 6 arguments even if it is unused */
3948 offset
+= 6 * sizeof (gpointer
);
3950 offset
+= cfg
->param_area
;
3952 /* align the stack size */
3953 offset
= ALIGN_TO (offset
, MONO_ARCH_FRAME_ALIGNMENT
);
3956 * localloc'd memory is stored between the local variables (whose
3957 * size is given by cfg->stack_offset), and between the space reserved
3960 cfg
->arch
.localloc_offset
= offset
- cfg
->stack_offset
;
3962 cfg
->stack_offset
= offset
;
3964 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
3965 /* Perform stack touching */
3969 if (!sparc_is_imm13 (- cfg
->stack_offset
)) {
3970 /* Can't use sparc_o7 here, since we're still in the caller's frame */
3971 sparc_set (code
, (- cfg
->stack_offset
), GP_SCRATCH_REG
);
3972 sparc_save (code
, sparc_sp
, GP_SCRATCH_REG
, sparc_sp
);
3975 sparc_save_imm (code
, sparc_sp
, - cfg
->stack_offset
, sparc_sp
);
3978 if (strstr (cfg->method->name, "foo")) {
3979 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_ABS, mono_sparc_break);
3980 sparc_call_simple (code, 0);
3985 sig
= mono_method_signature (method
);
3987 cinfo
= get_call_info (cfg
, sig
, FALSE
);
3989 /* Keep in sync with emit_load_volatile_arguments */
3990 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
3991 ArgInfo
*ainfo
= cinfo
->args
+ i
;
3992 gint32 stack_offset
;
3994 inst
= cfg
->args
[i
];
3996 if (sig
->hasthis
&& (i
== 0))
3997 arg_type
= &mono_defaults
.object_class
->byval_arg
;
3999 arg_type
= sig
->params
[i
- sig
->hasthis
];
4001 stack_offset
= ainfo
->offset
+ ARGS_OFFSET
;
4003 /* Save the split arguments so they will reside entirely on the stack */
4004 if (ainfo
->storage
== ArgInSplitRegStack
) {
4005 /* Save the register to the stack */
4006 g_assert (inst
->opcode
== OP_REGOFFSET
);
4007 if (!sparc_is_imm13 (stack_offset
))
4009 sparc_st_imm (code
, sparc_i5
, inst
->inst_basereg
, stack_offset
);
4012 if (!v64
&& !arg_type
->byref
&& (arg_type
->type
== MONO_TYPE_R8
)) {
4013 /* Save the argument to a dword aligned stack location */
4015 * stack_offset contains the offset of the argument on the stack.
4016 * inst->inst_offset contains the dword aligned offset where the value
4019 if (ainfo
->storage
== ArgInIRegPair
) {
4020 if (!sparc_is_imm13 (inst
->inst_offset
+ 4))
4022 sparc_st_imm (code
, sparc_i0
+ ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
4023 sparc_st_imm (code
, sparc_i0
+ ainfo
->reg
+ 1, inst
->inst_basereg
, inst
->inst_offset
+ 4);
4026 if (ainfo
->storage
== ArgInSplitRegStack
) {
4028 g_assert_not_reached ();
4030 if (stack_offset
!= inst
->inst_offset
) {
4031 /* stack_offset is not dword aligned, so we need to make a copy */
4032 sparc_st_imm (code
, sparc_i5
, inst
->inst_basereg
, inst
->inst_offset
);
4033 sparc_ld_imm (code
, sparc_fp
, stack_offset
+ 4, sparc_o7
);
4034 sparc_st_imm (code
, sparc_o7
, inst
->inst_basereg
, inst
->inst_offset
+ 4);
4038 if (ainfo
->storage
== ArgOnStackPair
) {
4040 g_assert_not_reached ();
4042 if (stack_offset
!= inst
->inst_offset
) {
4043 /* stack_offset is not dword aligned, so we need to make a copy */
4044 sparc_ld_imm (code
, sparc_fp
, stack_offset
, sparc_o7
);
4045 sparc_st_imm (code
, sparc_o7
, inst
->inst_basereg
, inst
->inst_offset
);
4046 sparc_ld_imm (code
, sparc_fp
, stack_offset
+ 4, sparc_o7
);
4047 sparc_st_imm (code
, sparc_o7
, inst
->inst_basereg
, inst
->inst_offset
+ 4);
4051 g_assert_not_reached ();
4054 if ((ainfo
->storage
== ArgInIReg
) && (inst
->opcode
!= OP_REGVAR
)) {
4055 /* Argument in register, but need to be saved to stack */
4056 if (!sparc_is_imm13 (stack_offset
))
4058 if ((stack_offset
- ARGS_OFFSET
) & 0x1)
4059 sparc_stb_imm (code
, sparc_i0
+ ainfo
->reg
, inst
->inst_basereg
, stack_offset
);
4061 if ((stack_offset
- ARGS_OFFSET
) & 0x2)
4062 sparc_sth_imm (code
, sparc_i0
+ ainfo
->reg
, inst
->inst_basereg
, stack_offset
);
4064 if ((stack_offset
- ARGS_OFFSET
) & 0x4)
4065 sparc_st_imm (code
, sparc_i0
+ ainfo
->reg
, inst
->inst_basereg
, stack_offset
);
4068 sparc_stx_imm (code
, sparc_i0
+ ainfo
->reg
, inst
->inst_basereg
, stack_offset
);
4070 sparc_st_imm (code
, sparc_i0
+ ainfo
->reg
, inst
->inst_basereg
, stack_offset
);
4074 if ((ainfo
->storage
== ArgInIRegPair
) && (inst
->opcode
!= OP_REGVAR
)) {
4078 /* Argument in regpair, but need to be saved to stack */
4079 if (!sparc_is_imm13 (inst
->inst_offset
+ 4))
4081 sparc_st_imm (code
, sparc_i0
+ ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
4082 sparc_st_imm (code
, sparc_i0
+ ainfo
->reg
+ 1, inst
->inst_basereg
, inst
->inst_offset
+ 4);
4084 else if ((ainfo
->storage
== ArgInFloatReg
) && (inst
->opcode
!= OP_REGVAR
)) {
4085 if (!sparc_is_imm13 (stack_offset
))
4087 sparc_stf_imm (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
4089 else if ((ainfo
->storage
== ArgInDoubleReg
) && (inst
->opcode
!= OP_REGVAR
)) {
4090 /* The offset is guaranteed to be aligned by the ABI rules */
4091 sparc_stdf_imm (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
4094 if ((ainfo
->storage
== ArgInFloatReg
) && (inst
->opcode
== OP_REGVAR
)) {
4095 /* Need to move into the a double precision register */
4096 sparc_fstod (code
, ainfo
->reg
, ainfo
->reg
- 1);
4099 if ((ainfo
->storage
== ArgInSplitRegStack
) || (ainfo
->storage
== ArgOnStack
))
4100 if (inst
->opcode
== OP_REGVAR
)
4101 /* FIXME: Load the argument into memory */
4107 if (cfg
->method
->save_lmf
) {
4108 gint32 lmf_offset
= STACK_BIAS
- cfg
->arch
.lmf_offset
;
4111 mono_add_patch_info (cfg
, (guint8
*)code
- cfg
->native_code
, MONO_PATCH_INFO_IP
, NULL
);
4112 sparc_set_template (code
, sparc_o7
);
4113 sparc_sti_imm (code
, sparc_o7
, sparc_fp
, lmf_offset
+ G_STRUCT_OFFSET (MonoLMF
, ip
));
4115 sparc_sti_imm (code
, sparc_sp
, sparc_fp
, lmf_offset
+ G_STRUCT_OFFSET (MonoLMF
, sp
));
4117 sparc_sti_imm (code
, sparc_fp
, sparc_fp
, lmf_offset
+ G_STRUCT_OFFSET (MonoLMF
, ebp
));
4119 /* FIXME: add a relocation for this */
4120 sparc_set (code
, cfg
->method
, sparc_o7
);
4121 sparc_sti_imm (code
, sparc_o7
, sparc_fp
, lmf_offset
+ G_STRUCT_OFFSET (MonoLMF
, method
));
4123 mono_add_patch_info (cfg
, (guint8
*)code
- cfg
->native_code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
4124 (gpointer
)"mono_arch_get_lmf_addr");
4127 code
= (guint32
*)mono_sparc_emit_save_lmf (code
, lmf_offset
);
4130 if (mono_jit_trace_calls
!= NULL
&& mono_trace_eval (method
))
4131 code
= mono_arch_instrument_prolog (cfg
, mono_trace_enter_method
, code
, TRUE
);
4133 cfg
->code_len
= (guint8
*)code
- cfg
->native_code
;
4135 g_assert (cfg
->code_len
<= cfg
->code_size
);
4137 return (guint8
*)code
;
4141 mono_arch_emit_epilog (MonoCompile
*cfg
)
4143 MonoMethod
*method
= cfg
->method
;
4146 int max_epilog_size
= 16 + 20 * 4;
4148 if (cfg
->method
->save_lmf
)
4149 max_epilog_size
+= 128;
4151 if (mono_jit_trace_calls
!= NULL
)
4152 max_epilog_size
+= 50;
4154 if (cfg
->prof_options
& MONO_PROFILE_ENTER_LEAVE
)
4155 max_epilog_size
+= 50;
4157 while (cfg
->code_len
+ max_epilog_size
> (cfg
->code_size
- 16)) {
4158 cfg
->code_size
*= 2;
4159 cfg
->native_code
= g_realloc (cfg
->native_code
, cfg
->code_size
);
4160 mono_jit_stats
.code_reallocs
++;
4163 code
= (guint32
*)(cfg
->native_code
+ cfg
->code_len
);
4165 if (mono_jit_trace_calls
!= NULL
&& mono_trace_eval (method
))
4166 code
= mono_arch_instrument_epilog (cfg
, mono_trace_leave_method
, code
, TRUE
);
4168 if (cfg
->method
->save_lmf
) {
4169 gint32 lmf_offset
= STACK_BIAS
- cfg
->arch
.lmf_offset
;
4171 code
= mono_sparc_emit_restore_lmf (code
, lmf_offset
);
4175 * The V8 ABI requires that calls to functions which return a structure
4178 if (!v64
&& mono_method_signature (cfg
->method
)->pinvoke
&& MONO_TYPE_ISSTRUCT(mono_method_signature (cfg
->method
)->ret
))
4179 sparc_jmpl_imm (code
, sparc_i7
, 12, sparc_g0
);
4183 /* Only fold last instruction into the restore if the exit block has an in count of 1
4184 and the previous block hasn't been optimized away since it may have an in count > 1 */
4185 if (cfg
->bb_exit
->in_count
== 1 && cfg
->bb_exit
->in_bb
[0]->native_offset
!= cfg
->bb_exit
->native_offset
)
4189 * FIXME: The last instruction might have a branch pointing into it like in
4190 * int_ceq sparc_i0 <-
4194 /* Try folding last instruction into the restore */
4195 if (can_fold
&& (sparc_inst_op (code
[-2]) == 0x2) && (sparc_inst_op3 (code
[-2]) == 0x2) && sparc_inst_imm (code
[-2]) && (sparc_inst_rd (code
[-2]) == sparc_i0
)) {
4196 /* or reg, imm, %i0 */
4197 int reg
= sparc_inst_rs1 (code
[-2]);
4198 int imm
= (((gint32
)(sparc_inst_imm13 (code
[-2]))) << 19) >> 19;
4199 code
[-2] = code
[-1];
4201 sparc_restore_imm (code
, reg
, imm
, sparc_o0
);
4204 if (can_fold
&& (sparc_inst_op (code
[-2]) == 0x2) && (sparc_inst_op3 (code
[-2]) == 0x2) && (!sparc_inst_imm (code
[-2])) && (sparc_inst_rd (code
[-2]) == sparc_i0
)) {
4205 /* or reg, reg, %i0 */
4206 int reg1
= sparc_inst_rs1 (code
[-2]);
4207 int reg2
= sparc_inst_rs2 (code
[-2]);
4208 code
[-2] = code
[-1];
4210 sparc_restore (code
, reg1
, reg2
, sparc_o0
);
4213 sparc_restore_imm (code
, sparc_g0
, 0, sparc_g0
);
4215 cfg
->code_len
= (guint8
*)code
- cfg
->native_code
;
4217 g_assert (cfg
->code_len
< cfg
->code_size
);
4222 mono_arch_emit_exceptions (MonoCompile
*cfg
)
4224 MonoJumpInfo
*patch_info
;
4229 MonoClass
*exc_classes
[16];
4230 guint8
*exc_throw_start
[16], *exc_throw_end
[16];
4232 /* Compute needed space */
4233 for (patch_info
= cfg
->patch_info
; patch_info
; patch_info
= patch_info
->next
) {
4234 if (patch_info
->type
== MONO_PATCH_INFO_EXC
)
4239 * make sure we have enough space for exceptions
4242 code_size
= exc_count
* (20 * 4);
4244 code_size
= exc_count
* 24;
4247 while (cfg
->code_len
+ code_size
> (cfg
->code_size
- 16)) {
4248 cfg
->code_size
*= 2;
4249 cfg
->native_code
= g_realloc (cfg
->native_code
, cfg
->code_size
);
4250 mono_jit_stats
.code_reallocs
++;
4253 code
= (guint32
*)(cfg
->native_code
+ cfg
->code_len
);
4255 for (patch_info
= cfg
->patch_info
; patch_info
; patch_info
= patch_info
->next
) {
4256 switch (patch_info
->type
) {
4257 case MONO_PATCH_INFO_EXC
: {
4258 MonoClass
*exc_class
;
4259 guint32
*buf
, *buf2
;
4260 guint32 throw_ip
, type_idx
;
4263 sparc_patch ((guint32
*)(cfg
->native_code
+ patch_info
->ip
.i
), code
);
4265 exc_class
= mono_class_from_name (mono_defaults
.corlib
, "System", patch_info
->data
.name
);
4266 g_assert (exc_class
);
4267 type_idx
= exc_class
->type_token
- MONO_TOKEN_TYPE_DEF
;
4268 throw_ip
= patch_info
->ip
.i
;
4270 /* Find a throw sequence for the same exception class */
4271 for (i
= 0; i
< nthrows
; ++i
)
4272 if (exc_classes
[i
] == exc_class
)
4276 guint32 throw_offset
= (((guint8
*)exc_throw_end
[i
] - cfg
->native_code
) - throw_ip
) >> 2;
4277 if (!sparc_is_imm13 (throw_offset
))
4278 sparc_set32 (code
, throw_offset
, sparc_o1
);
4280 disp
= (exc_throw_start
[i
] - (guint8
*)code
) >> 2;
4281 g_assert (sparc_is_imm22 (disp
));
4282 sparc_branch (code
, 0, sparc_ba
, disp
);
4283 if (sparc_is_imm13 (throw_offset
))
4284 sparc_set32 (code
, throw_offset
, sparc_o1
);
4287 patch_info
->type
= MONO_PATCH_INFO_NONE
;
4290 /* Emit the template for setting o1 */
4292 if (sparc_is_imm13 (((((guint8
*)code
- cfg
->native_code
) - throw_ip
) >> 2) - 8))
4293 /* Can use a short form */
4296 sparc_set_template (code
, sparc_o1
);
4300 exc_classes
[nthrows
] = exc_class
;
4301 exc_throw_start
[nthrows
] = (guint8
*)code
;
4305 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_ABS, mono_sparc_break);
4309 /* first arg = type token */
4310 /* Pass the type index to reduce the size of the sparc_set */
4311 if (!sparc_is_imm13 (type_idx
))
4312 sparc_set32 (code
, type_idx
, sparc_o0
);
4314 /* second arg = offset between the throw ip and the current ip */
4315 /* On sparc, the saved ip points to the call instruction */
4316 disp
= (((guint8
*)code
- cfg
->native_code
) - throw_ip
) >> 2;
4317 sparc_set32 (buf
, disp
, sparc_o1
);
4322 exc_throw_end
[nthrows
] = (guint8
*)code
;
4326 patch_info
->data
.name
= "mono_arch_throw_corlib_exception";
4327 patch_info
->type
= MONO_PATCH_INFO_INTERNAL_METHOD
;
4328 patch_info
->ip
.i
= (guint8
*)code
- cfg
->native_code
;
4332 if (sparc_is_imm13 (type_idx
)) {
4333 /* Put it into the delay slot */
4336 sparc_set32 (code
, type_idx
, sparc_o0
);
4337 g_assert (code
- buf
== 1);
4348 cfg
->code_len
= (guint8
*)code
- cfg
->native_code
;
4350 g_assert (cfg
->code_len
< cfg
->code_size
);
gboolean lmf_addr_key_inited = FALSE;

#ifdef MONO_SPARC_THR_TLS
thread_key_t lmf_addr_key;
#else
pthread_key_t lmf_addr_key;
#endif

gpointer
mono_arch_get_lmf_addr (void)
{
	/* This is perf critical so we bypass the IO layer */
	/* The thr_... functions seem to be somewhat faster */
#ifdef MONO_SPARC_THR_TLS
	gpointer res;
	thr_getspecific (lmf_addr_key, &res);
	return res;
#else
	return pthread_getspecific (lmf_addr_key);
#endif
}

#ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK

/*
 * There seems to be no way to determine stack boundaries under solaris,
 * so it's not possible to determine whether a SIGSEGV is caused by stack
 * overflow or not.
 */
#error "--with-sigaltstack=yes not supported on solaris"

#endif

void
mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
{
	if (!lmf_addr_key_inited) {
		int res;

		lmf_addr_key_inited = TRUE;

#ifdef MONO_SPARC_THR_TLS
		res = thr_keycreate (&lmf_addr_key, NULL);
#else
		res = pthread_key_create (&lmf_addr_key, NULL);
#endif
		g_assert (res == 0);
	}

#ifdef MONO_SPARC_THR_TLS
	thr_setspecific (lmf_addr_key, &tls->lmf);
#else
	pthread_setspecific (lmf_addr_key, &tls->lmf);
#endif
}

void
mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
{
}

MonoInst*
mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
	MonoInst *ins = NULL;

	return ins;
}

/*
 * mono_arch_get_argument_info:
 * @csig:  a method signature
 * @param_count: the number of parameters to consider
 * @arg_info: an array to store the result infos
 *
 * Gathers information on parameters such as size, alignment and
 * padding. arg_info should be large enough to hold param_count + 1 entries.
 *
 * Returns the size of the activation frame.
 */
int
mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
{
	int k, align;
	CallInfo *cinfo;
	ArgInfo *ainfo;

	cinfo = get_call_info (NULL, csig, FALSE);

	if (csig->hasthis) {
		ainfo = &cinfo->args [0];
		arg_info [0].offset = ARGS_OFFSET - MONO_SPARC_STACK_BIAS + ainfo->offset;
	}

	for (k = 0; k < param_count; k++) {
		ainfo = &cinfo->args [k + csig->hasthis];

		arg_info [k + 1].offset = ARGS_OFFSET - MONO_SPARC_STACK_BIAS + ainfo->offset;
		arg_info [k + 1].size = mono_type_size (csig->params [k], &align);
	}

	g_free (cinfo);

	return 0;
}

gboolean
mono_arch_print_tree (MonoInst *tree, int arity)
{
	return 0;
}

MonoInst* mono_arch_get_domain_intrinsic (MonoCompile* cfg)
{
	return NULL;
}

MonoInst* mono_arch_get_thread_intrinsic (MonoCompile* cfg)
{
	return NULL;
}

gpointer
mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
{
	/* FIXME: implement */
	g_assert_not_reached ();