/*
 * mini-ia64.c: IA64 backend for the Mono code generator
 *
 * Authors:
 *   Zoltan Varga (vargaz@gmail.com)
 *
 * (C) 2003 Ximian, Inc.
 */
#ifdef __INTEL_COMPILER
#include <ia64intrin.h>
#endif
#include <mono/metadata/appdomain.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/threads.h>
#include <mono/metadata/profiler-private.h>
#include <mono/utils/mono-math.h>

#include "mini-ia64.h"
#include "jit-icalls.h"

#define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))

#define IS_IMM32(val) ((((guint64)val) >> 32) == 0)
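/*
 * Usage sketch for the macros above: ALIGN_TO (13, 8) == 16 while
 * ALIGN_TO (16, 8) == 16 (align is assumed to be a power of two), and
 * IS_IMM32 (val) is non-zero exactly when the value fits in the low 32 bits.
 */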
/*
 * IA64 register usage:
 * - local registers are used for global register allocation
 * - r8..r11, r14..r30 are used for local register allocation
 * - r31 is a scratch register used within opcode implementations
 * - FIXME: Use out registers as well
 * - the first three locals are used for saving ar.pfs, b0, and sp
 * - compare instructions always set p6 and p7
 */

/*
 * There are a lot of places where generated code is disassembled/patched.
 * The automatic bundling of instructions done by the code generation macros
 * could complicate things, so it is best to call
 * ia64_codegen_set_one_ins_per_bundle () at those places.
 */
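/*
 * Sketch of the stacked register frame as set up below by
 * mono_ia64_alloc_stacked_registers (): the frame starts at r32 with the
 * input arguments (reg_in0), followed by the reserved locals holding the
 * saved ar.pfs, b0 and sp, then the locals handed to the global register
 * allocator (reg_local0..), and finally the out registers used to pass call
 * arguments (reg_out0..).
 */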
#define ARGS_OFFSET 16

#define GP_SCRATCH_REG 31
#define GP_SCRATCH_REG2 30
#define FP_SCRATCH_REG 32
#define FP_SCRATCH_REG2 33

#define LOOP_ALIGNMENT 8
#define bb_is_loop_start(bb) ((bb)->loop_body_start && (bb)->nesting)
static const char* gregs [] = {
	"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9",
	"r10", "r11", "r12", "r13", "r14", "r15", "r16", "r17", "r18", "r19",
	"r20", "r21", "r22", "r23", "r24", "r25", "r26", "r27", "r28", "r29",
	"r30", "r31", "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
	"r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47", "r48", "r49",
	"r50", "r51", "r52", "r53", "r54", "r55", "r56", "r57", "r58", "r59",
	"r60", "r61", "r62", "r63", "r64", "r65", "r66", "r67", "r68", "r69",
	"r70", "r71", "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
	"r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87", "r88", "r89",
	"r90", "r91", "r92", "r93", "r94", "r95", "r96", "r97", "r98", "r99",
	"r100", "r101", "r102", "r103", "r104", "r105", "r106", "r107", "r108", "r109",
	"r110", "r111", "r112", "r113", "r114", "r115", "r116", "r117", "r118", "r119",
	"r120", "r121", "r122", "r123", "r124", "r125", "r126", "r127"
};

const char*
mono_arch_regname (int reg)
{
	if (reg < 128)
		return gregs [reg];
	else
		return "unknown";
}
static const char* fregs [] = {
	"f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9",
	"f10", "f11", "f12", "f13", "f14", "f15", "f16", "f17", "f18", "f19",
	"f20", "f21", "f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29",
	"f30", "f31", "f32", "f33", "f34", "f35", "f36", "f37", "f38", "f39",
	"f40", "f41", "f42", "f43", "f44", "f45", "f46", "f47", "f48", "f49",
	"f50", "f51", "f52", "f53", "f54", "f55", "f56", "f57", "f58", "f59",
	"f60", "f61", "f62", "f63", "f64", "f65", "f66", "f67", "f68", "f69",
	"f70", "f71", "f72", "f73", "f74", "f75", "f76", "f77", "f78", "f79",
	"f80", "f81", "f82", "f83", "f84", "f85", "f86", "f87", "f88", "f89",
	"f90", "f91", "f92", "f93", "f94", "f95", "f96", "f97", "f98", "f99",
	"f100", "f101", "f102", "f103", "f104", "f105", "f106", "f107", "f108", "f109",
	"f110", "f111", "f112", "f113", "f114", "f115", "f116", "f117", "f118", "f119",
	"f120", "f121", "f122", "f123", "f124", "f125", "f126", "f127"
};

const char*
mono_arch_fregname (int reg)
{
	if (reg < 128)
		return fregs [reg];
	else
		return "unknown";
}
G_GNUC_UNUSED static void
break_count (void)
{
}

G_GNUC_UNUSED static gboolean
debug_count (void)
{
	static int count = 0;
	count ++;

	if (count == atoi (getenv ("COUNT"))) {
		break_count ();
	}

	if (count > atoi (getenv ("COUNT"))) {
		return FALSE;
	}

	return TRUE;
}

static gboolean
debug_ins_sched (void)
{
#if 0
	return debug_count ();
#else
	return TRUE;
#endif
}

static gboolean
debug_omit_fp (void)
{
#if 0
	return debug_count ();
#else
	return TRUE;
#endif
}
static void ia64_patch (unsigned char* code, gpointer target);

typedef enum {
	ArgInIReg,
	ArgInFloatReg,
	ArgInFloatRegR4,
	ArgOnStack,
	ArgValuetypeAddrInIReg,
	ArgAggregate,
	ArgNone
} ArgStorage;

typedef enum {
	AggregateNormal,
	AggregateSingleHFA,
	AggregateDoubleHFA
} AggregateType;

typedef struct {
	gint16 offset;
	gint8  reg;
	ArgStorage storage;

	/* Only if storage == ArgAggregate */
	int nregs, nslots;
	AggregateType atype;
} ArgInfo;

typedef struct {
	int nargs;
	guint32 stack_usage;
	guint32 reg_usage;
	guint32 freg_usage;
	gboolean need_stack_align;
	ArgInfo ret;
	ArgInfo sig_cookie;
	ArgInfo args [1];
} CallInfo;

#define DEBUG(a) if (cfg->verbose_level > 1) a
#define PARAM_REGS 8

static void inline
add_general (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo)
{
	ainfo->offset = *stack_size;

	if (*gr >= PARAM_REGS) {
		ainfo->storage = ArgOnStack;
		(*stack_size) += sizeof (gpointer);
	}
	else {
		ainfo->storage = ArgInIReg;
		ainfo->reg = *gr;
		*gr += 1;
	}
}
#define FLOAT_PARAM_REGS 8

static void inline
add_float (guint32 *gr, guint32 *fr, guint32 *stack_size, ArgInfo *ainfo, gboolean is_double)
{
	ainfo->offset = *stack_size;

	if (*gr >= PARAM_REGS) {
		ainfo->storage = ArgOnStack;
		(*stack_size) += sizeof (gpointer);
	}
	else {
		ainfo->storage = is_double ? ArgInFloatReg : ArgInFloatRegR4;
		ainfo->reg = 8 + *fr;
		(*fr) += 1;
		(*gr) += 1;
	}
}
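/*
 * Worked example (a sketch, assuming the conventions encoded above): for a
 * signature (int, double, int), the first int gets parameter slot 0, the
 * double gets f8 while still consuming slot 1, and the second int gets slot
 * 2, since add_float () burns a general slot (*gr) as well as a float
 * register, matching the one-slot-per-argument Itanium convention.
 */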
static void
add_valuetype (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, ArgInfo *ainfo, MonoType *type,
	       gboolean is_return,
	       guint32 *gr, guint32 *fr, guint32 *stack_size)
{
	guint32 size, i;
	MonoClass *klass;
	MonoMarshalType *info;
	gboolean is_hfa = TRUE;
	guint32 hfa_type = 0;

	klass = mono_class_from_mono_type (type);
	if (type->type == MONO_TYPE_TYPEDBYREF)
		size = 3 * sizeof (gpointer);
	else if (sig->pinvoke)
		size = mono_type_native_stack_size (&klass->byval_arg, NULL);
	else
		size = mini_type_stack_size (gsctx, &klass->byval_arg, NULL);

	if (!sig->pinvoke || (size == 0)) {
		/* Always pass in memory */
		ainfo->offset = *stack_size;
		*stack_size += ALIGN_TO (size, 8);
		ainfo->storage = ArgOnStack;

		return;
	}
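	/*
	 * Example: struct { float x, y; } is a single-precision HFA and can be
	 * passed in two consecutive float registers, while a struct mixing a
	 * float and a double is not, because its fields do not share one
	 * floating point type.
	 */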
	/* Determine whether it is a HFA (Homogeneous Floating Point Aggregate) */
	info = mono_marshal_load_type_info (klass);
	g_assert (info);
	for (i = 0; i < info->num_fields; ++i) {
		guint32 ftype = info->fields [i].field->type->type;
		if (!(info->fields [i].field->type->byref) &&
		    ((ftype == MONO_TYPE_R4) || (ftype == MONO_TYPE_R8))) {
			if (hfa_type == 0)
				hfa_type = ftype;
			else if (hfa_type != ftype)
				is_hfa = FALSE;
		}
		else
			is_hfa = FALSE;
	}
	if (hfa_type == 0)
		is_hfa = FALSE;

	ainfo->storage = ArgAggregate;
	ainfo->atype = AggregateNormal;

	if (is_hfa) {
		ainfo->atype = hfa_type == MONO_TYPE_R4 ? AggregateSingleHFA : AggregateDoubleHFA;
		if (is_return) {
			if (info->num_fields <= 8) {
				ainfo->reg = 8;
				ainfo->nregs = info->num_fields;
				ainfo->nslots = ainfo->nregs;
				return;
			}
		}
		else {
			if ((*fr) + info->num_fields > 8)
				NOT_IMPLEMENTED;

			ainfo->reg = 8 + (*fr);
			ainfo->nregs = info->num_fields;
			ainfo->nslots = ainfo->nregs;
			(*fr) += info->num_fields;
			if (ainfo->atype == AggregateSingleHFA) {
				/*
				 * FIXME: Have to keep track of the parameter slot number, which is
				 * not the same as *gr.
				 */
				(*gr) += ALIGN_TO (info->num_fields, 2) / 2;
			}
			else {
				(*gr) += info->num_fields;
			}
			return;
		}
	}
	/* This also handles returning of TypedByRef used by some icalls */
	if (is_return) {
		if (size <= 32) {
			ainfo->reg = IA64_R8;
			ainfo->nregs = (size + 7) / 8;
			ainfo->nslots = ainfo->nregs;
			return;
		}
		NOT_IMPLEMENTED;
	}

	ainfo->offset = *stack_size;
	ainfo->nslots = (size + 7) / 8;

	if (((*gr) + ainfo->nslots) <= 8) {
		/* Fits entirely in registers */
		ainfo->nregs = ainfo->nslots;
		(*gr) += ainfo->nregs;
		return;
	}

	ainfo->nregs = 8 - (*gr);
	(*gr) = 8;
	(*stack_size) += (ainfo->nslots - ainfo->nregs) * 8;
}
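/*
 * Example (sketch): a 24-byte aggregate arriving with *gr == 6 needs
 * nslots == 3; the first two slots fit in the remaining parameter registers
 * (nregs == 2) and the third spills, so stack_size grows by (3 - 2) * 8.
 */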
/*
 * get_call_info:
 *
 * Obtain information about a call according to the calling convention.
 * For IA64, see the "Itanium Software Conventions and Runtime Architecture
 * Guide" document for more information.
 */
static CallInfo*
get_call_info (MonoCompile *cfg, MonoMemPool *mp, MonoMethodSignature *sig, gboolean is_pinvoke)
{
	guint32 i, gr, fr;
	MonoType *ret_type;
	int n = sig->hasthis + sig->param_count;
	guint32 stack_size = 0;
	CallInfo *cinfo;
	MonoGenericSharingContext *gsctx = cfg ? cfg->generic_sharing_context : NULL;

	if (mp)
		cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n));
	else
		cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));

	gr = 0;
	fr = 0;
	/* return value */
	{
		ret_type = mono_type_get_underlying_type (sig->ret);
		ret_type = mini_get_basic_type_from_generic (gsctx, ret_type);
		switch (ret_type->type) {
		case MONO_TYPE_BOOLEAN:
		case MONO_TYPE_I1:
		case MONO_TYPE_U1:
		case MONO_TYPE_I2:
		case MONO_TYPE_U2:
		case MONO_TYPE_CHAR:
		case MONO_TYPE_I4:
		case MONO_TYPE_U4:
		case MONO_TYPE_I:
		case MONO_TYPE_U:
		case MONO_TYPE_PTR:
		case MONO_TYPE_FNPTR:
		case MONO_TYPE_CLASS:
		case MONO_TYPE_OBJECT:
		case MONO_TYPE_SZARRAY:
		case MONO_TYPE_ARRAY:
		case MONO_TYPE_STRING:
			cinfo->ret.storage = ArgInIReg;
			cinfo->ret.reg = IA64_R8;
			break;
		case MONO_TYPE_U8:
		case MONO_TYPE_I8:
			cinfo->ret.storage = ArgInIReg;
			cinfo->ret.reg = IA64_R8;
			break;
		case MONO_TYPE_R4:
		case MONO_TYPE_R8:
			cinfo->ret.storage = ArgInFloatReg;
			cinfo->ret.reg = 8;
			break;
		case MONO_TYPE_GENERICINST:
			if (!mono_type_generic_inst_is_valuetype (sig->ret)) {
				cinfo->ret.storage = ArgInIReg;
				cinfo->ret.reg = IA64_R8;
				break;
			}
			/* Fall through */
		case MONO_TYPE_VALUETYPE:
		case MONO_TYPE_TYPEDBYREF: {
			guint32 tmp_gr = 0, tmp_fr = 0, tmp_stacksize = 0;

			if (sig->ret->byref) {
				/* This seems to happen with ldfld wrappers */
				cinfo->ret.storage = ArgInIReg;
			}
			else {
				add_valuetype (gsctx, sig, &cinfo->ret, sig->ret, TRUE, &tmp_gr, &tmp_fr, &tmp_stacksize);
				if (cinfo->ret.storage == ArgOnStack)
					/* The caller passes the address where the value is stored */
					add_general (&gr, &stack_size, &cinfo->ret);
				if (cinfo->ret.storage == ArgInIReg)
					cinfo->ret.storage = ArgValuetypeAddrInIReg;
			}
			break;
		}
		case MONO_TYPE_VOID:
			cinfo->ret.storage = ArgNone;
			break;
		default:
			g_error ("Can't handle as return value 0x%x", sig->ret->type);
		}
	}
	/* this */
	if (sig->hasthis)
		add_general (&gr, &stack_size, cinfo->args + 0);

	if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == 0)) {
		gr = PARAM_REGS;
		fr = FLOAT_PARAM_REGS;

		/* Emit the signature cookie just before the implicit arguments */
		add_general (&gr, &stack_size, &cinfo->sig_cookie);
	}
	for (i = 0; i < sig->param_count; ++i) {
		ArgInfo *ainfo = &cinfo->args [sig->hasthis + i];
		MonoType *ptype;

		if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
			/* We always pass the sig cookie on the stack for simplicity */

			/*
			 * Prevent implicit arguments + the sig cookie from being passed
			 * in registers.
			 */
			gr = PARAM_REGS;
			fr = FLOAT_PARAM_REGS;

			/* Emit the signature cookie just before the implicit arguments */
			add_general (&gr, &stack_size, &cinfo->sig_cookie);
		}

		if (sig->params [i]->byref) {
			add_general (&gr, &stack_size, ainfo);
			continue;
		}
		ptype = mono_type_get_underlying_type (sig->params [i]);
		ptype = mini_get_basic_type_from_generic (gsctx, ptype);
		switch (ptype->type) {
		case MONO_TYPE_BOOLEAN:
		case MONO_TYPE_I1:
		case MONO_TYPE_U1:
			add_general (&gr, &stack_size, ainfo);
			break;
		case MONO_TYPE_I2:
		case MONO_TYPE_U2:
		case MONO_TYPE_CHAR:
			add_general (&gr, &stack_size, ainfo);
			break;
		case MONO_TYPE_I4:
		case MONO_TYPE_U4:
			add_general (&gr, &stack_size, ainfo);
			break;
		case MONO_TYPE_I:
		case MONO_TYPE_U:
		case MONO_TYPE_PTR:
		case MONO_TYPE_FNPTR:
		case MONO_TYPE_CLASS:
		case MONO_TYPE_OBJECT:
		case MONO_TYPE_STRING:
		case MONO_TYPE_SZARRAY:
		case MONO_TYPE_ARRAY:
			add_general (&gr, &stack_size, ainfo);
			break;
		case MONO_TYPE_GENERICINST:
			if (!mono_type_generic_inst_is_valuetype (sig->params [i])) {
				add_general (&gr, &stack_size, ainfo);
				break;
			}
			/* Fall through */
		case MONO_TYPE_VALUETYPE:
		case MONO_TYPE_TYPEDBYREF:
			/* We always pass valuetypes on the stack */
			add_valuetype (gsctx, sig, ainfo, sig->params [i], FALSE, &gr, &fr, &stack_size);
			break;
		case MONO_TYPE_U8:
		case MONO_TYPE_I8:
			add_general (&gr, &stack_size, ainfo);
			break;
		case MONO_TYPE_R4:
			add_float (&gr, &fr, &stack_size, ainfo, FALSE);
			break;
		case MONO_TYPE_R8:
			add_float (&gr, &fr, &stack_size, ainfo, TRUE);
			break;
		default:
			g_assert_not_reached ();
		}
	}
	if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n > 0) && (sig->sentinelpos == sig->param_count)) {
		gr = PARAM_REGS;
		fr = FLOAT_PARAM_REGS;

		/* Emit the signature cookie just before the implicit arguments */
		add_general (&gr, &stack_size, &cinfo->sig_cookie);
	}

	cinfo->stack_usage = stack_size;
	cinfo->reg_usage = gr;
	cinfo->freg_usage = fr;
	return cinfo;
}
/*
 * mono_arch_get_argument_info:
 * @csig: a method signature
 * @param_count: the number of parameters to consider
 * @arg_info: an array to store the result infos
 *
 * Gathers information on parameters such as size, alignment and
 * padding. arg_info should be large enough to hold param_count + 1 entries.
 *
 * Returns the size of the argument area on the stack.
 */
int
mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
{
	int k;
	CallInfo *cinfo = get_call_info (NULL, NULL, csig, FALSE);
	guint32 args_size = cinfo->stack_usage;

	/* The arguments are saved to a stack area in mono_arch_instrument_prolog */
	if (csig->hasthis) {
		arg_info [0].offset = 0;
	}

	for (k = 0; k < param_count; k++) {
		arg_info [k + 1].offset = ((k + csig->hasthis) * 8);
		arg_info [k + 1].size = 0;
	}

	g_free (cinfo);

	return args_size;
}
/*
 * Initialize the cpu to execute managed code.
 */
void
mono_arch_cpu_init (void)
{
}

/*
 * Initialize architecture specific code.
 */
void
mono_arch_init (void)
{
}

/*
 * Cleanup architecture specific code.
 */
void
mono_arch_cleanup (void)
{
}

/*
 * This function returns the optimizations supported on this cpu.
 */
guint32
mono_arch_cpu_optimizazions (guint32 *exclude_mask)
{
	guint32 opts = 0;

	*exclude_mask = 0;

	return opts;
}
GList*
mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
{
	GList *vars = NULL;
	int i;
	MonoMethodSignature *sig;
	MonoMethodHeader *header;
	CallInfo *cinfo;

	header = mono_method_get_header (cfg->method);

	sig = mono_method_signature (cfg->method);

	cinfo = get_call_info (cfg, cfg->mempool, sig, FALSE);

	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
		MonoInst *ins = cfg->args [i];

		ArgInfo *ainfo = &cinfo->args [i];

		if (ins->flags & (MONO_INST_IS_DEAD|MONO_INST_VOLATILE|MONO_INST_INDIRECT))
			continue;

		if (ainfo->storage == ArgInIReg) {
			/* The input registers are non-volatile */
			ins->opcode = OP_REGVAR;
			ins->dreg = 32 + ainfo->reg;
		}
	}

	for (i = 0; i < cfg->num_varinfo; i++) {
		MonoInst *ins = cfg->varinfo [i];
		MonoMethodVar *vmv = MONO_VARINFO (cfg, i);

		if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
			continue;

		if ((ins->flags & (MONO_INST_IS_DEAD|MONO_INST_VOLATILE|MONO_INST_INDIRECT)) ||
		    (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
			continue;

		if (mono_is_regsize_var (ins->inst_vtype)) {
			g_assert (MONO_VARINFO (cfg, i)->reg == -1);
			g_assert (i == vmv->idx);
			vars = g_list_prepend (vars, vmv);
		}
	}

	vars = mono_varlist_sort (cfg, vars, 0);

	return vars;
}
static void
mono_ia64_alloc_stacked_registers (MonoCompile *cfg)
{
	CallInfo *cinfo;
	guint32 reserved_regs;
	MonoMethodHeader *header;

	if (cfg->arch.reg_local0 > 0)
		/* Already done */
		return;

	cinfo = get_call_info (cfg, cfg->mempool, mono_method_signature (cfg->method), FALSE);

	header = mono_method_get_header (cfg->method);

	/* Some registers are reserved for use by the prolog/epilog */
	reserved_regs = header->num_clauses ? 4 : 3;

	if ((mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method)) ||
	    (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)) {
		/* One register is needed by instrument_epilog to save the return value */
		reserved_regs ++;
		if (cinfo->reg_usage < 2)
			/* Number of arguments passed to function call in instrument_prolog */
			cinfo->reg_usage = 2;
	}

	cfg->arch.reg_in0 = 32;
	cfg->arch.reg_local0 = cfg->arch.reg_in0 + cinfo->reg_usage + reserved_regs;
	cfg->arch.reg_out0 = cfg->arch.reg_local0 + 16;

	cfg->arch.reg_saved_ar_pfs = cfg->arch.reg_local0 - 1;
	cfg->arch.reg_saved_b0 = cfg->arch.reg_local0 - 2;
	cfg->arch.reg_fp = cfg->arch.reg_local0 - 3;

	/*
	 * Frames without handlers save sp to fp, frames with handlers save it into
	 * a dedicated register.
	 */
	if (header->num_clauses)
		cfg->arch.reg_saved_sp = cfg->arch.reg_local0 - 4;
	else
		cfg->arch.reg_saved_sp = cfg->arch.reg_fp;

	if ((mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method)) ||
	    (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)) {
		cfg->arch.reg_saved_return_val = cfg->arch.reg_local0 - reserved_regs;
	}

	/*
	 * Need to allocate at least 2 out registers for use by OP_THROW / the system
	 * exception throwing code.
	 */
	cfg->arch.n_out_regs = MAX (cfg->arch.n_out_regs, 2);
}
GList*
mono_arch_get_global_int_regs (MonoCompile *cfg)
{
	GList *regs = NULL;
	int i;

	mono_ia64_alloc_stacked_registers (cfg);

	for (i = cfg->arch.reg_local0; i < cfg->arch.reg_out0; ++i) {
		regs = g_list_prepend (regs, (gpointer)(gssize)(i));
	}

	return regs;
}
/*
 * mono_arch_regalloc_cost:
 *
 * Return the cost, in number of memory references, of the action of
 * allocating the variable VMV into a register during global register
 * allocation.
 */
guint32
mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
{
	/* FIXME: Increase costs linearly to avoid using all local registers */

	return 0;
}
void
mono_arch_allocate_vars (MonoCompile *cfg)
{
	MonoMethodSignature *sig;
	MonoMethodHeader *header;
	MonoInst *inst;
	int i, offset;
	guint32 locals_stack_size, locals_stack_align;
	gint32 *offsets;
	CallInfo *cinfo;

	header = mono_method_get_header (cfg->method);

	sig = mono_method_signature (cfg->method);

	cinfo = get_call_info (cfg, cfg->mempool, sig, FALSE);

	/*
	 * Determine whether the frame pointer can be eliminated.
	 * FIXME: Remove some of the restrictions.
	 */
	cfg->arch.omit_fp = TRUE;

	if (!debug_omit_fp ())
		cfg->arch.omit_fp = FALSE;

	if (cfg->flags & MONO_CFG_HAS_ALLOCA)
		cfg->arch.omit_fp = FALSE;
	if (header->num_clauses)
		cfg->arch.omit_fp = FALSE;
	if (cfg->param_area)
		cfg->arch.omit_fp = FALSE;
	if ((sig->ret->type != MONO_TYPE_VOID) && (cinfo->ret.storage == ArgAggregate))
		cfg->arch.omit_fp = FALSE;
	if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG))
		cfg->arch.omit_fp = FALSE;
	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
		ArgInfo *ainfo = &cinfo->args [i];

		if (ainfo->storage == ArgOnStack) {
			/*
			 * The stack offset can only be determined when the frame
			 * size is known.
			 */
			cfg->arch.omit_fp = FALSE;
		}
	}

	mono_ia64_alloc_stacked_registers (cfg);

	/*
	 * We use the ABI calling conventions for managed code as well.
	 * Exception: valuetypes are never passed or returned in registers.
	 */

	if (cfg->arch.omit_fp) {
		cfg->flags |= MONO_CFG_HAS_SPILLUP;
		cfg->frame_reg = IA64_SP;
		offset = ARGS_OFFSET;
	}
	else {
		/* Locals are allocated backwards from %fp */
		cfg->frame_reg = cfg->arch.reg_fp;
		offset = 0;
	}

	if (cfg->method->save_lmf) {
		/* No LMF on IA64 */
	}

	if (sig->ret->type != MONO_TYPE_VOID) {
		switch (cinfo->ret.storage) {
		case ArgInIReg:
			cfg->ret->opcode = OP_REGVAR;
			cfg->ret->inst_c0 = cinfo->ret.reg;
			break;
		case ArgInFloatReg:
			cfg->ret->opcode = OP_REGVAR;
			cfg->ret->inst_c0 = cinfo->ret.reg;
			break;
		case ArgValuetypeAddrInIReg:
			cfg->vret_addr->opcode = OP_REGVAR;
			cfg->vret_addr->dreg = cfg->arch.reg_in0 + cinfo->ret.reg;
			break;
		case ArgAggregate:
			/* Allocate a local to hold the result, the epilog will copy it to the correct place */
			if (cfg->arch.omit_fp)
				g_assert_not_reached ();
			offset = ALIGN_TO (offset, 8);
			offset += cinfo->ret.nslots * 8;
			cfg->ret->opcode = OP_REGOFFSET;
			cfg->ret->inst_basereg = cfg->frame_reg;
			cfg->ret->inst_offset = - offset;
			break;
		default:
			g_assert_not_reached ();
		}
		cfg->ret->dreg = cfg->ret->inst_c0;
	}

	/* Allocate locals */
	offsets = mono_allocate_stack_slots_full (cfg, cfg->arch.omit_fp ? FALSE : TRUE, &locals_stack_size, &locals_stack_align);
	if (locals_stack_align) {
		offset = ALIGN_TO (offset, locals_stack_align);
	}
	for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
		if (offsets [i] != -1) {
			MonoInst *inst = cfg->varinfo [i];
			inst->opcode = OP_REGOFFSET;
			inst->inst_basereg = cfg->frame_reg;
			if (cfg->arch.omit_fp)
				inst->inst_offset = (offset + offsets [i]);
			else
				inst->inst_offset = - (offset + offsets [i]);
			// printf ("allocated local %d to ", i); mono_print_tree_nl (inst);
		}
	}
	offset += locals_stack_size;

	if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG)) {
		if (cfg->arch.omit_fp)
			g_assert_not_reached ();
		g_assert (cinfo->sig_cookie.storage == ArgOnStack);
		cfg->sig_cookie = cinfo->sig_cookie.offset + ARGS_OFFSET;
	}

	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
		inst = cfg->args [i];
		if (inst->opcode != OP_REGVAR) {
			ArgInfo *ainfo = &cinfo->args [i];
			gboolean inreg = TRUE;
			MonoType *arg_type;

			if (sig->hasthis && (i == 0))
				arg_type = &mono_defaults.object_class->byval_arg;
			else
				arg_type = sig->params [i - sig->hasthis];

			/* FIXME: VOLATILE is only set if the liveness pass runs */
			if (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))
				inreg = FALSE;

			inst->opcode = OP_REGOFFSET;

			switch (ainfo->storage) {
			case ArgInIReg:
				inst->opcode = OP_REGVAR;
				inst->dreg = cfg->arch.reg_in0 + ainfo->reg;
				break;
			case ArgInFloatReg:
			case ArgInFloatRegR4:
				/*
				 * Since float regs are volatile, we save the arguments to
				 * the stack in the prolog.
				 */
				inreg = FALSE;
				break;
			case ArgOnStack:
				if (cfg->arch.omit_fp)
					g_assert_not_reached ();
				inst->opcode = OP_REGOFFSET;
				inst->inst_basereg = cfg->frame_reg;
				inst->inst_offset = ARGS_OFFSET + ainfo->offset;
				break;
			case ArgAggregate:
				inreg = FALSE;
				break;
			default:
				NOT_IMPLEMENTED;
			}

			if (!inreg && (ainfo->storage != ArgOnStack)) {
				guint32 size = 0;

				inst->opcode = OP_REGOFFSET;
				inst->inst_basereg = cfg->frame_reg;
				/* These arguments are saved to the stack in the prolog */
				switch (ainfo->storage) {
				case ArgAggregate:
					if (ainfo->atype == AggregateSingleHFA)
						size = ainfo->nslots * 4;
					else
						size = ainfo->nslots * 8;
					break;
				default:
					size = sizeof (gpointer);
					break;
				}

				offset = ALIGN_TO (offset, sizeof (gpointer));

				if (cfg->arch.omit_fp) {
					inst->inst_offset = offset;
					offset += size;
				}
				else {
					offset += size;
					inst->inst_offset = - offset;
				}
			}
		}
	}

	/*
	 * FIXME: This doesn't work because some variables are allocated during local
	 * regalloc.
	 */
	/*
	if (cfg->arch.omit_fp && offset == 16)
		offset = 0;
	*/

	cfg->stack_offset = offset;
}
void
mono_arch_create_vars (MonoCompile *cfg)
{
	MonoMethodSignature *sig;
	CallInfo *cinfo;

	sig = mono_method_signature (cfg->method);

	cinfo = get_call_info (cfg, cfg->mempool, sig, FALSE);

	if (cinfo->ret.storage == ArgAggregate)
		cfg->ret_var_is_local = TRUE;
	if (cinfo->ret.storage == ArgValuetypeAddrInIReg) {
		cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
		if (G_UNLIKELY (cfg->verbose_level > 1)) {
			printf ("vret_addr = ");
			mono_print_ins (cfg->vret_addr);
		}
	}
}
static void
add_outarg_reg (MonoCompile *cfg, MonoCallInst *call, ArgStorage storage, int reg, MonoInst *tree)
{
	MonoInst *arg;

	MONO_INST_NEW (cfg, arg, OP_NOP);
	arg->sreg1 = tree->dreg;

	switch (storage) {
	case ArgInIReg:
		arg->opcode = OP_MOVE;
		arg->dreg = mono_alloc_ireg (cfg);

		mono_call_inst_add_outarg_reg (cfg, call, arg->dreg, reg, FALSE);
		break;
	case ArgInFloatReg:
		arg->opcode = OP_FMOVE;
		arg->dreg = mono_alloc_freg (cfg);

		mono_call_inst_add_outarg_reg (cfg, call, arg->dreg, reg, TRUE);
		break;
	case ArgInFloatRegR4:
		arg->opcode = OP_FCONV_TO_R4;
		arg->dreg = mono_alloc_freg (cfg);

		mono_call_inst_add_outarg_reg (cfg, call, arg->dreg, reg, TRUE);
		break;
	default:
		g_assert_not_reached ();
	}

	MONO_ADD_INS (cfg->cbb, arg);
}
static void
emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
{
	MonoMethodSignature *tmp_sig;
	MonoInst *sig_arg;

	/* Emit the signature cookie just before the implicit arguments */

	/* FIXME: Add support for signature tokens to AOT */
	cfg->disable_aot = TRUE;

	g_assert (cinfo->sig_cookie.storage == ArgOnStack);

	/*
	 * mono_ArgIterator_Setup assumes the signature cookie is
	 * passed first and all the arguments which were before it are
	 * passed on the stack after the signature. So compensate by
	 * passing a different signature.
	 */
	tmp_sig = mono_metadata_signature_dup (call->signature);
	tmp_sig->param_count -= call->signature->sentinelpos;
	tmp_sig->sentinelpos = 0;
	memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));

	MONO_INST_NEW (cfg, sig_arg, OP_ICONST);
	sig_arg->dreg = mono_alloc_ireg (cfg);
	sig_arg->inst_p0 = tmp_sig;
	MONO_ADD_INS (cfg->cbb, sig_arg);

	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, IA64_SP, 16 + cinfo->sig_cookie.offset, sig_arg->dreg);
}
void
mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
{
	MonoInst *in;
	MonoMethodSignature *sig;
	int i, n, stack_size;
	CallInfo *cinfo;
	ArgInfo *ainfo;

	stack_size = 0;

	mono_ia64_alloc_stacked_registers (cfg);

	sig = call->signature;
	n = sig->param_count + sig->hasthis;

	cinfo = get_call_info (cfg, cfg->mempool, sig, sig->pinvoke);

	if (cinfo->ret.storage == ArgAggregate) {
		MonoInst *vtarg;
		MonoInst *local;

		/*
		 * The valuetype is in registers after the call and needs to be copied
		 * to the stack. Save the address to a local here, so the call
		 * instruction can access it.
		 */
		local = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
		local->flags |= MONO_INST_VOLATILE;
		cfg->arch.ret_var_addr_local = local;

		MONO_INST_NEW (cfg, vtarg, OP_MOVE);
		vtarg->sreg1 = call->vret_var->dreg;
		vtarg->dreg = local->dreg;
		MONO_ADD_INS (cfg->cbb, vtarg);
	}

	if (cinfo->ret.storage == ArgValuetypeAddrInIReg) {
		add_outarg_reg (cfg, call, ArgInIReg, cfg->arch.reg_out0 + cinfo->ret.reg, call->vret_var);
	}

	for (i = 0; i < n; ++i) {
		MonoType *arg_type;

		ainfo = cinfo->args + i;

		if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
			/* Emit the signature cookie just before the implicit arguments */
			emit_sig_cookie (cfg, call, cinfo);
		}

		in = call->args [i];

		if (sig->hasthis && (i == 0))
			arg_type = &mono_defaults.object_class->byval_arg;
		else
			arg_type = sig->params [i - sig->hasthis];

		if ((i >= sig->hasthis) && (MONO_TYPE_ISSTRUCT(arg_type))) {
			guint32 align;
			guint32 size;

			if (arg_type->type == MONO_TYPE_TYPEDBYREF) {
				size = sizeof (MonoTypedRef);
				align = sizeof (gpointer);
			}
			else if (sig->pinvoke)
				size = mono_type_native_stack_size (&in->klass->byval_arg, &align);
			else {
				/*
				 * Other backends use mono_type_stack_size (), but that
				 * aligns the size to 8, which is larger than the size of
				 * the source, leading to reads of invalid memory if the
				 * source is at the end of address space.
				 */
				size = mono_class_value_size (in->klass, &align);
			}

			if (size > 0) {
				MonoInst *arg;

				MONO_INST_NEW (cfg, arg, OP_OUTARG_VT);
				arg->sreg1 = in->dreg;
				arg->klass = in->klass;
				arg->backend.size = size;
				arg->inst_p0 = call;
				arg->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
				memcpy (arg->inst_p1, ainfo, sizeof (ArgInfo));

				MONO_ADD_INS (cfg->cbb, arg);
			}
		}
		else {
			switch (ainfo->storage) {
			case ArgInIReg:
				add_outarg_reg (cfg, call, ainfo->storage, cfg->arch.reg_out0 + ainfo->reg, in);
				break;
			case ArgInFloatReg:
			case ArgInFloatRegR4:
				add_outarg_reg (cfg, call, ainfo->storage, ainfo->reg, in);
				break;
			case ArgOnStack:
				if (arg_type->type == MONO_TYPE_R4 && !arg_type->byref)
					MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, IA64_SP, 16 + ainfo->offset, in->dreg);
				else if (arg_type->type == MONO_TYPE_R8 && !arg_type->byref)
					MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, IA64_SP, 16 + ainfo->offset, in->dreg);
				else
					MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, IA64_SP, 16 + ainfo->offset, in->dreg);
				break;
			default:
				g_assert_not_reached ();
			}
		}
	}

	/* Handle the case where there are no implicit arguments */
	if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos)) {
		emit_sig_cookie (cfg, call, cinfo);
	}

	call->stack_usage = cinfo->stack_usage;
	cfg->arch.n_out_regs = MAX (cfg->arch.n_out_regs, cinfo->reg_usage);
}
void
mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
{
	MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
	ArgInfo *ainfo = (ArgInfo*)ins->inst_p1;
	int size = ins->backend.size;

	if (ainfo->storage == ArgAggregate) {
		MonoInst *load, *store;
		int i, slot;

		/*
		 * Part of the structure is passed in registers.
		 */
		for (i = 0; i < ainfo->nregs; ++i) {
			slot = ainfo->reg + i;

			if (ainfo->atype == AggregateSingleHFA) {
				MONO_INST_NEW (cfg, load, OP_LOADR4_MEMBASE);
				load->inst_basereg = src->dreg;
				load->inst_offset = i * 4;
				load->dreg = mono_alloc_freg (cfg);

				mono_call_inst_add_outarg_reg (cfg, call, load->dreg, ainfo->reg + i, TRUE);
			} else if (ainfo->atype == AggregateDoubleHFA) {
				MONO_INST_NEW (cfg, load, OP_LOADR8_MEMBASE);
				load->inst_basereg = src->dreg;
				load->inst_offset = i * 8;
				load->dreg = mono_alloc_freg (cfg);

				mono_call_inst_add_outarg_reg (cfg, call, load->dreg, ainfo->reg + i, TRUE);
			} else {
				MONO_INST_NEW (cfg, load, OP_LOADI8_MEMBASE);
				load->inst_basereg = src->dreg;
				load->inst_offset = i * 8;
				load->dreg = mono_alloc_ireg (cfg);

				mono_call_inst_add_outarg_reg (cfg, call, load->dreg, cfg->arch.reg_out0 + ainfo->reg + i, FALSE);
			}
			MONO_ADD_INS (cfg->cbb, load);
		}

		/*
		 * Part of the structure is passed on the stack.
		 */
		for (i = ainfo->nregs; i < ainfo->nslots; ++i) {
			slot = ainfo->reg + i;

			MONO_INST_NEW (cfg, load, OP_LOADI8_MEMBASE);
			load->inst_basereg = src->dreg;
			load->inst_offset = i * sizeof (gpointer);
			load->dreg = mono_alloc_preg (cfg);
			MONO_ADD_INS (cfg->cbb, load);

			MONO_INST_NEW (cfg, store, OP_STOREI8_MEMBASE_REG);
			store->sreg1 = load->dreg;
			store->inst_destbasereg = IA64_SP;
			store->inst_offset = 16 + ainfo->offset + (slot - 8) * 8;
			MONO_ADD_INS (cfg->cbb, store);
		}
	}
	else {
		mini_emit_memcpy (cfg, IA64_SP, 16 + ainfo->offset, src->dreg, 0, size, 4);
	}
}
void
mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
{
	CallInfo *cinfo = get_call_info (cfg, cfg->mempool, mono_method_signature (method), FALSE);

	switch (cinfo->ret.storage) {
	case ArgInIReg:
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
		break;
	case ArgInFloatReg:
		MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
		break;
	default:
		g_assert_not_reached ();
	}
}
void
mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
{
}

void
mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
{
	MonoInst *ins, *n, *last_ins = NULL;

	MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
		switch (ins->opcode) {
		case OP_MOVE:
		case OP_FMOVE:
			/*
			 * Removes:
			 *
			 * OP_MOVE reg, reg
			 */
			if (ins->dreg == ins->sreg1) {
				MONO_DELETE_INS (bb, ins);
				continue;
			}
			/*
			 * Removes:
			 *
			 * OP_MOVE sreg, dreg
			 * OP_MOVE dreg, sreg
			 */
			if (last_ins && last_ins->opcode == OP_MOVE &&
			    ins->sreg1 == last_ins->dreg &&
			    ins->dreg == last_ins->sreg1) {
				MONO_DELETE_INS (bb, ins);
				continue;
			}
			break;
		case OP_MUL_IMM:
		case OP_LMUL_IMM:
		case OP_IMUL_IMM:
			/* remove unnecessary multiplication with 1 */
			if (ins->inst_imm == 1) {
				if (ins->dreg != ins->sreg1) {
					ins->opcode = OP_MOVE;
				}
				else {
					MONO_DELETE_INS (bb, ins);
					continue;
				}
			}
			break;
		default:
			break;
		}

		last_ins = ins;
	}
	bb->last_ins = last_ins;
}
static int cond_to_ia64_cmp [][3] = {
	{OP_IA64_CMP_EQ, OP_IA64_CMP4_EQ, OP_IA64_FCMP_EQ},
	{OP_IA64_CMP_NE, OP_IA64_CMP4_NE, OP_IA64_FCMP_NE},
	{OP_IA64_CMP_LE, OP_IA64_CMP4_LE, OP_IA64_FCMP_LE},
	{OP_IA64_CMP_GE, OP_IA64_CMP4_GE, OP_IA64_FCMP_GE},
	{OP_IA64_CMP_LT, OP_IA64_CMP4_LT, OP_IA64_FCMP_LT},
	{OP_IA64_CMP_GT, OP_IA64_CMP4_GT, OP_IA64_FCMP_GT},
	{OP_IA64_CMP_LE_UN, OP_IA64_CMP4_LE_UN, OP_IA64_FCMP_LE_UN},
	{OP_IA64_CMP_GE_UN, OP_IA64_CMP4_GE_UN, OP_IA64_FCMP_GE_UN},
	{OP_IA64_CMP_LT_UN, OP_IA64_CMP4_LT_UN, OP_IA64_FCMP_LT_UN},
	{OP_IA64_CMP_GT_UN, OP_IA64_CMP4_GT_UN, OP_IA64_FCMP_GT_UN}
};

static int
opcode_to_ia64_cmp (int opcode, int cmp_opcode)
{
	return cond_to_ia64_cmp [mono_opcode_to_cond (opcode)][mono_opcode_to_type (opcode, cmp_opcode)];
}
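/*
 * The row is picked by the condition (mono_opcode_to_cond) and the column by
 * the operand kind (mono_opcode_to_type): native 64-bit compares map to cmp,
 * 32-bit compares to cmp4 and float compares to fcmp. For example, an
 * OP_IBEQ following a 32-bit compare should end up as OP_IA64_CMP4_EQ.
 */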
static int cond_to_ia64_cmp_imm [][3] = {
	{OP_IA64_CMP_EQ_IMM, OP_IA64_CMP4_EQ_IMM, 0},
	{OP_IA64_CMP_NE_IMM, OP_IA64_CMP4_NE_IMM, 0},
	{OP_IA64_CMP_GE_IMM, OP_IA64_CMP4_GE_IMM, 0},
	{OP_IA64_CMP_LE_IMM, OP_IA64_CMP4_LE_IMM, 0},
	{OP_IA64_CMP_GT_IMM, OP_IA64_CMP4_GT_IMM, 0},
	{OP_IA64_CMP_LT_IMM, OP_IA64_CMP4_LT_IMM, 0},
	{OP_IA64_CMP_GE_UN_IMM, OP_IA64_CMP4_GE_UN_IMM, 0},
	{OP_IA64_CMP_LE_UN_IMM, OP_IA64_CMP4_LE_UN_IMM, 0},
	{OP_IA64_CMP_GT_UN_IMM, OP_IA64_CMP4_GT_UN_IMM, 0},
	{OP_IA64_CMP_LT_UN_IMM, OP_IA64_CMP4_LT_UN_IMM, 0},
};

static int
opcode_to_ia64_cmp_imm (int opcode, int cmp_opcode)
{
	/* The condition needs to be reversed */
	return cond_to_ia64_cmp_imm [mono_opcode_to_cond (opcode)][mono_opcode_to_type (opcode, cmp_opcode)];
}
#define NEW_INS(cfg,dest,op) do {	\
	(dest) = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoInst));	\
	(dest)->opcode = (op);	\
	mono_bblock_insert_after_ins (bb, last_ins, (dest));	\
	last_ins = (dest);	\
} while (0)
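/*
 * NEW_INS inserts the helper instruction after last_ins, so consecutive
 * NEW_INS calls build up the replacement sequence in order, just in front
 * of the instruction currently being lowered.
 */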
/*
 * mono_arch_lowering_pass:
 *
 * Converts complex opcodes into simpler ones so that each IR instruction
 * corresponds to one machine instruction.
 */
void
mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
{
	MonoInst *ins, *n, *next, *temp, *temp2, *temp3, *last_ins = NULL;

	MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
		switch (ins->opcode) {
		case OP_STOREI1_MEMBASE_IMM:
		case OP_STOREI2_MEMBASE_IMM:
		case OP_STOREI4_MEMBASE_IMM:
		case OP_STOREI8_MEMBASE_IMM:
		case OP_STORE_MEMBASE_IMM:
			/* There are no store_membase instructions on ia64 */
			if (ins->inst_offset == 0) {
				temp2 = NULL;
			} else if (ia64_is_imm14 (ins->inst_offset)) {
				NEW_INS (cfg, temp2, OP_ADD_IMM);
				temp2->sreg1 = ins->inst_destbasereg;
				temp2->inst_imm = ins->inst_offset;
				temp2->dreg = mono_alloc_ireg (cfg);
			}
			else {
				NEW_INS (cfg, temp, OP_I8CONST);
				temp->inst_c0 = ins->inst_offset;
				temp->dreg = mono_alloc_ireg (cfg);

				NEW_INS (cfg, temp2, OP_LADD);
				temp2->sreg1 = ins->inst_destbasereg;
				temp2->sreg2 = temp->dreg;
				temp2->dreg = mono_alloc_ireg (cfg);
			}

			switch (ins->opcode) {
			case OP_STOREI1_MEMBASE_IMM:
				ins->opcode = OP_STOREI1_MEMBASE_REG;
				break;
			case OP_STOREI2_MEMBASE_IMM:
				ins->opcode = OP_STOREI2_MEMBASE_REG;
				break;
			case OP_STOREI4_MEMBASE_IMM:
				ins->opcode = OP_STOREI4_MEMBASE_REG;
				break;
			case OP_STOREI8_MEMBASE_IMM:
			case OP_STORE_MEMBASE_IMM:
				ins->opcode = OP_STOREI8_MEMBASE_REG;
				break;
			default:
				g_assert_not_reached ();
			}

			if (ins->inst_imm == 0)
				ins->sreg1 = IA64_R0;
			else {
				NEW_INS (cfg, temp3, OP_I8CONST);
				temp3->inst_c0 = ins->inst_imm;
				temp3->dreg = mono_alloc_ireg (cfg);
				ins->sreg1 = temp3->dreg;
			}

			ins->inst_offset = 0;
			if (temp2)
				ins->inst_destbasereg = temp2->dreg;
			break;
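			/*
			 * Sketch of the lowering above: "store [base + off] = imm"
			 * becomes roughly "tmp2 = base + off; tmp3 = imm;
			 * store [tmp2] = tmp3", since ia64 stores accept neither a
			 * displacement nor an immediate operand.
			 */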
		case OP_STOREI1_MEMBASE_REG:
		case OP_STOREI2_MEMBASE_REG:
		case OP_STOREI4_MEMBASE_REG:
		case OP_STOREI8_MEMBASE_REG:
		case OP_STORER4_MEMBASE_REG:
		case OP_STORER8_MEMBASE_REG:
		case OP_STORE_MEMBASE_REG:
			/* There are no store_membase instructions on ia64 */
			if (ins->inst_offset == 0) {
				break;
			}
			else if (ia64_is_imm14 (ins->inst_offset)) {
				NEW_INS (cfg, temp2, OP_ADD_IMM);
				temp2->sreg1 = ins->inst_destbasereg;
				temp2->inst_imm = ins->inst_offset;
				temp2->dreg = mono_alloc_ireg (cfg);
			}
			else {
				NEW_INS (cfg, temp, OP_I8CONST);
				temp->inst_c0 = ins->inst_offset;
				temp->dreg = mono_alloc_ireg (cfg);
				NEW_INS (cfg, temp2, OP_LADD);
				temp2->sreg1 = ins->inst_destbasereg;
				temp2->sreg2 = temp->dreg;
				temp2->dreg = mono_alloc_ireg (cfg);
			}

			ins->inst_offset = 0;
			ins->inst_destbasereg = temp2->dreg;
			break;
		case OP_LOADI1_MEMBASE:
		case OP_LOADU1_MEMBASE:
		case OP_LOADI2_MEMBASE:
		case OP_LOADU2_MEMBASE:
		case OP_LOADI4_MEMBASE:
		case OP_LOADU4_MEMBASE:
		case OP_LOADI8_MEMBASE:
		case OP_LOAD_MEMBASE:
		case OP_LOADR4_MEMBASE:
		case OP_LOADR8_MEMBASE:
		case OP_ATOMIC_EXCHANGE_I4:
		case OP_ATOMIC_EXCHANGE_I8:
		case OP_ATOMIC_ADD_NEW_I4:
		case OP_ATOMIC_ADD_NEW_I8:
		case OP_ATOMIC_ADD_IMM_NEW_I4:
		case OP_ATOMIC_ADD_IMM_NEW_I8:
			/* There are no membase instructions on ia64 */
			if (ins->inst_offset == 0) {
				break;
			}
			else if (ia64_is_imm14 (ins->inst_offset)) {
				NEW_INS (cfg, temp2, OP_ADD_IMM);
				temp2->sreg1 = ins->inst_basereg;
				temp2->inst_imm = ins->inst_offset;
				temp2->dreg = mono_alloc_ireg (cfg);
			}
			else {
				NEW_INS (cfg, temp, OP_I8CONST);
				temp->inst_c0 = ins->inst_offset;
				temp->dreg = mono_alloc_ireg (cfg);
				NEW_INS (cfg, temp2, OP_LADD);
				temp2->sreg1 = ins->inst_basereg;
				temp2->sreg2 = temp->dreg;
				temp2->dreg = mono_alloc_ireg (cfg);
			}

			ins->inst_offset = 0;
			ins->inst_basereg = temp2->dreg;
			break;
		case OP_ADD_IMM:
		case OP_IADD_IMM:
		case OP_LADD_IMM:
		case OP_ISUB_IMM:
		case OP_LSUB_IMM:
		case OP_AND_IMM:
		case OP_IAND_IMM:
		case OP_LAND_IMM:
		case OP_IOR_IMM:
		case OP_LOR_IMM:
		case OP_IXOR_IMM:
		case OP_LXOR_IMM:
		case OP_SHL_IMM:
		case OP_ISHL_IMM:
		case OP_LSHL_IMM:
		case OP_ISHR_IMM:
		case OP_LSHR_IMM:
		case OP_ISHR_UN_IMM:
		case OP_LSHR_UN_IMM: {
			gboolean is_imm = FALSE;
			gboolean switched = FALSE;

			if (ins->opcode == OP_AND_IMM && ins->inst_imm == 255) {
				ins->opcode = OP_ZEXT_I1;
				break;
			}

			switch (ins->opcode) {
			case OP_ADD_IMM:
			case OP_IADD_IMM:
			case OP_LADD_IMM:
				is_imm = ia64_is_imm14 (ins->inst_imm);
				switched = TRUE;
				break;
			case OP_ISUB_IMM:
			case OP_LSUB_IMM:
				is_imm = ia64_is_imm14 (- (ins->inst_imm));
				if (is_imm) {
					/* A = B - IMM -> A = B + (-IMM) */
					ins->inst_imm = - ins->inst_imm;
					ins->opcode = OP_IADD_IMM;
				}
				switched = TRUE;
				break;
			case OP_AND_IMM:
			case OP_IAND_IMM:
			case OP_LAND_IMM:
			case OP_IOR_IMM:
			case OP_LOR_IMM:
			case OP_IXOR_IMM:
			case OP_LXOR_IMM:
				is_imm = ia64_is_imm8 (ins->inst_imm);
				switched = TRUE;
				break;
			case OP_SHL_IMM:
			case OP_ISHL_IMM:
			case OP_LSHL_IMM:
			case OP_ISHR_IMM:
			case OP_LSHR_IMM:
			case OP_ISHR_UN_IMM:
			case OP_LSHR_UN_IMM:
				is_imm = (ins->inst_imm >= 0) && (ins->inst_imm < 64);
				break;
			default:
				break;
			}

			if (is_imm) {
				if (switched)
					ins->sreg2 = ins->sreg1;
				break;
			}

			ins->opcode = mono_op_imm_to_op (ins->opcode);

			if (ins->inst_imm == 0)
				ins->sreg2 = IA64_R0;
			else {
				NEW_INS (cfg, temp, OP_I8CONST);
				temp->inst_c0 = ins->inst_imm;
				temp->dreg = mono_alloc_ireg (cfg);
				ins->sreg2 = temp->dreg;
			}
			break;
		}
		case OP_COMPARE_IMM:
		case OP_ICOMPARE_IMM:
		case OP_LCOMPARE_IMM: {
			/* Instead of compare+b<cond>, ia64 has compare<cond>+br */
			gboolean imm;
			CompRelation cond;

			next = ins->next;

			/* Branch opts can eliminate the branch */
			if (!next || (!(MONO_IS_COND_BRANCH_OP (next) || MONO_IS_COND_EXC (next) || MONO_IS_SETCC (next)))) {
				ins->opcode = OP_NOP;
				break;
			}

			/*
			 * The compare_imm instructions have switched up arguments, and
			 * some of them take an imm between -127 and 128.
			 */
			cond = mono_opcode_to_cond (next->opcode);
			if ((cond == CMP_LT) || (cond == CMP_GE))
				imm = ia64_is_imm8 (ins->inst_imm - 1);
			else if ((cond == CMP_LT_UN) || (cond == CMP_GE_UN))
				imm = ia64_is_imm8 (ins->inst_imm - 1) && (ins->inst_imm > 0);
			else
				imm = ia64_is_imm8 (ins->inst_imm);

			if (imm) {
				ins->opcode = opcode_to_ia64_cmp_imm (next->opcode, ins->opcode);
				ins->sreg2 = ins->sreg1;
			}
			else {
				ins->opcode = opcode_to_ia64_cmp (next->opcode, ins->opcode);

				if (ins->inst_imm == 0)
					ins->sreg2 = IA64_R0;
				else {
					NEW_INS (cfg, temp, OP_I8CONST);
					temp->inst_c0 = ins->inst_imm;
					temp->dreg = mono_alloc_ireg (cfg);
					ins->sreg2 = temp->dreg;
				}
			}

			if (MONO_IS_COND_BRANCH_OP (next)) {
				next->opcode = OP_IA64_BR_COND;
				if (! (next->flags & MONO_INST_BRLABEL))
					next->inst_target_bb = next->inst_true_bb;
			} else if (MONO_IS_COND_EXC (next)) {
				next->opcode = OP_IA64_COND_EXC;
			} else if (MONO_IS_SETCC (next)) {
				next->opcode = OP_IA64_CSET;
			} else {
				printf ("%s\n", mono_inst_name (next->opcode));
				NOT_IMPLEMENTED;
			}
			break;
		}
		case OP_COMPARE:
		case OP_ICOMPARE:
		case OP_LCOMPARE:
		case OP_FCOMPARE: {
			/* Instead of compare+b<cond>, ia64 has compare<cond>+br */

			next = ins->next;

			/* Branch opts can eliminate the branch */
			if (!next || (!(MONO_IS_COND_BRANCH_OP (next) || MONO_IS_COND_EXC (next) || MONO_IS_SETCC (next)))) {
				ins->opcode = OP_NOP;
				break;
			}

			ins->opcode = opcode_to_ia64_cmp (next->opcode, ins->opcode);

			if (MONO_IS_COND_BRANCH_OP (next)) {
				next->opcode = OP_IA64_BR_COND;
				if (! (next->flags & MONO_INST_BRLABEL))
					next->inst_target_bb = next->inst_true_bb;
			} else if (MONO_IS_COND_EXC (next)) {
				next->opcode = OP_IA64_COND_EXC;
			} else if (MONO_IS_SETCC (next)) {
				next->opcode = OP_IA64_CSET;
			} else {
				printf ("%s\n", mono_inst_name (next->opcode));
				NOT_IMPLEMENTED;
			}
			break;
		}
		case OP_FCEQ:
		case OP_FCGT:
		case OP_FCGT_UN:
		case OP_FCLT:
		case OP_FCLT_UN:
			/* The front end removes the fcompare, so introduce it again */
			NEW_INS (cfg, temp, opcode_to_ia64_cmp (ins->opcode, OP_FCOMPARE));
			temp->sreg1 = ins->sreg1;
			temp->sreg2 = ins->sreg2;

			ins->opcode = OP_IA64_CSET;
			break;
		case OP_MUL_IMM:
		case OP_LMUL_IMM:
		case OP_IMUL_IMM: {
			int i, sum_reg = 0;
			gboolean found = FALSE;
			int shl_op = ins->opcode == OP_IMUL_IMM ? OP_ISHL_IMM : OP_SHL_IMM;

			/* First the easy cases */
			if (ins->inst_imm == 1) {
				ins->opcode = OP_MOVE;
				break;
			}
			for (i = 1; i < 64; ++i)
				if (ins->inst_imm == (((gint64)1) << i)) {
					ins->opcode = shl_op;
					ins->inst_imm = i;
					found = TRUE;
					break;
				}

			/* This could be optimized */
			if (!found) {
				for (i = 0; i < 64; ++i) {
					if (ins->inst_imm & (((gint64)1) << i)) {
						NEW_INS (cfg, temp, shl_op);
						temp->dreg = mono_alloc_ireg (cfg);
						temp->sreg1 = ins->sreg1;
						temp->inst_imm = i;

						if (sum_reg == 0)
							sum_reg = temp->dreg;
						else {
							NEW_INS (cfg, temp2, OP_LADD);
							temp2->dreg = mono_alloc_ireg (cfg);
							temp2->sreg1 = sum_reg;
							temp2->sreg2 = temp->dreg;
							sum_reg = temp2->dreg;
						}
					}
				}
				ins->opcode = OP_MOVE;
				ins->sreg1 = sum_reg;
			}
			break;
		}
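		/*
		 * Worked example for the decomposition above: inst_imm == 10
		 * (binary 1010) yields (sreg1 << 1) and (sreg1 << 3) accumulated
		 * with an add, replacing the multiplication with shifts and adds.
		 */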
		case OP_LCONV_TO_OVF_U4:
			NEW_INS (cfg, temp, OP_IA64_CMP4_LT);
			temp->sreg1 = ins->sreg1;
			temp->sreg2 = IA64_R0;

			NEW_INS (cfg, temp, OP_IA64_COND_EXC);
			temp->inst_p1 = (char*)"OverflowException";

			ins->opcode = OP_MOVE;
			break;
		case OP_LCONV_TO_OVF_I4_UN:
			NEW_INS (cfg, temp, OP_ICONST);
			temp->inst_c0 = 0x7fffffff;
			temp->dreg = mono_alloc_ireg (cfg);

			NEW_INS (cfg, temp2, OP_IA64_CMP4_GT_UN);
			temp2->sreg1 = ins->sreg1;
			temp2->sreg2 = temp->dreg;

			NEW_INS (cfg, temp, OP_IA64_COND_EXC);
			temp->inst_p1 = (char*)"OverflowException";

			ins->opcode = OP_MOVE;
			break;
		case OP_FCONV_TO_I4:
		case OP_FCONV_TO_I2:
		case OP_FCONV_TO_U2:
		case OP_FCONV_TO_I1:
		case OP_FCONV_TO_U1:
			NEW_INS (cfg, temp, OP_FCONV_TO_I8);
			temp->sreg1 = ins->sreg1;
			temp->dreg = ins->dreg;

			switch (ins->opcode) {
			case OP_FCONV_TO_I4:
				ins->opcode = OP_SEXT_I4;
				break;
			case OP_FCONV_TO_I2:
				ins->opcode = OP_SEXT_I2;
				break;
			case OP_FCONV_TO_U2:
				ins->opcode = OP_ZEXT_I4;
				break;
			case OP_FCONV_TO_I1:
				ins->opcode = OP_SEXT_I1;
				break;
			case OP_FCONV_TO_U1:
				ins->opcode = OP_ZEXT_I1;
				break;
			default:
				g_assert_not_reached ();
			}
			ins->sreg1 = ins->dreg;
			break;
		default:
			break;
		}

		last_ins = ins;
	}
	bb->last_ins = last_ins;

	bb->max_vreg = cfg->next_vreg;
}
/*
 * emit_load_volatile_arguments:
 *
 * Load volatile arguments from the stack to the original input registers.
 * Required before a tail call.
 */
static Ia64CodegenState
emit_load_volatile_arguments (MonoCompile *cfg, Ia64CodegenState code)
{
	MonoMethod *method = cfg->method;
	MonoMethodSignature *sig;
	MonoInst *ins;
	CallInfo *cinfo;
	guint32 i;

	/* FIXME: Generate intermediate code instead */

	sig = mono_method_signature (method);

	cinfo = get_call_info (cfg, cfg->mempool, sig, FALSE);

	/* This is the opposite of the code in emit_prolog */
	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
		ArgInfo *ainfo = cinfo->args + i;
		gint32 stack_offset;
		MonoType *arg_type;

		ins = cfg->args [i];

		if (sig->hasthis && (i == 0))
			arg_type = &mono_defaults.object_class->byval_arg;
		else
			arg_type = sig->params [i - sig->hasthis];

		arg_type = mono_type_get_underlying_type (arg_type);

		stack_offset = ainfo->offset + ARGS_OFFSET;

		/* Save volatile arguments to the stack */
		if (ins->opcode != OP_REGVAR) {
			switch (ainfo->storage) {
			case ArgInIReg:
			case ArgInFloatReg:
				/* FIXME: big offsets */
				g_assert (ins->opcode == OP_REGOFFSET);
				ia64_adds_imm (code, GP_SCRATCH_REG, ins->inst_offset, ins->inst_basereg);
				if (arg_type->byref)
					ia64_ld8 (code, cfg->arch.reg_in0 + ainfo->reg, GP_SCRATCH_REG);
				else {
					switch (arg_type->type) {
					case MONO_TYPE_R4:
						ia64_ldfs (code, ainfo->reg, GP_SCRATCH_REG);
						break;
					case MONO_TYPE_R8:
						ia64_ldfd (code, ainfo->reg, GP_SCRATCH_REG);
						break;
					default:
						ia64_ld8 (code, cfg->arch.reg_in0 + ainfo->reg, GP_SCRATCH_REG);
						break;
					}
				}
				break;
			case ArgOnStack:
				break;
			default:
				NOT_IMPLEMENTED;
			}
		}

		if (ins->opcode == OP_REGVAR) {
			/* Argument allocated to (non-volatile) register */
			switch (ainfo->storage) {
			case ArgInIReg:
				if (ins->dreg != cfg->arch.reg_in0 + ainfo->reg)
					ia64_mov (code, cfg->arch.reg_in0 + ainfo->reg, ins->dreg);
				break;
			case ArgOnStack:
				ia64_adds_imm (code, GP_SCRATCH_REG, 16 + ainfo->offset, cfg->frame_reg);
				ia64_st8 (code, GP_SCRATCH_REG, ins->dreg);
				break;
			default:
				NOT_IMPLEMENTED;
			}
		}
	}

	return code;
}
static Ia64CodegenState
emit_move_return_value (MonoCompile *cfg, MonoInst *ins, Ia64CodegenState code)
{
	CallInfo *cinfo;
	int i;

	/* Move return value to the target register */
	switch (ins->opcode) {
	case OP_VOIDCALL:
	case OP_VOIDCALL_REG:
	case OP_VOIDCALL_MEMBASE:
		break;
	case OP_CALL:
	case OP_CALL_REG:
	case OP_CALL_MEMBASE:
	case OP_LCALL:
	case OP_LCALL_REG:
	case OP_LCALL_MEMBASE:
		g_assert (ins->dreg == IA64_R8);
		break;
	case OP_FCALL:
	case OP_FCALL_REG:
	case OP_FCALL_MEMBASE:
		g_assert (ins->dreg == 8);
		if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4)
			ia64_fnorm_d_sf (code, ins->dreg, ins->dreg, 0);
		break;
	case OP_VCALL:
	case OP_VCALL_REG:
	case OP_VCALL_MEMBASE:
	case OP_VCALL2:
	case OP_VCALL2_REG:
	case OP_VCALL2_MEMBASE: {
		ArgStorage storage;

		cinfo = get_call_info (cfg, cfg->mempool, ((MonoCallInst*)ins)->signature, FALSE);
		storage = cinfo->ret.storage;

		if (storage == ArgAggregate) {
			MonoInst *local = (MonoInst*)cfg->arch.ret_var_addr_local;

			/* Load address of stack space allocated for the return value */
			ia64_movl (code, GP_SCRATCH_REG, local->inst_offset);
			ia64_add (code, GP_SCRATCH_REG, GP_SCRATCH_REG, local->inst_basereg);
			ia64_ld8 (code, GP_SCRATCH_REG, GP_SCRATCH_REG);

			for (i = 0; i < cinfo->ret.nregs; ++i) {
				switch (cinfo->ret.atype) {
				case AggregateNormal:
					ia64_st8_inc_imm_hint (code, GP_SCRATCH_REG, cinfo->ret.reg + i, 8, 0);
					break;
				case AggregateSingleHFA:
					ia64_stfs_inc_imm_hint (code, GP_SCRATCH_REG, cinfo->ret.reg + i, 4, 0);
					break;
				case AggregateDoubleHFA:
					ia64_stfd_inc_imm_hint (code, GP_SCRATCH_REG, cinfo->ret.reg + i, 8, 0);
					break;
				default:
					g_assert_not_reached ();
				}
			}
		}
		break;
	}
	default:
		g_assert_not_reached ();
	}

	return code;
}
#define add_patch_info(cfg,code,patch_type,data) do { \
	mono_add_patch_info (cfg, code.buf + code.nins - cfg->native_code, patch_type, data); \
} while (0)

#define emit_cond_system_exception(cfg,code,exc_name,predicate) do { \
	MonoInst *tins = mono_branch_optimize_exception_target (cfg, bb, exc_name); \
	if (tins == NULL) \
		add_patch_info (cfg, code, MONO_PATCH_INFO_EXC, exc_name); \
	else \
		add_patch_info (cfg, code, MONO_PATCH_INFO_BB, tins->inst_true_bb); \
	ia64_br_cond_pred (code, (predicate), 0); \
} while (0)
static Ia64CodegenState
emit_call (MonoCompile *cfg, Ia64CodegenState code, guint32 patch_type, gconstpointer data)
{
	add_patch_info (cfg, code, patch_type, data);

	if ((patch_type == MONO_PATCH_INFO_ABS) || (patch_type == MONO_PATCH_INFO_INTERNAL_METHOD)) {
		/* mono_arch_patch_callsite will patch this */
		/* mono_arch_nullify_class_init_trampoline will patch this */
		ia64_movl (code, GP_SCRATCH_REG, 0);
		ia64_ld8_inc_imm (code, GP_SCRATCH_REG2, GP_SCRATCH_REG, 8);
		ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG2);
		ia64_ld8 (code, IA64_GP, GP_SCRATCH_REG);
		ia64_br_call_reg (code, IA64_B0, IA64_B6);
	}
	else {
		/* Can't use a direct call since the displacement might be too small */
		/* mono_arch_patch_callsite will patch this */
		ia64_movl (code, GP_SCRATCH_REG, 0);
		ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG);
		ia64_br_call_reg (code, IA64_B0, IA64_B6);
	}

	return code;
}
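/*
 * Note on the first branch above: the patched-in target is an ia64 function
 * descriptor, i.e. a pair [entry point, gp]. ld8_inc_imm loads the entry
 * point and leaves GP_SCRATCH_REG pointing at the descriptor's gp slot,
 * which is loaded into IA64_GP before the indirect call.
 */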
#define bb_is_loop_start(bb) ((bb)->loop_body_start && (bb)->nesting)
void
mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
{
	MonoInst *ins;
	guint offset;
	int max_len, cpos;
	Ia64CodegenState code;
	guint8 *code_start = cfg->native_code + cfg->code_len;
	MonoInst *last_ins = NULL;
	guint last_offset = 0;

	if (cfg->opt & MONO_OPT_LOOP) {
		/* FIXME: */
	}

	if (cfg->verbose_level > 2)
		g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);

	cpos = bb->max_offset;

	if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
		NOT_IMPLEMENTED;
	}

	offset = code_start - cfg->native_code;

	ia64_codegen_init (code, code_start);

#if 0
	if (strstr (cfg->method->name, "conv_ovf_i1") && (bb->block_num == 2))
		break_count ();
#endif
	MONO_BB_FOR_EACH_INS (bb, ins) {
		offset = code.buf - cfg->native_code;

		max_len = ((int)(((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN])) + 128;

		while (offset + max_len + 16 > cfg->code_size) {
			ia64_codegen_close (code);

			offset = code.buf - cfg->native_code;

			cfg->code_size *= 2;
			cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
			code_start = cfg->native_code + offset;
			mono_jit_stats.code_reallocs++;

			ia64_codegen_init (code, code_start);
		}

		mono_debug_record_line_number (cfg, ins, offset);

		switch (ins->opcode) {
		case OP_ICONST:
		case OP_I8CONST:
			if (ia64_is_imm14 (ins->inst_c0))
				ia64_adds_imm (code, ins->dreg, ins->inst_c0, IA64_R0);
			else
				ia64_movl (code, ins->dreg, ins->inst_c0);
			break;
		case OP_AOTCONST:
			add_patch_info (cfg, code, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
			ia64_movl (code, ins->dreg, 0);
			break;
		case OP_MOVE:
			ia64_mov (code, ins->dreg, ins->sreg1);
			break;
		case OP_BR:
		case OP_IA64_BR_COND: {
			int pred = 0;

			if (ins->opcode == OP_IA64_BR_COND)
				pred = 6;

			if (ins->flags & MONO_INST_BRLABEL) {
				if (ins->inst_i0->inst_c0) {
					NOT_IMPLEMENTED;
				} else {
					add_patch_info (cfg, code, MONO_PATCH_INFO_LABEL, ins->inst_i0);
					ia64_br_cond_pred (code, pred, 0);
				}
			} else {
				if (ins->inst_target_bb->native_offset) {
					guint8 *pos = code.buf + code.nins;

					ia64_br_cond_pred (code, pred, 0);
					ia64_begin_bundle (code);
					ia64_patch (pos, cfg->native_code + ins->inst_target_bb->native_offset);
				} else {
					add_patch_info (cfg, code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
					ia64_br_cond_pred (code, pred, 0);
				}
			}
			break;
		}
		case OP_LABEL:
			ia64_begin_bundle (code);
			ins->inst_c0 = code.buf - cfg->native_code;
			break;
		case OP_RELAXED_NOP:
		case OP_DUMMY_STORE:
		case OP_NOT_REACHED:
			break;
		case OP_BR_REG:
			ia64_mov_to_br (code, IA64_B6, ins->sreg1);
			ia64_br_cond_reg (code, IA64_B6);
			break;
		case OP_IADD:
		case OP_LADD:
			ia64_add (code, ins->dreg, ins->sreg1, ins->sreg2);
			break;
		case OP_ISUB:
		case OP_LSUB:
			ia64_sub (code, ins->dreg, ins->sreg1, ins->sreg2);
			break;
		case OP_IAND:
		case OP_LAND:
			ia64_and (code, ins->dreg, ins->sreg1, ins->sreg2);
			break;
		case OP_IOR:
		case OP_LOR:
			ia64_or (code, ins->dreg, ins->sreg1, ins->sreg2);
			break;
		case OP_IXOR:
		case OP_LXOR:
			ia64_xor (code, ins->dreg, ins->sreg1, ins->sreg2);
			break;
		case OP_INEG:
		case OP_LNEG:
			ia64_sub (code, ins->dreg, IA64_R0, ins->sreg1);
			break;
		case OP_INOT:
		case OP_LNOT:
			ia64_andcm_imm (code, ins->dreg, -1, ins->sreg1);
			break;
		case OP_ISHL:
		case OP_LSHL:
			ia64_shl (code, ins->dreg, ins->sreg1, ins->sreg2);
			break;
		case OP_ISHR:
		case OP_LSHR:
			ia64_shr (code, ins->dreg, ins->sreg1, ins->sreg2);
			break;
		case OP_ISHR_UN:
			ia64_zxt4 (code, GP_SCRATCH_REG, ins->sreg1);
			ia64_shr_u (code, ins->dreg, GP_SCRATCH_REG, ins->sreg2);
			break;
		case OP_LSHR_UN:
			ia64_shr_u (code, ins->dreg, ins->sreg1, ins->sreg2);
			break;
		case OP_IADDCC:
			/* p6 and p7 are set if there is signed/unsigned overflow */

			/* Set p8-p9 == (sreg2 > 0) */
			ia64_cmp4_lt (code, 8, 9, IA64_R0, ins->sreg2);

			ia64_add (code, GP_SCRATCH_REG, ins->sreg1, ins->sreg2);

			/* (sreg2 > 0) && (res < ins->sreg1) => signed overflow */
			ia64_cmp4_lt_pred (code, 8, 6, 10, GP_SCRATCH_REG, ins->sreg1);
			/* (sreg2 <= 0) && (res > ins->sreg1) => signed overflow */
			ia64_cmp4_lt_pred (code, 9, 6, 10, ins->sreg1, GP_SCRATCH_REG);

			/* res <u sreg1 => unsigned overflow */
			ia64_cmp4_ltu (code, 7, 10, GP_SCRATCH_REG, ins->sreg1);

			/* FIXME: Predicate this since this is a side effect */
			ia64_mov (code, ins->dreg, GP_SCRATCH_REG);
			break;
		case OP_ISUBCC:
			/* p6 and p7 are set if there is signed/unsigned overflow */

			/* Set p8-p9 == (sreg2 > 0) */
			ia64_cmp4_lt (code, 8, 9, IA64_R0, ins->sreg2);

			ia64_sub (code, GP_SCRATCH_REG, ins->sreg1, ins->sreg2);

			/* (sreg2 > 0) && (res > ins->sreg1) => signed overflow */
			ia64_cmp4_gt_pred (code, 8, 6, 10, GP_SCRATCH_REG, ins->sreg1);
			/* (sreg2 <= 0) && (res < ins->sreg1) => signed overflow */
			ia64_cmp4_lt_pred (code, 9, 6, 10, GP_SCRATCH_REG, ins->sreg1);

			/* sreg1 <u sreg2 => unsigned overflow */
			ia64_cmp4_ltu (code, 7, 10, ins->sreg1, ins->sreg2);

			/* FIXME: Predicate this since this is a side effect */
			ia64_mov (code, ins->dreg, GP_SCRATCH_REG);
			break;
		case OP_ADDCC:
			/* Same as OP_IADDCC */
			ia64_cmp_lt (code, 8, 9, IA64_R0, ins->sreg2);

			ia64_add (code, GP_SCRATCH_REG, ins->sreg1, ins->sreg2);

			ia64_cmp_lt_pred (code, 8, 6, 10, GP_SCRATCH_REG, ins->sreg1);
			ia64_cmp_lt_pred (code, 9, 6, 10, ins->sreg1, GP_SCRATCH_REG);

			ia64_cmp_ltu (code, 7, 10, GP_SCRATCH_REG, ins->sreg1);

			ia64_mov (code, ins->dreg, GP_SCRATCH_REG);
			break;
		case OP_SUBCC:
			/* Same as OP_ISUBCC */

			ia64_cmp_lt (code, 8, 9, IA64_R0, ins->sreg2);

			ia64_sub (code, GP_SCRATCH_REG, ins->sreg1, ins->sreg2);

			ia64_cmp_gt_pred (code, 8, 6, 10, GP_SCRATCH_REG, ins->sreg1);
			ia64_cmp_lt_pred (code, 9, 6, 10, GP_SCRATCH_REG, ins->sreg1);

			ia64_cmp_ltu (code, 7, 10, ins->sreg1, ins->sreg2);

			ia64_mov (code, ins->dreg, GP_SCRATCH_REG);
			break;
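		/*
		 * Sketch of the scheme used by the four opcodes above: p6 ends up
		 * set on signed overflow and p7 on unsigned overflow, so a later
		 * emit_cond_system_exception () can branch on p6/p7 to raise
		 * OverflowException.
		 */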
		case OP_ADD_IMM:
		case OP_IADD_IMM:
		case OP_LADD_IMM:
			ia64_adds_imm (code, ins->dreg, ins->inst_imm, ins->sreg1);
			break;
		case OP_AND_IMM:
		case OP_IAND_IMM:
		case OP_LAND_IMM:
			ia64_and_imm (code, ins->dreg, ins->inst_imm, ins->sreg1);
			break;
		case OP_IOR_IMM:
		case OP_LOR_IMM:
			ia64_or_imm (code, ins->dreg, ins->inst_imm, ins->sreg1);
			break;
		case OP_IXOR_IMM:
		case OP_LXOR_IMM:
			ia64_xor_imm (code, ins->dreg, ins->inst_imm, ins->sreg1);
			break;
		case OP_SHL_IMM:
		case OP_ISHL_IMM:
		case OP_LSHL_IMM:
			ia64_shl_imm (code, ins->dreg, ins->sreg1, ins->inst_imm);
			break;
		case OP_ISHR_IMM:
		case OP_LSHR_IMM:
			ia64_shr_imm (code, ins->dreg, ins->sreg1, ins->inst_imm);
			break;
		case OP_ISHR_UN_IMM:
			ia64_zxt4 (code, GP_SCRATCH_REG, ins->sreg1);
			ia64_shr_u_imm (code, ins->dreg, GP_SCRATCH_REG, ins->inst_imm);
			break;
		case OP_LSHR_UN_IMM:
			ia64_shr_u_imm (code, ins->dreg, ins->sreg1, ins->inst_imm);
			break;
		case OP_LMUL:
			/* Based on gcc code */
			ia64_setf_sig (code, FP_SCRATCH_REG, ins->sreg1);
			ia64_setf_sig (code, FP_SCRATCH_REG2, ins->sreg2);
			ia64_xmpy_l (code, FP_SCRATCH_REG, FP_SCRATCH_REG, FP_SCRATCH_REG2);
			ia64_getf_sig (code, ins->dreg, FP_SCRATCH_REG);
			break;
		case OP_STOREI1_MEMBASE_REG:
			ia64_st1_hint (code, ins->inst_destbasereg, ins->sreg1, 0);
			break;
		case OP_STOREI2_MEMBASE_REG:
			ia64_st2_hint (code, ins->inst_destbasereg, ins->sreg1, 0);
			break;
		case OP_STOREI4_MEMBASE_REG:
			ia64_st4_hint (code, ins->inst_destbasereg, ins->sreg1, 0);
			break;
		case OP_STOREI8_MEMBASE_REG:
		case OP_STORE_MEMBASE_REG:
			if (ins->inst_offset != 0) {
				/* This is generated by local regalloc */
				if (ia64_is_imm14 (ins->inst_offset)) {
					ia64_adds_imm (code, GP_SCRATCH_REG, ins->inst_offset, ins->inst_destbasereg);
				} else {
					ia64_movl (code, GP_SCRATCH_REG, ins->inst_offset);
					ia64_add (code, GP_SCRATCH_REG, GP_SCRATCH_REG, ins->inst_destbasereg);
				}
				ins->inst_destbasereg = GP_SCRATCH_REG;
			}
			ia64_st8_hint (code, ins->inst_destbasereg, ins->sreg1, 0);
			break;
		case OP_IA64_STOREI1_MEMBASE_INC_REG:
			ia64_st1_inc_imm_hint (code, ins->inst_destbasereg, ins->sreg1, 1, 0);
			break;
		case OP_IA64_STOREI2_MEMBASE_INC_REG:
			ia64_st2_inc_imm_hint (code, ins->inst_destbasereg, ins->sreg1, 2, 0);
			break;
		case OP_IA64_STOREI4_MEMBASE_INC_REG:
			ia64_st4_inc_imm_hint (code, ins->inst_destbasereg, ins->sreg1, 4, 0);
			break;
		case OP_IA64_STOREI8_MEMBASE_INC_REG:
			ia64_st8_inc_imm_hint (code, ins->inst_destbasereg, ins->sreg1, 8, 0);
			break;
		case OP_LOADU1_MEMBASE:
			ia64_ld1 (code, ins->dreg, ins->inst_basereg);
			break;
		case OP_LOADU2_MEMBASE:
			ia64_ld2 (code, ins->dreg, ins->inst_basereg);
			break;
		case OP_LOADU4_MEMBASE:
			ia64_ld4 (code, ins->dreg, ins->inst_basereg);
			break;
		case OP_LOADI1_MEMBASE:
			ia64_ld1 (code, ins->dreg, ins->inst_basereg);
			ia64_sxt1 (code, ins->dreg, ins->dreg);
			break;
		case OP_LOADI2_MEMBASE:
			ia64_ld2 (code, ins->dreg, ins->inst_basereg);
			ia64_sxt2 (code, ins->dreg, ins->dreg);
			break;
		case OP_LOADI4_MEMBASE:
			ia64_ld4 (code, ins->dreg, ins->inst_basereg);
			ia64_sxt4 (code, ins->dreg, ins->dreg);
			break;
		case OP_LOAD_MEMBASE:
		case OP_LOADI8_MEMBASE:
			if (ins->inst_offset != 0) {
				/* This is generated by local regalloc */
				if (ia64_is_imm14 (ins->inst_offset)) {
					ia64_adds_imm (code, GP_SCRATCH_REG, ins->inst_offset, ins->inst_basereg);
				} else {
					ia64_movl (code, GP_SCRATCH_REG, ins->inst_offset);
					ia64_add (code, GP_SCRATCH_REG, GP_SCRATCH_REG, ins->inst_basereg);
				}
				ins->inst_basereg = GP_SCRATCH_REG;
			}
			ia64_ld8 (code, ins->dreg, ins->inst_basereg);
			break;
		case OP_IA64_LOADU1_MEMBASE_INC:
			ia64_ld1_inc_imm_hint (code, ins->dreg, ins->inst_basereg, 1, 0);
			break;
		case OP_IA64_LOADU2_MEMBASE_INC:
			ia64_ld2_inc_imm_hint (code, ins->dreg, ins->inst_basereg, 2, 0);
			break;
		case OP_IA64_LOADU4_MEMBASE_INC:
			ia64_ld4_inc_imm_hint (code, ins->dreg, ins->inst_basereg, 4, 0);
			break;
		case OP_IA64_LOADI8_MEMBASE_INC:
			ia64_ld8_inc_imm_hint (code, ins->dreg, ins->inst_basereg, 8, 0);
			break;
		case OP_SEXT_I1:
			ia64_sxt1 (code, ins->dreg, ins->sreg1);
			break;
		case OP_SEXT_I2:
			ia64_sxt2 (code, ins->dreg, ins->sreg1);
			break;
		case OP_SEXT_I4:
			ia64_sxt4 (code, ins->dreg, ins->sreg1);
			break;
		case OP_ZEXT_I1:
			ia64_zxt1 (code, ins->dreg, ins->sreg1);
			break;
		case OP_ZEXT_I2:
			ia64_zxt2 (code, ins->dreg, ins->sreg1);
			break;
		case OP_ZEXT_I4:
			ia64_zxt4 (code, ins->dreg, ins->sreg1);
			break;
		/* Compare opcodes */
		case OP_IA64_CMP4_EQ:
			ia64_cmp4_eq (code, 6, 7, ins->sreg1, ins->sreg2);
			break;
		case OP_IA64_CMP4_NE:
			ia64_cmp4_ne (code, 6, 7, ins->sreg1, ins->sreg2);
			break;
		case OP_IA64_CMP4_LE:
			ia64_cmp4_le (code, 6, 7, ins->sreg1, ins->sreg2);
			break;
		case OP_IA64_CMP4_LT:
			ia64_cmp4_lt (code, 6, 7, ins->sreg1, ins->sreg2);
			break;
		case OP_IA64_CMP4_GE:
			ia64_cmp4_ge (code, 6, 7, ins->sreg1, ins->sreg2);
			break;
		case OP_IA64_CMP4_GT:
			ia64_cmp4_gt (code, 6, 7, ins->sreg1, ins->sreg2);
			break;
		case OP_IA64_CMP4_LT_UN:
			ia64_cmp4_ltu (code, 6, 7, ins->sreg1, ins->sreg2);
			break;
		case OP_IA64_CMP4_LE_UN:
			ia64_cmp4_leu (code, 6, 7, ins->sreg1, ins->sreg2);
			break;
		case OP_IA64_CMP4_GT_UN:
			ia64_cmp4_gtu (code, 6, 7, ins->sreg1, ins->sreg2);
			break;
		case OP_IA64_CMP4_GE_UN:
			ia64_cmp4_geu (code, 6, 7, ins->sreg1, ins->sreg2);
			break;
		case OP_IA64_CMP_EQ:
			ia64_cmp_eq (code, 6, 7, ins->sreg1, ins->sreg2);
			break;
		case OP_IA64_CMP_NE:
			ia64_cmp_ne (code, 6, 7, ins->sreg1, ins->sreg2);
			break;
		case OP_IA64_CMP_LE:
			ia64_cmp_le (code, 6, 7, ins->sreg1, ins->sreg2);
			break;
		case OP_IA64_CMP_LT:
			ia64_cmp_lt (code, 6, 7, ins->sreg1, ins->sreg2);
			break;
		case OP_IA64_CMP_GE:
			ia64_cmp_ge (code, 6, 7, ins->sreg1, ins->sreg2);
			break;
		case OP_IA64_CMP_GT:
			ia64_cmp_gt (code, 6, 7, ins->sreg1, ins->sreg2);
			break;
		case OP_IA64_CMP_GT_UN:
			ia64_cmp_gtu (code, 6, 7, ins->sreg1, ins->sreg2);
			break;
		case OP_IA64_CMP_LT_UN:
			ia64_cmp_ltu (code, 6, 7, ins->sreg1, ins->sreg2);
			break;
		case OP_IA64_CMP_GE_UN:
			ia64_cmp_geu (code, 6, 7, ins->sreg1, ins->sreg2);
			break;
		case OP_IA64_CMP_LE_UN:
			ia64_cmp_leu (code, 6, 7, ins->sreg1, ins->sreg2);
			break;
		case OP_IA64_CMP4_EQ_IMM:
			ia64_cmp4_eq_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
			break;
		case OP_IA64_CMP4_NE_IMM:
			ia64_cmp4_ne_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
			break;
		case OP_IA64_CMP4_LE_IMM:
			ia64_cmp4_le_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
			break;
		case OP_IA64_CMP4_LT_IMM:
			ia64_cmp4_lt_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
			break;
		case OP_IA64_CMP4_GE_IMM:
			ia64_cmp4_ge_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
			break;
		case OP_IA64_CMP4_GT_IMM:
			ia64_cmp4_gt_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
			break;
		case OP_IA64_CMP4_LT_UN_IMM:
			ia64_cmp4_ltu_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
			break;
		case OP_IA64_CMP4_LE_UN_IMM:
			ia64_cmp4_leu_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
			break;
		case OP_IA64_CMP4_GT_UN_IMM:
			ia64_cmp4_gtu_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
			break;
		case OP_IA64_CMP4_GE_UN_IMM:
			ia64_cmp4_geu_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
			break;
		case OP_IA64_CMP_EQ_IMM:
			ia64_cmp_eq_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
			break;
		case OP_IA64_CMP_NE_IMM:
			ia64_cmp_ne_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
			break;
		case OP_IA64_CMP_LE_IMM:
			ia64_cmp_le_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
			break;
		case OP_IA64_CMP_LT_IMM:
			ia64_cmp_lt_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
			break;
		case OP_IA64_CMP_GE_IMM:
			ia64_cmp_ge_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
			break;
		case OP_IA64_CMP_GT_IMM:
			ia64_cmp_gt_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
			break;
		case OP_IA64_CMP_GT_UN_IMM:
			ia64_cmp_gtu_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
			break;
		case OP_IA64_CMP_LT_UN_IMM:
			ia64_cmp_ltu_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
			break;
		case OP_IA64_CMP_GE_UN_IMM:
			ia64_cmp_geu_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
			break;
);
2482 case OP_IA64_CMP_LE_UN_IMM
:
2483 ia64_cmp_leu_imm (code
, 6, 7, ins
->inst_imm
, ins
->sreg2
);
2485 case OP_IA64_FCMP_EQ
:
2486 ia64_fcmp_eq_sf (code
, 6, 7, ins
->sreg1
, ins
->sreg2
, 0);
2488 case OP_IA64_FCMP_NE
:
2489 ia64_fcmp_ne_sf (code
, 6, 7, ins
->sreg1
, ins
->sreg2
, 0);
2491 case OP_IA64_FCMP_LT
:
2492 ia64_fcmp_lt_sf (code
, 6, 7, ins
->sreg1
, ins
->sreg2
, 0);
2494 case OP_IA64_FCMP_GT
:
2495 ia64_fcmp_gt_sf (code
, 6, 7, ins
->sreg1
, ins
->sreg2
, 0);
2497 case OP_IA64_FCMP_LE
:
2498 ia64_fcmp_le_sf (code
, 6, 7, ins
->sreg1
, ins
->sreg2
, 0);
2500 case OP_IA64_FCMP_GE
:
2501 ia64_fcmp_ge_sf (code
, 6, 7, ins
->sreg1
, ins
->sreg2
, 0);
2503 case OP_IA64_FCMP_GT_UN
:
2504 ia64_fcmp_gt_sf (code
, 6, 7, ins
->sreg1
, ins
->sreg2
, 0);
2505 ia64_fcmp_unord_sf_pred (code
, 7, 6, 7, ins
->sreg1
, ins
->sreg2
, 0);
2507 case OP_IA64_FCMP_LT_UN
:
2508 ia64_fcmp_lt_sf (code
, 6, 7, ins
->sreg1
, ins
->sreg2
, 0);
2509 ia64_fcmp_unord_sf_pred (code
, 7, 6, 7, ins
->sreg1
, ins
->sreg2
, 0);
2511 case OP_IA64_FCMP_GE_UN
:
2512 ia64_fcmp_ge_sf (code
, 6, 7, ins
->sreg1
, ins
->sreg2
, 0);
2513 ia64_fcmp_unord_sf_pred (code
, 7, 6, 7, ins
->sreg1
, ins
->sreg2
, 0);
2515 case OP_IA64_FCMP_LE_UN
:
2516 ia64_fcmp_le_sf (code
, 6, 7, ins
->sreg1
, ins
->sreg2
, 0);
2517 ia64_fcmp_unord_sf_pred (code
, 7, 6, 7, ins
->sreg1
, ins
->sreg2
, 0);
2520 case OP_COND_EXC_IOV
:
2521 case OP_COND_EXC_OV
:
2522 emit_cond_system_exception (cfg
, code
, "OverflowException", 6);
2524 case OP_COND_EXC_IC
:
2526 emit_cond_system_exception (cfg
, code
, "OverflowException", 7);
2528 case OP_IA64_COND_EXC
:
2529 emit_cond_system_exception (cfg
, code
, ins
->inst_p1
, 6);
2532 ia64_mov_pred (code
, 7, ins
->dreg
, IA64_R0
);
2533 ia64_no_stop (code
);
2534 ia64_add1_pred (code
, 6, ins
->dreg
, IA64_R0
, IA64_R0
);
2536 case OP_ICONV_TO_I1
:
2537 case OP_LCONV_TO_I1
:
2538 /* FIXME: Is this needed ? */
2539 ia64_sxt1 (code
, ins
->dreg
, ins
->sreg1
);
2541 case OP_ICONV_TO_I2
:
2542 case OP_LCONV_TO_I2
:
2543 /* FIXME: Is this needed ? */
2544 ia64_sxt2 (code
, ins
->dreg
, ins
->sreg1
);
2546 case OP_LCONV_TO_I4
:
2547 /* FIXME: Is this needed ? */
2548 ia64_sxt4 (code
, ins
->dreg
, ins
->sreg1
);
2550 case OP_ICONV_TO_U1
:
2551 case OP_LCONV_TO_U1
:
2552 /* FIXME: Is this needed */
2553 ia64_zxt1 (code
, ins
->dreg
, ins
->sreg1
);
2555 case OP_ICONV_TO_U2
:
2556 case OP_LCONV_TO_U2
:
2557 /* FIXME: Is this needed */
2558 ia64_zxt2 (code
, ins
->dreg
, ins
->sreg1
);
2560 case OP_LCONV_TO_U4
:
2561 /* FIXME: Is this needed */
2562 ia64_zxt4 (code
, ins
->dreg
, ins
->sreg1
);
2564 case OP_ICONV_TO_I8
:
2566 case OP_LCONV_TO_I8
:
2568 ia64_sxt4 (code
, ins
->dreg
, ins
->sreg1
);
2570 case OP_LCONV_TO_U8
:
2572 ia64_zxt4 (code
, ins
->dreg
, ins
->sreg1
);
2579 double d
= *(double *)ins
->inst_p0
;
2581 if ((d
== 0.0) && (mono_signbit (d
) == 0))
2582 ia64_fmov (code
, ins
->dreg
, 0);
2584 ia64_fmov (code
, ins
->dreg
, 1);
2586 add_patch_info (cfg
, code
, MONO_PATCH_INFO_R8
, ins
->inst_p0
);
2587 ia64_movl (code
, GP_SCRATCH_REG
, 0);
2588 ia64_ldfd (code
, ins
->dreg
, GP_SCRATCH_REG
);
2593 float f
= *(float *)ins
->inst_p0
;
2595 if ((f
== 0.0) && (mono_signbit (f
) == 0))
2596 ia64_fmov (code
, ins
->dreg
, 0);
2598 ia64_fmov (code
, ins
->dreg
, 1);
2600 add_patch_info (cfg
, code
, MONO_PATCH_INFO_R4
, ins
->inst_p0
);
2601 ia64_movl (code
, GP_SCRATCH_REG
, 0);
2602 ia64_ldfs (code
, ins
->dreg
, GP_SCRATCH_REG
);
2607 ia64_fmov (code
, ins
->dreg
, ins
->sreg1
);
2609 case OP_STORER8_MEMBASE_REG
:
2610 if (ins
->inst_offset
!= 0) {
2611 /* This is generated by local regalloc */
2612 if (ia64_is_imm14 (ins
->inst_offset
)) {
2613 ia64_adds_imm (code
, GP_SCRATCH_REG
, ins
->inst_offset
, ins
->inst_destbasereg
);
2615 ia64_movl (code
, GP_SCRATCH_REG
, ins
->inst_offset
);
2616 ia64_add (code
, GP_SCRATCH_REG
, GP_SCRATCH_REG
, ins
->inst_destbasereg
);
2618 ins
->inst_destbasereg
= GP_SCRATCH_REG
;
2620 ia64_stfd_hint (code
, ins
->inst_destbasereg
, ins
->sreg1
, 0);
2622 case OP_STORER4_MEMBASE_REG
:
2623 ia64_fnorm_s_sf (code
, FP_SCRATCH_REG
, ins
->sreg1
, 0);
2624 ia64_stfs_hint (code
, ins
->inst_destbasereg
, FP_SCRATCH_REG
, 0);
2626 case OP_LOADR8_MEMBASE
:
2627 if (ins
->inst_offset
!= 0) {
2628 /* This is generated by local regalloc */
2629 if (ia64_is_imm14 (ins
->inst_offset
)) {
2630 ia64_adds_imm (code
, GP_SCRATCH_REG
, ins
->inst_offset
, ins
->inst_basereg
);
2632 ia64_movl (code
, GP_SCRATCH_REG
, ins
->inst_offset
);
2633 ia64_add (code
, GP_SCRATCH_REG
, GP_SCRATCH_REG
, ins
->inst_basereg
);
2635 ins
->inst_basereg
= GP_SCRATCH_REG
;
2637 ia64_ldfd (code
, ins
->dreg
, ins
->inst_basereg
);
2639 case OP_LOADR4_MEMBASE
:
2640 ia64_ldfs (code
, ins
->dreg
, ins
->inst_basereg
);
2641 ia64_fnorm_d_sf (code
, ins
->dreg
, ins
->dreg
, 0);
2643 case OP_ICONV_TO_R4
:
2644 case OP_LCONV_TO_R4
:
2645 ia64_setf_sig (code
, ins
->dreg
, ins
->sreg1
);
2646 ia64_fcvt_xf (code
, ins
->dreg
, ins
->dreg
);
2647 ia64_fnorm_s_sf (code
, ins
->dreg
, ins
->dreg
, 0);
2649 case OP_ICONV_TO_R8
:
2650 case OP_LCONV_TO_R8
:
2651 ia64_setf_sig (code
, ins
->dreg
, ins
->sreg1
);
2652 ia64_fcvt_xf (code
, ins
->dreg
, ins
->dreg
);
2653 ia64_fnorm_d_sf (code
, ins
->dreg
, ins
->dreg
, 0);
2655 case OP_FCONV_TO_R4
:
2656 ia64_fnorm_s_sf (code
, ins
->dreg
, ins
->sreg1
, 0);
2658 case OP_FCONV_TO_I8
:
2660 ia64_fcvt_fx_trunc_sf (code
, FP_SCRATCH_REG
, ins
->sreg1
, 0);
2661 ia64_getf_sig (code
, ins
->dreg
, FP_SCRATCH_REG
);
2664 ia64_fma_d_sf (code
, ins
->dreg
, ins
->sreg1
, 1, ins
->sreg2
, 0);
2667 ia64_fms_d_sf (code
, ins
->dreg
, ins
->sreg1
, 1, ins
->sreg2
, 0);
2670 ia64_fma_d_sf (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
, 0, 0);
2673 ia64_fmerge_ns (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg1
);
2677 ia64_fclass_m (code
, 6, 7, ins
->sreg1
, 0x080);
2678 emit_cond_system_exception (cfg
, code
, "ArithmeticException", 6);
2680 ia64_fclass_m (code
, 6, 7, ins
->sreg1
, 0x040);
2681 emit_cond_system_exception (cfg
, code
, "ArithmeticException", 6);
2682 /* Positive infinity */
2683 ia64_fclass_m (code
, 6, 7, ins
->sreg1
, 0x021);
2684 emit_cond_system_exception (cfg
, code
, "ArithmeticException", 6);
2685 /* Negative infinity */
2686 ia64_fclass_m (code
, 6, 7, ins
->sreg1
, 0x022);
2687 emit_cond_system_exception (cfg
, code
, "ArithmeticException", 6);
2692 /* ensure ins->sreg1 is not NULL */
2693 ia64_ld8 (code
, GP_SCRATCH_REG
, ins
->sreg1
);
2696 ia64_adds_imm (code
, GP_SCRATCH_REG
, cfg
->sig_cookie
, cfg
->frame_reg
);
2697 ia64_st8 (code
, ins
->sreg1
, GP_SCRATCH_REG
);
2705 call
= (MonoCallInst
*)ins
;
2707 if (ins
->flags
& MONO_INST_HAS_METHOD
)
2708 code
= emit_call (cfg
, code
, MONO_PATCH_INFO_METHOD
, call
->method
);
2710 code
= emit_call (cfg
, code
, MONO_PATCH_INFO_ABS
, call
->fptr
);
2712 code
= emit_move_return_value (cfg
, ins
, code
);
2720 case OP_VOIDCALL_REG
: {
2721 MonoCallInst
*call
= (MonoCallInst
*)ins
;
2726 * mono_arch_find_this_arg () needs to find the this argument in a global
2729 cinfo
= get_call_info (cfg
, cfg
->mempool
, call
->signature
, FALSE
);
2730 out_reg
= cfg
->arch
.reg_out0
;
2731 if (cinfo
->ret
.storage
== ArgValuetypeAddrInIReg
)
2733 ia64_mov (code
, IA64_R10
, out_reg
);
2736 ia64_mov (code
, IA64_R8
, ins
->sreg1
);
2737 ia64_ld8_inc_imm (code
, GP_SCRATCH_REG2
, IA64_R8
, 8);
2738 ia64_mov_to_br (code
, IA64_B6
, GP_SCRATCH_REG2
);
2739 ia64_ld8 (code
, IA64_GP
, IA64_R8
);
2740 ia64_br_call_reg (code
, IA64_B0
, IA64_B6
);
2742 code
= emit_move_return_value (cfg
, ins
, code
);
2745 case OP_FCALL_MEMBASE
:
2746 case OP_LCALL_MEMBASE
:
2747 case OP_VCALL_MEMBASE
:
2748 case OP_VCALL2_MEMBASE
:
2749 case OP_VOIDCALL_MEMBASE
:
2750 case OP_CALL_MEMBASE
: {
2751 MonoCallInst
*call
= (MonoCallInst
*)ins
;
2756 * There are no membase instructions on ia64, but we can't
2757 * lower this since get_vcall_slot_addr () needs to decode it.
2760 /* Keep this in synch with get_vcall_slot_addr */
2761 ia64_mov (code
, IA64_R11
, ins
->sreg1
);
2762 if (ia64_is_imm14 (ins
->inst_offset
))
2763 ia64_adds_imm (code
, IA64_R8
, ins
->inst_offset
, ins
->sreg1
);
2765 ia64_movl (code
, GP_SCRATCH_REG
, ins
->inst_offset
);
2766 ia64_add (code
, IA64_R8
, GP_SCRATCH_REG
, ins
->sreg1
);
2769 if (call
->method
&& ins
->inst_offset
< 0) {
2771 * This is a possible IMT call so save the IMT method in a global
2772 * register where mono_arch_find_imt_method () and its friends can
2775 ia64_movl (code
, IA64_R9
, call
->method
);
2779 * mono_arch_find_this_arg () needs to find the this argument in a global
2782 cinfo
= get_call_info (cfg
, cfg
->mempool
, call
->signature
, FALSE
);
2783 out_reg
= cfg
->arch
.reg_out0
;
2784 if (cinfo
->ret
.storage
== ArgValuetypeAddrInIReg
)
2786 ia64_mov (code
, IA64_R10
, out_reg
);
2788 ia64_begin_bundle (code
);
2789 ia64_codegen_set_one_ins_per_bundle (code
, TRUE
);
2791 ia64_ld8 (code
, GP_SCRATCH_REG
, IA64_R8
);
2793 ia64_mov_to_br (code
, IA64_B6
, GP_SCRATCH_REG
);
2796 * This nop will tell get_vcall_slot_addr that this is a virtual
2799 ia64_nop_i (code
, 0x12345);
2801 ia64_br_call_reg (code
, IA64_B0
, IA64_B6
);
2803 ia64_codegen_set_one_ins_per_bundle (code
, FALSE
);
2805 code
= emit_move_return_value (cfg
, ins
, code
);
2810 * Keep in sync with the code in emit_epilog.
2813 if (cfg
->prof_options
& MONO_PROFILE_ENTER_LEAVE
)
2816 g_assert (!cfg
->method
->save_lmf
);
2818 /* Load arguments into their original registers */
2819 code
= emit_load_volatile_arguments (cfg
, code
);
2821 if (cfg
->arch
.stack_alloc_size
) {
2822 if (cfg
->arch
.omit_fp
) {
2823 if (ia64_is_imm14 (cfg
->arch
.stack_alloc_size
))
2824 ia64_adds_imm (code
, IA64_SP
, (cfg
->arch
.stack_alloc_size
), IA64_SP
);
2826 ia64_movl (code
, GP_SCRATCH_REG
, cfg
->arch
.stack_alloc_size
);
2827 ia64_add (code
, IA64_SP
, GP_SCRATCH_REG
, IA64_SP
);
2831 ia64_mov (code
, IA64_SP
, cfg
->arch
.reg_saved_sp
);
2833 ia64_mov_to_ar_i (code
, IA64_PFS
, cfg
->arch
.reg_saved_ar_pfs
);
2834 ia64_mov_ret_to_br (code
, IA64_B0
, cfg
->arch
.reg_saved_b0
);
2836 add_patch_info (cfg
, code
, MONO_PATCH_INFO_METHOD_JUMP
, ins
->inst_p0
);
2837 ia64_movl (code
, GP_SCRATCH_REG
, 0);
2838 ia64_mov_to_br (code
, IA64_B6
, GP_SCRATCH_REG
);
2839 ia64_br_cond_reg (code
, IA64_B6
);
2844 code
= emit_call (cfg
, code
, MONO_PATCH_INFO_ABS
, mono_break
);
2850 /* FIXME: Sigaltstack support */
2852 /* keep alignment */
2853 ia64_adds_imm (code
, GP_SCRATCH_REG
, MONO_ARCH_LOCALLOC_ALIGNMENT
- 1, ins
->sreg1
);
2854 ia64_movl (code
, GP_SCRATCH_REG2
, ~(MONO_ARCH_LOCALLOC_ALIGNMENT
- 1));
2855 ia64_and (code
, GP_SCRATCH_REG
, GP_SCRATCH_REG
, GP_SCRATCH_REG2
);
2857 ia64_sub (code
, IA64_SP
, IA64_SP
, GP_SCRATCH_REG
);
2859 ia64_mov (code
, ins
->dreg
, IA64_SP
);
2861 /* An area at sp is reserved by the ABI for parameter passing */
2862 abi_offset
= - ALIGN_TO (cfg
->param_area
+ 16, MONO_ARCH_LOCALLOC_ALIGNMENT
);
2863 if (ia64_is_adds_imm (abi_offset
))
2864 ia64_adds_imm (code
, IA64_SP
, abi_offset
, IA64_SP
);
2866 ia64_movl (code
, GP_SCRATCH_REG2
, abi_offset
);
2867 ia64_add (code
, IA64_SP
, IA64_SP
, GP_SCRATCH_REG2
);
2870 if (ins
->flags
& MONO_INST_INIT
) {
2872 ia64_add (code
, GP_SCRATCH_REG2
, ins
->dreg
, GP_SCRATCH_REG
);
2874 ia64_codegen_set_one_ins_per_bundle (code
, TRUE
);
2877 ia64_st8_inc_imm_hint (code
, ins
->dreg
, IA64_R0
, 8, 0);
2878 ia64_cmp_lt (code
, 8, 9, ins
->dreg
, GP_SCRATCH_REG2
);
2879 ia64_br_cond_pred (code
, 8, -2);
2881 ia64_codegen_set_one_ins_per_bundle (code
, FALSE
);
2883 ia64_sub (code
, ins
->dreg
, GP_SCRATCH_REG2
, GP_SCRATCH_REG
);
2888 case OP_LOCALLOC_IMM
: {
2891 /* FIXME: Sigaltstack support */
2893 gssize size
= ins
->inst_imm
;
2894 size
= (size
+ (MONO_ARCH_FRAME_ALIGNMENT
- 1)) & ~ (MONO_ARCH_FRAME_ALIGNMENT
- 1);
2896 if (ia64_is_adds_imm (size
))
2897 ia64_adds_imm (code
, GP_SCRATCH_REG
, size
, IA64_R0
);
2899 ia64_movl (code
, GP_SCRATCH_REG
, size
);
2901 ia64_sub (code
, IA64_SP
, IA64_SP
, GP_SCRATCH_REG
);
2902 ia64_mov (code
, ins
->dreg
, IA64_SP
);
2904 /* An area at sp is reserved by the ABI for parameter passing */
2905 abi_offset
= - ALIGN_TO (cfg
->param_area
+ 16, MONO_ARCH_FRAME_ALIGNMENT
);
2906 if (ia64_is_adds_imm (abi_offset
))
2907 ia64_adds_imm (code
, IA64_SP
, abi_offset
, IA64_SP
);
2909 ia64_movl (code
, GP_SCRATCH_REG2
, abi_offset
);
2910 ia64_add (code
, IA64_SP
, IA64_SP
, GP_SCRATCH_REG2
);
2913 if (ins
->flags
& MONO_INST_INIT
) {
2915 ia64_add (code
, GP_SCRATCH_REG2
, ins
->dreg
, GP_SCRATCH_REG
);
2917 ia64_codegen_set_one_ins_per_bundle (code
, TRUE
);
2920 ia64_st8_inc_imm_hint (code
, ins
->dreg
, IA64_R0
, 8, 0);
2921 ia64_cmp_lt (code
, 8, 9, ins
->dreg
, GP_SCRATCH_REG2
);
2922 ia64_br_cond_pred (code
, 8, -2);
2924 ia64_codegen_set_one_ins_per_bundle (code
, FALSE
);
2926 ia64_sub (code
, ins
->dreg
, GP_SCRATCH_REG2
, GP_SCRATCH_REG
);
2932 ia64_adds_imm (code
, ins
->dreg
, ins
->inst_offset
, IA64_TP
);
2933 ia64_ld8 (code
, ins
->dreg
, ins
->dreg
);
2936 /* Synchronization */
2937 case OP_MEMORY_BARRIER
:
2940 case OP_ATOMIC_ADD_IMM_NEW_I4
:
2941 g_assert (ins
->inst_offset
== 0);
2942 ia64_fetchadd4_acq_hint (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_imm
, 0);
2943 ia64_adds_imm (code
, ins
->dreg
, ins
->inst_imm
, ins
->dreg
);
2945 case OP_ATOMIC_ADD_IMM_NEW_I8
:
2946 g_assert (ins
->inst_offset
== 0);
2947 ia64_fetchadd8_acq_hint (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_imm
, 0);
2948 ia64_adds_imm (code
, ins
->dreg
, ins
->inst_imm
, ins
->dreg
);
2950 case OP_ATOMIC_EXCHANGE_I4
:
2951 ia64_xchg4_hint (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
, 0);
2952 ia64_sxt4 (code
, ins
->dreg
, ins
->dreg
);
2954 case OP_ATOMIC_EXCHANGE_I8
:
2955 ia64_xchg8_hint (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
, 0);
2957 case OP_ATOMIC_ADD_NEW_I4
: {
2958 guint8
*label
, *buf
;
2960 /* From libatomic_ops */
2963 ia64_begin_bundle (code
);
2964 label
= code
.buf
+ code
.nins
;
2965 ia64_ld4_acq (code
, GP_SCRATCH_REG
, ins
->sreg1
);
2966 ia64_add (code
, GP_SCRATCH_REG2
, GP_SCRATCH_REG
, ins
->sreg2
);
2967 ia64_mov_to_ar_m (code
, IA64_CCV
, GP_SCRATCH_REG
);
2968 ia64_cmpxchg4_acq_hint (code
, GP_SCRATCH_REG2
, ins
->sreg1
, GP_SCRATCH_REG2
, 0);
2969 ia64_cmp4_eq (code
, 6, 7, GP_SCRATCH_REG
, GP_SCRATCH_REG2
);
2970 buf
= code
.buf
+ code
.nins
;
2971 ia64_br_cond_pred (code
, 7, 0);
2972 ia64_begin_bundle (code
);
2973 ia64_patch (buf
, label
);
2974 ia64_add (code
, ins
->dreg
, GP_SCRATCH_REG
, ins
->sreg2
);
2977 case OP_ATOMIC_ADD_NEW_I8
: {
2978 guint8
*label
, *buf
;
2980 /* From libatomic_ops */
2983 ia64_begin_bundle (code
);
2984 label
= code
.buf
+ code
.nins
;
2985 ia64_ld8_acq (code
, GP_SCRATCH_REG
, ins
->sreg1
);
2986 ia64_add (code
, GP_SCRATCH_REG2
, GP_SCRATCH_REG
, ins
->sreg2
);
2987 ia64_mov_to_ar_m (code
, IA64_CCV
, GP_SCRATCH_REG
);
2988 ia64_cmpxchg8_acq_hint (code
, GP_SCRATCH_REG2
, ins
->sreg1
, GP_SCRATCH_REG2
, 0);
2989 ia64_cmp_eq (code
, 6, 7, GP_SCRATCH_REG
, GP_SCRATCH_REG2
);
2990 buf
= code
.buf
+ code
.nins
;
2991 ia64_br_cond_pred (code
, 7, 0);
2992 ia64_begin_bundle (code
);
2993 ia64_patch (buf
, label
);
2994 ia64_add (code
, ins
->dreg
, GP_SCRATCH_REG
, ins
->sreg2
);
2998 /* Exception handling */
2999 case OP_CALL_HANDLER
:
3001 * Using a call instruction would mess up the register stack, so
3002 * save the return address to a register and use a
3005 ia64_codegen_set_one_ins_per_bundle (code
, TRUE
);
3006 ia64_mov (code
, IA64_R15
, IA64_R0
);
3007 ia64_mov_from_ip (code
, GP_SCRATCH_REG
);
3008 /* Add the length of OP_CALL_HANDLER */
3009 ia64_adds_imm (code
, GP_SCRATCH_REG
, 5 * 16, GP_SCRATCH_REG
);
3010 add_patch_info (cfg
, code
, MONO_PATCH_INFO_BB
, ins
->inst_target_bb
);
3011 ia64_movl (code
, GP_SCRATCH_REG2
, 0);
3012 ia64_mov_to_br (code
, IA64_B6
, GP_SCRATCH_REG2
);
3013 ia64_br_cond_reg (code
, IA64_B6
);
3014 ia64_codegen_set_one_ins_per_bundle (code
, FALSE
);
3016 case OP_START_HANDLER
: {
3018 * We receive the return address in GP_SCRATCH_REG.
3020 MonoInst
*spvar
= mono_find_spvar_for_region (cfg
, bb
->region
);
3023 * R15 determines our caller. It is used since it is writable using
3025 * R15 == 0 means we are called by OP_CALL_HANDLER or via resume_context ()
3026 * R15 != 0 means we are called by call_filter ().
3028 ia64_codegen_set_one_ins_per_bundle (code
, TRUE
);
3029 ia64_cmp_eq (code
, 6, 7, IA64_R15
, IA64_R0
);
3031 ia64_br_cond_pred (code
, 6, 6);
3034 * Called by call_filter:
3035 * Allocate a new stack frame, and set the fp register from the
3036 * value passed in by the caller.
3037 * We allocate a similar frame as is done by the prolog, so
3038 * if an exception is thrown while executing the filter, the
3039 * unwinder can unwind through the filter frame using the unwind
3040 * info for the prolog.
3042 ia64_alloc (code
, cfg
->arch
.reg_saved_ar_pfs
, cfg
->arch
.reg_local0
- cfg
->arch
.reg_in0
, cfg
->arch
.reg_out0
- cfg
->arch
.reg_local0
, cfg
->arch
.n_out_regs
, 0);
3043 ia64_mov_from_br (code
, cfg
->arch
.reg_saved_b0
, IA64_B0
);
3044 ia64_mov (code
, cfg
->arch
.reg_saved_sp
, IA64_SP
);
3045 ia64_mov (code
, cfg
->frame_reg
, IA64_R15
);
3046 /* Signal to endfilter that we are called by call_filter */
3047 ia64_mov (code
, GP_SCRATCH_REG
, IA64_R0
);
3049 /* Branch target: */
3050 if (ia64_is_imm14 (spvar
->inst_offset
))
3051 ia64_adds_imm (code
, GP_SCRATCH_REG2
, spvar
->inst_offset
, cfg
->frame_reg
);
3053 ia64_movl (code
, GP_SCRATCH_REG2
, spvar
->inst_offset
);
3054 ia64_add (code
, GP_SCRATCH_REG2
, cfg
->frame_reg
, GP_SCRATCH_REG2
);
3057 /* Save the return address */
3058 ia64_st8_hint (code
, GP_SCRATCH_REG2
, GP_SCRATCH_REG
, 0);
3059 ia64_codegen_set_one_ins_per_bundle (code
, FALSE
);
3064 case OP_ENDFILTER
: {
3065 /* FIXME: Return the value in ENDFILTER */
3066 MonoInst
*spvar
= mono_find_spvar_for_region (cfg
, bb
->region
);
3068 /* Load the return address */
3069 if (ia64_is_imm14 (spvar
->inst_offset
)) {
3070 ia64_adds_imm (code
, GP_SCRATCH_REG
, spvar
->inst_offset
, cfg
->frame_reg
);
3072 ia64_movl (code
, GP_SCRATCH_REG
, spvar
->inst_offset
);
3073 ia64_add (code
, GP_SCRATCH_REG
, cfg
->frame_reg
, GP_SCRATCH_REG
);
3075 ia64_ld8_hint (code
, GP_SCRATCH_REG
, GP_SCRATCH_REG
, 0);
3078 ia64_cmp_eq (code
, 6, 7, GP_SCRATCH_REG
, IA64_R0
);
3079 ia64_br_cond_pred (code
, 7, 4);
3081 /* Called by call_filter */
3083 ia64_mov_to_ar_i (code
, IA64_PFS
, cfg
->arch
.reg_saved_ar_pfs
);
3084 ia64_mov_to_br (code
, IA64_B0
, cfg
->arch
.reg_saved_b0
);
3085 ia64_br_ret_reg (code
, IA64_B0
);
3087 /* Called by CALL_HANDLER */
3088 ia64_mov_to_br (code
, IA64_B6
, GP_SCRATCH_REG
);
3089 ia64_br_cond_reg (code
, IA64_B6
);
3093 ia64_mov (code
, cfg
->arch
.reg_out0
, ins
->sreg1
);
3094 code
= emit_call (cfg
, code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
3095 (gpointer
)"mono_arch_throw_exception");
3098 * This might be the last instruction in the method, so add a dummy
3099 * instruction so the unwinder will work.
3101 ia64_break_i (code
, 0);
3104 ia64_mov (code
, cfg
->arch
.reg_out0
, ins
->sreg1
);
3105 code
= emit_call (cfg
, code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
3106 (gpointer
)"mono_arch_rethrow_exception");
3108 ia64_break_i (code
, 0);
3112 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins
->opcode
), __FUNCTION__
);
3113 g_assert_not_reached ();
3116 if ((code
.buf
- cfg
->native_code
- offset
) > max_len
) {
3117 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %ld)",
3118 mono_inst_name (ins
->opcode
), max_len
, code
.buf
- cfg
->native_code
- offset
);
3119 g_assert_not_reached ();
3125 last_offset
= offset
;
3128 ia64_codegen_close (code
);
3130 cfg
->code_len
= code
.buf
- cfg
->native_code
;
3134 mono_arch_register_lowlevel_calls (void)
3138 static Ia64InsType ins_types_in_template
[32][3] = {
3139 {IA64_INS_TYPE_M
, IA64_INS_TYPE_I
, IA64_INS_TYPE_I
},
3140 {IA64_INS_TYPE_M
, IA64_INS_TYPE_I
, IA64_INS_TYPE_I
},
3141 {IA64_INS_TYPE_M
, IA64_INS_TYPE_I
, IA64_INS_TYPE_I
},
3142 {IA64_INS_TYPE_M
, IA64_INS_TYPE_I
, IA64_INS_TYPE_I
},
3143 {IA64_INS_TYPE_M
, IA64_INS_TYPE_LX
, IA64_INS_TYPE_LX
},
3144 {IA64_INS_TYPE_M
, IA64_INS_TYPE_LX
, IA64_INS_TYPE_LX
},
3147 {IA64_INS_TYPE_M
, IA64_INS_TYPE_M
, IA64_INS_TYPE_I
},
3148 {IA64_INS_TYPE_M
, IA64_INS_TYPE_M
, IA64_INS_TYPE_I
},
3149 {IA64_INS_TYPE_M
, IA64_INS_TYPE_M
, IA64_INS_TYPE_I
},
3150 {IA64_INS_TYPE_M
, IA64_INS_TYPE_M
, IA64_INS_TYPE_I
},
3151 {IA64_INS_TYPE_M
, IA64_INS_TYPE_F
, IA64_INS_TYPE_I
},
3152 {IA64_INS_TYPE_M
, IA64_INS_TYPE_F
, IA64_INS_TYPE_I
},
3153 {IA64_INS_TYPE_M
, IA64_INS_TYPE_M
, IA64_INS_TYPE_F
},
3154 {IA64_INS_TYPE_M
, IA64_INS_TYPE_M
, IA64_INS_TYPE_F
},
3155 {IA64_INS_TYPE_M
, IA64_INS_TYPE_I
, IA64_INS_TYPE_B
},
3156 {IA64_INS_TYPE_M
, IA64_INS_TYPE_I
, IA64_INS_TYPE_B
},
3157 {IA64_INS_TYPE_M
, IA64_INS_TYPE_B
, IA64_INS_TYPE_B
},
3158 {IA64_INS_TYPE_M
, IA64_INS_TYPE_B
, IA64_INS_TYPE_B
},
3161 {IA64_INS_TYPE_B
, IA64_INS_TYPE_B
, IA64_INS_TYPE_B
},
3162 {IA64_INS_TYPE_B
, IA64_INS_TYPE_B
, IA64_INS_TYPE_B
},
3163 {IA64_INS_TYPE_M
, IA64_INS_TYPE_M
, IA64_INS_TYPE_B
},
3164 {IA64_INS_TYPE_M
, IA64_INS_TYPE_M
, IA64_INS_TYPE_B
},
3167 {IA64_INS_TYPE_M
, IA64_INS_TYPE_F
, IA64_INS_TYPE_B
},
3168 {IA64_INS_TYPE_M
, IA64_INS_TYPE_F
, IA64_INS_TYPE_B
},
3173 static gboolean stops_in_template
[32][3] = {
3174 { FALSE
, FALSE
, FALSE
},
3175 { FALSE
, FALSE
, TRUE
},
3176 { FALSE
, TRUE
, FALSE
},
3177 { FALSE
, TRUE
, TRUE
},
3178 { FALSE
, FALSE
, FALSE
},
3179 { FALSE
, FALSE
, TRUE
},
3180 { FALSE
, FALSE
, FALSE
},
3181 { FALSE
, FALSE
, FALSE
},
3183 { FALSE
, FALSE
, FALSE
},
3184 { FALSE
, FALSE
, TRUE
},
3185 { TRUE
, FALSE
, FALSE
},
3186 { TRUE
, FALSE
, TRUE
},
3187 { FALSE
, FALSE
, FALSE
},
3188 { FALSE
, FALSE
, TRUE
},
3189 { FALSE
, FALSE
, FALSE
},
3190 { FALSE
, FALSE
, TRUE
},
3192 { FALSE
, FALSE
, FALSE
},
3193 { FALSE
, FALSE
, TRUE
},
3194 { FALSE
, FALSE
, FALSE
},
3195 { FALSE
, FALSE
, TRUE
},
3196 { FALSE
, FALSE
, FALSE
},
3197 { FALSE
, FALSE
, FALSE
},
3198 { FALSE
, FALSE
, FALSE
},
3199 { FALSE
, FALSE
, TRUE
},
3201 { FALSE
, FALSE
, FALSE
},
3202 { FALSE
, FALSE
, TRUE
},
3203 { FALSE
, FALSE
, FALSE
},
3204 { FALSE
, FALSE
, FALSE
},
3205 { FALSE
, FALSE
, FALSE
},
3206 { FALSE
, FALSE
, TRUE
},
3207 { FALSE
, FALSE
, FALSE
},
3208 { FALSE
, FALSE
, FALSE
}
3211 static int last_stop_in_template
[32] = {
3212 -1, 2, 1, 2, -1, 2, -1, -1,
3213 -1, 2, 0, 2, -1, 2, -1, 2,
3214 -1, 2, -1, 2, -1, -1, -1, 2,
3215 -1, 2, -1, -1, -1, 2, -1, -1
3218 static guint64 nops_for_ins_types
[6] = {
3227 #define ITYPE_MATCH(itype1, itype2) (((itype1) == (itype2)) || (((itype2) == IA64_INS_TYPE_A) && (((itype1) == IA64_INS_TYPE_I) || ((itype1) == IA64_INS_TYPE_M))))
3234 #define DEBUG_INS_SCHED(a) do { a; } while (0)
3236 #define DEBUG_INS_SCHED(a)
3240 ia64_analyze_deps (Ia64CodegenState
*code
, int *deps_start
, int *stops
)
3242 int i
, pos
, ins_index
, current_deps_start
, current_ins_start
, reg
;
3243 guint8
*deps
= code
->dep_info
;
3244 gboolean need_stop
, no_stop
;
3246 for (i
= 0; i
< code
->nins
; ++i
)
3250 current_deps_start
= 0;
3251 current_ins_start
= 0;
3252 deps_start
[ins_index
] = current_ins_start
;
3255 DEBUG_INS_SCHED (printf ("BEGIN.\n"));
3256 while (pos
< code
->dep_info_pos
) {
3258 switch (deps
[pos
]) {
3259 case IA64_END_OF_INS
:
3261 current_ins_start
= pos
+ 2;
3262 deps_start
[ins_index
] = current_ins_start
;
3264 DEBUG_INS_SCHED (printf ("(%d) END INS.\n", ins_index
- 1));
3269 reg
= deps
[pos
+ 1];
3271 DEBUG_INS_SCHED (printf ("READ GR: %d\n", reg
));
3272 for (i
= current_deps_start
; i
< current_ins_start
; i
+= 2)
3273 if (deps
[i
] == IA64_WRITE_GR
&& deps
[i
+ 1] == reg
)
3277 reg
= code
->dep_info
[pos
+ 1];
3279 DEBUG_INS_SCHED (printf ("WRITE GR: %d\n", reg
));
3280 for (i
= current_deps_start
; i
< current_ins_start
; i
+= 2)
3281 if (deps
[i
] == IA64_WRITE_GR
&& deps
[i
+ 1] == reg
)
3285 reg
= deps
[pos
+ 1];
3287 DEBUG_INS_SCHED (printf ("READ PR: %d\n", reg
));
3288 for (i
= current_deps_start
; i
< current_ins_start
; i
+= 2)
3289 if (((deps
[i
] == IA64_WRITE_PR
) || (deps
[i
] == IA64_WRITE_PR_FLOAT
)) && deps
[i
+ 1] == reg
)
3292 case IA64_READ_PR_BRANCH
:
3293 reg
= deps
[pos
+ 1];
3295 /* Writes to prs by non-float instructions are visible to branches */
3296 DEBUG_INS_SCHED (printf ("READ PR BRANCH: %d\n", reg
));
3297 for (i
= current_deps_start
; i
< current_ins_start
; i
+= 2)
3298 if (deps
[i
] == IA64_WRITE_PR_FLOAT
&& deps
[i
+ 1] == reg
)
3302 reg
= code
->dep_info
[pos
+ 1];
3304 DEBUG_INS_SCHED (printf ("WRITE PR: %d\n", reg
));
3305 for (i
= current_deps_start
; i
< current_ins_start
; i
+= 2)
3306 if (((deps
[i
] == IA64_WRITE_PR
) || (deps
[i
] == IA64_WRITE_PR_FLOAT
)) && deps
[i
+ 1] == reg
)
3309 case IA64_WRITE_PR_FLOAT
:
3310 reg
= code
->dep_info
[pos
+ 1];
3312 DEBUG_INS_SCHED (printf ("WRITE PR FP: %d\n", reg
));
3313 for (i
= current_deps_start
; i
< current_ins_start
; i
+= 2)
3314 if (((deps
[i
] == IA64_WRITE_GR
) || (deps
[i
] == IA64_WRITE_PR_FLOAT
)) && deps
[i
+ 1] == reg
)
3318 reg
= deps
[pos
+ 1];
3320 DEBUG_INS_SCHED (printf ("READ BR: %d\n", reg
));
3321 for (i
= current_deps_start
; i
< current_ins_start
; i
+= 2)
3322 if (deps
[i
] == IA64_WRITE_BR
&& deps
[i
+ 1] == reg
)
3326 reg
= code
->dep_info
[pos
+ 1];
3328 DEBUG_INS_SCHED (printf ("WRITE BR: %d\n", reg
));
3329 for (i
= current_deps_start
; i
< current_ins_start
; i
+= 2)
3330 if (deps
[i
] == IA64_WRITE_BR
&& deps
[i
+ 1] == reg
)
3333 case IA64_READ_BR_BRANCH
:
3334 reg
= deps
[pos
+ 1];
3336 /* Writes to brs are visible to branches */
3337 DEBUG_INS_SCHED (printf ("READ BR BRACH: %d\n", reg
));
3340 reg
= deps
[pos
+ 1];
3342 DEBUG_INS_SCHED (printf ("READ BR: %d\n", reg
));
3343 for (i
= current_deps_start
; i
< current_ins_start
; i
+= 2)
3344 if (deps
[i
] == IA64_WRITE_FR
&& deps
[i
+ 1] == reg
)
3348 reg
= code
->dep_info
[pos
+ 1];
3350 DEBUG_INS_SCHED (printf ("WRITE BR: %d\n", reg
));
3351 for (i
= current_deps_start
; i
< current_ins_start
; i
+= 2)
3352 if (deps
[i
] == IA64_WRITE_FR
&& deps
[i
+ 1] == reg
)
3356 reg
= deps
[pos
+ 1];
3358 DEBUG_INS_SCHED (printf ("READ AR: %d\n", reg
));
3359 for (i
= current_deps_start
; i
< current_ins_start
; i
+= 2)
3360 if (deps
[i
] == IA64_WRITE_AR
&& deps
[i
+ 1] == reg
)
3364 reg
= code
->dep_info
[pos
+ 1];
3366 DEBUG_INS_SCHED (printf ("WRITE AR: %d\n", reg
));
3367 for (i
= current_deps_start
; i
< current_ins_start
; i
+= 2)
3368 if (deps
[i
] == IA64_WRITE_AR
&& deps
[i
+ 1] == reg
)
3373 * Explicitly indicate that a stop is not required. Useful for
3374 * example when two predicated instructions with negated predicates
3375 * write the same registers.
3380 g_assert_not_reached ();
3384 if (need_stop
&& !no_stop
) {
3385 g_assert (ins_index
> 0);
3386 stops
[ins_index
- 1] = 1;
3388 DEBUG_INS_SCHED (printf ("STOP\n"));
3389 current_deps_start
= current_ins_start
;
3391 /* Skip remaining deps for this instruction */
3392 while (deps
[pos
] != IA64_END_OF_INS
)
3397 if (code
->nins
> 0) {
3398 /* No dependency info for the last instruction */
3399 stops
[code
->nins
- 1] = 1;
3402 deps_start
[code
->nins
] = code
->dep_info_pos
;
3406 ia64_real_emit_bundle (Ia64CodegenState
*code
, int *deps_start
, int *stops
, int n
, guint64
template, guint64 ins1
, guint64 ins2
, guint64 ins3
, guint8 nops
)
3408 int stop_pos
, i
, deps_to_shift
, dep_shift
;
3410 g_assert (n
<= code
->nins
);
3412 // if (n > 1) printf ("FOUND: %ld.\n", template);
3414 ia64_emit_bundle_template (code
, template, ins1
, ins2
, ins3
);
3416 stop_pos
= last_stop_in_template
[template] + 1;
3420 /* Compute the number of 'real' instructions before the stop */
3421 deps_to_shift
= stop_pos
;
3422 if (stop_pos
>= 3 && (nops
& (1 << 2)))
3424 if (stop_pos
>= 2 && (nops
& (1 << 1)))
3426 if (stop_pos
>= 1 && (nops
& (1 << 0)))
3430 * We have to keep some dependencies whose instructions have been shifted
3431 * out of the buffer. So nullify the end_of_ins markers in the dependency
3434 for (i
= deps_start
[deps_to_shift
]; i
< deps_start
[n
]; i
+= 2)
3435 if (code
->dep_info
[i
] == IA64_END_OF_INS
)
3436 code
->dep_info
[i
] = IA64_NONE
;
3438 g_assert (deps_start
[deps_to_shift
] <= code
->dep_info_pos
);
3439 memcpy (code
->dep_info
, &code
->dep_info
[deps_start
[deps_to_shift
]], code
->dep_info_pos
- deps_start
[deps_to_shift
]);
3440 code
->dep_info_pos
= code
->dep_info_pos
- deps_start
[deps_to_shift
];
3442 dep_shift
= deps_start
[deps_to_shift
];
3443 for (i
= 0; i
< code
->nins
+ 1 - n
; ++i
)
3444 deps_start
[i
] = deps_start
[n
+ i
] - dep_shift
;
3446 /* Determine the exact positions of instructions with unwind ops */
3447 if (code
->unw_op_count
) {
3449 int curr_ins
, curr_ins_pos
;
3452 curr_ins_pos
= ((code
->buf
- code
->region_start
- 16) / 16) * 3;
3453 for (i
= 0; i
< 3; ++i
) {
3454 if (! (nops
& (1 << i
))) {
3455 ins_pos
[curr_ins
] = curr_ins_pos
+ i
;
3460 for (i
= code
->unw_op_pos
; i
< code
->unw_op_count
; ++i
) {
3461 if (code
->unw_ops_pos
[i
] < n
) {
3462 code
->unw_ops
[i
].when
= ins_pos
[code
->unw_ops_pos
[i
]];
3463 //printf ("UNW-OP: %d -> %d\n", code->unw_ops_pos [i], code->unw_ops [i].when);
3466 if (code
->unw_op_pos
< code
->unw_op_count
)
3467 code
->unw_op_pos
+= n
;
3470 if (n
== code
->nins
) {
3475 memcpy (&code
->instructions
[0], &code
->instructions
[n
], (code
->nins
- n
) * sizeof (guint64
));
3476 memcpy (&code
->itypes
[0], &code
->itypes
[n
], (code
->nins
- n
) * sizeof (int));
3477 memcpy (&stops
[0], &stops
[n
], (code
->nins
- n
) * sizeof (int));
3483 ia64_emit_bundle (Ia64CodegenState
*code
, gboolean flush
)
3485 int i
, ins_type
, template, nins_to_emit
;
3486 int deps_start
[16];
3491 * We implement a simple scheduler which tries to put three instructions
3492 * per bundle, then two, then one.
3494 ia64_analyze_deps (code
, deps_start
, stops
);
3496 if ((code
->nins
>= 3) && !code
->one_ins_per_bundle
) {
3497 /* Find a suitable template */
3498 for (template = 0; template < 32; ++template) {
3499 if (stops_in_template
[template][0] != stops
[0] ||
3500 stops_in_template
[template][1] != stops
[1] ||
3501 stops_in_template
[template][2] != stops
[2])
3505 for (i
= 0; i
< 3; ++i
) {
3506 ins_type
= ins_types_in_template
[template][i
];
3507 switch (code
->itypes
[i
]) {
3508 case IA64_INS_TYPE_A
:
3509 found
&= (ins_type
== IA64_INS_TYPE_I
) || (ins_type
== IA64_INS_TYPE_M
);
3512 found
&= (ins_type
== code
->itypes
[i
]);
3518 found
= debug_ins_sched ();
3521 ia64_real_emit_bundle (code
, deps_start
, stops
, 3, template, code
->instructions
[0], code
->instructions
[1], code
->instructions
[2], 0);
3527 if (code
->nins
< IA64_INS_BUFFER_SIZE
&& !flush
)
3528 /* Wait for more instructions */
3531 /* If it didn't work out, try putting two instructions into one bundle */
3532 if ((code
->nins
>= 2) && !code
->one_ins_per_bundle
) {
3533 /* Try a nop at the end */
3534 for (template = 0; template < 32; ++template) {
3535 if (stops_in_template
[template][0] != stops
[0] ||
3536 ((stops_in_template
[template][1] != stops
[1]) &&
3537 (stops_in_template
[template][2] != stops
[1])))
3541 if (!ITYPE_MATCH (ins_types_in_template
[template][0], code
->itypes
[0]) ||
3542 !ITYPE_MATCH (ins_types_in_template
[template][1], code
->itypes
[1]))
3545 if (!debug_ins_sched ())
3548 ia64_real_emit_bundle (code
, deps_start
, stops
, 2, template, code
->instructions
[0], code
->instructions
[1], nops_for_ins_types
[ins_types_in_template
[template][2]], 1 << 2);
3553 if (code
->nins
< IA64_INS_BUFFER_SIZE
&& !flush
)
3554 /* Wait for more instructions */
3557 if ((code
->nins
>= 2) && !code
->one_ins_per_bundle
) {
3558 /* Try a nop in the middle */
3559 for (template = 0; template < 32; ++template) {
3560 if (((stops_in_template
[template][0] != stops
[0]) &&
3561 (stops_in_template
[template][1] != stops
[0])) ||
3562 stops_in_template
[template][2] != stops
[1])
3565 if (!ITYPE_MATCH (ins_types_in_template
[template][0], code
->itypes
[0]) ||
3566 !ITYPE_MATCH (ins_types_in_template
[template][2], code
->itypes
[1]))
3569 if (!debug_ins_sched ())
3572 ia64_real_emit_bundle (code
, deps_start
, stops
, 2, template, code
->instructions
[0], nops_for_ins_types
[ins_types_in_template
[template][1]], code
->instructions
[1], 1 << 1);
3577 if ((code
->nins
>= 2) && flush
&& !code
->one_ins_per_bundle
) {
3578 /* Try a nop at the beginning */
3579 for (template = 0; template < 32; ++template) {
3580 if ((stops_in_template
[template][1] != stops
[0]) ||
3581 (stops_in_template
[template][2] != stops
[1]))
3584 if (!ITYPE_MATCH (ins_types_in_template
[template][1], code
->itypes
[0]) ||
3585 !ITYPE_MATCH (ins_types_in_template
[template][2], code
->itypes
[1]))
3588 if (!debug_ins_sched ())
3591 ia64_real_emit_bundle (code
, deps_start
, stops
, 2, template, nops_for_ins_types
[ins_types_in_template
[template][0]], code
->instructions
[0], code
->instructions
[1], 1 << 0);
3596 if (code
->nins
< IA64_INS_BUFFER_SIZE
&& !flush
)
3597 /* Wait for more instructions */
3601 nins_to_emit
= code
->nins
;
3605 while (nins_to_emit
> 0) {
3606 if (!debug_ins_sched ())
3608 switch (code
->itypes
[0]) {
3609 case IA64_INS_TYPE_A
:
3611 ia64_real_emit_bundle (code
, deps_start
, stops
, 1, IA64_TEMPLATE_MIIS
, code
->instructions
[0], IA64_NOP_I
, IA64_NOP_I
, 0);
3613 ia64_real_emit_bundle (code
, deps_start
, stops
, 1, IA64_TEMPLATE_MII
, code
->instructions
[0], IA64_NOP_I
, IA64_NOP_I
, 0);
3615 case IA64_INS_TYPE_I
:
3617 ia64_real_emit_bundle (code
, deps_start
, stops
, 1, IA64_TEMPLATE_MIIS
, IA64_NOP_M
, code
->instructions
[0], IA64_NOP_I
, 0);
3619 ia64_real_emit_bundle (code
, deps_start
, stops
, 1, IA64_TEMPLATE_MII
, IA64_NOP_M
, code
->instructions
[0], IA64_NOP_I
, 0);
3621 case IA64_INS_TYPE_M
:
3623 ia64_real_emit_bundle (code
, deps_start
, stops
, 1, IA64_TEMPLATE_MIIS
, code
->instructions
[0], IA64_NOP_I
, IA64_NOP_I
, 0);
3625 ia64_real_emit_bundle (code
, deps_start
, stops
, 1, IA64_TEMPLATE_MII
, code
->instructions
[0], IA64_NOP_I
, IA64_NOP_I
, 0);
3627 case IA64_INS_TYPE_B
:
3629 ia64_real_emit_bundle (code
, deps_start
, stops
, 1, IA64_TEMPLATE_MIBS
, IA64_NOP_M
, IA64_NOP_I
, code
->instructions
[0], 0);
3631 ia64_real_emit_bundle (code
, deps_start
, stops
, 1, IA64_TEMPLATE_MIB
, IA64_NOP_M
, IA64_NOP_I
, code
->instructions
[0], 0);
3633 case IA64_INS_TYPE_F
:
3635 ia64_real_emit_bundle (code
, deps_start
, stops
, 1, IA64_TEMPLATE_MFIS
, IA64_NOP_M
, code
->instructions
[0], IA64_NOP_I
, 0);
3637 ia64_real_emit_bundle (code
, deps_start
, stops
, 1, IA64_TEMPLATE_MFI
, IA64_NOP_M
, code
->instructions
[0], IA64_NOP_I
, 0);
3639 case IA64_INS_TYPE_LX
:
3640 if (stops
[0] || stops
[1])
3641 ia64_real_emit_bundle (code
, deps_start
, stops
, 2, IA64_TEMPLATE_MLXS
, IA64_NOP_M
, code
->instructions
[0], code
->instructions
[1], 0);
3643 ia64_real_emit_bundle (code
, deps_start
, stops
, 2, IA64_TEMPLATE_MLX
, IA64_NOP_M
, code
->instructions
[0], code
->instructions
[1], 0);
3647 g_assert_not_reached ();
3653 unw_dyn_region_info_t
*
3654 mono_ia64_create_unwind_region (Ia64CodegenState
*code
)
3656 unw_dyn_region_info_t
*r
;
3658 g_assert (code
->nins
== 0);
3659 r
= g_malloc0 (_U_dyn_region_info_size (code
->unw_op_count
));
3660 memcpy (&r
->op
, &code
->unw_ops
, sizeof (unw_dyn_op_t
) * code
->unw_op_count
);
3661 r
->op_count
= code
->unw_op_count
;
3662 r
->insn_count
= ((code
->buf
- code
->region_start
) >> 4) * 3;
3663 code
->unw_op_count
= 0;
3664 code
->unw_op_pos
= 0;
3665 code
->region_start
= code
->buf
;
3671 ia64_patch (unsigned char* code
, gpointer target
)
3674 guint64 instructions
[3];
3675 guint8 gen_buf
[16];
3676 Ia64CodegenState gen
;
3681 * code encodes both the position inside the buffer and code.nins when
3682 * the instruction was emitted.
3684 ins_to_skip
= (guint64
)code
% 16;
3685 code
= (unsigned char*)((guint64
)code
& ~15);
3688 * Search for the first instruction which is 'patchable', skipping
3689 * ins_to_skip instructions.
3694 template = ia64_bundle_template (code
);
3695 instructions
[0] = ia64_bundle_ins1 (code
);
3696 instructions
[1] = ia64_bundle_ins2 (code
);
3697 instructions
[2] = ia64_bundle_ins3 (code
);
3699 ia64_codegen_init (gen
, gen_buf
);
3702 for (i
= 0; i
< 3; ++i
) {
3703 guint64 ins
= instructions
[i
];
3704 int opcode
= ia64_ins_opcode (ins
);
3706 if (ins
== nops_for_ins_types
[ins_types_in_template
[template][i
]])
3714 switch (ins_types_in_template
[template][i
]) {
3715 case IA64_INS_TYPE_A
:
3716 case IA64_INS_TYPE_M
:
3717 if ((opcode
== 8) && (ia64_ins_x2a (ins
) == 2) && (ia64_ins_ve (ins
) == 0)) {
3719 ia64_adds_imm_pred (gen
, ia64_ins_qp (ins
), ia64_ins_r1 (ins
), (guint64
)target
, ia64_ins_r3 (ins
));
3720 instructions
[i
] = gen
.instructions
[0];
3726 case IA64_INS_TYPE_B
:
3727 if ((opcode
== 4) && (ia64_ins_btype (ins
) == 0)) {
3729 gint64 disp
= ((guint8
*)target
- code
) >> 4;
3732 ia64_br_cond_hint_pred (gen
, ia64_ins_qp (ins
), disp
, 0, 0, 0);
3734 instructions
[i
] = gen
.instructions
[0];
3737 else if (opcode
== 5) {
3739 gint64 disp
= ((guint8
*)target
- code
) >> 4;
3742 ia64_br_call_hint_pred (gen
, ia64_ins_qp (ins
), ia64_ins_b1 (ins
), disp
, 0, 0, 0);
3743 instructions
[i
] = gen
.instructions
[0];
3749 case IA64_INS_TYPE_LX
:
3753 if ((opcode
== 6) && (ia64_ins_vc (ins
) == 0)) {
3755 ia64_movl_pred (gen
, ia64_ins_qp (ins
), ia64_ins_r1 (ins
), target
);
3756 instructions
[1] = gen
.instructions
[0];
3757 instructions
[2] = gen
.instructions
[1];
3770 ia64_codegen_init (gen
, code
);
3771 ia64_emit_bundle_template (&gen
, template, instructions
[0], instructions
[1], instructions
[2]);
3781 mono_arch_patch_code (MonoMethod
*method
, MonoDomain
*domain
, guint8
*code
, MonoJumpInfo
*ji
, gboolean run_cctors
)
3783 MonoJumpInfo
*patch_info
;
3785 for (patch_info
= ji
; patch_info
; patch_info
= patch_info
->next
) {
3786 unsigned char *ip
= patch_info
->ip
.i
+ code
;
3787 const unsigned char *target
;
3789 target
= mono_resolve_patch_target (method
, domain
, code
, patch_info
, run_cctors
);
3791 if (patch_info
->type
== MONO_PATCH_INFO_NONE
)
3793 if (mono_compile_aot
) {
3797 ia64_patch (ip
, (gpointer
)target
);
3802 mono_arch_emit_prolog (MonoCompile
*cfg
)
3804 MonoMethod
*method
= cfg
->method
;
3805 MonoMethodSignature
*sig
;
3807 int alloc_size
, pos
, i
;
3808 Ia64CodegenState code
;
3811 sig
= mono_method_signature (method
);
3814 cinfo
= get_call_info (cfg
, cfg
->mempool
, sig
, FALSE
);
3816 cfg
->code_size
= MAX (((MonoMethodNormal
*)method
)->header
->code_size
* 4, 512);
3818 if (mono_jit_trace_calls
!= NULL
&& mono_trace_eval (method
))
3819 cfg
->code_size
+= 1024;
3820 if (cfg
->prof_options
& MONO_PROFILE_ENTER_LEAVE
)
3821 cfg
->code_size
+= 1024;
3823 cfg
->native_code
= g_malloc (cfg
->code_size
);
3825 ia64_codegen_init (code
, cfg
->native_code
);
3827 alloc_size
= ALIGN_TO (cfg
->stack_offset
, MONO_ARCH_FRAME_ALIGNMENT
);
3828 if (cfg
->param_area
)
3829 alloc_size
+= cfg
->param_area
;
3833 alloc_size
= ALIGN_TO (alloc_size
, MONO_ARCH_FRAME_ALIGNMENT
);
3835 if (cfg
->flags
& MONO_CFG_HAS_ALLOCA
)
3836 /* Force sp to be saved/restored */
3837 alloc_size
+= MONO_ARCH_FRAME_ALIGNMENT
;
3839 cfg
->arch
.stack_alloc_size
= alloc_size
;
3843 if (method
->save_lmf
) {
3844 /* No LMF on IA64 */
3849 ia64_unw_save_reg (code
, UNW_IA64_AR_PFS
, UNW_IA64_GR
+ cfg
->arch
.reg_saved_ar_pfs
);
3850 ia64_alloc (code
, cfg
->arch
.reg_saved_ar_pfs
, cfg
->arch
.reg_local0
- cfg
->arch
.reg_in0
, cfg
->arch
.reg_out0
- cfg
->arch
.reg_local0
, cfg
->arch
.n_out_regs
, 0);
3851 ia64_unw_save_reg (code
, UNW_IA64_RP
, UNW_IA64_GR
+ cfg
->arch
.reg_saved_b0
);
3852 ia64_mov_from_br (code
, cfg
->arch
.reg_saved_b0
, IA64_B0
);
3854 if ((alloc_size
|| cinfo
->stack_usage
) && !cfg
->arch
.omit_fp
) {
3855 ia64_unw_save_reg (code
, UNW_IA64_SP
, UNW_IA64_GR
+ cfg
->arch
.reg_saved_sp
);
3856 ia64_mov (code
, cfg
->arch
.reg_saved_sp
, IA64_SP
);
3857 if (cfg
->frame_reg
!= cfg
->arch
.reg_saved_sp
)
3858 ia64_mov (code
, cfg
->frame_reg
, IA64_SP
);
3862 #if defined(MONO_ARCH_SIGSEGV_ON_ALTSTACK)
3863 int pagesize
= getpagesize ();
3865 if (alloc_size
>= pagesize
) {
3866 gint32 remaining_size
= alloc_size
;
3868 /* Generate stack touching code */
3869 ia64_mov (code
, GP_SCRATCH_REG
, IA64_SP
);
3870 while (remaining_size
>= pagesize
) {
3871 ia64_movl (code
, GP_SCRATCH_REG2
, pagesize
);
3872 ia64_sub (code
, GP_SCRATCH_REG
, GP_SCRATCH_REG
, GP_SCRATCH_REG2
);
3873 ia64_ld8 (code
, GP_SCRATCH_REG2
, GP_SCRATCH_REG
);
3874 remaining_size
-= pagesize
;
3878 if (ia64_is_imm14 (-alloc_size
)) {
3879 if (cfg
->arch
.omit_fp
)
3880 ia64_unw_add (code
, UNW_IA64_SP
, (-alloc_size
));
3881 ia64_adds_imm (code
, IA64_SP
, (-alloc_size
), IA64_SP
);
3884 ia64_movl (code
, GP_SCRATCH_REG
, -alloc_size
);
3885 if (cfg
->arch
.omit_fp
)
3886 ia64_unw_add (code
, UNW_IA64_SP
, (-alloc_size
));
3887 ia64_add (code
, IA64_SP
, GP_SCRATCH_REG
, IA64_SP
);
3891 ia64_begin_bundle (code
);
3893 /* Initialize unwind info */
3894 cfg
->arch
.r_pro
= mono_ia64_create_unwind_region (&code
);
3896 if (sig
->ret
->type
!= MONO_TYPE_VOID
) {
3897 if ((cinfo
->ret
.storage
== ArgInIReg
) && (cfg
->ret
->opcode
!= OP_REGVAR
)) {
3898 /* Save volatile arguments to the stack */
3903 /* Keep this in sync with emit_load_volatile_arguments */
3904 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
3905 ArgInfo
*ainfo
= cinfo
->args
+ i
;
3906 gint32 stack_offset
;
3909 inst
= cfg
->args
[i
];
3911 if (sig
->hasthis
&& (i
== 0))
3912 arg_type
= &mono_defaults
.object_class
->byval_arg
;
3914 arg_type
= sig
->params
[i
- sig
->hasthis
];
3916 arg_type
= mono_type_get_underlying_type (arg_type
);
3918 stack_offset
= ainfo
->offset
+ ARGS_OFFSET
;
3921 * FIXME: Native code might pass non register sized integers
3922 * without initializing the upper bits.
3924 if (method
->wrapper_type
== MONO_WRAPPER_NATIVE_TO_MANAGED
&& !arg_type
->byref
&& ainfo
->storage
== ArgInIReg
) {
3925 int reg
= cfg
->arch
.reg_in0
+ ainfo
->reg
;
3927 switch (mono_type_to_load_membase (cfg
, arg_type
)) {
3928 case OP_LOADI1_MEMBASE
:
3929 ia64_sxt1 (code
, reg
, reg
);
3931 case OP_LOADU1_MEMBASE
:
3932 ia64_zxt1 (code
, reg
, reg
);
3934 case OP_LOADI2_MEMBASE
:
3935 ia64_sxt2 (code
, reg
, reg
);
3937 case OP_LOADU2_MEMBASE
:
3938 ia64_zxt2 (code
, reg
, reg
);
3945 /* Save volatile arguments to the stack */
3946 if (inst
->opcode
!= OP_REGVAR
) {
3947 switch (ainfo
->storage
) {
3950 case ArgInFloatRegR4
:
3951 g_assert (inst
->opcode
== OP_REGOFFSET
);
3952 if (ia64_is_adds_imm (inst
->inst_offset
))
3953 ia64_adds_imm (code
, GP_SCRATCH_REG
, inst
->inst_offset
, inst
->inst_basereg
);
3955 ia64_movl (code
, GP_SCRATCH_REG2
, inst
->inst_offset
);
3956 ia64_add (code
, GP_SCRATCH_REG
, GP_SCRATCH_REG
, GP_SCRATCH_REG2
);
3958 if (arg_type
->byref
)
3959 ia64_st8_hint (code
, GP_SCRATCH_REG
, cfg
->arch
.reg_in0
+ ainfo
->reg
, 0);
3961 switch (arg_type
->type
) {
3963 ia64_stfs_hint (code
, GP_SCRATCH_REG
, ainfo
->reg
, 0);
3966 ia64_stfd_hint (code
, GP_SCRATCH_REG
, ainfo
->reg
, 0);
3969 ia64_st8_hint (code
, GP_SCRATCH_REG
, cfg
->arch
.reg_in0
+ ainfo
->reg
, 0);
3977 if (ainfo
->nslots
!= ainfo
->nregs
)
3980 g_assert (inst
->opcode
== OP_REGOFFSET
);
3981 ia64_adds_imm (code
, GP_SCRATCH_REG
, inst
->inst_offset
, inst
->inst_basereg
);
3982 for (i
= 0; i
< ainfo
->nregs
; ++i
) {
3983 switch (ainfo
->atype
) {
3984 case AggregateNormal
:
3985 ia64_st8_inc_imm_hint (code
, GP_SCRATCH_REG
, cfg
->arch
.reg_in0
+ ainfo
->reg
+ i
, sizeof (gpointer
), 0);
3987 case AggregateSingleHFA
:
3988 ia64_stfs_inc_imm_hint (code
, GP_SCRATCH_REG
, ainfo
->reg
+ i
, 4, 0);
3990 case AggregateDoubleHFA
:
3991 ia64_stfd_inc_imm_hint (code
, GP_SCRATCH_REG
, ainfo
->reg
+ i
, sizeof (gpointer
), 0);
3999 g_assert_not_reached ();
4003 if (inst
->opcode
== OP_REGVAR
) {
4004 /* Argument allocated to (non-volatile) register */
4005 switch (ainfo
->storage
) {
4007 if (inst
->dreg
!= cfg
->arch
.reg_in0
+ ainfo
->reg
)
4008 ia64_mov (code
, inst
->dreg
, cfg
->arch
.reg_in0
+ ainfo
->reg
);
4011 ia64_adds_imm (code
, GP_SCRATCH_REG
, 16 + ainfo
->offset
, cfg
->frame_reg
);
4012 ia64_ld8 (code
, inst
->dreg
, GP_SCRATCH_REG
);
4020 if (method
->save_lmf
) {
4021 /* No LMF on IA64 */
4024 ia64_codegen_close (code
);
4026 if (mono_jit_trace_calls
!= NULL
&& mono_trace_eval (method
))
4027 code
.buf
= mono_arch_instrument_prolog (cfg
, mono_trace_enter_method
, code
.buf
, TRUE
);
4029 cfg
->code_len
= code
.buf
- cfg
->native_code
;
4031 g_assert (cfg
->code_len
< cfg
->code_size
);
4033 cfg
->arch
.prolog_end_offset
= cfg
->code_len
;
4039 mono_arch_emit_epilog (MonoCompile
*cfg
)
4041 MonoMethod
*method
= cfg
->method
;
4043 int max_epilog_size
= 16 * 4;
4044 Ia64CodegenState code
;
4049 if (mono_jit_trace_calls
!= NULL
)
4050 max_epilog_size
+= 1024;
4052 cfg
->arch
.epilog_begin_offset
= cfg
->code_len
;
4054 while (cfg
->code_len
+ max_epilog_size
> cfg
->code_size
) {
4055 cfg
->code_size
*= 2;
4056 cfg
->native_code
= g_realloc (cfg
->native_code
, cfg
->code_size
);
4057 mono_jit_stats
.code_reallocs
++;
4060 /* FIXME: Emit unwind info */
4062 buf
= cfg
->native_code
+ cfg
->code_len
;
4064 if (mono_jit_trace_calls
!= NULL
&& mono_trace_eval (method
))
4065 buf
= mono_arch_instrument_epilog (cfg
, mono_trace_leave_method
, buf
, TRUE
);
4067 ia64_codegen_init (code
, buf
);
4069 /* the code restoring the registers must be kept in sync with OP_JMP */
4072 if (method
->save_lmf
) {
4073 /* No LMF on IA64 */
4076 /* Load returned vtypes into registers if needed */
4077 cinfo
= get_call_info (cfg
, cfg
->mempool
, mono_method_signature (method
), FALSE
);
4078 ainfo
= &cinfo
->ret
;
4079 switch (ainfo
->storage
) {
4081 if (ainfo
->nslots
!= ainfo
->nregs
)
4084 g_assert (cfg
->ret
->opcode
== OP_REGOFFSET
);
4085 ia64_adds_imm (code
, GP_SCRATCH_REG
, cfg
->ret
->inst_offset
, cfg
->ret
->inst_basereg
);
4086 for (i
= 0; i
< ainfo
->nregs
; ++i
) {
4087 switch (ainfo
->atype
) {
4088 case AggregateNormal
:
4089 ia64_ld8_inc_imm_hint (code
, ainfo
->reg
+ i
, GP_SCRATCH_REG
, sizeof (gpointer
), 0);
4091 case AggregateSingleHFA
:
4092 ia64_ldfs_inc_imm_hint (code
, ainfo
->reg
+ i
, GP_SCRATCH_REG
, 4, 0);
4094 case AggregateDoubleHFA
:
4095 ia64_ldfd_inc_imm_hint (code
, ainfo
->reg
+ i
, GP_SCRATCH_REG
, sizeof (gpointer
), 0);
4098 g_assert_not_reached ();
4106 ia64_begin_bundle (code
);
4108 code
.region_start
= cfg
->native_code
;
4110 /* Label the unwind state at the start of the exception throwing region */
4111 //ia64_unw_label_state (code, 1234);
4113 if (cfg
->arch
.stack_alloc_size
) {
4114 if (cfg
->arch
.omit_fp
) {
4115 if (ia64_is_imm14 (cfg
->arch
.stack_alloc_size
)) {
4116 ia64_unw_pop_frames (code
, 1);
4117 ia64_adds_imm (code
, IA64_SP
, (cfg
->arch
.stack_alloc_size
), IA64_SP
);
4119 ia64_movl (code
, GP_SCRATCH_REG
, cfg
->arch
.stack_alloc_size
);
4120 ia64_unw_pop_frames (code
, 1);
4121 ia64_add (code
, IA64_SP
, GP_SCRATCH_REG
, IA64_SP
);
4125 ia64_unw_pop_frames (code
, 1);
4126 ia64_mov (code
, IA64_SP
, cfg
->arch
.reg_saved_sp
);
4129 ia64_mov_to_ar_i (code
, IA64_PFS
, cfg
->arch
.reg_saved_ar_pfs
);
4130 ia64_mov_ret_to_br (code
, IA64_B0
, cfg
->arch
.reg_saved_b0
);
4131 ia64_br_ret_reg (code
, IA64_B0
);
4133 ia64_codegen_close (code
);
4135 cfg
->arch
.r_epilog
= mono_ia64_create_unwind_region (&code
);
4136 cfg
->arch
.r_pro
->next
= cfg
->arch
.r_epilog
;
4138 cfg
->code_len
= code
.buf
- cfg
->native_code
;
4140 g_assert (cfg
->code_len
< cfg
->code_size
);
4144 mono_arch_emit_exceptions (MonoCompile
*cfg
)
4146 MonoJumpInfo
*patch_info
;
4148 Ia64CodegenState code
;
4149 gboolean empty
= TRUE
;
4150 //unw_dyn_region_info_t *r_exceptions;
4151 MonoClass
*exc_classes
[16];
4152 guint8
*exc_throw_start
[16], *exc_throw_end
[16];
4153 guint32 code_size
= 0;
4155 /* Compute needed space */
4156 for (patch_info
= cfg
->patch_info
; patch_info
; patch_info
= patch_info
->next
) {
4157 if (patch_info
->type
== MONO_PATCH_INFO_EXC
)
4159 if (patch_info
->type
== MONO_PATCH_INFO_R8
)
4160 code_size
+= 8 + 7; /* sizeof (double) + alignment */
4161 if (patch_info
->type
== MONO_PATCH_INFO_R4
)
4162 code_size
+= 4 + 7; /* sizeof (float) + alignment */
4168 while (cfg
->code_len
+ code_size
> (cfg
->code_size
- 16)) {
4169 cfg
->code_size
*= 2;
4170 cfg
->native_code
= g_realloc (cfg
->native_code
, cfg
->code_size
);
4171 mono_jit_stats
.code_reallocs
++;
4174 ia64_codegen_init (code
, cfg
->native_code
+ cfg
->code_len
);
4176 /* The unwind state here is the same as before the epilog */
4177 //ia64_unw_copy_state (code, 1234);
4179 /* add code to raise exceptions */
4180 /* FIXME: Optimize this */
4182 for (patch_info
= cfg
->patch_info
; patch_info
; patch_info
= patch_info
->next
) {
4183 switch (patch_info
->type
) {
4184 case MONO_PATCH_INFO_EXC
: {
4185 MonoClass
*exc_class
;
4188 guint64 exc_token_index
;
4190 exc_class
= mono_class_from_name (mono_defaults
.corlib
, "System", patch_info
->data
.name
);
4191 g_assert (exc_class
);
4192 exc_token_index
= mono_metadata_token_index (exc_class
->type_token
);
4193 throw_ip
= cfg
->native_code
+ patch_info
->ip
.i
;
4195 ia64_begin_bundle (code
);
4197 ia64_patch (cfg
->native_code
+ patch_info
->ip
.i
, code
.buf
);
4199 /* Find a throw sequence for the same exception class */
4200 for (i
= 0; i
< nthrows
; ++i
)
4201 if (exc_classes
[i
] == exc_class
)
4205 gint64 offset
= exc_throw_end
[i
] - 16 - throw_ip
;
4207 if (ia64_is_adds_imm (offset
))
4208 ia64_adds_imm (code
, cfg
->arch
.reg_out0
+ 1, offset
, IA64_R0
);
4210 ia64_movl (code
, cfg
->arch
.reg_out0
+ 1, offset
);
4212 buf
= code
.buf
+ code
.nins
;
4213 ia64_br_cond_pred (code
, 0, 0);
4214 ia64_begin_bundle (code
);
4215 ia64_patch (buf
, exc_throw_start
[i
]);
4217 patch_info
->type
= MONO_PATCH_INFO_NONE
;
4222 ia64_movl (code
, cfg
->arch
.reg_out0
+ 1, 0);
4224 ia64_begin_bundle (code
);
4227 exc_classes
[nthrows
] = exc_class
;
4228 exc_throw_start
[nthrows
] = code
.buf
;
4232 if (ia64_is_adds_imm (exc_token_index
))
4233 ia64_adds_imm (code
, cfg
->arch
.reg_out0
+ 0, exc_token_index
, IA64_R0
);
4235 ia64_movl (code
, cfg
->arch
.reg_out0
+ 0, exc_token_index
);
4237 patch_info
->data
.name
= "mono_arch_throw_corlib_exception";
4238 patch_info
->type
= MONO_PATCH_INFO_INTERNAL_METHOD
;
4239 patch_info
->ip
.i
= code
.buf
+ code
.nins
- cfg
->native_code
;
4242 ia64_movl (code
, GP_SCRATCH_REG
, 0);
4243 ia64_ld8_inc_imm (code
, GP_SCRATCH_REG2
, GP_SCRATCH_REG
, 8);
4244 ia64_mov_to_br (code
, IA64_B6
, GP_SCRATCH_REG2
);
4245 ia64_ld8 (code
, IA64_GP
, GP_SCRATCH_REG
);
4247 ia64_br_call_reg (code
, IA64_B0
, IA64_B6
);
4249 /* Patch up the throw offset */
4250 ia64_begin_bundle (code
);
4252 ia64_patch (buf
, (gpointer
)(code
.buf
- 16 - throw_ip
));
4255 exc_throw_end
[nthrows
] = code
.buf
;
4269 /* The unwinder needs this to work */
4270 ia64_break_i (code
, 0);
4272 ia64_codegen_close (code
);
4275 //r_exceptions = mono_ia64_create_unwind_region (&code);
4276 //cfg->arch.r_epilog = r_exceptions;
4278 cfg
->code_len
= code
.buf
- cfg
->native_code
;
4280 g_assert (cfg
->code_len
< cfg
->code_size
);
4284 mono_arch_instrument_prolog (MonoCompile
*cfg
, void *func
, void *p
, gboolean enable_arguments
)
4286 Ia64CodegenState code
;
4287 CallInfo
*cinfo
= NULL
;
4288 MonoMethodSignature
*sig
;
4290 int i
, n
, stack_area
= 0;
4292 ia64_codegen_init (code
, p
);
4294 /* Keep this in sync with mono_arch_get_argument_info */
4296 if (enable_arguments
) {
4297 /* Allocate a new area on the stack and save arguments there */
4298 sig
= mono_method_signature (cfg
->method
);
4300 cinfo
= get_call_info (cfg
, cfg
->mempool
, sig
, FALSE
);
4302 n
= sig
->param_count
+ sig
->hasthis
;
4304 stack_area
= ALIGN_TO (n
* 8, 16);
4307 ia64_movl (code
, GP_SCRATCH_REG
, stack_area
);
4309 ia64_sub (code
, IA64_SP
, IA64_SP
, GP_SCRATCH_REG
);
4311 /* FIXME: Allocate out registers */
4313 ia64_mov (code
, cfg
->arch
.reg_out0
+ 1, IA64_SP
);
4315 /* Required by the ABI */
4316 ia64_adds_imm (code
, IA64_SP
, -16, IA64_SP
);
4318 add_patch_info (cfg
, code
, MONO_PATCH_INFO_METHODCONST
, cfg
->method
);
4319 ia64_movl (code
, cfg
->arch
.reg_out0
+ 0, 0);
4321 /* Save arguments to the stack */
4322 for (i
= 0; i
< n
; ++i
) {
4323 ins
= cfg
->args
[i
];
4325 if (ins
->opcode
== OP_REGVAR
) {
4326 ia64_movl (code
, GP_SCRATCH_REG
, (i
* 8));
4327 ia64_add (code
, GP_SCRATCH_REG
, cfg
->arch
.reg_out0
+ 1, GP_SCRATCH_REG
);
4328 ia64_st8 (code
, GP_SCRATCH_REG
, ins
->dreg
);
4331 ia64_movl (code
, GP_SCRATCH_REG
, ins
->inst_offset
);
4332 ia64_add (code
, GP_SCRATCH_REG
, ins
->inst_basereg
, GP_SCRATCH_REG
);
4333 ia64_ld8 (code
, GP_SCRATCH_REG2
, GP_SCRATCH_REG
);
4334 ia64_movl (code
, GP_SCRATCH_REG
, (i
* 8));
4335 ia64_add (code
, GP_SCRATCH_REG
, cfg
->arch
.reg_out0
+ 1, GP_SCRATCH_REG
);
4336 ia64_st8 (code
, GP_SCRATCH_REG
, GP_SCRATCH_REG2
);
4341 ia64_mov (code
, cfg
->arch
.reg_out0
+ 1, IA64_R0
);
4344 ia64_mov (code
, cfg
->arch
.reg_out0
+ 1, IA64_R0
);
	add_patch_info (cfg, code, MONO_PATCH_INFO_METHODCONST, cfg->method);
	ia64_movl (code, cfg->arch.reg_out0 + 0, 0);

	code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, (gpointer)func);

	if (enable_arguments && stack_area) {
		ia64_movl (code, GP_SCRATCH_REG, stack_area);

		ia64_add (code, IA64_SP, IA64_SP, GP_SCRATCH_REG);

		ia64_adds_imm (code, IA64_SP, 16, IA64_SP);
	}

	ia64_codegen_close (code);

	return code.buf;
}
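/*
 * stack_area is the argument save area rounded up to the 16-byte stack
 * alignment the ia64 ABI requires: e.g. an instance method with two
 * parameters gives n = 3, so ALIGN_TO (24, 16) reserves 32 bytes.
 */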
void*
mono_arch_instrument_epilog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
{
	Ia64CodegenState code;
	CallInfo *cinfo = NULL;
	MonoMethod *method = cfg->method;
	MonoMethodSignature *sig = mono_method_signature (cfg->method);

	ia64_codegen_init (code, p);
	cinfo = get_call_info (cfg, cfg->mempool, sig, FALSE);

	/* Save return value + pass it to func */
	switch (cinfo->ret.storage) {
	case ArgNone:
		break;
	case ArgInIReg:
		ia64_mov (code, cfg->arch.reg_saved_return_val, cinfo->ret.reg);
		ia64_mov (code, cfg->arch.reg_out0 + 1, cinfo->ret.reg);
		break;
	case ArgInFloatReg:
		ia64_adds_imm (code, IA64_SP, -16, IA64_SP);
		ia64_adds_imm (code, GP_SCRATCH_REG, 16, IA64_SP);
		ia64_stfd_hint (code, GP_SCRATCH_REG, cinfo->ret.reg, 0);
		ia64_fmov (code, 8 + 1, cinfo->ret.reg);
		break;
	case ArgValuetypeAddrInIReg:
		ia64_mov (code, cfg->arch.reg_out0 + 1, cfg->arch.reg_in0 + cinfo->ret.reg);
		break;
	default:
		break;
	}
	add_patch_info (cfg, code, MONO_PATCH_INFO_METHODCONST, method);
	ia64_movl (code, cfg->arch.reg_out0 + 0, 0);
	code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, (gpointer)func);

	/* Restore return value */
	switch (cinfo->ret.storage) {
	case ArgNone:
		break;
	case ArgInIReg:
		ia64_mov (code, cinfo->ret.reg, cfg->arch.reg_saved_return_val);
		break;
	case ArgInFloatReg:
		ia64_adds_imm (code, GP_SCRATCH_REG, 16, IA64_SP);
		ia64_ldfd (code, cinfo->ret.reg, GP_SCRATCH_REG);
		break;
	case ArgValuetypeAddrInIReg:
		break;
	default:
		break;
	}

	ia64_codegen_close (code);

	return code.buf;
}
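/*
 * An integer return value is parked in the dedicated
 * reg_saved_return_val local across the call, while a float return is
 * spilled to a 16-byte scratch area on the stack and reloaded
 * afterwards, presumably because the profiler callback is free to
 * clobber the floating point return register.
 */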
void
mono_arch_save_unwind_info (MonoCompile *cfg)
{
	unw_dyn_info_t *di;

	/* FIXME: Unregister this for dynamic methods */

	di = g_malloc0 (sizeof (unw_dyn_info_t));
	di->start_ip = (unw_word_t) cfg->native_code;
	di->end_ip = (unw_word_t) cfg->native_code + cfg->code_len;
	di->format = UNW_INFO_FORMAT_DYNAMIC;
	di->u.pi.name_ptr = (unw_word_t)mono_method_full_name (cfg->method, TRUE);
	di->u.pi.regions = cfg->arch.r_pro;

	_U_dyn_register (di);

	/* Debugging aid: dump the registered unwind regions */
	/*
	{
		unw_dyn_region_info_t *region = di->u.pi.regions;

		printf ("Unwind info for method %s:\n", mono_method_full_name (cfg->method, TRUE));
		while (region) {
			printf ("    [Region: %d]\n", region->insn_count);
			region = region->next;
		}
	}
	*/
}
void
mono_arch_flush_icache (guint8 *code, gint size)
{
	guint8* p = (guint8*)((guint64)code & ~(0x3f));
	guint8* end = (guint8*)((guint64)code + size);

#ifdef __INTEL_COMPILER
	/* icc doesn't define an fc.i intrinsic, but fc==fc.i on itanium 2 */
	while (p < end) {
		__fc ((guint64)p);
		p += 32;
	}
#else
	while (p < end) {
		__asm__ __volatile__ ("fc.i %0"::"r"(p));
		/* FIXME: This could be increased to 128 on some cpus */
		p += 32;
	}
#endif
}
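/*
 * fc.i invalidates the cache line containing its address operand, so
 * the start address is rounded down to a line boundary and the loop
 * walks the range in 32-byte steps, the smallest line size the code
 * assumes (see the FIXME above).
 */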
void
mono_arch_flush_register_windows (void)
{
	/* Not needed because of libunwind */
}

gboolean
mono_arch_is_inst_imm (gint64 imm)
{
	/* The lowering pass will take care of it */

	return TRUE;
}

/*
 * Determine whether the trap whose info is in SIGINFO is caused by
 * integer overflow.
 */
gboolean
mono_arch_is_int_overflow (void *sigctx, void *info)
{
	/* Division is emulated with explicit overflow checks */

	return FALSE;
}

guint32
mono_arch_get_patch_offset (guint8 *code)
{
	NOT_IMPLEMENTED;

	return 0;
}
gpointer
mono_arch_get_vcall_slot (guint8* code, gpointer *regs, int *displacement)
{
	guint8 *bundle2 = code - 48;
	guint8 *bundle3 = code - 32;
	guint8 *bundle4 = code - 16;
	guint64 ins21 = ia64_bundle_ins1 (bundle2);
	guint64 ins22 = ia64_bundle_ins2 (bundle2);
	guint64 ins23 = ia64_bundle_ins3 (bundle2);
	guint64 ins31 = ia64_bundle_ins1 (bundle3);
	guint64 ins32 = ia64_bundle_ins2 (bundle3);
	guint64 ins33 = ia64_bundle_ins3 (bundle3);
	guint64 ins41 = ia64_bundle_ins1 (bundle4);
	guint64 ins42 = ia64_bundle_ins2 (bundle4);
	guint64 ins43 = ia64_bundle_ins3 (bundle4);
	/*
	 * Virtual calls are made with:
	 *
	 * [MII]       ld8 r31=[r8]
	 *             nop.i 0x0
	 *             nop.i 0x0;;
	 * [MII]       nop.m 0x0
	 *             mov.sptk b6=r31,0x2000000000f32a80
	 *             nop.i 0x0
	 * [MMB]       nop.m 0x0
	 *             nop.m 0x0
	 *             br.call.sptk.few b0=b6;;
	 */

	if (((ia64_bundle_template (bundle3) == IA64_TEMPLATE_MII) ||
		 (ia64_bundle_template (bundle3) == IA64_TEMPLATE_MIIS)) &&
		(ia64_bundle_template (bundle4) == IA64_TEMPLATE_MIBS) &&
		(ins31 == IA64_NOP_M) &&
		(ia64_ins_opcode (ins32) == 0) && (ia64_ins_x3 (ins32) == 0) && (ia64_ins_x6 (ins32) == 0x1) && (ia64_ins_y (ins32) == 0) &&
		(ins33 == IA64_NOP_I) &&
		(ins41 == IA64_NOP_M) &&
		(ins42 == IA64_NOP_I) &&
		(ia64_ins_opcode (ins43) == 1) && (ia64_ins_b1 (ins43) == 0) && (ia64_ins_b2 (ins43) == 6) &&
		((ins32 >> 6) & 0xfffff) == 0x12345) {
		g_assert (ins21 == IA64_NOP_M);
		g_assert (ins23 == IA64_NOP_I);
		g_assert (ia64_ins_opcode (ins22) == 0);
		g_assert (ia64_ins_x3 (ins22) == 7);
		g_assert (ia64_ins_x (ins22) == 0);
		g_assert (ia64_ins_b1 (ins22) == IA64_B6);

		*displacement = (gssize)regs [IA64_R8] - (gssize)regs [IA64_R11];

		return regs [IA64_R11];
	}

	return NULL;
}
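/*
 * At the call site r11 holds the vtable address and r8 the address the
 * slot was loaded from, so the slot address can be recovered as the
 * returned vtable plus the r8 - r11 displacement computed above.
 */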
gpointer*
mono_arch_get_vcall_slot_addr (guint8* code, gpointer *regs)
{
	gpointer vt;
	int displacement;

	vt = mono_arch_get_vcall_slot (code, regs, &displacement);
	if (!vt)
		return NULL;
	return (gpointer*)(gpointer)((char*)vt + displacement);
}

gpointer*
mono_arch_get_delegate_method_ptr_addr (guint8* code, gpointer *regs)
{
	g_assert_not_reached ();
	return NULL;
}
void
mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
{
}

void
mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
{
}

#ifdef MONO_ARCH_HAVE_IMT

/*
 * LOCKING: called with the domain lock held
 */
gpointer
mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
	gpointer fail_tramp)
{
	int i;
	int size = 0;
	guint8 *start, *buf;
	Ia64CodegenState code;

	g_assert (!fail_tramp);
	size = count * 256;
	buf = g_malloc0 (size);
	ia64_codegen_init (code, buf);

	/* IA64_R9 contains the IMT method */

	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		ia64_begin_bundle (code);
		item->code_target = (guint8*)code.buf + code.nins;
		if (item->is_equals) {
			if (item->check_target_idx) {
				if (!item->compare_done) {
					ia64_movl (code, GP_SCRATCH_REG, item->key);
					ia64_cmp_eq (code, 6, 7, IA64_R9, GP_SCRATCH_REG);
				}
				item->jmp_code = (guint8*)code.buf + code.nins;
				ia64_br_cond_pred (code, 7, 0);

				ia64_movl (code, GP_SCRATCH_REG, &(vtable->vtable [item->value.vtable_slot]));
				ia64_ld8 (code, GP_SCRATCH_REG, GP_SCRATCH_REG);
				ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG);
				ia64_br_cond_reg (code, IA64_B6);
			} else {
				/* enable the commented code to assert on wrong method */
#if ENABLE_WRONG_METHOD_CHECK
				g_assert_not_reached ();
#endif
				ia64_movl (code, GP_SCRATCH_REG, &(vtable->vtable [item->value.vtable_slot]));
				ia64_ld8 (code, GP_SCRATCH_REG, GP_SCRATCH_REG);
				ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG);
				ia64_br_cond_reg (code, IA64_B6);
#if ENABLE_WRONG_METHOD_CHECK
				g_assert_not_reached ();
#endif
			}
		} else {
			ia64_movl (code, GP_SCRATCH_REG, item->key);
			ia64_cmp_geu (code, 6, 7, IA64_R9, GP_SCRATCH_REG);
			item->jmp_code = (guint8*)code.buf + code.nins;
			ia64_br_cond_pred (code, 6, 0);
		}
	}
	/* patch the branches to get to the target items */
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		if (item->jmp_code) {
			if (item->check_target_idx) {
				ia64_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
			}
		}
	}

	ia64_codegen_close (code);
	g_assert (code.buf - buf <= size);

	size = code.buf - buf;
	start = mono_domain_code_reserve (domain, size);
	memcpy (start, buf, size);

	mono_arch_flush_icache (start, size);

	mono_stats.imt_thunks_size += size;

	return start;
}
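/*
 * The thunk compares the IMT method in IA64_R9 against each item->key:
 * equality entries branch on p7 over the jump to the vtable slot, and
 * range-check entries use cmp.geu on p6 to skip over a whole subrange,
 * so a sorted table is not scanned purely linearly. The second loop
 * back-patches those forward branches once every code_target is known.
 */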
MonoMethod*
mono_arch_find_imt_method (gpointer *regs, guint8 *code)
{
	return regs [IA64_R9];
}

void
mono_arch_emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
{
	/* Done by the implementation of the CALL_MEMBASE opcodes */
}
#endif

gpointer
mono_arch_get_this_arg_from_call (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, gssize *regs, guint8 *code)
{
	return (gpointer)regs [IA64_R10];
}

gpointer
mono_arch_find_this_argument (gpointer *regs, MonoMethod *method, MonoGenericSharingContext *gsctx)
{
	return mono_arch_get_this_arg_from_call (gsctx, mono_method_signature (method), (gssize*)regs, NULL);
}

gpointer
mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
{
	return NULL;
}

MonoInst*
mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
	MonoInst *ins = NULL;
	if (cmethod->klass->image == mono_defaults.corlib &&
		(strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
		(strcmp (cmethod->klass->name, "Interlocked") == 0)) {

		/*
		 * We don't use the generic version in mini_emit_inst_for_method () since
		 * ia64 has atomic_add_imm opcodes.
		 */
		if (strcmp (cmethod->name, "Increment") == 0) {
			guint32 opcode;

			if (fsig->params [0]->type == MONO_TYPE_I4)
				opcode = OP_ATOMIC_ADD_IMM_NEW_I4;
			else if (fsig->params [0]->type == MONO_TYPE_I8)
				opcode = OP_ATOMIC_ADD_IMM_NEW_I8;
			else
				g_assert_not_reached ();
			MONO_INST_NEW (cfg, ins, opcode);
			ins->dreg = mono_alloc_preg (cfg);
			ins->inst_imm = 1;
			ins->inst_basereg = args [0]->dreg;
			ins->inst_offset = 0;
			MONO_ADD_INS (cfg->cbb, ins);
		} else if (strcmp (cmethod->name, "Decrement") == 0) {
			guint32 opcode;

			if (fsig->params [0]->type == MONO_TYPE_I4)
				opcode = OP_ATOMIC_ADD_IMM_NEW_I4;
			else if (fsig->params [0]->type == MONO_TYPE_I8)
				opcode = OP_ATOMIC_ADD_IMM_NEW_I8;
			else
				g_assert_not_reached ();
			MONO_INST_NEW (cfg, ins, opcode);
			ins->dreg = mono_alloc_preg (cfg);
			ins->inst_imm = -1;
			ins->inst_basereg = args [0]->dreg;
			ins->inst_offset = 0;
			MONO_ADD_INS (cfg->cbb, ins);
		} else if (strcmp (cmethod->name, "Add") == 0) {
			guint32 opcode;
			gboolean is_imm = FALSE;
			gint64 imm = 0;

			if ((args [1]->opcode == OP_ICONST) || (args [1]->opcode == OP_I8CONST)) {
				imm = (args [1]->opcode == OP_ICONST) ? args [1]->inst_c0 : args [1]->inst_l;

				is_imm = (imm == 1 || imm == 4 || imm == 8 || imm == 16 || imm == -1 || imm == -4 || imm == -8 || imm == -16);
			}
			if (is_imm) {
				if (fsig->params [0]->type == MONO_TYPE_I4)
					opcode = OP_ATOMIC_ADD_IMM_NEW_I4;
				else if (fsig->params [0]->type == MONO_TYPE_I8)
					opcode = OP_ATOMIC_ADD_IMM_NEW_I8;
				else
					g_assert_not_reached ();

				MONO_INST_NEW (cfg, ins, opcode);
				ins->dreg = mono_alloc_ireg (cfg);
				ins->inst_basereg = args [0]->dreg;
				ins->inst_offset = 0;
				ins->inst_imm = imm;
				ins->type = (opcode == OP_ATOMIC_ADD_IMM_NEW_I4) ? STACK_I4 : STACK_I8;
			} else {
				if (fsig->params [0]->type == MONO_TYPE_I4)
					opcode = OP_ATOMIC_ADD_NEW_I4;
				else if (fsig->params [0]->type == MONO_TYPE_I8)
					opcode = OP_ATOMIC_ADD_NEW_I8;
				else
					g_assert_not_reached ();

				MONO_INST_NEW (cfg, ins, opcode);
				ins->dreg = mono_alloc_ireg (cfg);
				ins->inst_basereg = args [0]->dreg;
				ins->inst_offset = 0;
				ins->sreg2 = args [1]->dreg;
				ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
			}
			MONO_ADD_INS (cfg->cbb, ins);
		}
	}

	return ins;
}
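/*
 * The +-1, +-4, +-8 and +-16 addends special-cased above are exactly
 * the immediates the ia64 fetchadd instruction accepts, which is what
 * makes the OP_ATOMIC_ADD_IMM_NEW variants worthwhile; any other addend
 * goes through the general OP_ATOMIC_ADD_NEW opcodes with the value in
 * sreg2.
 */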
gboolean
mono_arch_print_tree (MonoInst *tree, int arity)
{
	return FALSE;
}

MonoInst*
mono_arch_get_domain_intrinsic (MonoCompile* cfg)
{
	return mono_get_domain_intrinsic (cfg);
}

MonoInst*
mono_arch_get_thread_intrinsic (MonoCompile* cfg)
{
	return mono_get_thread_intrinsic (cfg);
}

gpointer
mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
{
	/* FIXME: implement */
	g_assert_not_reached ();
	return NULL;
}