2 * mini-arm.c: ARM backend for the Mono code generator
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2003 Ximian, Inc.
13 #include <mono/metadata/appdomain.h>
14 #include <mono/metadata/debug-helpers.h>
21 #include "mono/arch/arm/arm-fpa-codegen.h"
22 #elif defined(ARM_FPU_VFP)
23 #include "mono/arch/arm/arm-vfp-codegen.h"
26 #if defined(__ARM_EABI__) && defined(__linux__) && !defined(PLATFORM_ANDROID)
27 #define HAVE_AEABI_READ_TP 1
30 static gint lmf_tls_offset
= -1;
31 static gint lmf_addr_tls_offset
= -1;
33 /* This mutex protects architecture specific caches */
34 #define mono_mini_arch_lock() EnterCriticalSection (&mini_arch_mutex)
35 #define mono_mini_arch_unlock() LeaveCriticalSection (&mini_arch_mutex)
36 static CRITICAL_SECTION mini_arch_mutex
;
38 static int v5_supported
= 0;
39 static int thumb_supported
= 0;
43 * floating point support: on ARM it is a mess, there are at least 3
44 * different setups, each of which is binary-incompatible with the others.
45 * 1) FPA: old and ugly, but unfortunately what current distros use
46 * the double binary format has the two words swapped. 8 double registers.
47 * Implemented usually by kernel emulation.
48 * 2) softfloat: the compiler emulates all the fp ops. Usually uses the
49 * ugly swapped double format (I guess a softfloat-vfp exists, too, though).
50 * 3) VFP: the new and actually sensible and useful FP support. Implemented
51 * in HW or kernel-emulated, requires new tools. I think this is what symbian uses.
53 * The plan is to write the FPA support first. softfloat can be tested in a chroot.
55 int mono_exc_esp_offset
= 0;
57 #define arm_is_imm12(v) ((v) > -4096 && (v) < 4096)
58 #define arm_is_imm8(v) ((v) > -256 && (v) < 256)
59 #define arm_is_fpimm8(v) ((v) >= -1020 && (v) <= 1020)
61 #define LDR_MASK ((0xf << ARMCOND_SHIFT) | (3 << 26) | (1 << 22) | (1 << 20) | (15 << 12))
62 #define LDR_PC_VAL ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 26) | (0 << 22) | (1 << 20) | (15 << 12))
63 #define IS_LDR_PC(val) (((val) & LDR_MASK) == LDR_PC_VAL)
65 #define ADD_LR_PC_4 ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 25) | (1 << 23) | (ARMREG_PC << 16) | (ARMREG_LR << 12) | 4)
66 #define MOV_LR_PC ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 24) | (0xa << 20) | (ARMREG_LR << 12) | ARMREG_PC)
/*
 * mono_arch_regname:
 * @reg: hardware register number
 *
 * Returns the human-readable name of ARM integer register @reg
 * ("arm_r0".."arm_pc"), or "unknown" when @reg is outside 0..15.
 * The returned string is static and must not be freed.
 */
const char*
mono_arch_regname (int reg)
{
	static const char * rnames[] = {
		"arm_r0", "arm_r1", "arm_r2", "arm_r3", "arm_v1",
		"arm_v2", "arm_v3", "arm_v4", "arm_v5", "arm_v6",
		"arm_v7", "arm_fp", "arm_ip", "arm_sp", "arm_lr",
		"arm_pc"
	};
	if (reg >= 0 && reg < 16)
		return rnames [reg];
	return "unknown";
}
/*
 * mono_arch_fregname:
 * @reg: floating point register number
 *
 * Returns the human-readable name of ARM FP register @reg
 * ("arm_f0".."arm_f31"), or "unknown" when @reg is outside 0..31.
 * The returned string is static and must not be freed.
 */
const char*
mono_arch_fregname (int reg)
{
	static const char * rnames[] = {
		"arm_f0", "arm_f1", "arm_f2", "arm_f3", "arm_f4",
		"arm_f5", "arm_f6", "arm_f7", "arm_f8", "arm_f9",
		"arm_f10", "arm_f11", "arm_f12", "arm_f13", "arm_f14",
		"arm_f15", "arm_f16", "arm_f17", "arm_f18", "arm_f19",
		"arm_f20", "arm_f21", "arm_f22", "arm_f23", "arm_f24",
		"arm_f25", "arm_f26", "arm_f27", "arm_f28", "arm_f29",
		"arm_f30", "arm_f31"
	};
	if (reg >= 0 && reg < 32)
		return rnames [reg];
	return "unknown";
}
101 emit_big_add (guint8
*code
, int dreg
, int sreg
, int imm
)
103 int imm8
, rot_amount
;
104 if ((imm8
= mono_arm_is_rotated_imm8 (imm
, &rot_amount
)) >= 0) {
105 ARM_ADD_REG_IMM (code
, dreg
, sreg
, imm8
, rot_amount
);
108 g_assert (dreg
!= sreg
);
109 code
= mono_arm_emit_load_imm (code
, dreg
, imm
);
110 ARM_ADD_REG_REG (code
, dreg
, dreg
, sreg
);
115 emit_memcpy (guint8
*code
, int size
, int dreg
, int doffset
, int sreg
, int soffset
)
117 /* we can use r0-r3, since this is called only for incoming args on the stack */
118 if (size
> sizeof (gpointer
) * 4) {
120 code
= emit_big_add (code
, ARMREG_R0
, sreg
, soffset
);
121 code
= emit_big_add (code
, ARMREG_R1
, dreg
, doffset
);
122 start_loop
= code
= mono_arm_emit_load_imm (code
, ARMREG_R2
, size
);
123 ARM_LDR_IMM (code
, ARMREG_R3
, ARMREG_R0
, 0);
124 ARM_STR_IMM (code
, ARMREG_R3
, ARMREG_R1
, 0);
125 ARM_ADD_REG_IMM8 (code
, ARMREG_R0
, ARMREG_R0
, 4);
126 ARM_ADD_REG_IMM8 (code
, ARMREG_R1
, ARMREG_R1
, 4);
127 ARM_SUBS_REG_IMM8 (code
, ARMREG_R2
, ARMREG_R2
, 4);
128 ARM_B_COND (code
, ARMCOND_NE
, 0);
129 arm_patch (code
- 4, start_loop
);
132 if (arm_is_imm12 (doffset
) && arm_is_imm12 (doffset
+ size
) &&
133 arm_is_imm12 (soffset
) && arm_is_imm12 (soffset
+ size
)) {
135 ARM_LDR_IMM (code
, ARMREG_LR
, sreg
, soffset
);
136 ARM_STR_IMM (code
, ARMREG_LR
, dreg
, doffset
);
142 code
= emit_big_add (code
, ARMREG_R0
, sreg
, soffset
);
143 code
= emit_big_add (code
, ARMREG_R1
, dreg
, doffset
);
144 doffset
= soffset
= 0;
146 ARM_LDR_IMM (code
, ARMREG_LR
, ARMREG_R0
, soffset
);
147 ARM_STR_IMM (code
, ARMREG_LR
, ARMREG_R1
, doffset
);
153 g_assert (size
== 0);
158 emit_call_reg (guint8
*code
, int reg
)
161 ARM_BLX_REG (code
, reg
);
163 ARM_MOV_REG_REG (code
, ARMREG_LR
, ARMREG_PC
);
167 ARM_MOV_REG_REG (code
, ARMREG_PC
, reg
);
173 emit_call_seq (MonoCompile
*cfg
, guint8
*code
)
175 if (cfg
->method
->dynamic
) {
176 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_PC
, 0);
178 *(gpointer
*)code
= NULL
;
180 code
= emit_call_reg (code
, ARMREG_IP
);
188 emit_move_return_value (MonoCompile
*cfg
, MonoInst
*ins
, guint8
*code
)
190 switch (ins
->opcode
) {
193 case OP_FCALL_MEMBASE
:
195 if (ins
->dreg
!= ARM_FPA_F0
)
196 ARM_MVFD (code
, ins
->dreg
, ARM_FPA_F0
);
205 * mono_arch_get_argument_info:
206 * @csig: a method signature
207 * @param_count: the number of parameters to consider
208 * @arg_info: an array to store the result infos
210 * Gathers information on parameters such as size, alignment and
211 * padding. arg_info should be large enough to hold param_count + 1 entries.
213 * Returns the size of the activation frame.
216 mono_arch_get_argument_info (MonoMethodSignature
*csig
, int param_count
, MonoJitArgumentInfo
*arg_info
)
218 int k
, frame_size
= 0;
219 guint32 size
, align
, pad
;
222 if (MONO_TYPE_ISSTRUCT (csig
->ret
)) {
223 frame_size
+= sizeof (gpointer
);
227 arg_info
[0].offset
= offset
;
230 frame_size
+= sizeof (gpointer
);
234 arg_info
[0].size
= frame_size
;
236 for (k
= 0; k
< param_count
; k
++) {
237 size
= mini_type_stack_size_full (NULL
, csig
->params
[k
], &align
, csig
->pinvoke
);
239 /* ignore alignment for now */
242 frame_size
+= pad
= (align
- (frame_size
& (align
- 1))) & (align
- 1);
243 arg_info
[k
].pad
= pad
;
245 arg_info
[k
+ 1].pad
= 0;
246 arg_info
[k
+ 1].size
= size
;
248 arg_info
[k
+ 1].offset
= offset
;
252 align
= MONO_ARCH_FRAME_ALIGNMENT
;
253 frame_size
+= pad
= (align
- (frame_size
& (align
- 1))) & (align
- 1);
254 arg_info
[k
].pad
= pad
;
260 decode_vcall_slot_from_ldr (guint32 ldr
, gpointer
*regs
, int *displacement
)
264 reg
= (ldr
>> 16 ) & 0xf;
265 offset
= ldr
& 0xfff;
266 if (((ldr
>> 23) & 1) == 0) /*U bit, 0 means negative and 1 positive*/
268 /*g_print ("found vcall at r%d + %d for code at %p 0x%x\n", reg, offset, code, *code);*/
271 *displacement
= offset
;
276 mono_arch_get_vcall_slot (guint8
*code_ptr
, gpointer
*regs
, int *displacement
)
278 guint32
* code
= (guint32
*)code_ptr
;
280 /* Locate the address of the method-specific trampoline. The call using
281 the vtable slot that took the processing flow to 'arch_create_jit_trampoline'
282 looks something like this:
291 The call sequence could be also:
294 function pointer literal
298 Note that on ARM5+ we can use one instruction instead of the last two.
299 Therefore, we need to locate the 'ldr rA' instruction to know which
300 register was used to hold the method addrs.
303 /* This is the instruction after "ldc pc, xxx", "mov pc, xxx" or "bl xxx" could be either the IMT value or some other instruction*/
306 /* Three possible code sequences can happen here:
310 * ldr pc, [rX - #offset]
316 * ldr pc, [rX - #offset]
318 * direct branch with bl:
322 * direct branch with mov:
326 * We only need to identify interface and virtual calls, the others can be ignored.
329 if (IS_LDR_PC (code
[-1]) && code
[-2] == ADD_LR_PC_4
)
330 return decode_vcall_slot_from_ldr (code
[-1], regs
, displacement
);
332 if (IS_LDR_PC (code
[0]) && code
[-1] == MOV_LR_PC
)
333 return decode_vcall_slot_from_ldr (code
[0], regs
, displacement
);
339 mono_arch_get_vcall_slot_addr (guint8
* code
, gpointer
*regs
)
343 vt
= mono_arch_get_vcall_slot (code
, regs
, &displacement
);
346 return (gpointer
*)((char*)vt
+ displacement
);
349 #define MAX_ARCH_DELEGATE_PARAMS 3
352 mono_arch_get_delegate_invoke_impl (MonoMethodSignature
*sig
, gboolean has_target
)
354 guint8
*code
, *start
;
356 /* FIXME: Support more cases */
357 if (MONO_TYPE_ISSTRUCT (sig
->ret
))
361 static guint8
* cached
= NULL
;
362 mono_mini_arch_lock ();
364 mono_mini_arch_unlock ();
368 start
= code
= mono_global_codeman_reserve (12);
370 /* Replace the this argument with the target */
371 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_R0
, G_STRUCT_OFFSET (MonoDelegate
, method_ptr
));
372 ARM_LDR_IMM (code
, ARMREG_R0
, ARMREG_R0
, G_STRUCT_OFFSET (MonoDelegate
, target
));
373 ARM_MOV_REG_REG (code
, ARMREG_PC
, ARMREG_IP
);
375 g_assert ((code
- start
) <= 12);
377 mono_arch_flush_icache (start
, 12);
379 mono_mini_arch_unlock ();
382 static guint8
* cache
[MAX_ARCH_DELEGATE_PARAMS
+ 1] = {NULL
};
385 if (sig
->param_count
> MAX_ARCH_DELEGATE_PARAMS
)
387 for (i
= 0; i
< sig
->param_count
; ++i
)
388 if (!mono_is_regsize_var (sig
->params
[i
]))
391 mono_mini_arch_lock ();
392 code
= cache
[sig
->param_count
];
394 mono_mini_arch_unlock ();
398 size
= 8 + sig
->param_count
* 4;
399 start
= code
= mono_global_codeman_reserve (size
);
401 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_R0
, G_STRUCT_OFFSET (MonoDelegate
, method_ptr
));
402 /* slide down the arguments */
403 for (i
= 0; i
< sig
->param_count
; ++i
) {
404 ARM_MOV_REG_REG (code
, (ARMREG_R0
+ i
), (ARMREG_R0
+ i
+ 1));
406 ARM_MOV_REG_REG (code
, ARMREG_PC
, ARMREG_IP
);
408 g_assert ((code
- start
) <= size
);
410 mono_arch_flush_icache (start
, size
);
411 cache
[sig
->param_count
] = start
;
412 mono_mini_arch_unlock ();
420 mono_arch_get_this_arg_from_call (MonoGenericSharingContext
*gsctx
, MonoMethodSignature
*sig
, gssize
*regs
, guint8
*code
)
422 /* FIXME: handle returning a struct */
423 if (MONO_TYPE_ISSTRUCT (sig
->ret
))
424 return (gpointer
)regs
[ARMREG_R1
];
425 return (gpointer
)regs
[ARMREG_R0
];
429 * Initialize the cpu to execute managed code.
/* Initialize the cpu to execute managed code: nothing to do on ARM. */
void
mono_arch_cpu_init (void)
{
}
437 * Initialize architecture specific code.
440 mono_arch_init (void)
442 InitializeCriticalSection (&mini_arch_mutex
);
446 * Cleanup architecture specific code.
/* Cleanup architecture specific code: nothing to release on ARM. */
void
mono_arch_cleanup (void)
{
}
454 * This function returns the optimizations supported on this cpu.
457 mono_arch_cpu_optimizazions (guint32
*exclude_mask
)
461 thumb_supported
= TRUE
;
466 FILE *file
= fopen ("/proc/cpuinfo", "r");
468 while ((line
= fgets (buf
, 512, file
))) {
469 if (strncmp (line
, "Processor", 9) == 0) {
470 char *ver
= strstr (line
, "(v");
471 if (ver
&& (ver
[2] == '5' || ver
[2] == '6' || ver
[2] == '7')) {
476 if (strncmp (line
, "Features", 8) == 0) {
477 char *th
= strstr (line
, "thumb");
479 thumb_supported
= TRUE
;
487 /*printf ("features: v5: %d, thumb: %d\n", v5_supported, thumb_supported);*/
491 /* no arm-specific optimizations yet */
497 is_regsize_var (MonoType
*t
) {
500 t
= mini_type_get_underlying_type (NULL
, t
);
507 case MONO_TYPE_FNPTR
:
509 case MONO_TYPE_OBJECT
:
510 case MONO_TYPE_STRING
:
511 case MONO_TYPE_CLASS
:
512 case MONO_TYPE_SZARRAY
:
513 case MONO_TYPE_ARRAY
:
515 case MONO_TYPE_GENERICINST
:
516 if (!mono_type_generic_inst_is_valuetype (t
))
519 case MONO_TYPE_VALUETYPE
:
526 mono_arch_get_allocatable_int_vars (MonoCompile
*cfg
)
531 for (i
= 0; i
< cfg
->num_varinfo
; i
++) {
532 MonoInst
*ins
= cfg
->varinfo
[i
];
533 MonoMethodVar
*vmv
= MONO_VARINFO (cfg
, i
);
536 if (vmv
->range
.first_use
.abs_pos
>= vmv
->range
.last_use
.abs_pos
)
539 if (ins
->flags
& (MONO_INST_VOLATILE
|MONO_INST_INDIRECT
) || (ins
->opcode
!= OP_LOCAL
&& ins
->opcode
!= OP_ARG
))
542 /* we can only allocate 32 bit values */
543 if (is_regsize_var (ins
->inst_vtype
)) {
544 g_assert (MONO_VARINFO (cfg
, i
)->reg
== -1);
545 g_assert (i
== vmv
->idx
);
546 vars
= mono_varlist_insert_sorted (cfg
, vars
, vmv
, FALSE
);
553 #define USE_EXTRA_TEMPS 0
556 mono_arch_get_global_int_regs (MonoCompile
*cfg
)
559 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (ARMREG_V1
));
560 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (ARMREG_V2
));
561 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (ARMREG_V3
));
562 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (ARMREG_V4
));
563 if (!(cfg
->compile_aot
|| cfg
->uses_rgctx_reg
))
564 /* V5 is reserved for passing the vtable/rgctx/IMT method */
565 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (ARMREG_V5
));
566 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V6));*/
567 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));*/
573 * mono_arch_regalloc_cost:
575 * Return the cost, in number of memory references, of the action of
576 * allocating the variable VMV into a register during global register
580 mono_arch_regalloc_cost (MonoCompile
*cfg
, MonoMethodVar
*vmv
)
586 #ifndef __GNUC_PREREQ
587 #define __GNUC_PREREQ(maj, min) (0)
591 mono_arch_flush_icache (guint8
*code
, gint size
)
594 sys_icache_invalidate (code
, size
);
595 #elif __GNUC_PREREQ(4, 1)
596 __clear_cache (code
, code
+ size
);
597 #elif defined(PLATFORM_ANDROID)
598 const int syscall
= 0xf0002;
606 : "r" (code
), "r" (code
+ size
), "r" (syscall
)
610 __asm
__volatile ("mov r0, %0\n"
613 "swi 0x9f0002 @ sys_cacheflush"
615 : "r" (code
), "r" (code
+ size
), "r" (0)
616 : "r0", "r1", "r3" );
631 guint16 vtsize
; /* in param area */
633 guint8 regtype
: 4; /* 0 general, 1 basereg, 2 floating point register, see RegType* */
634 guint8 size
: 4; /* 1, 2, 4, 8, or regs used by RegTypeStructByVal */
649 add_general (guint
*gr
, guint
*stack_size
, ArgInfo
*ainfo
, gboolean simple
)
652 if (*gr
> ARMREG_R3
) {
653 ainfo
->offset
= *stack_size
;
654 ainfo
->reg
= ARMREG_SP
; /* in the caller */
655 ainfo
->regtype
= RegTypeBase
;
666 /* first word in r3 and the second on the stack */
667 ainfo
->offset
= *stack_size
;
668 ainfo
->reg
= ARMREG_SP
; /* in the caller */
669 ainfo
->regtype
= RegTypeBaseGen
;
671 } else if (*gr
>= ARMREG_R3
) {
676 ainfo
->offset
= *stack_size
;
677 ainfo
->reg
= ARMREG_SP
; /* in the caller */
678 ainfo
->regtype
= RegTypeBase
;
693 calculate_sizes (MonoMethodSignature
*sig
, gboolean is_pinvoke
)
696 int n
= sig
->hasthis
+ sig
->param_count
;
697 MonoType
*simpletype
;
698 guint32 stack_size
= 0;
699 CallInfo
*cinfo
= g_malloc0 (sizeof (CallInfo
) + sizeof (ArgInfo
) * n
);
703 /* FIXME: handle returning a struct */
704 if (MONO_TYPE_ISSTRUCT (sig
->ret
)) {
705 add_general (&gr
, &stack_size
, &cinfo
->ret
, TRUE
);
706 cinfo
->struct_ret
= ARMREG_R0
;
711 add_general (&gr
, &stack_size
, cinfo
->args
+ n
, TRUE
);
714 DEBUG(printf("params: %d\n", sig
->param_count
));
715 for (i
= 0; i
< sig
->param_count
; ++i
) {
716 if ((sig
->call_convention
== MONO_CALL_VARARG
) && (i
== sig
->sentinelpos
)) {
717 /* Prevent implicit arguments and sig_cookie from
718 being passed in registers */
720 /* Emit the signature cookie just before the implicit arguments */
721 add_general (&gr
, &stack_size
, &cinfo
->sig_cookie
, TRUE
);
723 DEBUG(printf("param %d: ", i
));
724 if (sig
->params
[i
]->byref
) {
725 DEBUG(printf("byref\n"));
726 add_general (&gr
, &stack_size
, cinfo
->args
+ n
, TRUE
);
730 simpletype
= mini_type_get_underlying_type (NULL
, sig
->params
[i
]);
731 switch (simpletype
->type
) {
732 case MONO_TYPE_BOOLEAN
:
735 cinfo
->args
[n
].size
= 1;
736 add_general (&gr
, &stack_size
, cinfo
->args
+ n
, TRUE
);
742 cinfo
->args
[n
].size
= 2;
743 add_general (&gr
, &stack_size
, cinfo
->args
+ n
, TRUE
);
748 cinfo
->args
[n
].size
= 4;
749 add_general (&gr
, &stack_size
, cinfo
->args
+ n
, TRUE
);
755 case MONO_TYPE_FNPTR
:
756 case MONO_TYPE_CLASS
:
757 case MONO_TYPE_OBJECT
:
758 case MONO_TYPE_STRING
:
759 case MONO_TYPE_SZARRAY
:
760 case MONO_TYPE_ARRAY
:
762 cinfo
->args
[n
].size
= sizeof (gpointer
);
763 add_general (&gr
, &stack_size
, cinfo
->args
+ n
, TRUE
);
766 case MONO_TYPE_GENERICINST
:
767 if (!mono_type_generic_inst_is_valuetype (sig
->params
[i
])) {
768 cinfo
->args
[n
].size
= sizeof (gpointer
);
769 add_general (&gr
, &stack_size
, cinfo
->args
+ n
, TRUE
);
774 case MONO_TYPE_TYPEDBYREF
:
775 case MONO_TYPE_VALUETYPE
: {
780 if (simpletype
->type
== MONO_TYPE_TYPEDBYREF
) {
781 size
= sizeof (MonoTypedRef
);
783 MonoClass
*klass
= mono_class_from_mono_type (sig
->params
[i
]);
785 size
= mono_class_native_size (klass
, NULL
);
787 size
= mono_class_value_size (klass
, NULL
);
789 DEBUG(printf ("load %d bytes struct\n",
790 mono_class_native_size (sig
->params
[i
]->data
.klass
, NULL
)));
793 align_size
+= (sizeof (gpointer
) - 1);
794 align_size
&= ~(sizeof (gpointer
) - 1);
795 nwords
= (align_size
+ sizeof (gpointer
) -1 ) / sizeof (gpointer
);
796 cinfo
->args
[n
].regtype
= RegTypeStructByVal
;
797 /* FIXME: align gr and stack_size if needed */
798 if (gr
> ARMREG_R3
) {
799 cinfo
->args
[n
].size
= 0;
800 cinfo
->args
[n
].vtsize
= nwords
;
802 int rest
= ARMREG_R3
- gr
+ 1;
803 int n_in_regs
= rest
>= nwords
? nwords
: rest
;
804 cinfo
->args
[n
].size
= n_in_regs
;
805 cinfo
->args
[n
].vtsize
= nwords
- n_in_regs
;
806 cinfo
->args
[n
].reg
= gr
;
809 cinfo
->args
[n
].offset
= stack_size
;
810 /*g_print ("offset for arg %d at %d\n", n, stack_size);*/
811 stack_size
+= nwords
* sizeof (gpointer
);
818 cinfo
->args
[n
].size
= 8;
819 add_general (&gr
, &stack_size
, cinfo
->args
+ n
, FALSE
);
823 g_error ("Can't trampoline 0x%x", sig
->params
[i
]->type
);
828 simpletype
= mini_type_get_underlying_type (NULL
, sig
->ret
);
829 switch (simpletype
->type
) {
830 case MONO_TYPE_BOOLEAN
:
841 case MONO_TYPE_FNPTR
:
842 case MONO_TYPE_CLASS
:
843 case MONO_TYPE_OBJECT
:
844 case MONO_TYPE_SZARRAY
:
845 case MONO_TYPE_ARRAY
:
846 case MONO_TYPE_STRING
:
847 cinfo
->ret
.reg
= ARMREG_R0
;
851 cinfo
->ret
.reg
= ARMREG_R0
;
855 cinfo
->ret
.reg
= ARMREG_R0
;
856 /* FIXME: cinfo->ret.reg = ???;
857 cinfo->ret.regtype = RegTypeFP;*/
859 case MONO_TYPE_GENERICINST
:
860 if (!mono_type_generic_inst_is_valuetype (sig
->ret
)) {
861 cinfo
->ret
.reg
= ARMREG_R0
;
865 case MONO_TYPE_VALUETYPE
:
867 case MONO_TYPE_TYPEDBYREF
:
871 g_error ("Can't handle as return value 0x%x", sig
->ret
->type
);
875 /* align stack size to 8 */
876 DEBUG (printf (" stack size: %d (%d)\n", (stack_size
+ 15) & ~15, stack_size
));
877 stack_size
= (stack_size
+ 7) & ~7;
879 cinfo
->stack_usage
= stack_size
;
885 * Set var information according to the calling convention. arm version.
886 * The locals var stuff should most likely be split in another method.
889 mono_arch_allocate_vars (MonoCompile
*cfg
)
891 MonoMethodSignature
*sig
;
892 MonoMethodHeader
*header
;
894 int i
, offset
, size
, align
, curinst
;
895 int frame_reg
= ARMREG_FP
;
897 /* FIXME: this will change when we use FP as gcc does */
898 cfg
->flags
|= MONO_CFG_HAS_SPILLUP
;
900 /* allow room for the vararg method args: void* and long/double */
901 if (mono_jit_trace_calls
!= NULL
&& mono_trace_eval (cfg
->method
))
902 cfg
->param_area
= MAX (cfg
->param_area
, sizeof (gpointer
)*8);
904 header
= mono_method_get_header (cfg
->method
);
907 * We use the frame register also for any method that has
908 * exception clauses. This way, when the handlers are called,
909 * the code will reference local variables using the frame reg instead of
910 * the stack pointer: if we had to restore the stack pointer, we'd
911 * corrupt the method frames that are already on the stack (since
912 * filters get called before stack unwinding happens) when the filter
913 * code would call any method (this also applies to finally etc.).
915 if ((cfg
->flags
& MONO_CFG_HAS_ALLOCA
) || header
->num_clauses
)
916 frame_reg
= ARMREG_FP
;
917 cfg
->frame_reg
= frame_reg
;
918 if (frame_reg
!= ARMREG_SP
) {
919 cfg
->used_int_regs
|= 1 << frame_reg
;
922 if (!cfg
->compile_aot
|| cfg
->uses_rgctx_reg
)
923 /* V5 is reserved for passing the vtable/rgctx/IMT method */
924 cfg
->used_int_regs
|= (1 << ARMREG_V5
);
926 sig
= mono_method_signature (cfg
->method
);
930 if (!MONO_TYPE_ISSTRUCT (sig
->ret
)) {
931 /* FIXME: handle long and FP values */
932 switch (mini_type_get_underlying_type (NULL
, sig
->ret
)->type
) {
936 cfg
->ret
->opcode
= OP_REGVAR
;
937 cfg
->ret
->inst_c0
= ARMREG_R0
;
941 /* local vars are at a positive offset from the stack pointer */
943 * also note that if the function uses alloca, we use FP
944 * to point at the local variables.
946 offset
= 0; /* linkage area */
947 /* align the offset to 16 bytes: not sure this is needed here */
949 //offset &= ~(8 - 1);
951 /* add parameter area size for called functions */
952 offset
+= cfg
->param_area
;
955 if (cfg
->flags
& MONO_CFG_HAS_FPOUT
)
958 /* allow room to save the return value */
959 if (mono_jit_trace_calls
!= NULL
&& mono_trace_eval (cfg
->method
))
962 /* the MonoLMF structure is stored just below the stack pointer */
964 if (sig
->call_convention
== MONO_CALL_VARARG
) {
968 if (MONO_TYPE_ISSTRUCT (sig
->ret
)) {
969 inst
= cfg
->vret_addr
;
970 offset
+= sizeof(gpointer
) - 1;
971 offset
&= ~(sizeof(gpointer
) - 1);
972 inst
->inst_offset
= offset
;
973 inst
->opcode
= OP_REGOFFSET
;
974 inst
->inst_basereg
= frame_reg
;
975 if (G_UNLIKELY (cfg
->verbose_level
> 1)) {
976 printf ("vret_addr =");
977 mono_print_ins (cfg
->vret_addr
);
979 offset
+= sizeof(gpointer
);
980 if (sig
->call_convention
== MONO_CALL_VARARG
)
981 cfg
->sig_cookie
+= sizeof (gpointer
);
984 curinst
= cfg
->locals_start
;
985 for (i
= curinst
; i
< cfg
->num_varinfo
; ++i
) {
986 inst
= cfg
->varinfo
[i
];
987 if ((inst
->flags
& MONO_INST_IS_DEAD
) || inst
->opcode
== OP_REGVAR
)
990 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
991 * pinvoke wrappers when they call functions returning structure */
992 if (inst
->backend
.is_pinvoke
&& MONO_TYPE_ISSTRUCT (inst
->inst_vtype
) && inst
->inst_vtype
->type
!= MONO_TYPE_TYPEDBYREF
) {
994 size
= mono_class_native_size (mono_class_from_mono_type (inst
->inst_vtype
), &ualign
);
998 size
= mono_type_size (inst
->inst_vtype
, &align
);
1000 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
1001 * since it loads/stores misaligned words, which don't do the right thing.
1003 if (align
< 4 && size
>= 4)
1005 offset
+= align
- 1;
1006 offset
&= ~(align
- 1);
1007 inst
->inst_offset
= offset
;
1008 inst
->opcode
= OP_REGOFFSET
;
1009 inst
->inst_basereg
= frame_reg
;
1011 //g_print ("allocating local %d to %d\n", i, inst->inst_offset);
1016 inst
= cfg
->args
[curinst
];
1017 if (inst
->opcode
!= OP_REGVAR
) {
1018 inst
->opcode
= OP_REGOFFSET
;
1019 inst
->inst_basereg
= frame_reg
;
1020 offset
+= sizeof (gpointer
) - 1;
1021 offset
&= ~(sizeof (gpointer
) - 1);
1022 inst
->inst_offset
= offset
;
1023 offset
+= sizeof (gpointer
);
1024 if (sig
->call_convention
== MONO_CALL_VARARG
)
1025 cfg
->sig_cookie
+= sizeof (gpointer
);
1030 for (i
= 0; i
< sig
->param_count
; ++i
) {
1031 inst
= cfg
->args
[curinst
];
1032 if (inst
->opcode
!= OP_REGVAR
) {
1033 inst
->opcode
= OP_REGOFFSET
;
1034 inst
->inst_basereg
= frame_reg
;
1035 size
= mono_type_size (sig
->params
[i
], &align
);
1036 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
1037 * since it loads/stores misaligned words, which don't do the right thing.
1039 if (align
< 4 && size
>= 4)
1041 offset
+= align
- 1;
1042 offset
&= ~(align
- 1);
1043 inst
->inst_offset
= offset
;
1045 if ((sig
->call_convention
== MONO_CALL_VARARG
) && (i
< sig
->sentinelpos
))
1046 cfg
->sig_cookie
+= size
;
1051 /* align the offset to 8 bytes */
1056 cfg
->stack_offset
= offset
;
1060 mono_arch_create_vars (MonoCompile
*cfg
)
1062 MonoMethodSignature
*sig
;
1064 sig
= mono_method_signature (cfg
->method
);
1066 if (MONO_TYPE_ISSTRUCT (sig
->ret
)) {
1067 cfg
->vret_addr
= mono_compile_create_var (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_ARG
);
1068 if (G_UNLIKELY (cfg
->verbose_level
> 1)) {
1069 printf ("vret_addr = ");
1070 mono_print_ins (cfg
->vret_addr
);
1076 mono_arch_emit_call (MonoCompile
*cfg
, MonoCallInst
*call
)
1079 MonoMethodSignature
*sig
;
1083 sig
= call
->signature
;
1084 n
= sig
->param_count
+ sig
->hasthis
;
1086 cinfo
= calculate_sizes (sig
, sig
->pinvoke
);
1088 for (i
= 0; i
< n
; ++i
) {
1089 ArgInfo
*ainfo
= cinfo
->args
+ i
;
1092 if (i
>= sig
->hasthis
)
1093 t
= sig
->params
[i
- sig
->hasthis
];
1095 t
= &mono_defaults
.int_class
->byval_arg
;
1096 t
= mini_type_get_underlying_type (NULL
, t
);
1098 if ((sig
->call_convention
== MONO_CALL_VARARG
) && (i
== sig
->sentinelpos
)) {
1103 in
= call
->args
[i
];
1105 switch (ainfo
->regtype
) {
1106 case RegTypeGeneral
:
1107 if (!t
->byref
&& ((t
->type
== MONO_TYPE_I8
) || (t
->type
== MONO_TYPE_U8
))) {
1108 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
1109 ins
->dreg
= mono_alloc_ireg (cfg
);
1110 ins
->sreg1
= in
->dreg
+ 1;
1111 MONO_ADD_INS (cfg
->cbb
, ins
);
1112 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
, FALSE
);
1114 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
1115 ins
->dreg
= mono_alloc_ireg (cfg
);
1116 ins
->sreg1
= in
->dreg
+ 2;
1117 MONO_ADD_INS (cfg
->cbb
, ins
);
1118 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
+ 1, FALSE
);
1119 } else if (!t
->byref
&& ((t
->type
== MONO_TYPE_R8
) || (t
->type
== MONO_TYPE_R4
))) {
1120 #ifndef MONO_ARCH_SOFT_FLOAT
1124 if (ainfo
->size
== 4) {
1125 #ifdef MONO_ARCH_SOFT_FLOAT
1126 /* mono_emit_call_args () have already done the r8->r4 conversion */
1127 /* The converted value is in an int vreg */
1128 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
1129 ins
->dreg
= mono_alloc_ireg (cfg
);
1130 ins
->sreg1
= in
->dreg
;
1131 MONO_ADD_INS (cfg
->cbb
, ins
);
1132 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
, FALSE
);
1134 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER4_MEMBASE_REG
, ARMREG_SP
, (cfg
->param_area
- 8), in
->dreg
);
1135 creg
= mono_alloc_ireg (cfg
);
1136 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOAD_MEMBASE
, creg
, ARMREG_SP
, (cfg
->param_area
- 8));
1137 mono_call_inst_add_outarg_reg (cfg
, call
, creg
, ainfo
->reg
, FALSE
);
1140 #ifdef MONO_ARCH_SOFT_FLOAT
1141 MONO_INST_NEW (cfg
, ins
, OP_FGETLOW32
);
1142 ins
->dreg
= mono_alloc_ireg (cfg
);
1143 ins
->sreg1
= in
->dreg
;
1144 MONO_ADD_INS (cfg
->cbb
, ins
);
1145 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
, FALSE
);
1147 MONO_INST_NEW (cfg
, ins
, OP_FGETHIGH32
);
1148 ins
->dreg
= mono_alloc_ireg (cfg
);
1149 ins
->sreg1
= in
->dreg
;
1150 MONO_ADD_INS (cfg
->cbb
, ins
);
1151 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
+ 1, FALSE
);
1153 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER8_MEMBASE_REG
, ARMREG_SP
, (cfg
->param_area
- 8), in
->dreg
);
1154 creg
= mono_alloc_ireg (cfg
);
1155 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOAD_MEMBASE
, creg
, ARMREG_SP
, (cfg
->param_area
- 8));
1156 mono_call_inst_add_outarg_reg (cfg
, call
, creg
, ainfo
->reg
, FALSE
);
1157 creg
= mono_alloc_ireg (cfg
);
1158 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOAD_MEMBASE
, creg
, ARMREG_SP
, (cfg
->param_area
- 8 + 4));
1159 mono_call_inst_add_outarg_reg (cfg
, call
, creg
, ainfo
->reg
+ 1, FALSE
);
1162 cfg
->flags
|= MONO_CFG_HAS_FPOUT
;
1164 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
1165 ins
->dreg
= mono_alloc_ireg (cfg
);
1166 ins
->sreg1
= in
->dreg
;
1167 MONO_ADD_INS (cfg
->cbb
, ins
);
1169 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
, FALSE
);
1172 case RegTypeStructByAddr
:
1175 /* FIXME: where is the data allocated? */
1176 arg
->backend
.reg3
= ainfo
->reg
;
1177 call
->used_iregs
|= 1 << ainfo
->reg
;
1178 g_assert_not_reached ();
1181 case RegTypeStructByVal
:
1182 MONO_INST_NEW (cfg
, ins
, OP_OUTARG_VT
);
1183 ins
->opcode
= OP_OUTARG_VT
;
1184 ins
->sreg1
= in
->dreg
;
1185 ins
->klass
= in
->klass
;
1186 ins
->inst_p0
= call
;
1187 ins
->inst_p1
= mono_mempool_alloc (cfg
->mempool
, sizeof (ArgInfo
));
1188 memcpy (ins
->inst_p1
, ainfo
, sizeof (ArgInfo
));
1189 MONO_ADD_INS (cfg
->cbb
, ins
);
1192 if (!t
->byref
&& ((t
->type
== MONO_TYPE_I8
) || (t
->type
== MONO_TYPE_U8
))) {
1193 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI8_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, in
->dreg
);
1194 } else if (!t
->byref
&& ((t
->type
== MONO_TYPE_R4
) || (t
->type
== MONO_TYPE_R8
))) {
1195 if (t
->type
== MONO_TYPE_R8
) {
1196 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER8_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, in
->dreg
);
1198 #ifdef MONO_ARCH_SOFT_FLOAT
1199 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI4_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, in
->dreg
);
1201 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER4_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, in
->dreg
);
1205 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, in
->dreg
);
1208 case RegTypeBaseGen
:
1209 if (!t
->byref
&& ((t
->type
== MONO_TYPE_I8
) || (t
->type
== MONO_TYPE_U8
))) {
1210 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, (G_BYTE_ORDER
== G_BIG_ENDIAN
) ? in
->dreg
+ 1 : in
->dreg
+ 2);
1211 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
1212 ins
->dreg
= mono_alloc_ireg (cfg
);
1213 ins
->sreg1
= G_BYTE_ORDER
== G_BIG_ENDIAN
? in
->dreg
+ 2 : in
->dreg
+ 1;
1214 MONO_ADD_INS (cfg
->cbb
, ins
);
1215 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ARMREG_R3
, FALSE
);
1216 } else if (!t
->byref
&& (t
->type
== MONO_TYPE_R8
)) {
1219 #ifdef MONO_ARCH_SOFT_FLOAT
1220 g_assert_not_reached ();
1223 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER8_MEMBASE_REG
, ARMREG_SP
, (cfg
->param_area
- 8), in
->dreg
);
1224 creg
= mono_alloc_ireg (cfg
);
1225 mono_call_inst_add_outarg_reg (cfg
, call
, creg
, ARMREG_R3
, FALSE
);
1226 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOAD_MEMBASE
, creg
, ARMREG_SP
, (cfg
->param_area
- 8));
1227 creg
= mono_alloc_ireg (cfg
);
1228 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOAD_MEMBASE
, creg
, ARMREG_SP
, (cfg
->param_area
- 4));
1229 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, creg
);
1230 cfg
->flags
|= MONO_CFG_HAS_FPOUT
;
1232 g_assert_not_reached ();
1239 arg
->backend
.reg3
= ainfo
->reg
;
1240 /* FP args are passed in int regs */
1241 call
->used_iregs
|= 1 << ainfo
->reg
;
1242 if (ainfo
->size
== 8) {
1243 arg
->opcode
= OP_OUTARG_R8
;
1244 call
->used_iregs
|= 1 << (ainfo
->reg
+ 1);
1246 arg
->opcode
= OP_OUTARG_R4
;
1249 cfg
->flags
|= MONO_CFG_HAS_FPOUT
;
1253 g_assert_not_reached ();
1257 if (sig
->ret
&& MONO_TYPE_ISSTRUCT (sig
->ret
)) {
1260 MONO_INST_NEW (cfg
, vtarg
, OP_MOVE
);
1261 vtarg
->sreg1
= call
->vret_var
->dreg
;
1262 vtarg
->dreg
= mono_alloc_preg (cfg
);
1263 MONO_ADD_INS (cfg
->cbb
, vtarg
);
1265 mono_call_inst_add_outarg_reg (cfg
, call
, vtarg
->dreg
, cinfo
->ret
.reg
, FALSE
);
1268 call
->stack_usage
= cinfo
->stack_usage
;
1274 mono_arch_emit_outarg_vt (MonoCompile
*cfg
, MonoInst
*ins
, MonoInst
*src
)
1276 MonoCallInst
*call
= (MonoCallInst
*)ins
->inst_p0
;
1277 ArgInfo
*ainfo
= ins
->inst_p1
;
1278 int ovf_size
= ainfo
->vtsize
;
1279 int doffset
= ainfo
->offset
;
1280 int i
, soffset
, dreg
;
1283 for (i
= 0; i
< ainfo
->size
; ++i
) {
1284 dreg
= mono_alloc_ireg (cfg
);
1285 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, dreg
, src
->dreg
, soffset
);
1286 mono_call_inst_add_outarg_reg (cfg
, call
, dreg
, ainfo
->reg
+ i
, FALSE
);
1287 soffset
+= sizeof (gpointer
);
1289 //g_print ("vt size: %d at R%d + %d\n", doffset, vt->inst_basereg, vt->inst_offset);
1291 mini_emit_memcpy (cfg
, ARMREG_SP
, doffset
, src
->dreg
, soffset
, ovf_size
* sizeof (gpointer
), 0);
/*
 * mono_arch_emit_setret:
 *   Emit IR that moves 'val' into the ABI-mandated return location for
 *   'method'. 64-bit integers use OP_SETLRET with the two halves as
 *   sreg1/sreg2; floating point handling differs between soft-float and
 *   hard-float builds (see the MONO_ARCH_SOFT_FLOAT conditional below).
 *   NOTE(review): source text is extraction-mangled; the #else/#endif
 *   lines of the conditional are elided from this view.
 */
1295 mono_arch_emit_setret (MonoCompile
*cfg
, MonoMethod
*method
, MonoInst
*val
)
/* Resolve the signature's return type through generic sharing so enums
 * etc. are reduced to their underlying type before dispatching. */
1297 MonoType
*ret
= mini_type_get_underlying_type (cfg
->generic_sharing_context
, mono_method_signature (method
)->ret
);
/* 64-bit integer return: both word halves of the long vreg pair
 * (val->dreg + 1 and val->dreg + 2) feed OP_SETLRET. */
1300 if (ret
->type
== MONO_TYPE_I8
|| ret
->type
== MONO_TYPE_U8
) {
1303 MONO_INST_NEW (cfg
, ins
, OP_SETLRET
);
1304 ins
->sreg1
= val
->dreg
+ 1;
1305 ins
->sreg2
= val
->dreg
+ 2;
1306 MONO_ADD_INS (cfg
->cbb
, ins
);
/* Soft-float build: doubles travel in integer registers. */
1309 #ifdef MONO_ARCH_SOFT_FLOAT
1310 if (ret
->type
== MONO_TYPE_R8
) {
/* OP_SETFRET moves the soft-float double into the return regs. */
1313 MONO_INST_NEW (cfg
, ins
, OP_SETFRET
);
1314 ins
->dreg
= cfg
->ret
->dreg
;
1315 ins
->sreg1
= val
->dreg
;
1316 MONO_ADD_INS (cfg
->cbb
, ins
);
/* Soft-float R4: the value is already an integer here, so a plain
 * OP_MOVE into the return vreg suffices. */
1319 if (ret
->type
== MONO_TYPE_R4
) {
1320 /* Already converted to an int in method_to_ir () */
1321 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, cfg
->ret
->dreg
, val
->dreg
);
/* Hard-float path (the #else of the conditional above is elided from
 * this view): both R4 and R8 use a float-register move. */
1325 if (ret
->type
== MONO_TYPE_R4
|| ret
->type
== MONO_TYPE_R8
) {
1326 MONO_EMIT_NEW_UNALU (cfg
, OP_FMOVE
, cfg
->ret
->dreg
, val
->dreg
);
/* Default: plain word-sized return value. */
1333 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, cfg
->ret
->dreg
, val
->dreg
);
1337 mono_arch_is_inst_imm (gint64 imm
)
1343 * Allow tracing to work with this interface (with an optional argument)
/*
 * mono_arch_instrument_prolog:
 *   Emit a call to the tracing function 'func' at method entry, using the
 *   standard ARM argument registers: R0 = compiled MonoMethod, R1 = 0
 *   (placeholder frame pointer), target address loaded into R2.
 *   NOTE(review): source text is extraction-mangled; 'code' is set up and
 *   returned on lines elided from this view.
 */
1347 mono_arch_instrument_prolog (MonoCompile
*cfg
, void *func
, void *p
, gboolean enable_arguments
)
/* first argument: the method being traced */
1351 code
= mono_arm_emit_load_imm (code
, ARMREG_R0
, (guint32
)cfg
->method
);
1352 ARM_MOV_REG_IMM8 (code
, ARMREG_R1
, 0); /* NULL ebp for now */
/* load the tracer's address and call through a register */
1353 code
= mono_arm_emit_load_imm (code
, ARMREG_R2
, (guint32
)func
);
1354 code
= emit_call_reg (code
, ARMREG_R2
);
1367 mono_arch_instrument_epilog (MonoCompile
*cfg
, void *func
, void *p
, gboolean enable_arguments
)
1370 int save_mode
= SAVE_NONE
;
1372 MonoMethod
*method
= cfg
->method
;
1373 int rtype
= mini_type_get_underlying_type (cfg
->generic_sharing_context
, mono_method_signature (method
)->ret
)->type
;
1374 int save_offset
= cfg
->param_area
;
1378 offset
= code
- cfg
->native_code
;
1379 /* we need about 16 instructions */
1380 if (offset
> (cfg
->code_size
- 16 * 4)) {
1381 cfg
->code_size
*= 2;
1382 cfg
->native_code
= g_realloc (cfg
->native_code
, cfg
->code_size
);
1383 code
= cfg
->native_code
+ offset
;
1386 case MONO_TYPE_VOID
:
1387 /* special case string .ctor icall */
1388 if (strcmp (".ctor", method
->name
) && method
->klass
== mono_defaults
.string_class
)
1389 save_mode
= SAVE_ONE
;
1391 save_mode
= SAVE_NONE
;
1395 save_mode
= SAVE_TWO
;
1399 save_mode
= SAVE_FP
;
1401 case MONO_TYPE_VALUETYPE
:
1402 save_mode
= SAVE_STRUCT
;
1405 save_mode
= SAVE_ONE
;
1409 switch (save_mode
) {
1411 ARM_STR_IMM (code
, ARMREG_R0
, cfg
->frame_reg
, save_offset
);
1412 ARM_STR_IMM (code
, ARMREG_R1
, cfg
->frame_reg
, save_offset
+ 4);
1413 if (enable_arguments
) {
1414 ARM_MOV_REG_REG (code
, ARMREG_R2
, ARMREG_R1
);
1415 ARM_MOV_REG_REG (code
, ARMREG_R1
, ARMREG_R0
);
1419 ARM_STR_IMM (code
, ARMREG_R0
, cfg
->frame_reg
, save_offset
);
1420 if (enable_arguments
) {
1421 ARM_MOV_REG_REG (code
, ARMREG_R1
, ARMREG_R0
);
1425 /* FIXME: what reg? */
1426 if (enable_arguments
) {
1427 /* FIXME: what reg? */
1431 if (enable_arguments
) {
1432 /* FIXME: get the actual address */
1433 ARM_MOV_REG_REG (code
, ARMREG_R1
, ARMREG_R0
);
1441 code
= mono_arm_emit_load_imm (code
, ARMREG_R0
, (guint32
)cfg
->method
);
1442 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, (guint32
)func
);
1443 code
= emit_call_reg (code
, ARMREG_IP
);
1445 switch (save_mode
) {
1447 ARM_LDR_IMM (code
, ARMREG_R0
, cfg
->frame_reg
, save_offset
);
1448 ARM_LDR_IMM (code
, ARMREG_R1
, cfg
->frame_reg
, save_offset
+ 4);
1451 ARM_LDR_IMM (code
, ARMREG_R0
, cfg
->frame_reg
, save_offset
);
1465 * The immediate field for cond branches is big enough for all reasonable methods
1467 #define EMIT_COND_BRANCH_FLAGS(ins,condcode) \
1468 if (ins->flags & MONO_INST_BRLABEL) { \
1469 if (0 && ins->inst_i0->inst_c0) { \
1470 ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_i0->inst_c0) & 0xffffff); \
1472 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_LABEL, ins->inst_i0); \
1473 ARM_B_COND (code, (condcode), 0); \
1476 if (0 && ins->inst_true_bb->native_offset) { \
1477 ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffffff); \
1479 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
1480 ARM_B_COND (code, (condcode), 0); \
1484 #define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_cc_table [(cond)])
1486 /* emit an exception if condition is fail
1488 * We assign the extra code used to throw the implicit exceptions
1489 * to cfg->bb_exit as far as the big branch handling is concerned
1491 #define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(condcode,exc_name) \
1493 mono_add_patch_info (cfg, code - cfg->native_code, \
1494 MONO_PATCH_INFO_EXC, exc_name); \
1495 ARM_BL_COND (code, (condcode), 0); \
1498 #define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_cc_table [(cond)], (exc_name))
1501 mono_arch_peephole_pass_1 (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
1506 mono_arch_peephole_pass_2 (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
1508 MonoInst
*ins
, *n
, *last_ins
= NULL
;
1510 MONO_BB_FOR_EACH_INS_SAFE (bb
, n
, ins
) {
1511 switch (ins
->opcode
) {
1514 /* Already done by an arch-independent pass */
1516 case OP_LOAD_MEMBASE
:
1517 case OP_LOADI4_MEMBASE
:
1519 * OP_STORE_MEMBASE_REG reg, offset(basereg)
1520 * OP_LOAD_MEMBASE offset(basereg), reg
1522 if (last_ins
&& (last_ins
->opcode
== OP_STOREI4_MEMBASE_REG
1523 || last_ins
->opcode
== OP_STORE_MEMBASE_REG
) &&
1524 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
1525 ins
->inst_offset
== last_ins
->inst_offset
) {
1526 if (ins
->dreg
== last_ins
->sreg1
) {
1527 MONO_DELETE_INS (bb
, ins
);
1530 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1531 ins
->opcode
= OP_MOVE
;
1532 ins
->sreg1
= last_ins
->sreg1
;
1536 * Note: reg1 must be different from the basereg in the second load
1537 * OP_LOAD_MEMBASE offset(basereg), reg1
1538 * OP_LOAD_MEMBASE offset(basereg), reg2
1540 * OP_LOAD_MEMBASE offset(basereg), reg1
1541 * OP_MOVE reg1, reg2
1543 } if (last_ins
&& (last_ins
->opcode
== OP_LOADI4_MEMBASE
1544 || last_ins
->opcode
== OP_LOAD_MEMBASE
) &&
1545 ins
->inst_basereg
!= last_ins
->dreg
&&
1546 ins
->inst_basereg
== last_ins
->inst_basereg
&&
1547 ins
->inst_offset
== last_ins
->inst_offset
) {
1549 if (ins
->dreg
== last_ins
->dreg
) {
1550 MONO_DELETE_INS (bb
, ins
);
1553 ins
->opcode
= OP_MOVE
;
1554 ins
->sreg1
= last_ins
->dreg
;
1557 //g_assert_not_reached ();
1561 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
1562 * OP_LOAD_MEMBASE offset(basereg), reg
1564 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
1565 * OP_ICONST reg, imm
1567 } else if (last_ins
&& (last_ins
->opcode
== OP_STOREI4_MEMBASE_IMM
1568 || last_ins
->opcode
== OP_STORE_MEMBASE_IMM
) &&
1569 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
1570 ins
->inst_offset
== last_ins
->inst_offset
) {
1571 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1572 ins
->opcode
= OP_ICONST
;
1573 ins
->inst_c0
= last_ins
->inst_imm
;
1574 g_assert_not_reached (); // check this rule
1578 case OP_LOADU1_MEMBASE
:
1579 case OP_LOADI1_MEMBASE
:
1580 if (last_ins
&& (last_ins
->opcode
== OP_STOREI1_MEMBASE_REG
) &&
1581 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
1582 ins
->inst_offset
== last_ins
->inst_offset
) {
1583 ins
->opcode
= (ins
->opcode
== OP_LOADI1_MEMBASE
) ? OP_ICONV_TO_I1
: OP_ICONV_TO_U1
;
1584 ins
->sreg1
= last_ins
->sreg1
;
1587 case OP_LOADU2_MEMBASE
:
1588 case OP_LOADI2_MEMBASE
:
1589 if (last_ins
&& (last_ins
->opcode
== OP_STOREI2_MEMBASE_REG
) &&
1590 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
1591 ins
->inst_offset
== last_ins
->inst_offset
) {
1592 ins
->opcode
= (ins
->opcode
== OP_LOADI2_MEMBASE
) ? OP_ICONV_TO_I2
: OP_ICONV_TO_U2
;
1593 ins
->sreg1
= last_ins
->sreg1
;
1597 ins
->opcode
= OP_MOVE
;
1601 if (ins
->dreg
== ins
->sreg1
) {
1602 MONO_DELETE_INS (bb
, ins
);
1606 * OP_MOVE sreg, dreg
1607 * OP_MOVE dreg, sreg
1609 if (last_ins
&& last_ins
->opcode
== OP_MOVE
&&
1610 ins
->sreg1
== last_ins
->dreg
&&
1611 ins
->dreg
== last_ins
->sreg1
) {
1612 MONO_DELETE_INS (bb
, ins
);
1620 bb
->last_ins
= last_ins
;
1624 * the branch_cc_table should maintain the order of these
1638 branch_cc_table
[] = {
1652 #define NEW_INS(cfg,dest,op) do { \
1653 MONO_INST_NEW ((cfg), (dest), (op)); \
1654 mono_bblock_insert_before_ins (bb, ins, (dest)); \
/*
 * map_to_reg_reg_op:
 *   Map a memory-base opcode with an immediate offset to its
 *   register-indexed (*_MEMINDEX) counterpart, and *_MEMBASE_IMM store
 *   opcodes to their *_MEMBASE_REG forms, for use by the lowering pass
 *   when the immediate does not fit the instruction encoding.
 *   Falls through to g_assert_not_reached () for unmapped opcodes.
 *   NOTE(review): source text is extraction-mangled; the switch header,
 *   several leading cases, and the returns for the two compare cases are
 *   on lines elided from this view — confirm against the full source.
 */
1658 map_to_reg_reg_op (int op
)
/* compare-with-immediate cases (their return statements are elided) */
1667 case OP_COMPARE_IMM
:
1669 case OP_ICOMPARE_IMM
:
/* loads: membase -> memindex */
1683 case OP_LOAD_MEMBASE
:
1684 return OP_LOAD_MEMINDEX
;
1685 case OP_LOADI4_MEMBASE
:
1686 return OP_LOADI4_MEMINDEX
;
1687 case OP_LOADU4_MEMBASE
:
1688 return OP_LOADU4_MEMINDEX
;
1689 case OP_LOADU1_MEMBASE
:
1690 return OP_LOADU1_MEMINDEX
;
1691 case OP_LOADI2_MEMBASE
:
1692 return OP_LOADI2_MEMINDEX
;
1693 case OP_LOADU2_MEMBASE
:
1694 return OP_LOADU2_MEMINDEX
;
1695 case OP_LOADI1_MEMBASE
:
1696 return OP_LOADI1_MEMINDEX
;
/* register stores: membase -> memindex (includes the FP variants) */
1697 case OP_STOREI1_MEMBASE_REG
:
1698 return OP_STOREI1_MEMINDEX
;
1699 case OP_STOREI2_MEMBASE_REG
:
1700 return OP_STOREI2_MEMINDEX
;
1701 case OP_STOREI4_MEMBASE_REG
:
1702 return OP_STOREI4_MEMINDEX
;
1703 case OP_STORE_MEMBASE_REG
:
1704 return OP_STORE_MEMINDEX
;
1705 case OP_STORER4_MEMBASE_REG
:
1706 return OP_STORER4_MEMINDEX
;
1707 case OP_STORER8_MEMBASE_REG
:
1708 return OP_STORER8_MEMINDEX
;
/* immediate stores: the immediate gets materialized in a register first,
 * so these map to the corresponding _REG form (not _MEMINDEX) */
1709 case OP_STORE_MEMBASE_IMM
:
1710 return OP_STORE_MEMBASE_REG
;
1711 case OP_STOREI1_MEMBASE_IMM
:
1712 return OP_STOREI1_MEMBASE_REG
;
1713 case OP_STOREI2_MEMBASE_IMM
:
1714 return OP_STOREI2_MEMBASE_REG
;
1715 case OP_STOREI4_MEMBASE_IMM
:
1716 return OP_STOREI4_MEMBASE_REG
;
/* any opcode not listed above is a lowering-pass bug */
1718 g_assert_not_reached ();
1722 * Remove from the instruction list the instructions that can't be
1723 * represented with very simple instructions with no register
1727 mono_arch_lowering_pass (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
1729 MonoInst
*ins
, *temp
, *last_ins
= NULL
;
1730 int rot_amount
, imm8
, low_imm
;
1732 MONO_BB_FOR_EACH_INS (bb
, ins
) {
1734 switch (ins
->opcode
) {
1738 case OP_COMPARE_IMM
:
1739 case OP_ICOMPARE_IMM
:
1753 if ((imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
)) < 0) {
1754 NEW_INS (cfg
, temp
, OP_ICONST
);
1755 temp
->inst_c0
= ins
->inst_imm
;
1756 temp
->dreg
= mono_alloc_ireg (cfg
);
1757 ins
->sreg2
= temp
->dreg
;
1758 ins
->opcode
= mono_op_imm_to_op (ins
->opcode
);
1760 if (ins
->opcode
== OP_SBB
|| ins
->opcode
== OP_ISBB
|| ins
->opcode
== OP_SUBCC
)
1766 if (ins
->inst_imm
== 1) {
1767 ins
->opcode
= OP_MOVE
;
1770 if (ins
->inst_imm
== 0) {
1771 ins
->opcode
= OP_ICONST
;
1775 imm8
= mono_is_power_of_two (ins
->inst_imm
);
1777 ins
->opcode
= OP_SHL_IMM
;
1778 ins
->inst_imm
= imm8
;
1781 NEW_INS (cfg
, temp
, OP_ICONST
);
1782 temp
->inst_c0
= ins
->inst_imm
;
1783 temp
->dreg
= mono_alloc_ireg (cfg
);
1784 ins
->sreg2
= temp
->dreg
;
1785 ins
->opcode
= OP_IMUL
;
1791 if (ins
->next
&& (ins
->next
->opcode
== OP_COND_EXC_C
|| ins
->next
->opcode
== OP_COND_EXC_IC
))
1792 /* ARM sets the C flag to 1 if there was _no_ overflow */
1793 ins
->next
->opcode
= OP_COND_EXC_NC
;
1795 case OP_LOCALLOC_IMM
:
1796 NEW_INS (cfg
, temp
, OP_ICONST
);
1797 temp
->inst_c0
= ins
->inst_imm
;
1798 temp
->dreg
= mono_alloc_ireg (cfg
);
1799 ins
->sreg1
= temp
->dreg
;
1800 ins
->opcode
= OP_LOCALLOC
;
1802 case OP_LOAD_MEMBASE
:
1803 case OP_LOADI4_MEMBASE
:
1804 case OP_LOADU4_MEMBASE
:
1805 case OP_LOADU1_MEMBASE
:
1806 /* we can do two things: load the immed in a register
1807 * and use an indexed load, or see if the immed can be
1808 * represented as an ad_imm + a load with a smaller offset
1809 * that fits. We just do the first for now, optimize later.
1811 if (arm_is_imm12 (ins
->inst_offset
))
1813 NEW_INS (cfg
, temp
, OP_ICONST
);
1814 temp
->inst_c0
= ins
->inst_offset
;
1815 temp
->dreg
= mono_alloc_ireg (cfg
);
1816 ins
->sreg2
= temp
->dreg
;
1817 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
1819 case OP_LOADI2_MEMBASE
:
1820 case OP_LOADU2_MEMBASE
:
1821 case OP_LOADI1_MEMBASE
:
1822 if (arm_is_imm8 (ins
->inst_offset
))
1824 NEW_INS (cfg
, temp
, OP_ICONST
);
1825 temp
->inst_c0
= ins
->inst_offset
;
1826 temp
->dreg
= mono_alloc_ireg (cfg
);
1827 ins
->sreg2
= temp
->dreg
;
1828 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
1830 case OP_LOADR4_MEMBASE
:
1831 case OP_LOADR8_MEMBASE
:
1832 if (arm_is_fpimm8 (ins
->inst_offset
))
1834 low_imm
= ins
->inst_offset
& 0x1ff;
1835 if ((imm8
= mono_arm_is_rotated_imm8 (ins
->inst_offset
& ~0x1ff, &rot_amount
)) >= 0) {
1836 NEW_INS (cfg
, temp
, OP_ADD_IMM
);
1837 temp
->inst_imm
= ins
->inst_offset
& ~0x1ff;
1838 temp
->sreg1
= ins
->inst_basereg
;
1839 temp
->dreg
= mono_alloc_ireg (cfg
);
1840 ins
->inst_basereg
= temp
->dreg
;
1841 ins
->inst_offset
= low_imm
;
1844 /* VFP/FPA doesn't have indexed load instructions */
1845 g_assert_not_reached ();
1847 case OP_STORE_MEMBASE_REG
:
1848 case OP_STOREI4_MEMBASE_REG
:
1849 case OP_STOREI1_MEMBASE_REG
:
1850 if (arm_is_imm12 (ins
->inst_offset
))
1852 NEW_INS (cfg
, temp
, OP_ICONST
);
1853 temp
->inst_c0
= ins
->inst_offset
;
1854 temp
->dreg
= mono_alloc_ireg (cfg
);
1855 ins
->sreg2
= temp
->dreg
;
1856 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
1858 case OP_STOREI2_MEMBASE_REG
:
1859 if (arm_is_imm8 (ins
->inst_offset
))
1861 NEW_INS (cfg
, temp
, OP_ICONST
);
1862 temp
->inst_c0
= ins
->inst_offset
;
1863 temp
->dreg
= mono_alloc_ireg (cfg
);
1864 ins
->sreg2
= temp
->dreg
;
1865 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
1867 case OP_STORER4_MEMBASE_REG
:
1868 case OP_STORER8_MEMBASE_REG
:
1869 if (arm_is_fpimm8 (ins
->inst_offset
))
1871 low_imm
= ins
->inst_offset
& 0x1ff;
1872 if ((imm8
= mono_arm_is_rotated_imm8 (ins
->inst_offset
& ~ 0x1ff, &rot_amount
)) >= 0 && arm_is_fpimm8 (low_imm
)) {
1873 NEW_INS (cfg
, temp
, OP_ADD_IMM
);
1874 temp
->inst_imm
= ins
->inst_offset
& ~0x1ff;
1875 temp
->sreg1
= ins
->inst_destbasereg
;
1876 temp
->dreg
= mono_alloc_ireg (cfg
);
1877 ins
->inst_destbasereg
= temp
->dreg
;
1878 ins
->inst_offset
= low_imm
;
1881 /*g_print ("fail with: %d (%d, %d)\n", ins->inst_offset, ins->inst_offset & ~0x1ff, low_imm);*/
1882 /* VFP/FPA doesn't have indexed store instructions */
1883 g_assert_not_reached ();
1885 case OP_STORE_MEMBASE_IMM
:
1886 case OP_STOREI1_MEMBASE_IMM
:
1887 case OP_STOREI2_MEMBASE_IMM
:
1888 case OP_STOREI4_MEMBASE_IMM
:
1889 NEW_INS (cfg
, temp
, OP_ICONST
);
1890 temp
->inst_c0
= ins
->inst_imm
;
1891 temp
->dreg
= mono_alloc_ireg (cfg
);
1892 ins
->sreg1
= temp
->dreg
;
1893 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
1895 goto loop_start
; /* make it handle the possibly big ins->inst_offset */
1897 gboolean swap
= FALSE
;
1900 /* Some fp compares require swapped operands */
1901 g_assert (ins
->next
);
1902 switch (ins
->next
->opcode
) {
1904 ins
->next
->opcode
= OP_FBLT
;
1908 ins
->next
->opcode
= OP_FBLT_UN
;
1912 ins
->next
->opcode
= OP_FBGE
;
1916 ins
->next
->opcode
= OP_FBGE_UN
;
1924 ins
->sreg1
= ins
->sreg2
;
1933 bb
->last_ins
= last_ins
;
1934 bb
->max_vreg
= cfg
->next_vreg
;
/*
 * emit_float_to_int:
 *   Emit machine code converting the float in 'sreg' to an integer of
 *   'size' bytes (1, 2 or 4) in 'dreg', honoring 'is_signed'.
 *   The conversion itself is FPU-specific (FPA vs VFP, selected by
 *   preprocessor conditionals); afterwards the result is narrowed to the
 *   requested width with shift/mask sequences on the integer register.
 *   NOTE(review): source text is extraction-mangled; the opening #ifdef,
 *   several if/else headers and the function's return are on lines
 *   elided from this view.
 */
1938 emit_float_to_int (MonoCompile
*cfg
, guchar
*code
, int dreg
, int sreg
, int size
, gboolean is_signed
)
1940 /* sreg is a float, dreg is an integer reg */
/* FPA path: single fix-to-integer-with-truncation instruction */
1942 ARM_FIXZ (code
, dreg
, sreg
);
1943 #elif defined(ARM_FPU_VFP)
/* VFP path: convert into scratch float reg F0 (signed or unsigned
 * truncating convert), then move it to the integer register. */
1945 ARM_TOSIZD (code
, ARM_VFP_F0
, sreg
);
1947 ARM_TOUIZD (code
, ARM_VFP_F0
, sreg
);
1948 ARM_FMRS (code
, dreg
, ARM_VFP_F0
);
/* Unsigned narrowing: mask to 8 bits, or zero-extend 16 bits with
 * a logical shift pair. */
1952 ARM_AND_REG_IMM8 (code
, dreg
, dreg
, 0xff);
1953 else if (size
== 2) {
1954 ARM_SHL_IMM (code
, dreg
, dreg
, 16);
1955 ARM_SHR_IMM (code
, dreg
, dreg
, 16);
/* Signed narrowing: sign-extend via shift-left then arithmetic
 * shift-right (24 bits for bytes, 16 bits for shorts). */
1959 ARM_SHL_IMM (code
, dreg
, dreg
, 24);
1960 ARM_SAR_IMM (code
, dreg
, dreg
, 24);
1961 } else if (size
== 2) {
1962 ARM_SHL_IMM (code
, dreg
, dreg
, 16);
1963 ARM_SAR_IMM (code
, dreg
, dreg
, 16);
1971 const guchar
*target
;
1976 #define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431)
1979 search_thunk_slot (void *data
, int csize
, int bsize
, void *user_data
) {
1980 PatchData
*pdata
= (PatchData
*)user_data
;
1981 guchar
*code
= data
;
1982 guint32
*thunks
= data
;
1983 guint32
*endthunks
= (guint32
*)(code
+ bsize
);
1985 int difflow
, diffhigh
;
1987 /* always ensure a call from pdata->code can reach to the thunks without further thunks */
1988 difflow
= (char*)pdata
->code
- (char*)thunks
;
1989 diffhigh
= (char*)pdata
->code
- (char*)endthunks
;
1990 if (!((is_call_imm (thunks
) && is_call_imm (endthunks
)) || (is_call_imm (difflow
) && is_call_imm (diffhigh
))))
1994 * The thunk is composed of 3 words:
1995 * load constant from thunks [2] into ARM_IP
1998 * Note that the LR register is already setup
2000 //g_print ("thunk nentries: %d\n", ((char*)endthunks - (char*)thunks)/16);
2001 if ((pdata
->found
== 2) || (pdata
->code
>= code
&& pdata
->code
<= code
+ csize
)) {
2002 while (thunks
< endthunks
) {
2003 //g_print ("looking for target: %p at %p (%08x-%08x)\n", pdata->target, thunks, thunks [0], thunks [1]);
2004 if (thunks
[2] == (guint32
)pdata
->target
) {
2005 arm_patch (pdata
->code
, (guchar
*)thunks
);
2006 mono_arch_flush_icache (pdata
->code
, 4);
2009 } else if ((thunks
[0] == 0) && (thunks
[1] == 0) && (thunks
[2] == 0)) {
2010 /* found a free slot instead: emit thunk */
2011 /* ARMREG_IP is fine to use since this can't be an IMT call
2014 code
= (guchar
*)thunks
;
2015 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_PC
, 0);
2016 if (thumb_supported
)
2017 ARM_BX (code
, ARMREG_IP
);
2019 ARM_MOV_REG_REG (code
, ARMREG_PC
, ARMREG_IP
);
2020 thunks
[2] = (guint32
)pdata
->target
;
2021 mono_arch_flush_icache ((guchar
*)thunks
, 12);
2023 arm_patch (pdata
->code
, (guchar
*)thunks
);
2024 mono_arch_flush_icache (pdata
->code
, 4);
2028 /* skip 12 bytes, the size of the thunk */
2032 //g_print ("failed thunk lookup for %p from %p at %p (%d entries)\n", pdata->target, pdata->code, data, count);
/*
 * handle_thunk:
 *   Patch the call at 'code' to reach the out-of-range 'target' through a
 *   branch thunk. Fills a PatchData descriptor and walks the domain's
 *   code chunks with search_thunk_slot under the domain lock; the walk is
 *   performed twice (per the comment below, the second pass takes the
 *   first available free slot). Aborts if no slot could be found/created.
 *   NOTE(review): source text is extraction-mangled; the PatchData
 *   declaration, the pdata.code/found setup, and the condition guarding
 *   the second foreach are on lines elided from this view.
 */
2038 handle_thunk (int absolute
, guchar
*code
, const guchar
*target
) {
2039 MonoDomain
*domain
= mono_domain_get ();
/* describe what needs patching for the search callback */
2043 pdata
.target
= target
;
2044 pdata
.absolute
= absolute
;
/* the domain lock protects the code-chunk list during the walk */
2047 mono_domain_lock (domain
);
/* first pass: look for an existing thunk for this target */
2048 mono_domain_code_foreach (domain
, search_thunk_slot
, &pdata
);
2051 /* this uses the first available slot */
2053 mono_domain_code_foreach (domain
, search_thunk_slot
, &pdata
);
2055 mono_domain_unlock (domain
);
/* a failed thunk lookup is unrecoverable: report and assert */
2057 if (pdata
.found
!= 1)
2058 g_print ("thunk failed for %p from %p\n", target
, code
);
2059 g_assert (pdata
.found
== 1);
2063 arm_patch (guchar
*code
, const guchar
*target
)
2065 guint32
*code32
= (void*)code
;
2066 guint32 ins
= *code32
;
2067 guint32 prim
= (ins
>> 25) & 7;
2068 guint32 tval
= GPOINTER_TO_UINT (target
);
2070 //g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
2071 if (prim
== 5) { /* 101b */
2072 /* the diff starts 8 bytes from the branch opcode */
2073 gint diff
= target
- code
- 8;
2075 gint tmask
= 0xffffffff;
2076 if (tval
& 1) { /* entering thumb mode */
2077 diff
= target
- 1 - code
- 8;
2078 g_assert (thumb_supported
);
2079 tbits
= 0xf << 28; /* bl->blx bit pattern */
2080 g_assert ((ins
& (1 << 24))); /* it must be a bl, not b instruction */
2081 /* this low bit of the displacement is moved to bit 24 in the instruction encoding */
2085 tmask
= ~(1 << 24); /* clear the link bit */
2086 /*g_print ("blx to thumb: target: %p, code: %p, diff: %d, mask: %x\n", target, code, diff, tmask);*/
2091 if (diff
<= 33554431) {
2093 ins
= (ins
& 0xff000000) | diff
;
2095 *code32
= ins
| tbits
;
2099 /* diff between 0 and -33554432 */
2100 if (diff
>= -33554432) {
2102 ins
= (ins
& 0xff000000) | (diff
& ~0xff000000);
2104 *code32
= ins
| tbits
;
2109 handle_thunk (TRUE
, code
, target
);
2114 * The alternative call sequences looks like this:
2116 * ldr ip, [pc] // loads the address constant
2117 * b 1f // jumps around the constant
2118 * address constant embedded in the code
2123 * There are two cases for patching:
2124 * a) at the end of method emission: in this case code points to the start
2125 * of the call sequence
2126 * b) during runtime patching of the call site: in this case code points
2127 * to the mov pc, ip instruction
2129 * We have to handle also the thunk jump code sequence:
2133 * address constant // execution never reaches here
2135 if ((ins
& 0x0ffffff0) == 0x12fff10) {
2136 /* Branch and exchange: the address is constructed in a reg
2137 * We can patch BX when the code sequence is the following:
2138 * ldr ip, [pc, #0] ; 0x8
2145 guint8
*emit
= (guint8
*)ccode
;
2146 ARM_LDR_IMM (emit
, ARMREG_IP
, ARMREG_PC
, 0);
2148 ARM_MOV_REG_REG (emit
, ARMREG_LR
, ARMREG_PC
);
2149 ARM_BX (emit
, ARMREG_IP
);
2151 /*patching from magic trampoline*/
2152 if (ins
== ccode
[3]) {
2153 g_assert (code32
[-4] == ccode
[0]);
2154 g_assert (code32
[-3] == ccode
[1]);
2155 g_assert (code32
[-1] == ccode
[2]);
2156 code32
[-2] = (guint32
)target
;
2159 /*patching from JIT*/
2160 if (ins
== ccode
[0]) {
2161 g_assert (code32
[1] == ccode
[1]);
2162 g_assert (code32
[3] == ccode
[2]);
2163 g_assert (code32
[4] == ccode
[3]);
2164 code32
[2] = (guint32
)target
;
2167 g_assert_not_reached ();
2168 } else if ((ins
& 0x0ffffff0) == 0x12fff30) {
2176 guint8
*emit
= (guint8
*)ccode
;
2177 ARM_LDR_IMM (emit
, ARMREG_IP
, ARMREG_PC
, 0);
2179 ARM_BLX_REG (emit
, ARMREG_IP
);
2181 g_assert (code32
[-3] == ccode
[0]);
2182 g_assert (code32
[-2] == ccode
[1]);
2183 g_assert (code32
[0] == ccode
[2]);
2185 code32
[-1] = (guint32
)target
;
2188 guint32
*tmp
= ccode
;
2189 guint8
*emit
= (guint8
*)tmp
;
2190 ARM_LDR_IMM (emit
, ARMREG_IP
, ARMREG_PC
, 0);
2191 ARM_MOV_REG_REG (emit
, ARMREG_LR
, ARMREG_PC
);
2192 ARM_MOV_REG_REG (emit
, ARMREG_PC
, ARMREG_IP
);
2193 ARM_BX (emit
, ARMREG_IP
);
2194 if (ins
== ccode
[2]) {
2195 g_assert_not_reached (); // should be -2 ...
2196 code32
[-1] = (guint32
)target
;
2199 if (ins
== ccode
[0]) {
2200 /* handles both thunk jump code and the far call sequence */
2201 code32
[2] = (guint32
)target
;
2204 g_assert_not_reached ();
2206 // g_print ("patched with 0x%08x\n", ins);
2210 * Return the >= 0 uimm8 value if val can be represented with a byte + rotation
2211 * (with the rotation amount in *rot_amount. rot_amount is already adjusted
2212 * to be used with the emit macros.
2213 * Return -1 otherwise.
/*
 * mono_arm_is_rotated_imm8:
 *   Test whether 'val' can be encoded as an ARM data-processing immediate,
 *   i.e. an 8-bit value rotated right by an even amount. Tries every even
 *   rotation; on success stores the emit-macro-adjusted rotation in
 *   *rot_amount (see the header comment preceding this function in the
 *   original file: returns the uimm8 value, or -1 if not representable).
 *   NOTE(review): source text is extraction-mangled; the declarations of
 *   'res'/'i', the in-loop representability check and both return
 *   statements are on lines elided from this view.
 */
2216 mono_arm_is_rotated_imm8 (guint32 val
, gint
*rot_amount
)
/* try each even rotate amount 0, 2, ..., 30 */
2219 for (i
= 0; i
< 31; i
+= 2) {
/* rotate val left by i (equivalently: undo a rotate-right of i) */
2220 res
= (val
<< (32 - i
)) | (val
>> i
);
/* found a fit: record the rotation in the form the emitters expect
 * (32 - i, with 0 mapped to 0) */
2223 *rot_amount
= i
? 32 - i
: 0;
2230 * Emits in code a sequence of instructions that load the value 'val'
2231 * into the dreg register. Uses at most 4 instructions.
/*
 * mono_arm_emit_load_imm:
 *   Emit instructions that load the 32-bit constant 'val' into 'dreg'
 *   (per the header comment preceding this function in the original
 *   file: at most 4 instructions). Strategy, in order of preference:
 *     1. single MOV with a rotated 8-bit immediate;
 *     2. single MVN of the rotated-immediate-encodable complement;
 *     3. build the value byte by byte with MOV + up to three ADDs,
 *        using rotations 24/16/8 to place each byte.
 *   NOTE(review): source text is extraction-mangled; the disabled
 *   literal-pool variant's surrounding preprocessor lines, several
 *   braces/else lines and the final 'return code' are elided from
 *   this view.
 */
2234 mono_arm_emit_load_imm (guint8
*code
, int dreg
, guint32 val
)
2236 int imm8
, rot_amount
;
/* (disabled alternative: load from a PC-relative literal pool) */
2238 ARM_LDR_IMM (code
, dreg
, ARMREG_PC
, 0);
2239 /* skip the constant pool */
/* case 1: val itself is a rotated 8-bit immediate */
2245 if ((imm8
= mono_arm_is_rotated_imm8 (val
, &rot_amount
)) >= 0) {
2246 ARM_MOV_REG_IMM (code
, dreg
, imm8
, rot_amount
);
/* case 2: ~val is encodable, so a single MVN produces val */
2247 } else if ((imm8
= mono_arm_is_rotated_imm8 (~val
, &rot_amount
)) >= 0) {
2248 ARM_MVN_REG_IMM (code
, dreg
, imm8
, rot_amount
);
/* case 3: assemble byte by byte, starting from the lowest nonzero
 * byte (MOV), then ADDing each higher byte with the rotation that
 * places it at bits 8/16/24 respectively. */
2251 ARM_MOV_REG_IMM8 (code
, dreg
, (val
& 0xFF));
2253 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF00) >> 8, 24);
2255 if (val
& 0xFF0000) {
2256 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF0000) >> 16, 16);
2258 if (val
& 0xFF000000) {
2259 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF000000) >> 24, 8);
/* lowest byte is zero: start the build at byte 1 */
2261 } else if (val
& 0xFF00) {
2262 ARM_MOV_REG_IMM (code
, dreg
, (val
& 0xFF00) >> 8, 24);
2263 if (val
& 0xFF0000) {
2264 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF0000) >> 16, 16);
2266 if (val
& 0xFF000000) {
2267 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF000000) >> 24, 8);
/* bytes 0 and 1 are zero: start the build at byte 2 */
2269 } else if (val
& 0xFF0000) {
2270 ARM_MOV_REG_IMM (code
, dreg
, (val
& 0xFF0000) >> 16, 16);
2271 if (val
& 0xFF000000) {
2272 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF000000) >> 24, 8);
2275 //g_assert_not_reached ();
2281 * emit_load_volatile_arguments:
2283 * Load volatile arguments from the stack to the original input registers.
2284 * Required before a tail call.
2287 emit_load_volatile_arguments (MonoCompile
*cfg
, guint8
*code
)
2289 MonoMethod
*method
= cfg
->method
;
2290 MonoMethodSignature
*sig
;
2295 /* FIXME: Generate intermediate code instead */
2297 sig
= mono_method_signature (method
);
2299 /* This is the opposite of the code in emit_prolog */
2303 cinfo
= calculate_sizes (sig
, sig
->pinvoke
);
2305 if (MONO_TYPE_ISSTRUCT (sig
->ret
)) {
2306 ArgInfo
*ainfo
= &cinfo
->ret
;
2307 inst
= cfg
->vret_addr
;
2308 g_assert (arm_is_imm12 (inst
->inst_offset
));
2309 ARM_LDR_IMM (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
2311 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
2312 ArgInfo
*ainfo
= cinfo
->args
+ i
;
2313 inst
= cfg
->args
[pos
];
2315 if (cfg
->verbose_level
> 2)
2316 g_print ("Loading argument %d (type: %d)\n", i
, ainfo
->regtype
);
2317 if (inst
->opcode
== OP_REGVAR
) {
2318 if (ainfo
->regtype
== RegTypeGeneral
)
2319 ARM_MOV_REG_REG (code
, inst
->dreg
, ainfo
->reg
);
2320 else if (ainfo
->regtype
== RegTypeFP
) {
2321 g_assert_not_reached ();
2322 } else if (ainfo
->regtype
== RegTypeBase
) {
2326 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
2327 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
2329 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
2330 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
2334 g_assert_not_reached ();
2336 if (ainfo
->regtype
== RegTypeGeneral
) {
2337 switch (ainfo
->size
) {
2344 g_assert (arm_is_imm12 (inst
->inst_offset
));
2345 ARM_LDR_IMM (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
2346 g_assert (arm_is_imm12 (inst
->inst_offset
+ 4));
2347 ARM_LDR_IMM (code
, ainfo
->reg
+ 1, inst
->inst_basereg
, inst
->inst_offset
+ 4);
2350 if (arm_is_imm12 (inst
->inst_offset
)) {
2351 ARM_LDR_IMM (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
2353 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
2354 ARM_LDR_REG_REG (code
, ainfo
->reg
, inst
->inst_basereg
, ARMREG_IP
);
2358 } else if (ainfo
->regtype
== RegTypeBaseGen
) {
2361 } else if (ainfo
->regtype
== RegTypeBase
) {
2363 } else if (ainfo
->regtype
== RegTypeFP
) {
2364 g_assert_not_reached ();
2365 } else if (ainfo
->regtype
== RegTypeStructByVal
) {
2366 int doffset
= inst
->inst_offset
;
2370 if (mono_class_from_mono_type (inst
->inst_vtype
))
2371 size
= mono_class_native_size (mono_class_from_mono_type (inst
->inst_vtype
), NULL
);
2372 for (cur_reg
= 0; cur_reg
< ainfo
->size
; ++cur_reg
) {
2373 if (arm_is_imm12 (doffset
)) {
2374 ARM_LDR_IMM (code
, ainfo
->reg
+ cur_reg
, inst
->inst_basereg
, doffset
);
2376 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, doffset
);
2377 ARM_LDR_REG_REG (code
, ainfo
->reg
+ cur_reg
, inst
->inst_basereg
, ARMREG_IP
);
2379 soffset
+= sizeof (gpointer
);
2380 doffset
+= sizeof (gpointer
);
2385 } else if (ainfo
->regtype
== RegTypeStructByAddr
) {
2402 mono_arch_output_basic_block (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
2407 guint8
*code
= cfg
->native_code
+ cfg
->code_len
;
2408 MonoInst
*last_ins
= NULL
;
2409 guint last_offset
= 0;
2411 int imm8
, rot_amount
;
2413 /* we don't align basic blocks of loops on arm */
2415 if (cfg
->verbose_level
> 2)
2416 g_print ("Basic block %d starting at offset 0x%x\n", bb
->block_num
, bb
->native_offset
);
2418 cpos
= bb
->max_offset
;
2420 if (cfg
->prof_options
& MONO_PROFILE_COVERAGE
) {
2421 //MonoCoverageInfo *cov = mono_get_coverage_info (cfg->method);
2422 //g_assert (!mono_compile_aot);
2425 // cov->data [bb->dfn].iloffset = bb->cil_code - cfg->cil_code;
2426 /* this is not thread save, but good enough */
2427 /* fixme: howto handle overflows? */
2428 //x86_inc_mem (code, &cov->data [bb->dfn].count);
2431 if (mono_break_at_bb_method
&& mono_method_desc_full_match (mono_break_at_bb_method
, cfg
->method
) && bb
->block_num
== mono_break_at_bb_bb_num
) {
2432 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
2433 (gpointer
)"mono_break");
2434 code
= emit_call_seq (cfg
, code
);
2437 MONO_BB_FOR_EACH_INS (bb
, ins
) {
2438 offset
= code
- cfg
->native_code
;
2440 max_len
= ((guint8
*)ins_get_spec (ins
->opcode
))[MONO_INST_LEN
];
2442 if (offset
> (cfg
->code_size
- max_len
- 16)) {
2443 cfg
->code_size
*= 2;
2444 cfg
->native_code
= g_realloc (cfg
->native_code
, cfg
->code_size
);
2445 code
= cfg
->native_code
+ offset
;
2447 // if (ins->cil_code)
2448 // g_print ("cil code\n");
2449 mono_debug_record_line_number (cfg
, ins
, offset
);
2451 switch (ins
->opcode
) {
2452 case OP_MEMORY_BARRIER
:
2455 #ifdef HAVE_AEABI_READ_TP
2456 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
2457 (gpointer
)"__aeabi_read_tp");
2458 code
= emit_call_seq (cfg
, code
);
2460 ARM_LDR_IMM (code
, ins
->dreg
, ARMREG_R0
, ins
->inst_offset
);
2462 g_assert_not_reached ();
2466 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
2467 ppc_mulhw (code, ppc_r3, ins->sreg1, ins->sreg2);
2470 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
2471 ppc_mulhwu (code, ppc_r3, ins->sreg1, ins->sreg2);
2473 case OP_STOREI1_MEMBASE_IMM
:
2474 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_imm
& 0xFF);
2475 g_assert (arm_is_imm12 (ins
->inst_offset
));
2476 ARM_STRB_IMM (code
, ARMREG_LR
, ins
->inst_destbasereg
, ins
->inst_offset
);
2478 case OP_STOREI2_MEMBASE_IMM
:
2479 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_imm
& 0xFFFF);
2480 g_assert (arm_is_imm8 (ins
->inst_offset
));
2481 ARM_STRH_IMM (code
, ARMREG_LR
, ins
->inst_destbasereg
, ins
->inst_offset
);
2483 case OP_STORE_MEMBASE_IMM
:
2484 case OP_STOREI4_MEMBASE_IMM
:
2485 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_imm
);
2486 g_assert (arm_is_imm12 (ins
->inst_offset
));
2487 ARM_STR_IMM (code
, ARMREG_LR
, ins
->inst_destbasereg
, ins
->inst_offset
);
2489 case OP_STOREI1_MEMBASE_REG
:
2490 g_assert (arm_is_imm12 (ins
->inst_offset
));
2491 ARM_STRB_IMM (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
2493 case OP_STOREI2_MEMBASE_REG
:
2494 g_assert (arm_is_imm8 (ins
->inst_offset
));
2495 ARM_STRH_IMM (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
2497 case OP_STORE_MEMBASE_REG
:
2498 case OP_STOREI4_MEMBASE_REG
:
2499 /* this case is special, since it happens for spill code after lowering has been called */
2500 if (arm_is_imm12 (ins
->inst_offset
)) {
2501 ARM_STR_IMM (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
2503 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
2504 ARM_STR_REG_REG (code
, ins
->sreg1
, ins
->inst_destbasereg
, ARMREG_LR
);
2507 case OP_STOREI1_MEMINDEX
:
2508 ARM_STRB_REG_REG (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->sreg2
);
2510 case OP_STOREI2_MEMINDEX
:
2511 ARM_STRH_REG_REG (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->sreg2
);
2513 case OP_STORE_MEMINDEX
:
2514 case OP_STOREI4_MEMINDEX
:
2515 ARM_STR_REG_REG (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->sreg2
);
2518 g_assert_not_reached ();
2520 case OP_LOAD_MEMINDEX
:
2521 case OP_LOADI4_MEMINDEX
:
2522 case OP_LOADU4_MEMINDEX
:
2523 ARM_LDR_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
2525 case OP_LOADI1_MEMINDEX
:
2526 ARM_LDRSB_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
2528 case OP_LOADU1_MEMINDEX
:
2529 ARM_LDRB_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
2531 case OP_LOADI2_MEMINDEX
:
2532 ARM_LDRSH_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
2534 case OP_LOADU2_MEMINDEX
:
2535 ARM_LDRH_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
2537 case OP_LOAD_MEMBASE
:
2538 case OP_LOADI4_MEMBASE
:
2539 case OP_LOADU4_MEMBASE
:
2540 /* this case is special, since it happens for spill code after lowering has been called */
2541 if (arm_is_imm12 (ins
->inst_offset
)) {
2542 ARM_LDR_IMM (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
2544 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
2545 ARM_LDR_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ARMREG_LR
);
2548 case OP_LOADI1_MEMBASE
:
2549 g_assert (arm_is_imm8 (ins
->inst_offset
));
2550 ARM_LDRSB_IMM (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
2552 case OP_LOADU1_MEMBASE
:
2553 g_assert (arm_is_imm12 (ins
->inst_offset
));
2554 ARM_LDRB_IMM (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
2556 case OP_LOADU2_MEMBASE
:
2557 g_assert (arm_is_imm8 (ins
->inst_offset
));
2558 ARM_LDRH_IMM (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
2560 case OP_LOADI2_MEMBASE
:
2561 g_assert (arm_is_imm8 (ins
->inst_offset
));
2562 ARM_LDRSH_IMM (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
2564 case OP_ICONV_TO_I1
:
2565 ARM_SHL_IMM (code
, ins
->dreg
, ins
->sreg1
, 24);
2566 ARM_SAR_IMM (code
, ins
->dreg
, ins
->dreg
, 24);
2568 case OP_ICONV_TO_I2
:
2569 ARM_SHL_IMM (code
, ins
->dreg
, ins
->sreg1
, 16);
2570 ARM_SAR_IMM (code
, ins
->dreg
, ins
->dreg
, 16);
2572 case OP_ICONV_TO_U1
:
2573 ARM_AND_REG_IMM8 (code
, ins
->dreg
, ins
->sreg1
, 0xff);
2575 case OP_ICONV_TO_U2
:
2576 ARM_SHL_IMM (code
, ins
->dreg
, ins
->sreg1
, 16);
2577 ARM_SHR_IMM (code
, ins
->dreg
, ins
->dreg
, 16);
2581 ARM_CMP_REG_REG (code
, ins
->sreg1
, ins
->sreg2
);
2583 case OP_COMPARE_IMM
:
2584 case OP_ICOMPARE_IMM
:
2585 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
2586 g_assert (imm8
>= 0);
2587 ARM_CMP_REG_IMM (code
, ins
->sreg1
, imm8
, rot_amount
);
2591 * gdb does not like encountering the hw breakpoint ins in the debugged code.
2592 * So instead of emitting a trap, we emit a call a C function and place a
2595 //*(int*)code = 0xef9f0001;
2598 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
2599 (gpointer
)"mono_break");
2600 code
= emit_call_seq (cfg
, code
);
2602 case OP_RELAXED_NOP
:
2607 case OP_DUMMY_STORE
:
2608 case OP_NOT_REACHED
:
2613 ARM_ADDS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
2616 ARM_ADD_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
2620 ARM_ADCS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
2623 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
2624 g_assert (imm8
>= 0);
2625 ARM_ADDS_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
2629 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
2630 g_assert (imm8
>= 0);
2631 ARM_ADD_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
2635 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
2636 g_assert (imm8
>= 0);
2637 ARM_ADCS_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
2640 ARM_ADD_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
2641 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
2643 case OP_IADD_OVF_UN
:
2644 ARM_ADD_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
2645 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
2648 ARM_SUB_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
2649 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
2651 case OP_ISUB_OVF_UN
:
2652 ARM_SUB_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
2653 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
2655 case OP_ADD_OVF_CARRY
:
2656 ARM_ADCS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
2657 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
2659 case OP_ADD_OVF_UN_CARRY
:
2660 ARM_ADCS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
2661 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
2663 case OP_SUB_OVF_CARRY
:
2664 ARM_SBCS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
2665 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
2667 case OP_SUB_OVF_UN_CARRY
:
2668 ARM_SBCS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
2669 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
2673 ARM_SUBS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
2676 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
2677 g_assert (imm8
>= 0);
2678 ARM_SUBS_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
2681 ARM_SUB_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
2685 ARM_SBCS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
2689 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
2690 g_assert (imm8
>= 0);
2691 ARM_SUB_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
2695 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
2696 g_assert (imm8
>= 0);
2697 ARM_SBCS_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
2699 case OP_ARM_RSBS_IMM
:
2700 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
2701 g_assert (imm8
>= 0);
2702 ARM_RSBS_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
2704 case OP_ARM_RSC_IMM
:
2705 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
2706 g_assert (imm8
>= 0);
2707 ARM_RSC_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
2710 ARM_AND_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
2714 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
2715 g_assert (imm8
>= 0);
2716 ARM_AND_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
2724 /* crappy ARM arch doesn't have a DIV instruction */
2725 g_assert_not_reached ();
2727 ARM_ORR_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
2731 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
2732 g_assert (imm8
>= 0);
2733 ARM_ORR_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
2736 ARM_EOR_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
2740 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
2741 g_assert (imm8
>= 0);
2742 ARM_EOR_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
2745 ARM_SHL_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
2750 ARM_SHL_IMM (code
, ins
->dreg
, ins
->sreg1
, (ins
->inst_imm
& 0x1f));
2751 else if (ins
->dreg
!= ins
->sreg1
)
2752 ARM_MOV_REG_REG (code
, ins
->dreg
, ins
->sreg1
);
2755 ARM_SAR_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
2760 ARM_SAR_IMM (code
, ins
->dreg
, ins
->sreg1
, (ins
->inst_imm
& 0x1f));
2761 else if (ins
->dreg
!= ins
->sreg1
)
2762 ARM_MOV_REG_REG (code
, ins
->dreg
, ins
->sreg1
);
2765 case OP_ISHR_UN_IMM
:
2767 ARM_SHR_IMM (code
, ins
->dreg
, ins
->sreg1
, (ins
->inst_imm
& 0x1f));
2768 else if (ins
->dreg
!= ins
->sreg1
)
2769 ARM_MOV_REG_REG (code
, ins
->dreg
, ins
->sreg1
);
2772 ARM_SHR_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
2775 ARM_MVN_REG_REG (code
, ins
->dreg
, ins
->sreg1
);
2778 ARM_RSB_REG_IMM8 (code
, ins
->dreg
, ins
->sreg1
, 0);
2781 if (ins
->dreg
== ins
->sreg2
)
2782 ARM_MUL_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
2784 ARM_MUL_REG_REG (code
, ins
->dreg
, ins
->sreg2
, ins
->sreg1
);
2787 g_assert_not_reached ();
2790 /* FIXME: handle ovf/ sreg2 != dreg */
2791 ARM_MUL_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
2792 /* FIXME: MUL doesn't set the C/O flags on ARM */
2794 case OP_IMUL_OVF_UN
:
2795 /* FIXME: handle ovf/ sreg2 != dreg */
2796 ARM_MUL_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
2797 /* FIXME: MUL doesn't set the C/O flags on ARM */
2800 code
= mono_arm_emit_load_imm (code
, ins
->dreg
, ins
->inst_c0
);
2803 /* Load the GOT offset */
2804 mono_add_patch_info (cfg
, offset
, (MonoJumpInfoType
)ins
->inst_i1
, ins
->inst_p0
);
2805 ARM_LDR_IMM (code
, ins
->dreg
, ARMREG_PC
, 0);
2807 *(gpointer
*)code
= NULL
;
2809 /* Load the value from the GOT */
2810 ARM_LDR_REG_REG (code
, ins
->dreg
, ARMREG_PC
, ins
->dreg
);
2812 case OP_ICONV_TO_I4
:
2813 case OP_ICONV_TO_U4
:
2815 if (ins
->dreg
!= ins
->sreg1
)
2816 ARM_MOV_REG_REG (code
, ins
->dreg
, ins
->sreg1
);
2819 int saved
= ins
->sreg2
;
2820 if (ins
->sreg2
== ARM_LSW_REG
) {
2821 ARM_MOV_REG_REG (code
, ARMREG_LR
, ins
->sreg2
);
2824 if (ins
->sreg1
!= ARM_LSW_REG
)
2825 ARM_MOV_REG_REG (code
, ARM_LSW_REG
, ins
->sreg1
);
2826 if (saved
!= ARM_MSW_REG
)
2827 ARM_MOV_REG_REG (code
, ARM_MSW_REG
, saved
);
2832 ARM_MVFD (code
, ins
->dreg
, ins
->sreg1
);
2833 #elif defined(ARM_FPU_VFP)
2834 ARM_CPYD (code
, ins
->dreg
, ins
->sreg1
);
2837 case OP_FCONV_TO_R4
:
2839 ARM_MVFS (code
, ins
->dreg
, ins
->sreg1
);
2840 #elif defined(ARM_FPU_VFP)
2841 ARM_CVTD (code
, ins
->dreg
, ins
->sreg1
);
2842 ARM_CVTS (code
, ins
->dreg
, ins
->dreg
);
2847 * Keep in sync with mono_arch_emit_epilog
2849 g_assert (!cfg
->method
->save_lmf
);
2851 code
= emit_load_volatile_arguments (cfg
, code
);
2853 code
= emit_big_add (code
, ARMREG_SP
, cfg
->frame_reg
, cfg
->stack_usage
);
2854 ARM_POP_NWB (code
, cfg
->used_int_regs
| ((1 << ARMREG_SP
)) | ((1 << ARMREG_LR
)));
2855 mono_add_patch_info (cfg
, (guint8
*) code
- cfg
->native_code
, MONO_PATCH_INFO_METHOD_JUMP
, ins
->inst_p0
);
2857 cfg
->disable_aot
= TRUE
;
2860 /* ensure ins->sreg1 is not NULL */
2861 ARM_LDR_IMM (code
, ARMREG_LR
, ins
->sreg1
, 0);
2865 if (ppc_is_imm16 (cfg
->sig_cookie
+ cfg
->stack_usage
)) {
2866 ppc_addi (code
, ppc_r11
, cfg
->frame_reg
, cfg
->sig_cookie
+ cfg
->stack_usage
);
2868 ppc_load (code
, ppc_r11
, cfg
->sig_cookie
+ cfg
->stack_usage
);
2869 ppc_add (code
, ppc_r11
, cfg
->frame_reg
, ppc_r11
);
2871 ppc_stw (code
, ppc_r11
, 0, ins
->sreg1
);
2881 call
= (MonoCallInst
*)ins
;
2882 if (ins
->flags
& MONO_INST_HAS_METHOD
)
2883 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_METHOD
, call
->method
);
2885 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_ABS
, call
->fptr
);
2886 code
= emit_call_seq (cfg
, code
);
2887 code
= emit_move_return_value (cfg
, ins
, code
);
2893 case OP_VOIDCALL_REG
:
2895 code
= emit_call_reg (code
, ins
->sreg1
);
2896 code
= emit_move_return_value (cfg
, ins
, code
);
2898 case OP_FCALL_MEMBASE
:
2899 case OP_LCALL_MEMBASE
:
2900 case OP_VCALL_MEMBASE
:
2901 case OP_VCALL2_MEMBASE
:
2902 case OP_VOIDCALL_MEMBASE
:
2903 case OP_CALL_MEMBASE
:
2904 g_assert (arm_is_imm12 (ins
->inst_offset
));
2905 g_assert (ins
->sreg1
!= ARMREG_LR
);
2906 call
= (MonoCallInst
*)ins
;
2907 if (call
->method
->klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
) {
2908 ARM_ADD_REG_IMM8 (code
, ARMREG_LR
, ARMREG_PC
, 4);
2909 ARM_LDR_IMM (code
, ARMREG_PC
, ins
->sreg1
, ins
->inst_offset
);
2911 * We can't embed the method in the code stream in PIC code, or
2913 * Instead, we put it in V5 in code emitted by
2914 * mono_arch_emit_imt_argument (), and embed NULL here to
2915 * signal the IMT thunk that the value is in V5.
2917 if (call
->dynamic_imt_arg
)
2918 *((gpointer
*)code
) = NULL
;
2920 *((gpointer
*)code
) = (gpointer
)call
->method
;
2923 ARM_MOV_REG_REG (code
, ARMREG_LR
, ARMREG_PC
);
2924 ARM_LDR_IMM (code
, ARMREG_PC
, ins
->sreg1
, ins
->inst_offset
);
2926 code
= emit_move_return_value (cfg
, ins
, code
);
2929 /* keep alignment */
2930 int alloca_waste
= cfg
->param_area
;
2933 /* round the size to 8 bytes */
2934 ARM_ADD_REG_IMM8 (code
, ins
->dreg
, ins
->sreg1
, 7);
2935 ARM_BIC_REG_IMM8 (code
, ins
->dreg
, ins
->dreg
, 7);
2937 ARM_ADD_REG_IMM8 (code
, ins
->dreg
, ins
->dreg
, alloca_waste
);
2938 ARM_SUB_REG_REG (code
, ARMREG_SP
, ARMREG_SP
, ins
->dreg
);
2939 /* memzero the area: dreg holds the size, sp is the pointer */
2940 if (ins
->flags
& MONO_INST_INIT
) {
2941 guint8
*start_loop
, *branch_to_cond
;
2942 ARM_MOV_REG_IMM8 (code
, ARMREG_LR
, 0);
2943 branch_to_cond
= code
;
2946 ARM_STR_REG_REG (code
, ARMREG_LR
, ARMREG_SP
, ins
->dreg
);
2947 arm_patch (branch_to_cond
, code
);
2948 /* decrement by 4 and set flags */
2949 ARM_SUBS_REG_IMM8 (code
, ins
->dreg
, ins
->dreg
, 4);
2950 ARM_B_COND (code
, ARMCOND_GE
, 0);
2951 arm_patch (code
- 4, start_loop
);
2953 ARM_ADD_REG_IMM8 (code
, ins
->dreg
, ARMREG_SP
, alloca_waste
);
2957 if (ins
->sreg1
!= ARMREG_R0
)
2958 ARM_MOV_REG_REG (code
, ARMREG_R0
, ins
->sreg1
);
2959 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
2960 (gpointer
)"mono_arch_throw_exception");
2961 code
= emit_call_seq (cfg
, code
);
2965 if (ins
->sreg1
!= ARMREG_R0
)
2966 ARM_MOV_REG_REG (code
, ARMREG_R0
, ins
->sreg1
);
2967 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
2968 (gpointer
)"mono_arch_rethrow_exception");
2969 code
= emit_call_seq (cfg
, code
);
2972 case OP_START_HANDLER
: {
2973 MonoInst
*spvar
= mono_find_spvar_for_region (cfg
, bb
->region
);
2975 if (arm_is_imm12 (spvar
->inst_offset
)) {
2976 ARM_STR_IMM (code
, ARMREG_LR
, spvar
->inst_basereg
, spvar
->inst_offset
);
2978 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, spvar
->inst_offset
);
2979 ARM_STR_REG_REG (code
, ARMREG_LR
, spvar
->inst_basereg
, ARMREG_IP
);
2983 case OP_ENDFILTER
: {
2984 MonoInst
*spvar
= mono_find_spvar_for_region (cfg
, bb
->region
);
2986 if (ins
->sreg1
!= ARMREG_R0
)
2987 ARM_MOV_REG_REG (code
, ARMREG_R0
, ins
->sreg1
);
2988 if (arm_is_imm12 (spvar
->inst_offset
)) {
2989 ARM_LDR_IMM (code
, ARMREG_IP
, spvar
->inst_basereg
, spvar
->inst_offset
);
2991 g_assert (ARMREG_IP
!= spvar
->inst_basereg
);
2992 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, spvar
->inst_offset
);
2993 ARM_LDR_REG_REG (code
, ARMREG_IP
, spvar
->inst_basereg
, ARMREG_IP
);
2995 ARM_MOV_REG_REG (code
, ARMREG_PC
, ARMREG_IP
);
2998 case OP_ENDFINALLY
: {
2999 MonoInst
*spvar
= mono_find_spvar_for_region (cfg
, bb
->region
);
3001 if (arm_is_imm12 (spvar
->inst_offset
)) {
3002 ARM_LDR_IMM (code
, ARMREG_IP
, spvar
->inst_basereg
, spvar
->inst_offset
);
3004 g_assert (ARMREG_IP
!= spvar
->inst_basereg
);
3005 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, spvar
->inst_offset
);
3006 ARM_LDR_REG_REG (code
, ARMREG_IP
, spvar
->inst_basereg
, ARMREG_IP
);
3008 ARM_MOV_REG_REG (code
, ARMREG_PC
, ARMREG_IP
);
3011 case OP_CALL_HANDLER
:
3012 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_BB
, ins
->inst_target_bb
);
3016 ins
->inst_c0
= code
- cfg
->native_code
;
3019 if (ins
->flags
& MONO_INST_BRLABEL
) {
3020 /*if (ins->inst_i0->inst_c0) {
3022 //x86_jump_code (code, cfg->native_code + ins->inst_i0->inst_c0);
3024 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_LABEL
, ins
->inst_i0
);
3028 /*if (ins->inst_target_bb->native_offset) {
3030 //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
3032 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_BB
, ins
->inst_target_bb
);
3038 ARM_MOV_REG_REG (code
, ARMREG_PC
, ins
->sreg1
);
3042 * In the normal case we have:
3043 * ldr pc, [pc, ins->sreg1 << 2]
3046 * ldr lr, [pc, ins->sreg1 << 2]
3048 * After follows the data.
3049 * FIXME: add aot support.
3051 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_SWITCH
, ins
->inst_p0
);
3052 max_len
+= 4 * GPOINTER_TO_INT (ins
->klass
);
3053 if (offset
> (cfg
->code_size
- max_len
- 16)) {
3054 cfg
->code_size
+= max_len
;
3055 cfg
->code_size
*= 2;
3056 cfg
->native_code
= g_realloc (cfg
->native_code
, cfg
->code_size
);
3057 code
= cfg
->native_code
+ offset
;
3059 ARM_LDR_REG_REG_SHIFT (code
, ARMREG_PC
, ARMREG_PC
, ins
->sreg1
, ARMSHIFT_LSL
, 2);
3061 code
+= 4 * GPOINTER_TO_INT (ins
->klass
);
3065 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_NE
);
3066 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_EQ
);
3070 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
3071 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_LT
);
3075 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
3076 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_LO
);
3080 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
3081 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_GT
);
3085 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
3086 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_HI
);
3088 case OP_COND_EXC_EQ
:
3089 case OP_COND_EXC_NE_UN
:
3090 case OP_COND_EXC_LT
:
3091 case OP_COND_EXC_LT_UN
:
3092 case OP_COND_EXC_GT
:
3093 case OP_COND_EXC_GT_UN
:
3094 case OP_COND_EXC_GE
:
3095 case OP_COND_EXC_GE_UN
:
3096 case OP_COND_EXC_LE
:
3097 case OP_COND_EXC_LE_UN
:
3098 EMIT_COND_SYSTEM_EXCEPTION (ins
->opcode
- OP_COND_EXC_EQ
, ins
->inst_p1
);
3100 case OP_COND_EXC_IEQ
:
3101 case OP_COND_EXC_INE_UN
:
3102 case OP_COND_EXC_ILT
:
3103 case OP_COND_EXC_ILT_UN
:
3104 case OP_COND_EXC_IGT
:
3105 case OP_COND_EXC_IGT_UN
:
3106 case OP_COND_EXC_IGE
:
3107 case OP_COND_EXC_IGE_UN
:
3108 case OP_COND_EXC_ILE
:
3109 case OP_COND_EXC_ILE_UN
:
3110 EMIT_COND_SYSTEM_EXCEPTION (ins
->opcode
- OP_COND_EXC_IEQ
, ins
->inst_p1
);
3113 case OP_COND_EXC_IC
:
3114 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CS
, ins
->inst_p1
);
3116 case OP_COND_EXC_OV
:
3117 case OP_COND_EXC_IOV
:
3118 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS
, ins
->inst_p1
);
3120 case OP_COND_EXC_NC
:
3121 case OP_COND_EXC_INC
:
3122 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CC
, ins
->inst_p1
);
3124 case OP_COND_EXC_NO
:
3125 case OP_COND_EXC_INO
:
3126 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VC
, ins
->inst_p1
);
3138 EMIT_COND_BRANCH (ins
, ins
->opcode
- OP_IBEQ
);
3141 /* floating point opcodes */
3144 if (cfg
->compile_aot
) {
3145 ARM_LDFD (code
, ins
->dreg
, ARMREG_PC
, 0);
3147 *(guint32
*)code
= ((guint32
*)(ins
->inst_p0
))[0];
3149 *(guint32
*)code
= ((guint32
*)(ins
->inst_p0
))[1];
3152 /* FIXME: we can optimize the imm load by dealing with part of
3153 * the displacement in LDFD (aligning to 512).
3155 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, (guint32
)ins
->inst_p0
);
3156 ARM_LDFD (code
, ins
->dreg
, ARMREG_LR
, 0);
3160 if (cfg
->compile_aot
) {
3161 ARM_LDFS (code
, ins
->dreg
, ARMREG_PC
, 0);
3163 *(guint32
*)code
= ((guint32
*)(ins
->inst_p0
))[0];
3166 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, (guint32
)ins
->inst_p0
);
3167 ARM_LDFS (code
, ins
->dreg
, ARMREG_LR
, 0);
3170 case OP_STORER8_MEMBASE_REG
:
3171 /* This is generated by the local regalloc pass which runs after the lowering pass */
3172 if (!arm_is_fpimm8 (ins
->inst_offset
)) {
3173 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
3174 ARM_ADD_REG_REG (code
, ARMREG_LR
, ARMREG_LR
, ins
->inst_destbasereg
);
3175 ARM_STFD (code
, ins
->sreg1
, ARMREG_LR
, 0);
3177 ARM_STFD (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
3180 case OP_LOADR8_MEMBASE
:
3181 /* This is generated by the local regalloc pass which runs after the lowering pass */
3182 if (!arm_is_fpimm8 (ins
->inst_offset
)) {
3183 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
3184 ARM_ADD_REG_REG (code
, ARMREG_LR
, ARMREG_LR
, ins
->inst_basereg
);
3185 ARM_LDFD (code
, ins
->dreg
, ARMREG_LR
, 0);
3187 ARM_LDFD (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
3190 case OP_STORER4_MEMBASE_REG
:
3191 g_assert (arm_is_fpimm8 (ins
->inst_offset
));
3192 ARM_STFS (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
3194 case OP_LOADR4_MEMBASE
:
3195 g_assert (arm_is_fpimm8 (ins
->inst_offset
));
3196 ARM_LDFS (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
3198 case OP_ICONV_TO_R_UN
: {
3200 tmpreg
= ins
->dreg
== 0? 1: 0;
3201 ARM_CMP_REG_IMM8 (code
, ins
->sreg1
, 0);
3202 ARM_FLTD (code
, ins
->dreg
, ins
->sreg1
);
3203 ARM_B_COND (code
, ARMCOND_GE
, 8);
3204 /* save the temp register */
3205 ARM_SUB_REG_IMM8 (code
, ARMREG_SP
, ARMREG_SP
, 8);
3206 ARM_STFD (code
, tmpreg
, ARMREG_SP
, 0);
3207 ARM_LDFD (code
, tmpreg
, ARMREG_PC
, 12);
3208 ARM_FPA_ADFD (code
, ins
->dreg
, ins
->dreg
, tmpreg
);
3209 ARM_LDFD (code
, tmpreg
, ARMREG_SP
, 0);
3210 ARM_ADD_REG_IMM8 (code
, ARMREG_SP
, ARMREG_SP
, 8);
3211 /* skip the constant pool */
3214 *(int*)code
= 0x41f00000;
3219 * ldfltd ftemp, [pc, #8] 0x41f00000 0x00000000
3220 * adfltd fdest, fdest, ftemp
3224 case OP_ICONV_TO_R4
:
3225 ARM_FLTS (code
, ins
->dreg
, ins
->sreg1
);
3227 case OP_ICONV_TO_R8
:
3228 ARM_FLTD (code
, ins
->dreg
, ins
->sreg1
);
3230 #elif defined(ARM_FPU_VFP)
3232 if (cfg
->compile_aot
) {
3233 ARM_FLDD (code
, ins
->dreg
, ARMREG_PC
, 0);
3235 *(guint32
*)code
= ((guint32
*)(ins
->inst_p0
))[0];
3237 *(guint32
*)code
= ((guint32
*)(ins
->inst_p0
))[1];
3240 /* FIXME: we can optimize the imm load by dealing with part of
3241 * the displacement in LDFD (aligning to 512).
3243 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, (guint32
)ins
->inst_p0
);
3244 ARM_FLDD (code
, ins
->dreg
, ARMREG_LR
, 0);
3248 if (cfg
->compile_aot
) {
3249 ARM_FLDS (code
, ins
->dreg
, ARMREG_PC
, 0);
3251 *(guint32
*)code
= ((guint32
*)(ins
->inst_p0
))[0];
3253 ARM_CVTS (code
, ins
->dreg
, ins
->dreg
);
3255 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, (guint32
)ins
->inst_p0
);
3256 ARM_FLDS (code
, ins
->dreg
, ARMREG_LR
, 0);
3257 ARM_CVTS (code
, ins
->dreg
, ins
->dreg
);
3260 case OP_STORER8_MEMBASE_REG
:
3261 g_assert (arm_is_fpimm8 (ins
->inst_offset
));
3262 ARM_FSTD (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
3264 case OP_LOADR8_MEMBASE
:
3265 g_assert (arm_is_fpimm8 (ins
->inst_offset
));
3266 ARM_FLDD (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
3268 case OP_STORER4_MEMBASE_REG
:
3269 g_assert (arm_is_fpimm8 (ins
->inst_offset
));
3270 ARM_FSTS (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
3272 case OP_LOADR4_MEMBASE
:
3273 g_assert (arm_is_fpimm8 (ins
->inst_offset
));
3274 ARM_FLDS (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
3276 case OP_ICONV_TO_R_UN
: {
3277 g_assert_not_reached ();
3280 case OP_ICONV_TO_R4
:
3281 g_assert_not_reached ();
3282 //ARM_FLTS (code, ins->dreg, ins->sreg1);
3284 case OP_ICONV_TO_R8
:
3285 g_assert_not_reached ();
3286 //ARM_FLTD (code, ins->dreg, ins->sreg1);
3289 case OP_FCONV_TO_I1
:
3290 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 1, TRUE
);
3292 case OP_FCONV_TO_U1
:
3293 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 1, FALSE
);
3295 case OP_FCONV_TO_I2
:
3296 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 2, TRUE
);
3298 case OP_FCONV_TO_U2
:
3299 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 2, FALSE
);
3301 case OP_FCONV_TO_I4
:
3303 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 4, TRUE
);
3305 case OP_FCONV_TO_U4
:
3307 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 4, FALSE
);
3309 case OP_FCONV_TO_I8
:
3310 case OP_FCONV_TO_U8
:
3311 g_assert_not_reached ();
3312 /* Implemented as helper calls */
3314 case OP_LCONV_TO_R_UN
:
3315 g_assert_not_reached ();
3316 /* Implemented as helper calls */
3318 case OP_LCONV_TO_OVF_I
:
3319 case OP_LCONV_TO_OVF_I4_2
: {
3320 guint32
*high_bit_not_set
, *valid_negative
, *invalid_negative
, *valid_positive
;
3322 * Valid ints: 0xffffffff:8000000 to 00000000:0x7f000000
3325 ARM_CMP_REG_IMM8 (code
, ins
->sreg1
, 0);
3326 high_bit_not_set
= code
;
3327 ARM_B_COND (code
, ARMCOND_GE
, 0); /*branch if bit 31 of the lower part is not set*/
3329 ARM_CMN_REG_IMM8 (code
, ins
->sreg2
, 1); /*This have the same effect as CMP reg, 0xFFFFFFFF */
3330 valid_negative
= code
;
3331 ARM_B_COND (code
, ARMCOND_EQ
, 0); /*branch if upper part == 0xFFFFFFFF (lower part has bit 31 set) */
3332 invalid_negative
= code
;
3333 ARM_B_COND (code
, ARMCOND_AL
, 0);
3335 arm_patch (high_bit_not_set
, code
);
3337 ARM_CMP_REG_IMM8 (code
, ins
->sreg2
, 0);
3338 valid_positive
= code
;
3339 ARM_B_COND (code
, ARMCOND_EQ
, 0); /*branch if upper part == 0 (lower part has bit 31 clear)*/
3341 arm_patch (invalid_negative
, code
);
3342 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_AL
, "OverflowException");
3344 arm_patch (valid_negative
, code
);
3345 arm_patch (valid_positive
, code
);
3347 if (ins
->dreg
!= ins
->sreg1
)
3348 ARM_MOV_REG_REG (code
, ins
->dreg
, ins
->sreg1
);
3353 ARM_FPA_ADFD (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3356 ARM_FPA_SUFD (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3359 ARM_FPA_MUFD (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3362 ARM_FPA_DVFD (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3365 ARM_MNFD (code
, ins
->dreg
, ins
->sreg1
);
3367 #elif defined(ARM_FPU_VFP)
3369 ARM_VFP_ADDD (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3372 ARM_VFP_SUBD (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3375 ARM_VFP_MULD (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3378 ARM_VFP_DIVD (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3381 ARM_NEGD (code
, ins
->dreg
, ins
->sreg1
);
3386 g_assert_not_reached ();
3390 ARM_FCMP (code
, ARM_FPA_CMF
, ins
->sreg1
, ins
->sreg2
);
3391 #elif defined(ARM_FPU_VFP)
3392 ARM_CMPD (code
, ins
->sreg1
, ins
->sreg2
);
3397 ARM_FCMP (code
, ARM_FPA_CMF
, ins
->sreg1
, ins
->sreg2
);
3398 #elif defined(ARM_FPU_VFP)
3399 ARM_CMPD (code
, ins
->sreg1
, ins
->sreg2
);
3401 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_NE
);
3402 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_EQ
);
3406 ARM_FCMP (code
, ARM_FPA_CMF
, ins
->sreg1
, ins
->sreg2
);
3407 #elif defined(ARM_FPU_VFP)
3408 ARM_CMPD (code
, ins
->sreg1
, ins
->sreg2
);
3410 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
3411 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
3415 ARM_FCMP (code
, ARM_FPA_CMF
, ins
->sreg1
, ins
->sreg2
);
3416 #elif defined(ARM_FPU_VFP)
3417 ARM_CMPD (code
, ins
->sreg1
, ins
->sreg2
);
3419 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
3420 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
3421 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_VS
);
3426 ARM_FCMP (code
, ARM_FPA_CMF
, ins
->sreg2
, ins
->sreg1
);
3427 #elif defined(ARM_FPU_VFP)
3428 ARM_CMPD (code
, ins
->sreg2
, ins
->sreg1
);
3430 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
3431 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
3436 ARM_FCMP (code
, ARM_FPA_CMF
, ins
->sreg2
, ins
->sreg1
);
3437 #elif defined(ARM_FPU_VFP)
3438 ARM_CMPD (code
, ins
->sreg2
, ins
->sreg1
);
3440 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
3441 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
3442 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_VS
);
3444 /* ARM FPA flags table:
3445 * N Less than ARMCOND_MI
3446 * Z Equal ARMCOND_EQ
3447 * C Greater Than or Equal ARMCOND_CS
3448 * V Unordered ARMCOND_VS
3451 EMIT_COND_BRANCH (ins
, OP_IBEQ
- OP_IBEQ
);
3454 EMIT_COND_BRANCH (ins
, OP_IBNE_UN
- OP_IBEQ
);
3457 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_MI
); /* N set */
3460 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_VS
); /* V set */
3461 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_MI
); /* N set */
3467 g_assert_not_reached ();
3470 /* FIXME does VFP requires both conds?
3471 * FPA requires EQ even thou the docs suggests that just CS is enough
3473 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_EQ
);
3474 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_CS
);
3477 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_VS
); /* V set */
3478 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_GE
);
3483 if (ins
->dreg
!= ins
->sreg1
)
3484 ARM_MVFD (code
, ins
->dreg
, ins
->sreg1
);
3486 g_assert_not_reached ();
3491 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins
->opcode
), __FUNCTION__
);
3492 g_assert_not_reached ();
3495 if ((cfg
->opt
& MONO_OPT_BRANCH
) && ((code
- cfg
->native_code
- offset
) > max_len
)) {
3496 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
3497 mono_inst_name (ins
->opcode
), max_len
, code
- cfg
->native_code
- offset
);
3498 g_assert_not_reached ();
3504 last_offset
= offset
;
3507 cfg
->code_len
= code
- cfg
->native_code
;
3510 #endif /* DISABLE_JIT */
3512 #ifdef HAVE_AEABI_READ_TP
3513 void __aeabi_read_tp (void);
3517 mono_arch_register_lowlevel_calls (void)
3519 /* The signature doesn't matter */
3520 mono_register_jit_icall (mono_arm_throw_exception
, "mono_arm_throw_exception", mono_create_icall_signature ("void"), TRUE
);
3521 mono_register_jit_icall (mono_arm_throw_exception_by_token
, "mono_arm_throw_exception_by_token", mono_create_icall_signature ("void"), TRUE
);
3523 #ifdef HAVE_AEABI_READ_TP
3524 mono_register_jit_icall (__aeabi_read_tp
, "__aeabi_read_tp", mono_create_icall_signature ("void"), TRUE
);
/*
 * patch_lis_ori:
 *
 *   Rewrite a 32 bit constant load at IP with VAL by patching the two
 * 16 bit immediate fields: the upper half word goes into slot 1 and the
 * lower half word into slot 3 of the guint16 view of the code.
 * NOTE(review): the lis/ori pair is a PowerPC idiom; this looks like a
 * leftover from the PPC backend kept for the g_assert_not_reached ()
 * branches in mono_arch_patch_code — confirm it is ever hit on ARM.
 *
 * Defect fixed: the extracted source dropped the "} while (0)" line,
 * leaving the do-while unterminated (the trailing backslash would
 * splice the following source line into the macro body).
 */
#define patch_lis_ori(ip,val) do {\
		guint16 *__lis_ori = (guint16*)(ip); \
		__lis_ori [1] = (((guint32)(val)) >> 16) & 0xffff; \
		__lis_ori [3] = ((guint32)(val)) & 0xffff; \
	} while (0)
3535 mono_arch_patch_code (MonoMethod
*method
, MonoDomain
*domain
, guint8
*code
, MonoJumpInfo
*ji
, gboolean run_cctors
)
3537 MonoJumpInfo
*patch_info
;
3538 gboolean compile_aot
= !run_cctors
;
3540 for (patch_info
= ji
; patch_info
; patch_info
= patch_info
->next
) {
3541 unsigned char *ip
= patch_info
->ip
.i
+ code
;
3542 const unsigned char *target
;
3544 if (patch_info
->type
== MONO_PATCH_INFO_SWITCH
&& !compile_aot
) {
3545 gpointer
*jt
= (gpointer
*)(ip
+ 8);
3547 /* jt is the inlined jump table, 2 instructions after ip
3548 * In the normal case we store the absolute addresses,
3549 * otherwise the displacements.
3551 for (i
= 0; i
< patch_info
->data
.table
->table_size
; i
++)
3552 jt
[i
] = code
+ (int)patch_info
->data
.table
->table
[i
];
3555 target
= mono_resolve_patch_target (method
, domain
, code
, patch_info
, run_cctors
);
3558 switch (patch_info
->type
) {
3559 case MONO_PATCH_INFO_BB
:
3560 case MONO_PATCH_INFO_LABEL
:
3563 /* No need to patch these */
3568 switch (patch_info
->type
) {
3569 case MONO_PATCH_INFO_IP
:
3570 g_assert_not_reached ();
3571 patch_lis_ori (ip
, ip
);
3573 case MONO_PATCH_INFO_METHOD_REL
:
3574 g_assert_not_reached ();
3575 *((gpointer
*)(ip
)) = code
+ patch_info
->data
.offset
;
3577 case MONO_PATCH_INFO_METHODCONST
:
3578 case MONO_PATCH_INFO_CLASS
:
3579 case MONO_PATCH_INFO_IMAGE
:
3580 case MONO_PATCH_INFO_FIELD
:
3581 case MONO_PATCH_INFO_VTABLE
:
3582 case MONO_PATCH_INFO_IID
:
3583 case MONO_PATCH_INFO_SFLDA
:
3584 case MONO_PATCH_INFO_LDSTR
:
3585 case MONO_PATCH_INFO_TYPE_FROM_HANDLE
:
3586 case MONO_PATCH_INFO_LDTOKEN
:
3587 g_assert_not_reached ();
3588 /* from OP_AOTCONST : lis + ori */
3589 patch_lis_ori (ip
, target
);
3591 case MONO_PATCH_INFO_R4
:
3592 case MONO_PATCH_INFO_R8
:
3593 g_assert_not_reached ();
3594 *((gconstpointer
*)(ip
+ 2)) = patch_info
->data
.target
;
3596 case MONO_PATCH_INFO_EXC_NAME
:
3597 g_assert_not_reached ();
3598 *((gconstpointer
*)(ip
+ 1)) = patch_info
->data
.name
;
3600 case MONO_PATCH_INFO_NONE
:
3601 case MONO_PATCH_INFO_BB_OVF
:
3602 case MONO_PATCH_INFO_EXC_OVF
:
3603 /* everything is dealt with at epilog output time */
3608 arm_patch (ip
, target
);
3613 * Stack frame layout:
3615 * ------------------- fp
3616 * MonoLMF structure or saved registers
3617 * -------------------
3619 * -------------------
3621 * -------------------
3622 * optional 8 bytes for tracing
3623 * -------------------
3624 * param area size is cfg->param_area
3625 * ------------------- sp
3628 mono_arch_emit_prolog (MonoCompile
*cfg
)
3630 MonoMethod
*method
= cfg
->method
;
3632 MonoMethodSignature
*sig
;
3634 int alloc_size
, pos
, max_offset
, i
, rot_amount
;
3639 int prev_sp_offset
, reg_offset
;
3641 if (mono_jit_trace_calls
!= NULL
&& mono_trace_eval (method
))
3644 sig
= mono_method_signature (method
);
3645 cfg
->code_size
= 256 + sig
->param_count
* 20;
3646 code
= cfg
->native_code
= g_malloc (cfg
->code_size
);
3648 mono_emit_unwind_op_def_cfa (cfg
, code
, ARMREG_SP
, 0);
3650 ARM_MOV_REG_REG (code
, ARMREG_IP
, ARMREG_SP
);
3652 alloc_size
= cfg
->stack_offset
;
3655 if (!method
->save_lmf
) {
3656 /* We save SP by storing it into IP and saving IP */
3657 ARM_PUSH (code
, (cfg
->used_int_regs
| (1 << ARMREG_IP
) | (1 << ARMREG_LR
)));
3658 prev_sp_offset
= 8; /* ip and lr */
3659 for (i
= 0; i
< 16; ++i
) {
3660 if (cfg
->used_int_regs
& (1 << i
))
3661 prev_sp_offset
+= 4;
3663 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, prev_sp_offset
);
3665 for (i
= 0; i
< 16; ++i
) {
3666 if ((cfg
->used_int_regs
& (1 << i
)) || (i
== ARMREG_IP
) || (i
== ARMREG_LR
)) {
3667 mono_emit_unwind_op_offset (cfg
, code
, i
, (- prev_sp_offset
) + reg_offset
);
3672 ARM_PUSH (code
, 0x5ff0);
3673 prev_sp_offset
= 4 * 10; /* all but r0-r3, sp and pc */
3674 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, prev_sp_offset
);
3676 for (i
= 0; i
< 16; ++i
) {
3677 if ((i
> ARMREG_R3
) && (i
!= ARMREG_SP
) && (i
!= ARMREG_PC
)) {
3678 mono_emit_unwind_op_offset (cfg
, code
, i
, (- prev_sp_offset
) + reg_offset
);
3682 pos
+= sizeof (MonoLMF
) - prev_sp_offset
;
3686 // align to MONO_ARCH_FRAME_ALIGNMENT bytes
3687 if (alloc_size
& (MONO_ARCH_FRAME_ALIGNMENT
- 1)) {
3688 alloc_size
+= MONO_ARCH_FRAME_ALIGNMENT
- 1;
3689 alloc_size
&= ~(MONO_ARCH_FRAME_ALIGNMENT
- 1);
3692 /* the stack used in the pushed regs */
3693 if (prev_sp_offset
& 4)
3695 cfg
->stack_usage
= alloc_size
;
3697 if ((i
= mono_arm_is_rotated_imm8 (alloc_size
, &rot_amount
)) >= 0) {
3698 ARM_SUB_REG_IMM (code
, ARMREG_SP
, ARMREG_SP
, i
, rot_amount
);
3700 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, alloc_size
);
3701 ARM_SUB_REG_REG (code
, ARMREG_SP
, ARMREG_SP
, ARMREG_IP
);
3703 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, prev_sp_offset
+ alloc_size
);
3705 if (cfg
->frame_reg
!= ARMREG_SP
) {
3706 ARM_MOV_REG_REG (code
, cfg
->frame_reg
, ARMREG_SP
);
3707 mono_emit_unwind_op_def_cfa_reg (cfg
, code
, cfg
->frame_reg
);
3709 //g_print ("prev_sp_offset: %d, alloc_size:%d\n", prev_sp_offset, alloc_size);
3710 prev_sp_offset
+= alloc_size
;
3712 /* compute max_offset in order to use short forward jumps
3713 * we could skip do it on arm because the immediate displacement
3714 * for jumps is large enough, it may be useful later for constant pools
3717 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
) {
3718 MonoInst
*ins
= bb
->code
;
3719 bb
->max_offset
= max_offset
;
3721 if (cfg
->prof_options
& MONO_PROFILE_COVERAGE
)
3724 MONO_BB_FOR_EACH_INS (bb
, ins
)
3725 max_offset
+= ((guint8
*)ins_get_spec (ins
->opcode
))[MONO_INST_LEN
];
3728 /* store runtime generic context */
3729 if (cfg
->rgctx_var
) {
3730 MonoInst
*ins
= cfg
->rgctx_var
;
3732 g_assert (ins
->opcode
== OP_REGOFFSET
);
3734 if (arm_is_imm12 (ins
->inst_offset
)) {
3735 ARM_STR_IMM (code
, MONO_ARCH_RGCTX_REG
, ins
->inst_basereg
, ins
->inst_offset
);
3737 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
3738 ARM_STR_REG_REG (code
, MONO_ARCH_RGCTX_REG
, ins
->inst_basereg
, ARMREG_LR
);
3742 /* load arguments allocated to register from the stack */
3745 cinfo
= calculate_sizes (sig
, sig
->pinvoke
);
3747 if (MONO_TYPE_ISSTRUCT (sig
->ret
)) {
3748 ArgInfo
*ainfo
= &cinfo
->ret
;
3749 inst
= cfg
->vret_addr
;
3750 g_assert (arm_is_imm12 (inst
->inst_offset
));
3751 ARM_STR_IMM (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
3753 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
3754 ArgInfo
*ainfo
= cinfo
->args
+ i
;
3755 inst
= cfg
->args
[pos
];
3757 if (cfg
->verbose_level
> 2)
3758 g_print ("Saving argument %d (type: %d)\n", i
, ainfo
->regtype
);
3759 if (inst
->opcode
== OP_REGVAR
) {
3760 if (ainfo
->regtype
== RegTypeGeneral
)
3761 ARM_MOV_REG_REG (code
, inst
->dreg
, ainfo
->reg
);
3762 else if (ainfo
->regtype
== RegTypeFP
) {
3763 g_assert_not_reached ();
3764 } else if (ainfo
->regtype
== RegTypeBase
) {
3765 if (arm_is_imm12 (prev_sp_offset
+ ainfo
->offset
)) {
3766 ARM_LDR_IMM (code
, inst
->dreg
, ARMREG_SP
, (prev_sp_offset
+ ainfo
->offset
));
3768 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
3769 ARM_LDR_REG_REG (code
, inst
->dreg
, ARMREG_SP
, ARMREG_IP
);
3772 g_assert_not_reached ();
3774 if (cfg
->verbose_level
> 2)
3775 g_print ("Argument %d assigned to register %s\n", pos
, mono_arch_regname (inst
->dreg
));
3777 /* the argument should be put on the stack: FIXME handle size != word */
3778 if (ainfo
->regtype
== RegTypeGeneral
) {
3779 switch (ainfo
->size
) {
3781 if (arm_is_imm12 (inst
->inst_offset
))
3782 ARM_STRB_IMM (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
3784 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
3785 ARM_STRB_REG_REG (code
, ainfo
->reg
, inst
->inst_basereg
, ARMREG_IP
);
3789 if (arm_is_imm8 (inst
->inst_offset
)) {
3790 ARM_STRH_IMM (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
3792 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
3793 ARM_STRH_REG_REG (code
, ainfo
->reg
, inst
->inst_basereg
, ARMREG_IP
);
3797 g_assert (arm_is_imm12 (inst
->inst_offset
));
3798 ARM_STR_IMM (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
3799 g_assert (arm_is_imm12 (inst
->inst_offset
+ 4));
3800 ARM_STR_IMM (code
, ainfo
->reg
+ 1, inst
->inst_basereg
, inst
->inst_offset
+ 4);
3803 if (arm_is_imm12 (inst
->inst_offset
)) {
3804 ARM_STR_IMM (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
3806 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
3807 ARM_STR_REG_REG (code
, ainfo
->reg
, inst
->inst_basereg
, ARMREG_IP
);
3811 } else if (ainfo
->regtype
== RegTypeBaseGen
) {
3812 g_assert (arm_is_imm12 (prev_sp_offset
+ ainfo
->offset
));
3813 g_assert (arm_is_imm12 (inst
->inst_offset
));
3814 ARM_LDR_IMM (code
, ARMREG_LR
, ARMREG_SP
, (prev_sp_offset
+ ainfo
->offset
));
3815 ARM_STR_IMM (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
+ 4);
3816 ARM_STR_IMM (code
, ARMREG_R3
, inst
->inst_basereg
, inst
->inst_offset
);
3817 } else if (ainfo
->regtype
== RegTypeBase
) {
3818 if (arm_is_imm12 (prev_sp_offset
+ ainfo
->offset
)) {
3819 ARM_LDR_IMM (code
, ARMREG_LR
, ARMREG_SP
, (prev_sp_offset
+ ainfo
->offset
));
3821 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, prev_sp_offset
+ ainfo
->offset
);
3822 ARM_LDR_REG_REG (code
, ARMREG_LR
, ARMREG_SP
, ARMREG_IP
);
3825 switch (ainfo
->size
) {
3827 if (arm_is_imm8 (inst
->inst_offset
)) {
3828 ARM_STRB_IMM (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
);
3830 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
3831 ARM_STRB_REG_REG (code
, ARMREG_LR
, inst
->inst_basereg
, ARMREG_IP
);
3835 if (arm_is_imm8 (inst
->inst_offset
)) {
3836 ARM_STRH_IMM (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
);
3838 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
3839 ARM_STRH_REG_REG (code
, ARMREG_LR
, inst
->inst_basereg
, ARMREG_IP
);
3843 if (arm_is_imm12 (inst
->inst_offset
)) {
3844 ARM_STR_IMM (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
);
3846 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
3847 ARM_STR_REG_REG (code
, ARMREG_LR
, inst
->inst_basereg
, ARMREG_IP
);
3849 if (arm_is_imm12 (prev_sp_offset
+ ainfo
->offset
+ 4)) {
3850 ARM_LDR_IMM (code
, ARMREG_LR
, ARMREG_SP
, (prev_sp_offset
+ ainfo
->offset
+ 4));
3852 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, prev_sp_offset
+ ainfo
->offset
+ 4);
3853 ARM_LDR_REG_REG (code
, ARMREG_LR
, ARMREG_SP
, ARMREG_IP
);
3855 if (arm_is_imm12 (inst
->inst_offset
+ 4)) {
3856 ARM_STR_IMM (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
+ 4);
3858 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
+ 4);
3859 ARM_STR_REG_REG (code
, ARMREG_LR
, inst
->inst_basereg
, ARMREG_IP
);
3863 if (arm_is_imm12 (inst
->inst_offset
)) {
3864 ARM_STR_IMM (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
);
3866 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
3867 ARM_STR_REG_REG (code
, ARMREG_LR
, inst
->inst_basereg
, ARMREG_IP
);
3871 } else if (ainfo
->regtype
== RegTypeFP
) {
3872 g_assert_not_reached ();
3873 } else if (ainfo
->regtype
== RegTypeStructByVal
) {
3874 int doffset
= inst
->inst_offset
;
3878 size
= mini_type_stack_size_full (cfg
->generic_sharing_context
, inst
->inst_vtype
, NULL
, sig
->pinvoke
);
3879 for (cur_reg
= 0; cur_reg
< ainfo
->size
; ++cur_reg
) {
3880 if (arm_is_imm12 (doffset
)) {
3881 ARM_STR_IMM (code
, ainfo
->reg
+ cur_reg
, inst
->inst_basereg
, doffset
);
3883 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, doffset
);
3884 ARM_STR_REG_REG (code
, ainfo
->reg
+ cur_reg
, inst
->inst_basereg
, ARMREG_IP
);
3886 soffset
+= sizeof (gpointer
);
3887 doffset
+= sizeof (gpointer
);
3889 if (ainfo
->vtsize
) {
3890 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
3891 //g_print ("emit_memcpy (prev_sp_ofs: %d, ainfo->offset: %d, soffset: %d)\n", prev_sp_offset, ainfo->offset, soffset);
3892 code
= emit_memcpy (code
, ainfo
->vtsize
* sizeof (gpointer
), inst
->inst_basereg
, doffset
, ARMREG_SP
, prev_sp_offset
+ ainfo
->offset
);
3894 } else if (ainfo
->regtype
== RegTypeStructByAddr
) {
3895 g_assert_not_reached ();
3896 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
3897 code
= emit_memcpy (code
, ainfo
->vtsize
* sizeof (gpointer
), inst
->inst_basereg
, inst
->inst_offset
, ainfo
->reg
, 0);
3899 g_assert_not_reached ();
3904 if (method
->wrapper_type
== MONO_WRAPPER_NATIVE_TO_MANAGED
) {
3905 code
= mono_arm_emit_load_imm (code
, ARMREG_R0
, (guint32
)cfg
->domain
);
3906 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
3907 (gpointer
)"mono_jit_thread_attach");
3908 code
= emit_call_seq (cfg
, code
);
3911 if (method
->save_lmf
) {
3912 gboolean get_lmf_fast
= FALSE
;
3914 #ifdef HAVE_AEABI_READ_TP
3915 gint32 lmf_addr_tls_offset
= mono_get_lmf_addr_tls_offset ();
3917 if (lmf_addr_tls_offset
!= -1) {
3918 get_lmf_fast
= TRUE
;
3920 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
3921 (gpointer
)"__aeabi_read_tp");
3922 code
= emit_call_seq (cfg
, code
);
3924 ARM_LDR_IMM (code
, ARMREG_R0
, ARMREG_R0
, lmf_addr_tls_offset
);
3925 get_lmf_fast
= TRUE
;
3928 if (!get_lmf_fast
) {
3929 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
3930 (gpointer
)"mono_get_lmf_addr");
3931 code
= emit_call_seq (cfg
, code
);
3933 /* we build the MonoLMF structure on the stack - see mini-arm.h */
3934 /* lmf_offset is the offset from the previous stack pointer,
3935 * alloc_size is the total stack space allocated, so the offset
3936 * of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
3937 * The pointer to the struct is put in r1 (new_lmf).
3938 * r2 is used as scratch
3939 * The callee-saved registers are already in the MonoLMF structure
3941 code
= emit_big_add (code
, ARMREG_R1
, ARMREG_SP
, alloc_size
- lmf_offset
);
3942 /* r0 is the result from mono_get_lmf_addr () */
3943 ARM_STR_IMM (code
, ARMREG_R0
, ARMREG_R1
, G_STRUCT_OFFSET (MonoLMF
, lmf_addr
));
3944 /* new_lmf->previous_lmf = *lmf_addr */
3945 ARM_LDR_IMM (code
, ARMREG_R2
, ARMREG_R0
, G_STRUCT_OFFSET (MonoLMF
, previous_lmf
));
3946 ARM_STR_IMM (code
, ARMREG_R2
, ARMREG_R1
, G_STRUCT_OFFSET (MonoLMF
, previous_lmf
));
3947 /* *(lmf_addr) = r1 */
3948 ARM_STR_IMM (code
, ARMREG_R1
, ARMREG_R0
, G_STRUCT_OFFSET (MonoLMF
, previous_lmf
));
3949 /* Skip method (only needed for trampoline LMF frames) */
3950 ARM_STR_IMM (code
, ARMREG_SP
, ARMREG_R1
, G_STRUCT_OFFSET (MonoLMF
, ebp
));
3951 /* save the current IP */
3952 ARM_MOV_REG_REG (code
, ARMREG_R2
, ARMREG_PC
);
3953 ARM_STR_IMM (code
, ARMREG_R2
, ARMREG_R1
, G_STRUCT_OFFSET (MonoLMF
, eip
));
3957 code
= mono_arch_instrument_prolog (cfg
, mono_trace_enter_method
, code
, TRUE
);
3959 cfg
->code_len
= code
- cfg
->native_code
;
3960 g_assert (cfg
->code_len
< cfg
->code_size
);
3967 mono_arch_emit_epilog (MonoCompile
*cfg
)
3969 MonoMethod
*method
= cfg
->method
;
3970 int pos
, i
, rot_amount
;
3971 int max_epilog_size
= 16 + 20*4;
3974 if (cfg
->method
->save_lmf
)
3975 max_epilog_size
+= 128;
3977 if (mono_jit_trace_calls
!= NULL
)
3978 max_epilog_size
+= 50;
3980 if (cfg
->prof_options
& MONO_PROFILE_ENTER_LEAVE
)
3981 max_epilog_size
+= 50;
3983 while (cfg
->code_len
+ max_epilog_size
> (cfg
->code_size
- 16)) {
3984 cfg
->code_size
*= 2;
3985 cfg
->native_code
= g_realloc (cfg
->native_code
, cfg
->code_size
);
3986 mono_jit_stats
.code_reallocs
++;
3990 * Keep in sync with OP_JMP
3992 code
= cfg
->native_code
+ cfg
->code_len
;
3994 if (mono_jit_trace_calls
!= NULL
&& mono_trace_eval (method
)) {
3995 code
= mono_arch_instrument_epilog (cfg
, mono_trace_leave_method
, code
, TRUE
);
3999 if (method
->save_lmf
) {
4001 /* all but r0-r3, sp and pc */
4002 pos
+= sizeof (MonoLMF
) - (4 * 10);
4004 /* r2 contains the pointer to the current LMF */
4005 code
= emit_big_add (code
, ARMREG_R2
, cfg
->frame_reg
, cfg
->stack_usage
- lmf_offset
);
4006 /* ip = previous_lmf */
4007 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_R2
, G_STRUCT_OFFSET (MonoLMF
, previous_lmf
));
4009 ARM_LDR_IMM (code
, ARMREG_LR
, ARMREG_R2
, G_STRUCT_OFFSET (MonoLMF
, lmf_addr
));
4010 /* *(lmf_addr) = previous_lmf */
4011 ARM_STR_IMM (code
, ARMREG_IP
, ARMREG_LR
, G_STRUCT_OFFSET (MonoLMF
, previous_lmf
));
4012 /* FIXME: speedup: there is no actual need to restore the registers if
4013 * we didn't actually change them (idea from Zoltan).
4016 /* point sp at the registers to restore: 10 is 14 -4, because we skip r0-r3 */
4017 ARM_ADD_REG_IMM8 (code
, ARMREG_SP
, ARMREG_R2
, (sizeof (MonoLMF
) - 10 * sizeof (gulong
)));
4018 ARM_POP_NWB (code
, 0xaff0); /* restore ip to sp and lr to pc */
4020 if ((i
= mono_arm_is_rotated_imm8 (cfg
->stack_usage
, &rot_amount
)) >= 0) {
4021 ARM_ADD_REG_IMM (code
, ARMREG_SP
, cfg
->frame_reg
, i
, rot_amount
);
4023 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, cfg
->stack_usage
);
4024 ARM_ADD_REG_REG (code
, ARMREG_SP
, ARMREG_SP
, ARMREG_IP
);
4026 /* FIXME: add v4 thumb interworking support */
4027 ARM_POP_NWB (code
, cfg
->used_int_regs
| ((1 << ARMREG_SP
) | (1 << ARMREG_PC
)));
4030 cfg
->code_len
= code
- cfg
->native_code
;
4032 g_assert (cfg
->code_len
< cfg
->code_size
);
4036 /* remove once throw_exception_by_name is eliminated */
4038 exception_id_by_name (const char *name
)
4040 if (strcmp (name
, "IndexOutOfRangeException") == 0)
4041 return MONO_EXC_INDEX_OUT_OF_RANGE
;
4042 if (strcmp (name
, "OverflowException") == 0)
4043 return MONO_EXC_OVERFLOW
;
4044 if (strcmp (name
, "ArithmeticException") == 0)
4045 return MONO_EXC_ARITHMETIC
;
4046 if (strcmp (name
, "DivideByZeroException") == 0)
4047 return MONO_EXC_DIVIDE_BY_ZERO
;
4048 if (strcmp (name
, "InvalidCastException") == 0)
4049 return MONO_EXC_INVALID_CAST
;
4050 if (strcmp (name
, "NullReferenceException") == 0)
4051 return MONO_EXC_NULL_REF
;
4052 if (strcmp (name
, "ArrayTypeMismatchException") == 0)
4053 return MONO_EXC_ARRAY_TYPE_MISMATCH
;
4054 g_error ("Unknown intrinsic exception %s\n", name
);
4059 mono_arch_emit_exceptions (MonoCompile
*cfg
)
4061 MonoJumpInfo
*patch_info
;
4064 const guint8
* exc_throw_pos
[MONO_EXC_INTRINS_NUM
] = {NULL
};
4065 guint8 exc_throw_found
[MONO_EXC_INTRINS_NUM
] = {0};
4066 int max_epilog_size
= 50;
4068 /* count the number of exception infos */
4071 * make sure we have enough space for exceptions
4073 for (patch_info
= cfg
->patch_info
; patch_info
; patch_info
= patch_info
->next
) {
4074 if (patch_info
->type
== MONO_PATCH_INFO_EXC
) {
4075 i
= exception_id_by_name (patch_info
->data
.target
);
4076 if (!exc_throw_found
[i
]) {
4077 max_epilog_size
+= 32;
4078 exc_throw_found
[i
] = TRUE
;
4083 while (cfg
->code_len
+ max_epilog_size
> (cfg
->code_size
- 16)) {
4084 cfg
->code_size
*= 2;
4085 cfg
->native_code
= g_realloc (cfg
->native_code
, cfg
->code_size
);
4086 mono_jit_stats
.code_reallocs
++;
4089 code
= cfg
->native_code
+ cfg
->code_len
;
4091 /* add code to raise exceptions */
4092 for (patch_info
= cfg
->patch_info
; patch_info
; patch_info
= patch_info
->next
) {
4093 switch (patch_info
->type
) {
4094 case MONO_PATCH_INFO_EXC
: {
4095 MonoClass
*exc_class
;
4096 unsigned char *ip
= patch_info
->ip
.i
+ cfg
->native_code
;
4098 i
= exception_id_by_name (patch_info
->data
.target
);
4099 if (exc_throw_pos
[i
]) {
4100 arm_patch (ip
, exc_throw_pos
[i
]);
4101 patch_info
->type
= MONO_PATCH_INFO_NONE
;
4104 exc_throw_pos
[i
] = code
;
4106 arm_patch (ip
, code
);
4108 exc_class
= mono_class_from_name (mono_defaults
.corlib
, "System", patch_info
->data
.name
);
4109 g_assert (exc_class
);
4111 ARM_MOV_REG_REG (code
, ARMREG_R1
, ARMREG_LR
);
4112 ARM_LDR_IMM (code
, ARMREG_R0
, ARMREG_PC
, 0);
4113 patch_info
->type
= MONO_PATCH_INFO_INTERNAL_METHOD
;
4114 patch_info
->data
.name
= "mono_arch_throw_corlib_exception";
4115 patch_info
->ip
.i
= code
- cfg
->native_code
;
4117 *(guint32
*)(gpointer
)code
= exc_class
->type_token
;
4127 cfg
->code_len
= code
- cfg
->native_code
;
4129 g_assert (cfg
->code_len
< cfg
->code_size
);
4133 static gboolean tls_offset_inited
= FALSE
;
4136 mono_arch_setup_jit_tls_data (MonoJitTlsData
*tls
)
4138 if (!tls_offset_inited
) {
4139 tls_offset_inited
= TRUE
;
4141 lmf_tls_offset
= mono_get_lmf_tls_offset ();
4142 lmf_addr_tls_offset
= mono_get_lmf_addr_tls_offset ();
4147 mono_arch_free_jit_tls_data (MonoJitTlsData
*tls
)
4152 mono_arch_emit_inst_for_method (MonoCompile
*cfg
, MonoMethod
*cmethod
, MonoMethodSignature
*fsig
, MonoInst
**args
)
4159 mono_arch_print_tree (MonoInst
*tree
, int arity
)
4165 mono_arch_get_domain_intrinsic (MonoCompile
* cfg
)
4167 return mono_get_domain_intrinsic (cfg
);
4171 mono_arch_get_thread_intrinsic (MonoCompile
* cfg
)
4173 return mono_get_thread_intrinsic (cfg
);
4177 mono_arch_get_patch_offset (guint8
*code
)
4184 mono_arch_flush_register_windows (void)
4189 mono_arch_fixup_jinfo (MonoCompile
*cfg
)
4193 #ifdef MONO_ARCH_HAVE_IMT
4196 mono_arch_emit_imt_argument (MonoCompile
*cfg
, MonoCallInst
*call
, MonoInst
*imt_arg
)
4198 if (cfg
->compile_aot
) {
4199 int method_reg
= mono_alloc_ireg (cfg
);
4202 call
->dynamic_imt_arg
= TRUE
;
4204 MONO_INST_NEW (cfg
, ins
, OP_AOTCONST
);
4205 ins
->dreg
= method_reg
;
4206 ins
->inst_p0
= call
->method
;
4207 ins
->inst_c1
= MONO_PATCH_INFO_METHODCONST
;
4208 MONO_ADD_INS (cfg
->cbb
, ins
);
4210 mono_call_inst_add_outarg_reg (cfg
, call
, method_reg
, ARMREG_V5
, FALSE
);
4211 } else if (cfg
->generic_context
) {
4213 /* Always pass in a register for simplicity */
4214 call
->dynamic_imt_arg
= TRUE
;
4216 cfg
->uses_rgctx_reg
= TRUE
;
4219 mono_call_inst_add_outarg_reg (cfg
, call
, imt_arg
->dreg
, ARMREG_V5
, FALSE
);
4222 int method_reg
= mono_alloc_preg (cfg
);
4224 MONO_INST_NEW (cfg
, ins
, OP_PCONST
);
4225 ins
->inst_p0
= call
->method
;
4226 ins
->dreg
= method_reg
;
4227 MONO_ADD_INS (cfg
->cbb
, ins
);
4229 mono_call_inst_add_outarg_reg (cfg
, call
, method_reg
, ARMREG_V5
, FALSE
);
4235 mono_arch_find_imt_method (gpointer
*regs
, guint8
*code
)
4237 guint32
*code_ptr
= (guint32
*)code
;
4239 /* The IMT value is stored in the code stream right after the LDC instruction. */
4240 if (!IS_LDR_PC (code_ptr
[0])) {
4241 g_warning ("invalid code stream, instruction before IMT value is not a LDC in %s() (code %p value 0: 0x%x -1: 0x%x -2: 0x%x)", __FUNCTION__
, code
, code_ptr
[2], code_ptr
[1], code_ptr
[0]);
4242 g_assert (IS_LDR_PC (code_ptr
[0]));
4244 if (code_ptr
[1] == 0)
4245 /* This is AOTed code, the IMT method is in V5 */
4246 return (MonoMethod
*)regs
[ARMREG_V5
];
4248 return (MonoMethod
*) code_ptr
[1];
4252 mono_arch_find_this_argument (gpointer
*regs
, MonoMethod
*method
, MonoGenericSharingContext
*gsctx
)
4254 return mono_arch_get_this_arg_from_call (gsctx
, mono_method_signature (method
), (gssize
*)regs
, NULL
);
4258 mono_arch_find_static_call_vtable (gpointer
*regs
, guint8
*code
)
4260 return (MonoVTable
*) regs
[MONO_ARCH_RGCTX_REG
];
4263 #define ENABLE_WRONG_METHOD_CHECK 0
4264 #define BASE_SIZE (6 * 4)
4265 #define BSEARCH_ENTRY_SIZE (4 * 4)
4266 #define CMP_SIZE (3 * 4)
4267 #define BRANCH_SIZE (1 * 4)
4268 #define CALL_SIZE (2 * 4)
4269 #define WMC_SIZE (5 * 4)
4270 #define DISTANCE(A, B) (((gint32)(B)) - ((gint32)(A)))
4273 arm_emit_value_and_patch_ldr (arminstr_t
*code
, arminstr_t
*target
, guint32 value
)
4275 guint32 delta
= DISTANCE (target
, code
);
4277 g_assert (delta
>= 0 && delta
<= 0xFFF);
4278 *target
= *target
| delta
;
4284 mono_arch_build_imt_thunk (MonoVTable
*vtable
, MonoDomain
*domain
, MonoIMTCheckItem
**imt_entries
, int count
,
4285 gpointer fail_tramp
)
4287 int size
, i
, extra_space
= 0;
4288 arminstr_t
*code
, *start
, *vtable_target
= NULL
;
4289 gboolean large_offsets
= FALSE
;
4290 guint32
**constant_pool_starts
;
4293 constant_pool_starts
= g_new0 (guint32
*, count
);
4295 g_assert (!fail_tramp
);
4297 for (i
= 0; i
< count
; ++i
) {
4298 MonoIMTCheckItem
*item
= imt_entries
[i
];
4299 if (item
->is_equals
) {
4300 if (!arm_is_imm12 (DISTANCE (vtable
, &vtable
->vtable
[item
->value
.vtable_slot
]))) {
4301 item
->chunk_size
+= 32;
4302 large_offsets
= TRUE
;
4305 if (item
->check_target_idx
) {
4306 if (!item
->compare_done
)
4307 item
->chunk_size
+= CMP_SIZE
;
4308 item
->chunk_size
+= BRANCH_SIZE
;
4310 #if ENABLE_WRONG_METHOD_CHECK
4311 item
->chunk_size
+= WMC_SIZE
;
4314 item
->chunk_size
+= CALL_SIZE
;
4316 item
->chunk_size
+= BSEARCH_ENTRY_SIZE
;
4317 imt_entries
[item
->check_target_idx
]->compare_done
= TRUE
;
4319 size
+= item
->chunk_size
;
4323 size
+= 4 * count
; /* The ARM_ADD_REG_IMM to pop the stack */
4325 start
= code
= mono_domain_code_reserve (domain
, size
);
4328 printf ("building IMT thunk for class %s %s entries %d code size %d code at %p end %p vtable %p\n", vtable
->klass
->name_space
, vtable
->klass
->name
, count
, size
, start
, ((guint8
*)start
) + size
, vtable
);
4329 for (i
= 0; i
< count
; ++i
) {
4330 MonoIMTCheckItem
*item
= imt_entries
[i
];
4331 printf ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i
, item
->key
, item
->key
->name
, &vtable
->vtable
[item
->value
.vtable_slot
], item
->is_equals
, item
->chunk_size
);
4336 ARM_PUSH4 (code
, ARMREG_R0
, ARMREG_R1
, ARMREG_IP
, ARMREG_PC
);
4338 ARM_PUSH2 (code
, ARMREG_R0
, ARMREG_R1
);
4339 ARM_LDR_IMM (code
, ARMREG_R0
, ARMREG_LR
, -4);
4340 vtable_target
= code
;
4341 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_PC
, 0);
4343 /* R0 == 0 means we are called from AOT code. In this case, V5 contains the IMT method */
4344 ARM_CMP_REG_IMM8 (code
, ARMREG_R0
, 0);
4345 ARM_MOV_REG_REG_COND (code
, ARMREG_R0
, ARMREG_V5
, ARMCOND_EQ
);
4347 for (i
= 0; i
< count
; ++i
) {
4348 MonoIMTCheckItem
*item
= imt_entries
[i
];
4349 arminstr_t
*imt_method
= NULL
, *vtable_offset_ins
= NULL
;
4350 gint32 vtable_offset
;
4352 item
->code_target
= (guint8
*)code
;
4354 if (item
->is_equals
) {
4355 if (item
->check_target_idx
) {
4356 if (!item
->compare_done
) {
4358 ARM_LDR_IMM (code
, ARMREG_R1
, ARMREG_PC
, 0);
4359 ARM_CMP_REG_REG (code
, ARMREG_R0
, ARMREG_R1
);
4361 item
->jmp_code
= (guint8
*)code
;
4362 ARM_B_COND (code
, ARMCOND_NE
, 0);
4364 /*Enable the commented code to assert on wrong method*/
4365 #if ENABLE_WRONG_METHOD_CHECK
4367 ARM_LDR_IMM (code
, ARMREG_R1
, ARMREG_PC
, 0);
4368 ARM_CMP_REG_REG (code
, ARMREG_R0
, ARMREG_R1
);
4369 ARM_B_COND (code
, ARMCOND_NE
, 1);
4375 vtable_offset
= DISTANCE (vtable
, &vtable
->vtable
[item
->value
.vtable_slot
]);
4376 if (!arm_is_imm12 (vtable_offset
)) {
4378 * We need to branch to a computed address but we don't have
4379 * a free register to store it, since IP must contain the
4380 * vtable address. So we push the two values to the stack, and
4381 * load them both using LDM.
4383 /* Compute target address */
4384 vtable_offset_ins
= code
;
4385 ARM_LDR_IMM (code
, ARMREG_R1
, ARMREG_PC
, 0);
4386 ARM_LDR_REG_REG (code
, ARMREG_R1
, ARMREG_IP
, ARMREG_R1
);
4387 /* Save it to the fourth slot */
4388 ARM_STR_IMM (code
, ARMREG_R1
, ARMREG_SP
, 3 * sizeof (gpointer
));
4389 /* Restore registers and branch */
4390 ARM_POP4 (code
, ARMREG_R0
, ARMREG_R1
, ARMREG_IP
, ARMREG_PC
);
4392 code
= arm_emit_value_and_patch_ldr (code
, vtable_offset_ins
, vtable_offset
);
4394 ARM_POP2 (code
, ARMREG_R0
, ARMREG_R1
);
4396 ARM_ADD_REG_IMM8 (code
, ARMREG_SP
, ARMREG_SP
, 2 * sizeof (gpointer
));
4397 ARM_LDR_IMM (code
, ARMREG_PC
, ARMREG_IP
, vtable_offset
);
4401 code
= arm_emit_value_and_patch_ldr (code
, imt_method
, (guint32
)item
->key
);
4403 /*must emit after unconditional branch*/
4404 if (vtable_target
) {
4405 code
= arm_emit_value_and_patch_ldr (code
, vtable_target
, (guint32
)vtable
);
4406 item
->chunk_size
+= 4;
4407 vtable_target
= NULL
;
4410 /*We reserve the space for bsearch IMT values after the first entry with an absolute jump*/
4411 constant_pool_starts
[i
] = code
;
4413 code
+= extra_space
;
4417 ARM_LDR_IMM (code
, ARMREG_R1
, ARMREG_PC
, 0);
4418 ARM_CMP_REG_REG (code
, ARMREG_R0
, ARMREG_R1
);
4420 item
->jmp_code
= (guint8
*)code
;
4421 ARM_B_COND (code
, ARMCOND_GE
, 0);
4426 for (i
= 0; i
< count
; ++i
) {
4427 MonoIMTCheckItem
*item
= imt_entries
[i
];
4428 if (item
->jmp_code
) {
4429 if (item
->check_target_idx
)
4430 arm_patch (item
->jmp_code
, imt_entries
[item
->check_target_idx
]->code_target
);
4432 if (i
> 0 && item
->is_equals
) {
4434 arminstr_t
*space_start
= constant_pool_starts
[i
];
4435 for (j
= i
- 1; j
>= 0 && !imt_entries
[j
]->is_equals
; --j
) {
4436 space_start
= arm_emit_value_and_patch_ldr (space_start
, (arminstr_t
*)imt_entries
[j
]->code_target
, (guint32
)imt_entries
[j
]->key
);
4443 char *buff
= g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", vtable
->klass
->name_space
, vtable
->klass
->name
, count
);
4444 mono_disassemble_code (NULL
, (guint8
*)start
, size
, buff
);
4449 g_free (constant_pool_starts
);
4451 mono_arch_flush_icache ((guint8
*)start
, size
);
4452 mono_stats
.imt_thunks_size
+= code
- start
;
4454 g_assert (DISTANCE (start
, code
) <= size
);
4461 mono_arch_context_get_int_reg (MonoContext
*ctx
, int reg
)
4463 if (reg
>= 4 && reg
<= 11)
4464 return (gpointer
)ctx
->regs
[reg
- 4];
4465 else if (reg
== ARMREG_IP
)
4466 return (gpointer
)ctx
->regs
[8];
4467 else if (reg
== ARMREG_LR
)
4468 return (gpointer
)ctx
->regs
[9];
4470 g_assert_not_reached ();