2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
22 #ifdef HAVE_SYS_TIME_H
30 #ifdef HAVE_VALGRIND_MEMCHECK_H
31 #include <valgrind/memcheck.h>
34 #include <mono/metadata/assembly.h>
35 #include <mono/metadata/loader.h>
36 #include <mono/metadata/tabledefs.h>
37 #include <mono/metadata/class.h>
38 #include <mono/metadata/object.h>
39 #include <mono/metadata/exception.h>
40 #include <mono/metadata/opcodes.h>
41 #include <mono/metadata/mono-endian.h>
42 #include <mono/metadata/tokentype.h>
43 #include <mono/metadata/tabledefs.h>
44 #include <mono/metadata/marshal.h>
45 #include <mono/metadata/debug-helpers.h>
46 #include <mono/metadata/mono-debug.h>
47 #include <mono/metadata/gc-internal.h>
48 #include <mono/metadata/security-manager.h>
49 #include <mono/metadata/threads-types.h>
50 #include <mono/metadata/security-core-clr.h>
51 #include <mono/metadata/monitor.h>
52 #include <mono/utils/mono-compiler.h>
59 #include "jit-icalls.h"
/* Cost heuristic used when deciding whether to keep a branch (tuning knob). */
61 #define BRANCH_COST 100
/* Max IL length (in instructions) of a method considered for inlining. */
62 #define INLINE_LENGTH_LIMIT 20
/* NOTE(review): the macro bodies below appear truncated in this extract —
 * the closing "} while (0)" lines and some statements are missing.
 * The *_FAILURE macros record an exception on the MonoCompile and jump to
 * the method-to-ir exception_exit label; GENERIC_SHARING_FAILURE does the
 * same for methods that cannot be compiled with generic sharing. */
63 #define INLINE_FAILURE do {\
64 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
67 #define CHECK_CFG_EXCEPTION do {\
68 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
71 #define METHOD_ACCESS_FAILURE do { \
72 char *method_fname = mono_method_full_name (method, TRUE); \
73 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
74 cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
75 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
76 g_free (method_fname); \
77 g_free (cil_method_fname); \
78 goto exception_exit; \
80 #define FIELD_ACCESS_FAILURE do { \
81 char *method_fname = mono_method_full_name (method, TRUE); \
82 char *field_fname = mono_field_full_name (field); \
83 cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
84 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
85 g_free (method_fname); \
86 g_free (field_fname); \
87 goto exception_exit; \
89 #define GENERIC_SHARING_FAILURE(opcode) do { \
90 if (cfg->generic_sharing_context) { \
91 if (cfg->verbose_level > 2) \
92 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
93 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
94 goto exception_exit; \
98 /* Determine whenever 'ins' represents a load of the 'this' argument */
/* True when the method has a 'this' arg and 'ins' is an OP_MOVE whose
 * source register is the vreg holding argument 0. */
99 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations: opcode translation helpers defined later in this file.
 * ldind/stind variants map CIL indirect load/store opcodes onto the JIT's
 * *_MEMBASE opcodes; the op_imm variants map register-register opcodes to
 * their immediate-operand forms. */
101 static int ldind_to_load_membase (int opcode
);
102 static int stind_to_store_membase (int opcode
);
104 int mono_op_to_op_imm (int opcode
);
105 int mono_op_to_op_imm_noemul (int opcode
);
/* Emission helpers shared with other mini files (non-static by design). */
107 MonoInst
* mono_emit_native_call (MonoCompile
*cfg
, gconstpointer func
, MonoMethodSignature
*sig
, MonoInst
**args
);
108 void mini_emit_stobj (MonoCompile
*cfg
, MonoInst
*dest
, MonoInst
*src
, MonoClass
*klass
, gboolean native
);
109 void mini_emit_initobj (MonoCompile
*cfg
, MonoInst
*dest
, const guchar
*ip
, MonoClass
*klass
);
111 /* helper methods signature */
/* Signatures for runtime trampolines, defined elsewhere (see mini.c). */
112 extern MonoMethodSignature
*helper_sig_class_init_trampoline
;
113 extern MonoMethodSignature
*helper_sig_domain_get
;
114 extern MonoMethodSignature
*helper_sig_generic_class_init_trampoline
;
115 extern MonoMethodSignature
*helper_sig_rgctx_lazy_fetch_trampoline
;
116 extern MonoMethodSignature
*helper_sig_monitor_enter_exit_trampoline
;
119 * Instruction metadata
124 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2,
130 #if SIZEOF_REGISTER == 8
135 /* keep in sync with the enum in mini.h */
138 #include "mini-ops.h"
142 extern GHashTable
*jit_icall_name_hash
;
144 #define MONO_INIT_VARINFO(vi,id) do { \
145 (vi)->range.first_use.pos.bid = 0xffff; \
/*
 * mono_alloc_ireg:
 * Public wrapper over the static alloc_ireg(): allocates a fresh integer
 * virtual register in CFG.
 * NOTE(review): this extract is truncated — the return-type line and the
 * function braces are missing from this view.
 */
151 mono_alloc_ireg (MonoCompile
*cfg
)
153 return alloc_ireg (cfg
);
/*
 * mono_alloc_freg:
 * Public wrapper over alloc_freg(): allocates a fresh floating-point
 * virtual register in CFG.
 * NOTE(review): truncated extract — return type and braces not visible.
 */
157 mono_alloc_freg (MonoCompile
*cfg
)
159 return alloc_freg (cfg
);
/*
 * mono_alloc_preg:
 * Public wrapper over alloc_preg(): allocates a fresh pointer-sized
 * virtual register in CFG.
 * NOTE(review): truncated extract — return type and braces not visible.
 */
163 mono_alloc_preg (MonoCompile
*cfg
)
165 return alloc_preg (cfg
);
/*
 * mono_alloc_dreg:
 * Public wrapper over alloc_dreg(): allocates a destination virtual
 * register whose register class is chosen from STACK_TYPE (the eval-stack
 * type of the value it will hold).
 * NOTE(review): truncated extract — return type and braces not visible.
 */
169 mono_alloc_dreg (MonoCompile
*cfg
, MonoStackType stack_type
)
171 return alloc_dreg (cfg
, stack_type
)
;
/*
 * mono_type_to_regmove:
 * Selects the move opcode appropriate for a value of TYPE (switching on
 * type->type). Enums are unwrapped to their base type and generic
 * instances to the container class before re-dispatching.
 * NOTE(review): this extract is heavily decimated — return statements,
 * most case labels and the braces are missing from this view, so the
 * per-case results cannot be documented from what is visible.
 */
175 mono_type_to_regmove (MonoCompile
*cfg
, MonoType
*type
)
181 switch (type
->type
) {
184 case MONO_TYPE_BOOLEAN
:
196 case MONO_TYPE_FNPTR
:
198 case MONO_TYPE_CLASS
:
199 case MONO_TYPE_STRING
:
200 case MONO_TYPE_OBJECT
:
201 case MONO_TYPE_SZARRAY
:
202 case MONO_TYPE_ARRAY
:
206 #if SIZEOF_REGISTER == 8
/* Valuetypes: unwrap enums to their underlying primitive type first. */
215 case MONO_TYPE_VALUETYPE
:
216 if (type
->data
.klass
->enumtype
) {
217 type
= mono_class_enum_basetype (type
->data
.klass
);
220 if (MONO_CLASS_IS_SIMD (cfg
, mono_class_from_mono_type (type
)))
223 case MONO_TYPE_TYPEDBYREF
:
/* Generic instances dispatch on the container class's byval type. */
225 case MONO_TYPE_GENERICINST
:
226 type
= &type
->data
.generic_class
->container_class
->byval_arg
;
230 g_assert (cfg
->generic_sharing_context
);
233 g_error ("unknown type 0x%02x in type_to_regstore", type
->type
);
/*
 * mono_print_bb:
 * Debug helper: prints BB's incoming edges, outgoing edges (as
 * "BB<num>(<dfn>)" pairs) and then every instruction in the block via
 * mono_print_ins_index(), prefixed with MSG.
 * NOTE(review): truncated extract — declarations of 'i'/'tree' and some
 * printf separators are missing from this view.
 */
239 mono_print_bb (MonoBasicBlock
*bb
, const char *msg
)
244 printf ("\n%s %d: [IN: ", msg
, bb
->block_num
);
/* Incoming edges. */
245 for (i
= 0; i
< bb
->in_count
; ++i
)
246 printf (" BB%d(%d)", bb
->in_bb
[i
]->block_num
, bb
->in_bb
[i
]->dfn
);
/* Outgoing edges. */
248 for (i
= 0; i
< bb
->out_count
; ++i
)
249 printf (" BB%d(%d)", bb
->out_bb
[i
]->block_num
, bb
->out_bb
[i
]->dfn
);
/* Dump every instruction in the block's linked list. */
251 for (tree
= bb
->code
; tree
; tree
= tree
->next
)
252 mono_print_ins_index (-1, tree
);
256 * Can't put this at the beginning, since other files reference stuff from this
261 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
263 #define GET_BBLOCK(cfg,tblock,ip) do { \
264 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
266 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
267 NEW_BBLOCK (cfg, (tblock)); \
268 (tblock)->cil_code = (ip); \
269 ADD_BBLOCK (cfg, (tblock)); \
273 #if defined(__i386__) || defined(__x86_64__)
274 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
275 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
276 (dest)->dreg = alloc_preg ((cfg)); \
277 (dest)->sreg1 = (sr1); \
278 (dest)->sreg2 = (sr2); \
279 (dest)->inst_imm = (imm); \
280 (dest)->backend.shift_amount = (shift); \
281 MONO_ADD_INS ((cfg)->cbb, (dest)); \
285 #if SIZEOF_REGISTER == 8
286 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
287 /* FIXME: Need to add many more cases */ \
288 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
290 int dr = alloc_preg (cfg); \
291 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
292 (ins)->sreg2 = widen->dreg; \
296 #define ADD_WIDEN_OP(ins, arg1, arg2)
299 #define ADD_BINOP(op) do { \
300 MONO_INST_NEW (cfg, ins, (op)); \
302 ins->sreg1 = sp [0]->dreg; \
303 ins->sreg2 = sp [1]->dreg; \
304 type_from_op (ins, sp [0], sp [1]); \
306 /* Have to insert a widening op */ \
307 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
308 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
309 MONO_ADD_INS ((cfg)->cbb, (ins)); \
311 mono_decompose_opcode ((cfg), (ins)); \
314 #define ADD_UNOP(op) do { \
315 MONO_INST_NEW (cfg, ins, (op)); \
317 ins->sreg1 = sp [0]->dreg; \
318 type_from_op (ins, sp [0], NULL); \
320 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
321 MONO_ADD_INS ((cfg)->cbb, (ins)); \
323 mono_decompose_opcode (cfg, ins); \
326 #define ADD_BINCOND(next_block) do { \
329 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
330 cmp->sreg1 = sp [0]->dreg; \
331 cmp->sreg2 = sp [1]->dreg; \
332 type_from_op (cmp, sp [0], sp [1]); \
334 type_from_op (ins, sp [0], sp [1]); \
335 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
336 GET_BBLOCK (cfg, tblock, target); \
337 link_bblock (cfg, bblock, tblock); \
338 ins->inst_true_bb = tblock; \
339 if ((next_block)) { \
340 link_bblock (cfg, bblock, (next_block)); \
341 ins->inst_false_bb = (next_block); \
342 start_new_bblock = 1; \
344 GET_BBLOCK (cfg, tblock, ip); \
345 link_bblock (cfg, bblock, tblock); \
346 ins->inst_false_bb = tblock; \
347 start_new_bblock = 2; \
349 if (sp != stack_start) { \
350 handle_stack_args (cfg, stack_start, sp - stack_start); \
351 CHECK_UNVERIFIABLE (cfg); \
353 MONO_ADD_INS (bblock, cmp); \
354 MONO_ADD_INS (bblock, ins); \
358 * link_bblock: Links two basic blocks
360 * links two basic blocks in the control flow graph, the 'from'
361 * argument is the starting block and the 'to' argument is the block
362 * the control flow ends to after 'from'.
/* NOTE(review): decimated extract — the verbose-level guard, the
 * early-return when the edge already exists, and the lines appending the
 * new element and storing 'newa' back are missing from this view. The
 * visible logic: optionally trace the edge, scan the existing out/in edge
 * arrays for a duplicate, then grow each array by one via the CFG
 * mempool, copying the old entries. */
365 link_bblock (MonoCompile
*cfg
, MonoBasicBlock
*from
, MonoBasicBlock
* to
)
367 MonoBasicBlock
**newa
;
/* Debug tracing of the edge, using IL offsets when available. */
371 if (from
->cil_code
) {
373 printf ("edge from IL%04x to IL_%04x\n", from
->cil_code
- cfg
->cil_code
, to
->cil_code
- cfg
->cil_code
);
375 printf ("edge from IL%04x to exit\n", from
->cil_code
- cfg
->cil_code
);
378 printf ("edge from entry to IL_%04x\n", to
->cil_code
- cfg
->cil_code
);
380 printf ("edge from entry to exit\n");
/* Check whether 'to' is already among 'from''s successors. */
385 for (i
= 0; i
< from
->out_count
; ++i
) {
386 if (to
== from
->out_bb
[i
]) {
/* Grow from->out_bb by one slot (mempool-allocated; old entries copied). */
392 newa
= mono_mempool_alloc (cfg
->mempool
, sizeof (gpointer
) * (from
->out_count
+ 1));
393 for (i
= 0; i
< from
->out_count
; ++i
) {
394 newa
[i
] = from
->out_bb
[i
];
/* Symmetric handling for 'to''s predecessor list. */
402 for (i
= 0; i
< to
->in_count
; ++i
) {
403 if (from
== to
->in_bb
[i
]) {
409 newa
= mono_mempool_alloc (cfg
->mempool
, sizeof (gpointer
) * (to
->in_count
+ 1));
410 for (i
= 0; i
< to
->in_count
; ++i
) {
411 newa
[i
] = to
->in_bb
[i
];
/*
 * mono_link_bblock:
 * Public entry point that simply forwards to the static link_bblock(),
 * adding a CFG edge from FROM to TO.
 * NOTE(review): truncated extract — return type and braces not visible.
 */
420 mono_link_bblock (MonoCompile
*cfg
, MonoBasicBlock
*from
, MonoBasicBlock
* to
)
422 link_bblock (cfg
, from
, to
);
426 * mono_find_block_region:
428 * We mark each basic block with a region ID. We use that to avoid BB
429 * optimizations when blocks are in different regions.
432 * A region token that encodes where this region is, and information
433 * about the clause owner for this block.
435 * The region encodes the try/catch/filter clause that owns this block
436 * as well as the type. -1 is a special value that represents a block
437 * that is in none of try/catch/filter.
/* NOTE(review): decimated extract — declaration of 'i', some braces and
 * the final "return -1" fall outside this view. The token layout visible
 * here is ((clause_index + 1) << 8) | region_kind | clause->flags. */
440 mono_find_block_region (MonoCompile
*cfg
, int offset
)
442 MonoMethod
*method
= cfg
->method
;
443 MonoMethodHeader
*header
= mono_method_get_header (method
);
444 MonoExceptionClause
*clause
;
447 /* first search for handlers and filters */
448 for (i
= 0; i
< header
->num_clauses
; ++i
) {
449 clause
= &header
->clauses
[i
];
/* Filter clauses: the filter code spans [filter_offset, handler_offset). */
450 if ((clause
->flags
== MONO_EXCEPTION_CLAUSE_FILTER
) && (offset
>= clause
->data
.filter_offset
) &&
451 (offset
< (clause
->handler_offset
)))
452 return ((i
+ 1) << 8) | MONO_REGION_FILTER
| clause
->flags
;
/* Handler bodies: finally / fault / (default) catch. */
454 if (MONO_OFFSET_IN_HANDLER (clause
, offset
)) {
455 if (clause
->flags
== MONO_EXCEPTION_CLAUSE_FINALLY
)
456 return ((i
+ 1) << 8) | MONO_REGION_FINALLY
| clause
->flags
;
457 else if (clause
->flags
== MONO_EXCEPTION_CLAUSE_FAULT
)
458 return ((i
+ 1) << 8) | MONO_REGION_FAULT
| clause
->flags
;
460 return ((i
+ 1) << 8) | MONO_REGION_CATCH
| clause
->flags
;
464 /* search the try blocks */
465 for (i
= 0; i
< header
->num_clauses
; ++i
) {
466 clause
= &header
->clauses
[i
];
467 if (MONO_OFFSET_IN_CLAUSE (clause
, offset
))
468 return ((i
+ 1) << 8) | clause
->flags
;
/*
 * mono_find_final_block:
 * Collects (via g_list_append into 'res') the handler basic blocks of
 * every exception clause of flag TYPE whose protected range contains IP
 * but not TARGET — i.e. clauses that a branch from IP to TARGET leaves.
 * NOTE(review): decimated extract — declarations of 'i'/'res', the
 * return statement and closing braces fall outside this view.
 */
475 mono_find_final_block (MonoCompile
*cfg
, unsigned char *ip
, unsigned char *target
, int type
)
477 MonoMethod
*method
= cfg
->method
;
478 MonoMethodHeader
*header
= mono_method_get_header (method
);
479 MonoExceptionClause
*clause
;
480 MonoBasicBlock
*handler
;
484 for (i
= 0; i
< header
->num_clauses
; ++i
) {
485 clause
= &header
->clauses
[i
];
/* Clause is being exited: IP inside the protected range, TARGET outside. */
486 if (MONO_OFFSET_IN_CLAUSE (clause
, (ip
- header
->code
)) &&
487 (!MONO_OFFSET_IN_CLAUSE (clause
, (target
- header
->code
)))) {
488 if (clause
->flags
== type
) {
489 handler
= cfg
->cil_offset_to_bb
[clause
->handler_offset
];
491 res
= g_list_append (res
, handler
);
/*
 * mono_create_spvar_for_region:
 * Returns the stack-pointer variable associated with REGION, creating and
 * caching it in cfg->spvars on first use. The var is marked
 * MONO_INST_INDIRECT so the register allocator leaves it in memory.
 * NOTE(review): truncated extract — return type, 'var' declaration,
 * early-return for the cache hit and braces are not visible here.
 */
499 mono_create_spvar_for_region (MonoCompile
*cfg
, int region
)
503 var
= g_hash_table_lookup (cfg
->spvars
, GINT_TO_POINTER (region
));
507 var
= mono_compile_create_var (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_LOCAL
);
508 /* prevent it from being register allocated */
509 var
->flags
|= MONO_INST_INDIRECT
;
511 g_hash_table_insert (cfg
->spvars
, GINT_TO_POINTER (region
), var
);
/*
 * mono_find_exvar_for_offset:
 * Looks up (without creating) the exception variable cached for the given
 * IL OFFSET in cfg->exvars; returns NULL on a hash miss.
 * NOTE(review): truncated extract — return type and braces not visible.
 */
515 mono_find_exvar_for_offset (MonoCompile
*cfg
, int offset
)
517 return g_hash_table_lookup (cfg
->exvars
, GINT_TO_POINTER (offset
));
/*
 * mono_create_exvar_for_offset:
 * Returns the exception-object variable for IL OFFSET, creating an
 * OP_LOCAL of object type and caching it in cfg->exvars on first use.
 * Marked MONO_INST_INDIRECT so it is not register allocated.
 * NOTE(review): truncated extract — return type, 'var' declaration,
 * cache-hit early return and braces are not visible here.
 */
521 mono_create_exvar_for_offset (MonoCompile
*cfg
, int offset
)
525 var
= g_hash_table_lookup (cfg
->exvars
, GINT_TO_POINTER (offset
));
529 var
= mono_compile_create_var (cfg
, &mono_defaults
.object_class
->byval_arg
, OP_LOCAL
);
530 /* prevent it from being register allocated */
531 var
->flags
|= MONO_INST_INDIRECT
;
533 g_hash_table_insert (cfg
->exvars
, GINT_TO_POINTER (offset
), var
);
539 * Returns the type used in the eval stack when @type is loaded.
540 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
/* NOTE(review): decimated extract — the byref early-out branch, most case
 * labels, 'break'/'goto' statements and braces fall outside this view.
 * Visible behavior: sets inst->klass from the MonoType and inst->type to
 * the matching STACK_* constant, unwrapping enums and generic insts. */
543 type_to_eval_stack_type (MonoCompile
*cfg
, MonoType
*type
, MonoInst
*inst
)
547 inst
->klass
= klass
= mono_class_from_mono_type (type
);
/* Byref values are managed pointers on the eval stack. */
549 inst
->type
= STACK_MP
;
554 switch (type
->type
) {
556 inst
->type
= STACK_INV
;
560 case MONO_TYPE_BOOLEAN
:
566 inst
->type
= STACK_I4
;
571 case MONO_TYPE_FNPTR
:
572 inst
->type
= STACK_PTR
;
574 case MONO_TYPE_CLASS
:
575 case MONO_TYPE_STRING
:
576 case MONO_TYPE_OBJECT
:
577 case MONO_TYPE_SZARRAY
:
578 case MONO_TYPE_ARRAY
:
579 inst
->type
= STACK_OBJ
;
583 inst
->type
= STACK_I8
;
587 inst
->type
= STACK_R8
;
/* Valuetypes: enums collapse to their underlying primitive type. */
589 case MONO_TYPE_VALUETYPE
:
590 if (type
->data
.klass
->enumtype
) {
591 type
= mono_class_enum_basetype (type
->data
.klass
);
595 inst
->type
= STACK_VTYPE
;
598 case MONO_TYPE_TYPEDBYREF
:
599 inst
->klass
= mono_defaults
.typed_reference_class
;
600 inst
->type
= STACK_VTYPE
;
/* Generic instances re-dispatch on the container class's byval type. */
602 case MONO_TYPE_GENERICINST
:
603 type
= &type
->data
.generic_class
->container_class
->byval_arg
;
606 case MONO_TYPE_MVAR
:
607 /* FIXME: all the arguments must be references for now,
608 * later look inside cfg and see if the arg num is
611 g_assert (cfg
->generic_sharing_context
);
612 inst
->type
= STACK_OBJ
;
615 g_error ("unknown type 0x%02x in eval stack type", type
->type
);
620 * The following tables are used to quickly validate the IL code in type_from_op ().
623 bin_num_table
[STACK_MAX
] [STACK_MAX
] = {
624 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
625 {STACK_INV
, STACK_I4
, STACK_INV
, STACK_PTR
, STACK_INV
, STACK_MP
, STACK_INV
, STACK_INV
},
626 {STACK_INV
, STACK_INV
, STACK_I8
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
627 {STACK_INV
, STACK_PTR
, STACK_INV
, STACK_PTR
, STACK_INV
, STACK_MP
, STACK_INV
, STACK_INV
},
628 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_R8
, STACK_INV
, STACK_INV
, STACK_INV
},
629 {STACK_INV
, STACK_MP
, STACK_INV
, STACK_MP
, STACK_INV
, STACK_PTR
, STACK_INV
, STACK_INV
},
630 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
631 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
}
636 STACK_INV
, STACK_I4
, STACK_I8
, STACK_PTR
, STACK_R8
, STACK_INV
, STACK_INV
, STACK_INV
639 /* reduce the size of this table */
641 bin_int_table
[STACK_MAX
] [STACK_MAX
] = {
642 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
643 {STACK_INV
, STACK_I4
, STACK_INV
, STACK_PTR
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
644 {STACK_INV
, STACK_INV
, STACK_I8
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
645 {STACK_INV
, STACK_PTR
, STACK_INV
, STACK_PTR
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
646 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
647 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
648 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
649 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
}
653 bin_comp_table
[STACK_MAX
] [STACK_MAX
] = {
654 /* Inv i L p F & O vt */
656 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
657 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
658 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
659 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
660 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
661 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
662 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
665 /* reduce the size of this table */
667 shift_table
[STACK_MAX
] [STACK_MAX
] = {
668 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
669 {STACK_INV
, STACK_I4
, STACK_INV
, STACK_I4
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
670 {STACK_INV
, STACK_I8
, STACK_INV
, STACK_I8
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
671 {STACK_INV
, STACK_PTR
, STACK_INV
, STACK_PTR
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
672 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
673 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
674 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
675 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
}
679 * Tables to map from the non-specific opcode to the matching
680 * type-specific opcode.
682 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
684 binops_op_map
[STACK_MAX
] = {
685 0, OP_IADD
-CEE_ADD
, OP_LADD
-CEE_ADD
, OP_PADD
-CEE_ADD
, OP_FADD
-CEE_ADD
, OP_PADD
-CEE_ADD
688 /* handles from CEE_NEG to CEE_CONV_U8 */
690 unops_op_map
[STACK_MAX
] = {
691 0, OP_INEG
-CEE_NEG
, OP_LNEG
-CEE_NEG
, OP_PNEG
-CEE_NEG
, OP_FNEG
-CEE_NEG
, OP_PNEG
-CEE_NEG
694 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
696 ovfops_op_map
[STACK_MAX
] = {
697 0, OP_ICONV_TO_U2
-CEE_CONV_U2
, OP_LCONV_TO_U2
-CEE_CONV_U2
, OP_PCONV_TO_U2
-CEE_CONV_U2
, OP_FCONV_TO_U2
-CEE_CONV_U2
, OP_PCONV_TO_U2
-CEE_CONV_U2
, OP_PCONV_TO_U2
-CEE_CONV_U2
700 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
702 ovf2ops_op_map
[STACK_MAX
] = {
703 0, OP_ICONV_TO_OVF_I1_UN
-CEE_CONV_OVF_I1_UN
, OP_LCONV_TO_OVF_I1_UN
-CEE_CONV_OVF_I1_UN
, OP_PCONV_TO_OVF_I1_UN
-CEE_CONV_OVF_I1_UN
, OP_FCONV_TO_OVF_I1_UN
-CEE_CONV_OVF_I1_UN
, OP_PCONV_TO_OVF_I1_UN
-CEE_CONV_OVF_I1_UN
706 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
708 ovf3ops_op_map
[STACK_MAX
] = {
709 0, OP_ICONV_TO_OVF_I1
-CEE_CONV_OVF_I1
, OP_LCONV_TO_OVF_I1
-CEE_CONV_OVF_I1
, OP_PCONV_TO_OVF_I1
-CEE_CONV_OVF_I1
, OP_FCONV_TO_OVF_I1
-CEE_CONV_OVF_I1
, OP_PCONV_TO_OVF_I1
-CEE_CONV_OVF_I1
712 /* handles from CEE_BEQ to CEE_BLT_UN */
714 beqops_op_map
[STACK_MAX
] = {
715 0, OP_IBEQ
-CEE_BEQ
, OP_LBEQ
-CEE_BEQ
, OP_PBEQ
-CEE_BEQ
, OP_FBEQ
-CEE_BEQ
, OP_PBEQ
-CEE_BEQ
, OP_PBEQ
-CEE_BEQ
718 /* handles from CEE_CEQ to CEE_CLT_UN */
720 ceqops_op_map
[STACK_MAX
] = {
721 0, OP_ICEQ
-OP_CEQ
, OP_LCEQ
-OP_CEQ
, OP_PCEQ
-OP_CEQ
, OP_FCEQ
-OP_CEQ
, OP_PCEQ
-OP_CEQ
, OP_PCEQ
-OP_CEQ
725 * Sets ins->type (the type on the eval stack) according to the
726 * type of the opcode and the arguments to it.
727 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
729 * FIXME: this function sets ins->type unconditionally in some cases, but
730 * it should set it to invalid for some types (a conv.x on an object)
733 type_from_op (MonoInst
*ins
, MonoInst
*src1
, MonoInst
*src2
) {
735 switch (ins
->opcode
) {
742 /* FIXME: check unverifiable args for STACK_MP */
743 ins
->type
= bin_num_table
[src1
->type
] [src2
->type
];
744 ins
->opcode
+= binops_op_map
[ins
->type
];
751 ins
->type
= bin_int_table
[src1
->type
] [src2
->type
];
752 ins
->opcode
+= binops_op_map
[ins
->type
];
757 ins
->type
= shift_table
[src1
->type
] [src2
->type
];
758 ins
->opcode
+= binops_op_map
[ins
->type
];
763 ins
->type
= bin_comp_table
[src1
->type
] [src2
->type
] ? STACK_I4
: STACK_INV
;
764 if ((src1
->type
== STACK_I8
) || ((SIZEOF_REGISTER
== 8) && ((src1
->type
== STACK_PTR
) || (src1
->type
== STACK_OBJ
) || (src1
->type
== STACK_MP
))))
765 ins
->opcode
= OP_LCOMPARE
;
766 else if (src1
->type
== STACK_R8
)
767 ins
->opcode
= OP_FCOMPARE
;
769 ins
->opcode
= OP_ICOMPARE
;
771 case OP_ICOMPARE_IMM
:
772 ins
->type
= bin_comp_table
[src1
->type
] [src1
->type
] ? STACK_I4
: STACK_INV
;
773 if ((src1
->type
== STACK_I8
) || ((SIZEOF_REGISTER
== 8) && ((src1
->type
== STACK_PTR
) || (src1
->type
== STACK_OBJ
) || (src1
->type
== STACK_MP
))))
774 ins
->opcode
= OP_LCOMPARE_IMM
;
786 ins
->opcode
+= beqops_op_map
[src1
->type
];
789 ins
->type
= bin_comp_table
[src1
->type
] [src2
->type
] ? STACK_I4
: STACK_INV
;
790 ins
->opcode
+= ceqops_op_map
[src1
->type
];
796 ins
->type
= (bin_comp_table
[src1
->type
] [src2
->type
] & 1) ? STACK_I4
: STACK_INV
;
797 ins
->opcode
+= ceqops_op_map
[src1
->type
];
801 ins
->type
= neg_table
[src1
->type
];
802 ins
->opcode
+= unops_op_map
[ins
->type
];
805 if (src1
->type
>= STACK_I4
&& src1
->type
<= STACK_PTR
)
806 ins
->type
= src1
->type
;
808 ins
->type
= STACK_INV
;
809 ins
->opcode
+= unops_op_map
[ins
->type
];
815 ins
->type
= STACK_I4
;
816 ins
->opcode
+= unops_op_map
[src1
->type
];
819 ins
->type
= STACK_R8
;
820 switch (src1
->type
) {
823 ins
->opcode
= OP_ICONV_TO_R_UN
;
826 ins
->opcode
= OP_LCONV_TO_R_UN
;
830 case CEE_CONV_OVF_I1
:
831 case CEE_CONV_OVF_U1
:
832 case CEE_CONV_OVF_I2
:
833 case CEE_CONV_OVF_U2
:
834 case CEE_CONV_OVF_I4
:
835 case CEE_CONV_OVF_U4
:
836 ins
->type
= STACK_I4
;
837 ins
->opcode
+= ovf3ops_op_map
[src1
->type
];
839 case CEE_CONV_OVF_I_UN
:
840 case CEE_CONV_OVF_U_UN
:
841 ins
->type
= STACK_PTR
;
842 ins
->opcode
+= ovf2ops_op_map
[src1
->type
];
844 case CEE_CONV_OVF_I1_UN
:
845 case CEE_CONV_OVF_I2_UN
:
846 case CEE_CONV_OVF_I4_UN
:
847 case CEE_CONV_OVF_U1_UN
:
848 case CEE_CONV_OVF_U2_UN
:
849 case CEE_CONV_OVF_U4_UN
:
850 ins
->type
= STACK_I4
;
851 ins
->opcode
+= ovf2ops_op_map
[src1
->type
];
854 ins
->type
= STACK_PTR
;
855 switch (src1
->type
) {
857 ins
->opcode
= OP_ICONV_TO_U
;
861 #if SIZEOF_REGISTER == 8
862 ins
->opcode
= OP_LCONV_TO_U
;
864 ins
->opcode
= OP_MOVE
;
868 ins
->opcode
= OP_LCONV_TO_U
;
871 ins
->opcode
= OP_FCONV_TO_U
;
877 ins
->type
= STACK_I8
;
878 ins
->opcode
+= unops_op_map
[src1
->type
];
880 case CEE_CONV_OVF_I8
:
881 case CEE_CONV_OVF_U8
:
882 ins
->type
= STACK_I8
;
883 ins
->opcode
+= ovf3ops_op_map
[src1
->type
];
885 case CEE_CONV_OVF_U8_UN
:
886 case CEE_CONV_OVF_I8_UN
:
887 ins
->type
= STACK_I8
;
888 ins
->opcode
+= ovf2ops_op_map
[src1
->type
];
892 ins
->type
= STACK_R8
;
893 ins
->opcode
+= unops_op_map
[src1
->type
];
896 ins
->type
= STACK_R8
;
900 ins
->type
= STACK_I4
;
901 ins
->opcode
+= ovfops_op_map
[src1
->type
];
906 ins
->type
= STACK_PTR
;
907 ins
->opcode
+= ovfops_op_map
[src1
->type
];
915 ins
->type
= bin_num_table
[src1
->type
] [src2
->type
];
916 ins
->opcode
+= ovfops_op_map
[src1
->type
];
917 if (ins
->type
== STACK_R8
)
918 ins
->type
= STACK_INV
;
920 case OP_LOAD_MEMBASE
:
921 ins
->type
= STACK_PTR
;
923 case OP_LOADI1_MEMBASE
:
924 case OP_LOADU1_MEMBASE
:
925 case OP_LOADI2_MEMBASE
:
926 case OP_LOADU2_MEMBASE
:
927 case OP_LOADI4_MEMBASE
:
928 case OP_LOADU4_MEMBASE
:
929 ins
->type
= STACK_PTR
;
931 case OP_LOADI8_MEMBASE
:
932 ins
->type
= STACK_I8
;
934 case OP_LOADR4_MEMBASE
:
935 case OP_LOADR8_MEMBASE
:
936 ins
->type
= STACK_R8
;
939 g_error ("opcode 0x%04x not handled in type from op", ins
->opcode
);
943 if (ins
->type
== STACK_MP
)
944 ins
->klass
= mono_defaults
.object_class
;
949 STACK_I4
, STACK_I4
, STACK_I4
, STACK_I4
, STACK_I4
, STACK_I4
, STACK_I8
, STACK_PTR
, STACK_R8
, STACK_R8
, STACK_OBJ
955 param_table
[STACK_MAX
] [STACK_MAX
] = {
960 check_values_to_signature (MonoInst
*args
, MonoType
*this, MonoMethodSignature
*sig
) {
964 switch (args
->type
) {
974 for (i
= 0; i
< sig
->param_count
; ++i
) {
975 switch (args
[i
].type
) {
979 if (!sig
->params
[i
]->byref
)
983 if (sig
->params
[i
]->byref
)
985 switch (sig
->params
[i
]->type
) {
986 case MONO_TYPE_CLASS
:
987 case MONO_TYPE_STRING
:
988 case MONO_TYPE_OBJECT
:
989 case MONO_TYPE_SZARRAY
:
990 case MONO_TYPE_ARRAY
:
997 if (sig
->params
[i
]->byref
)
999 if (sig
->params
[i
]->type
!= MONO_TYPE_R4
&& sig
->params
[i
]->type
!= MONO_TYPE_R8
)
1008 /*if (!param_table [args [i].type] [sig->params [i]->type])
1016 * When we need a pointer to the current domain many times in a method, we
1017 * call mono_domain_get() once and we store the result in a local variable.
1018 * This function returns the variable that represents the MonoDomain*.
/* Lazily creates cfg->domainvar (an int-typed OP_LOCAL) on first request
 * and returns the cached variable thereafter. */
1020 inline static MonoInst
*
1021 mono_get_domainvar (MonoCompile
*cfg
)
1023 if (!cfg
->domainvar
)
1024 cfg
->domainvar
= mono_compile_create_var (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_LOCAL
);
1025 return cfg
->domainvar
;
1029 * The got_var contains the address of the Global Offset Table when AOT
/* Lazily creates and returns cfg->got_var, but only on architectures that
 * define MONO_ARCH_NEED_GOT_VAR and only when compiling AOT.
 * NOTE(review): truncated extract — the non-AOT/no-GOT return paths and
 * braces fall outside this view. */
1032 inline static MonoInst
*
1033 mono_get_got_var (MonoCompile
*cfg
)
1035 #ifdef MONO_ARCH_NEED_GOT_VAR
1036 if (!cfg
->compile_aot
)
1038 if (!cfg
->got_var
) {
1039 cfg
->got_var
= mono_compile_create_var (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_LOCAL
);
1041 return cfg
->got_var
;
/*
 * mono_get_vtable_var:
 * Returns the rgctx variable (cfg->rgctx_var), creating it on first use.
 * Only valid under generic sharing (asserted). The var is forced onto the
 * stack via MONO_INST_INDIRECT.
 * NOTE(review): truncated extract — return type and braces not visible.
 */
1048 mono_get_vtable_var (MonoCompile
*cfg
)
1050 g_assert (cfg
->generic_sharing_context
);
1052 if (!cfg
->rgctx_var
) {
1053 cfg
->rgctx_var
= mono_compile_create_var (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_LOCAL
);
1054 /* force the var to be stack allocated */
1055 cfg
->rgctx_var
->flags
|= MONO_INST_INDIRECT
;
1058 return cfg
->rgctx_var
;
/*
 * type_from_stack_type:
 * Maps an instruction's eval-stack type (ins->type, a STACK_* constant)
 * back to a MonoType*: primitives map to the corresponding mono_defaults
 * class byval types; STACK_MP uses the klass's this_arg; STACK_OBJ maps
 * to object; STACK_VTYPE uses the klass's byval_arg. Unknown stack types
 * abort via g_error.
 * NOTE(review): truncated extract — return type, the STACK_MP case label
 * (line 1068/1069) and closing braces are not fully visible.
 */
1062 type_from_stack_type (MonoInst
*ins
) {
1063 switch (ins
->type
) {
1064 case STACK_I4
: return &mono_defaults
.int32_class
->byval_arg
;
1065 case STACK_I8
: return &mono_defaults
.int64_class
->byval_arg
;
1066 case STACK_PTR
: return &mono_defaults
.int_class
->byval_arg
;
1067 case STACK_R8
: return &mono_defaults
.double_class
->byval_arg
;
1069 return &ins
->klass
->this_arg
;
1070 case STACK_OBJ
: return &mono_defaults
.object_class
->byval_arg
;
1071 case STACK_VTYPE
: return &ins
->klass
->byval_arg
;
1073 g_error ("stack type %d to monotype not handled\n", ins
->type
);
/*
 * type_to_stack_type:
 * Inverse-direction helper: maps a MonoType (after
 * mono_type_get_underlying_type) to its STACK_* eval-stack classification.
 * NOTE(review): decimated extract — the return statements for each case
 * group are missing from this view, so the exact STACK_* results cannot
 * be confirmed here; unknown types hit g_assert_not_reached().
 */
1078 static G_GNUC_UNUSED
int
1079 type_to_stack_type (MonoType
*t
)
1081 switch (mono_type_get_underlying_type (t
)->type
) {
1084 case MONO_TYPE_BOOLEAN
:
1087 case MONO_TYPE_CHAR
:
1094 case MONO_TYPE_FNPTR
:
1096 case MONO_TYPE_CLASS
:
1097 case MONO_TYPE_STRING
:
1098 case MONO_TYPE_OBJECT
:
1099 case MONO_TYPE_SZARRAY
:
1100 case MONO_TYPE_ARRAY
:
1108 case MONO_TYPE_VALUETYPE
:
1109 case MONO_TYPE_TYPEDBYREF
:
1111 case MONO_TYPE_GENERICINST
:
1112 if (mono_type_generic_inst_is_valuetype (t
))
1118 g_assert_not_reached ();
/*
 * array_access_to_klass:
 * Maps a CIL ldelem/stelem OPCODE to the MonoClass of the element type it
 * accesses (byte, u16, native int, sbyte, i16, i32, u32, i64, float,
 * double, or object for the _REF forms).
 * NOTE(review): decimated extract — the case labels preceding most return
 * statements are missing from this view, so the opcode-to-class pairing
 * cannot be confirmed line by line here.
 */
1125 array_access_to_klass (int opcode
)
1129 return mono_defaults
.byte_class
;
1131 return mono_defaults
.uint16_class
;
1134 return mono_defaults
.int_class
;
1137 return mono_defaults
.sbyte_class
;
1140 return mono_defaults
.int16_class
;
1143 return mono_defaults
.int32_class
;
1145 return mono_defaults
.uint32_class
;
1148 return mono_defaults
.int64_class
;
1151 return mono_defaults
.single_class
;
1154 return mono_defaults
.double_class
;
1155 case CEE_LDELEM_REF
:
1156 case CEE_STELEM_REF
:
1157 return mono_defaults
.object_class
;
1159 g_assert_not_reached ();
1165 * We try to share variables when possible
/* Returns a local variable to hold an eval-stack value of ins->type at
 * stack SLOT across basic-block boundaries. Variables are cached per
 * (type, slot) in cfg->intvars and reused; slots beyond the method's
 * max_stack (possible due to inlining) always get a fresh variable.
 * NOTE(review): decimated extract — return type, declarations of
 * 'pos'/'vnum'/'res', the switch case labels and the final return are
 * missing from this view. */
1168 mono_compile_get_interface_var (MonoCompile
*cfg
, int slot
, MonoInst
*ins
)
1173 /* inlining can result in deeper stacks */
1174 if (slot
>= mono_method_get_header (cfg
->method
)->max_stack
)
1175 return mono_compile_create_var (cfg
, type_from_stack_type (ins
), OP_LOCAL
);
/* Cache index: one entry per (stack type, slot) pair. */
1177 pos
= ins
->type
- 1 + slot
* STACK_MAX
;
1179 switch (ins
->type
) {
1186 if ((vnum
= cfg
->intvars
[pos
]))
1187 return cfg
->varinfo
[vnum
];
1188 res
= mono_compile_create_var (cfg
, type_from_stack_type (ins
), OP_LOCAL
);
1189 cfg
->intvars
[pos
] = res
->inst_c0
;
1192 res
= mono_compile_create_var (cfg
, type_from_stack_type (ins
), OP_LOCAL
);
/*
 * mono_save_token_info:
 * When compiling AOT (and no generic context is active), records the
 * (image, token) pair under KEY in cfg->token_info_hash so the AOT
 * compiler can later resolve KEY back to metadata. The record is
 * mempool-allocated, so it lives as long as the compile.
 * NOTE(review): truncated extract — return type and braces not visible.
 */
1198 mono_save_token_info (MonoCompile
*cfg
, MonoImage
*image
, guint32 token
, gpointer key
)
1201 * Don't use this if a generic_context is set, since that means AOT can't
1202 * look up the method using just the image+token.
1203 * table == 0 means this is a reference made from a wrapper.
1205 if (cfg
->compile_aot
&& !cfg
->generic_context
&& (mono_metadata_token_table (token
) > 0)) {
1206 MonoJumpInfoToken
*jump_info_token
= mono_mempool_alloc0 (cfg
->mempool
, sizeof (MonoJumpInfoToken
));
1207 jump_info_token
->image
= image
;
1208 jump_info_token
->token
= token
;
1209 g_hash_table_insert (cfg
->token_info_hash
, key
, jump_info_token
);
1214 * This function is called to handle items that are left on the evaluation stack
1215 * at basic block boundaries. What happens is that we save the values to local variables
1216 * and we reload them later when first entering the target basic block (with the
1217 * handle_loaded_temps () function).
1218 * A single joint point will use the same variables (stored in the array bb->out_stack or
1219 * bb->in_stack, if the basic block is before or after the joint point).
1221 * This function needs to be called _before_ emitting the last instruction of
1222 * the bb (i.e. before emitting a branch).
1223 * If the stack merge fails at a join point, cfg->unverifiable is set.
1226 handle_stack_args (MonoCompile
*cfg
, MonoInst
**sp
, int count
)
1229 MonoBasicBlock
*bb
= cfg
->cbb
;
1230 MonoBasicBlock
*outb
;
1231 MonoInst
*inst
, **locals
;
1236 if (cfg
->verbose_level
> 3)
1237 printf ("%d item(s) on exit from B%d\n", count
, bb
->block_num
);
1238 if (!bb
->out_scount
) {
1239 bb
->out_scount
= count
;
1240 //printf ("bblock %d has out:", bb->block_num);
1242 for (i
= 0; i
< bb
->out_count
; ++i
) {
1243 outb
= bb
->out_bb
[i
];
1244 /* exception handlers are linked, but they should not be considered for stack args */
1245 if (outb
->flags
& BB_EXCEPTION_HANDLER
)
1247 //printf (" %d", outb->block_num);
1248 if (outb
->in_stack
) {
1250 bb
->out_stack
= outb
->in_stack
;
1256 bb
->out_stack
= mono_mempool_alloc (cfg
->mempool
, sizeof (MonoInst
*) * count
);
1257 for (i
= 0; i
< count
; ++i
) {
1259 * try to reuse temps already allocated for this purpouse, if they occupy the same
1260 * stack slot and if they are of the same type.
1261 * This won't cause conflicts since if 'local' is used to
1262 * store one of the values in the in_stack of a bblock, then
1263 * the same variable will be used for the same outgoing stack
1265 * This doesn't work when inlining methods, since the bblocks
1266 * in the inlined methods do not inherit their in_stack from
1267 * the bblock they are inlined to. See bug #58863 for an
1270 if (cfg
->inlined_method
)
1271 bb
->out_stack
[i
] = mono_compile_create_var (cfg
, type_from_stack_type (sp
[i
]), OP_LOCAL
);
1273 bb
->out_stack
[i
] = mono_compile_get_interface_var (cfg
, i
, sp
[i
]);
1278 for (i
= 0; i
< bb
->out_count
; ++i
) {
1279 outb
= bb
->out_bb
[i
];
1280 /* exception handlers are linked, but they should not be considered for stack args */
1281 if (outb
->flags
& BB_EXCEPTION_HANDLER
)
1283 if (outb
->in_scount
) {
1284 if (outb
->in_scount
!= bb
->out_scount
) {
1285 cfg
->unverifiable
= TRUE
;
1288 continue; /* check they are the same locals */
1290 outb
->in_scount
= count
;
1291 outb
->in_stack
= bb
->out_stack
;
1294 locals
= bb
->out_stack
;
1296 for (i
= 0; i
< count
; ++i
) {
1297 EMIT_NEW_TEMPSTORE (cfg
, inst
, locals
[i
]->inst_c0
, sp
[i
]);
1298 inst
->cil_code
= sp
[i
]->cil_code
;
1299 sp
[i
] = locals
[i
];
1300 if (cfg
->verbose_level
> 3)
1301 printf ("storing %d to temp %d\n", i
, (int)locals
[i
]->inst_c0
);
1305 * It is possible that the out bblocks already have in_stack assigned, and
1306 * the in_stacks differ. In this case, we will store to all the different
1313 /* Find a bblock which has a different in_stack */
1315 while (bindex
< bb
->out_count
) {
1316 outb
= bb
->out_bb
[bindex
];
1317 /* exception handlers are linked, but they should not be considered for stack args */
1318 if (outb
->flags
& BB_EXCEPTION_HANDLER
) {
1322 if (outb
->in_stack
!= locals
) {
1323 for (i
= 0; i
< count
; ++i
) {
1324 EMIT_NEW_TEMPSTORE (cfg
, inst
, outb
->in_stack
[i
]->inst_c0
, sp
[i
]);
1325 inst
->cil_code
= sp
[i
]->cil_code
;
1326 sp
[i
] = locals
[i
];
1327 if (cfg
->verbose_level
> 3)
1328 printf ("storing %d to temp %d\n", i
, (int)outb
->in_stack
[i
]->inst_c0
);
1330 locals
= outb
->in_stack
;
1339 /* Emit code which loads interface_offsets [klass->interface_id]
1340 * The array is stored in memory before vtable.
1343 mini_emit_load_intf_reg_vtable (MonoCompile
*cfg
, int intf_reg
, int vtable_reg
, MonoClass
*klass
)
1345 if (cfg
->compile_aot
) {
1346 int ioffset_reg
= alloc_preg (cfg
);
1347 int iid_reg
= alloc_preg (cfg
);
1349 MONO_EMIT_NEW_AOTCONST (cfg
, iid_reg
, klass
, MONO_PATCH_INFO_ADJUSTED_IID
);
1350 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, ioffset_reg
, iid_reg
, vtable_reg
);
1351 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, intf_reg
, ioffset_reg
, 0);
1354 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, intf_reg
, vtable_reg
, -((klass
->interface_id
+ 1) * SIZEOF_VOID_P
));
1359 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1360 * stored in "klass_reg" implements the interface "klass".
1363 mini_emit_load_intf_bit_reg_class (MonoCompile
*cfg
, int intf_bit_reg
, int klass_reg
, MonoClass
*klass
)
1365 int ibitmap_reg
= alloc_preg (cfg
);
1366 int ibitmap_byte_reg
= alloc_preg (cfg
);
1368 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, ibitmap_reg
, klass_reg
, G_STRUCT_OFFSET (MonoClass
, interface_bitmap
));
1370 if (cfg
->compile_aot
) {
1371 int iid_reg
= alloc_preg (cfg
);
1372 int shifted_iid_reg
= alloc_preg (cfg
);
1373 int ibitmap_byte_address_reg
= alloc_preg (cfg
);
1374 int masked_iid_reg
= alloc_preg (cfg
);
1375 int iid_one_bit_reg
= alloc_preg (cfg
);
1376 int iid_bit_reg
= alloc_preg (cfg
);
1377 MONO_EMIT_NEW_AOTCONST (cfg
, iid_reg
, klass
, MONO_PATCH_INFO_IID
);
1378 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHR_IMM
, shifted_iid_reg
, iid_reg
, 3);
1379 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, ibitmap_byte_address_reg
, ibitmap_reg
, shifted_iid_reg
);
1380 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU1_MEMBASE
, ibitmap_byte_reg
, ibitmap_byte_address_reg
, 0);
1381 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_IAND_IMM
, masked_iid_reg
, iid_reg
, 7);
1382 MONO_EMIT_NEW_ICONST (cfg
, iid_one_bit_reg
, 1);
1383 MONO_EMIT_NEW_BIALU (cfg
, OP_ISHL
, iid_bit_reg
, iid_one_bit_reg
, masked_iid_reg
);
1384 MONO_EMIT_NEW_BIALU (cfg
, OP_IAND
, intf_bit_reg
, ibitmap_byte_reg
, iid_bit_reg
);
1386 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI1_MEMBASE
, ibitmap_byte_reg
, ibitmap_reg
, klass
->interface_id
>> 3);
1387 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_AND_IMM
, intf_bit_reg
, ibitmap_byte_reg
, 1 << (klass
->interface_id
& 7));
1392 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1393 * stored in "vtable_reg" implements the interface "klass".
1396 mini_emit_load_intf_bit_reg_vtable (MonoCompile
*cfg
, int intf_bit_reg
, int vtable_reg
, MonoClass
*klass
)
1398 int ibitmap_reg
= alloc_preg (cfg
);
1399 int ibitmap_byte_reg
= alloc_preg (cfg
);
1401 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, ibitmap_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, interface_bitmap
));
1403 if (cfg
->compile_aot
) {
1404 int iid_reg
= alloc_preg (cfg
);
1405 int shifted_iid_reg
= alloc_preg (cfg
);
1406 int ibitmap_byte_address_reg
= alloc_preg (cfg
);
1407 int masked_iid_reg
= alloc_preg (cfg
);
1408 int iid_one_bit_reg
= alloc_preg (cfg
);
1409 int iid_bit_reg
= alloc_preg (cfg
);
1410 MONO_EMIT_NEW_AOTCONST (cfg
, iid_reg
, klass
, MONO_PATCH_INFO_IID
);
1411 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_ISHR_IMM
, shifted_iid_reg
, iid_reg
, 3);
1412 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, ibitmap_byte_address_reg
, ibitmap_reg
, shifted_iid_reg
);
1413 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU1_MEMBASE
, ibitmap_byte_reg
, ibitmap_byte_address_reg
, 0);
1414 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_AND_IMM
, masked_iid_reg
, iid_reg
, 7);
1415 MONO_EMIT_NEW_ICONST (cfg
, iid_one_bit_reg
, 1);
1416 MONO_EMIT_NEW_BIALU (cfg
, OP_ISHL
, iid_bit_reg
, iid_one_bit_reg
, masked_iid_reg
);
1417 MONO_EMIT_NEW_BIALU (cfg
, OP_IAND
, intf_bit_reg
, ibitmap_byte_reg
, iid_bit_reg
);
1419 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI1_MEMBASE
, ibitmap_byte_reg
, ibitmap_reg
, klass
->interface_id
>> 3);
1420 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_IAND_IMM
, intf_bit_reg
, ibitmap_byte_reg
, 1 << (klass
->interface_id
& 7));
1425 * Emit code which checks whenever the interface id of @klass is smaller than
1426 * than the value given by max_iid_reg.
1429 mini_emit_max_iid_check (MonoCompile
*cfg
, int max_iid_reg
, MonoClass
*klass
,
1430 MonoBasicBlock
*false_target
)
1432 if (cfg
->compile_aot
) {
1433 int iid_reg
= alloc_preg (cfg
);
1434 MONO_EMIT_NEW_AOTCONST (cfg
, iid_reg
, klass
, MONO_PATCH_INFO_IID
);
1435 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, max_iid_reg
, iid_reg
);
1438 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, max_iid_reg
, klass
->interface_id
);
1440 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBLT_UN
, false_target
);
1442 MONO_EMIT_NEW_COND_EXC (cfg
, LT_UN
, "InvalidCastException");
1445 /* Same as above, but obtains max_iid from a vtable */
1447 mini_emit_max_iid_check_vtable (MonoCompile
*cfg
, int vtable_reg
, MonoClass
*klass
,
1448 MonoBasicBlock
*false_target
)
1450 int max_iid_reg
= alloc_preg (cfg
);
1452 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU2_MEMBASE
, max_iid_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, max_interface_id
));
1453 mini_emit_max_iid_check (cfg
, max_iid_reg
, klass
, false_target
);
1456 /* Same as above, but obtains max_iid from a klass */
1458 mini_emit_max_iid_check_class (MonoCompile
*cfg
, int klass_reg
, MonoClass
*klass
,
1459 MonoBasicBlock
*false_target
)
1461 int max_iid_reg
= alloc_preg (cfg
);
1463 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU2_MEMBASE
, max_iid_reg
, klass_reg
, G_STRUCT_OFFSET (MonoClass
, max_interface_id
));
1464 mini_emit_max_iid_check (cfg
, max_iid_reg
, klass
, false_target
);
1468 mini_emit_isninst_cast (MonoCompile
*cfg
, int klass_reg
, MonoClass
*klass
, MonoBasicBlock
*false_target
, MonoBasicBlock
*true_target
)
1470 int idepth_reg
= alloc_preg (cfg
);
1471 int stypes_reg
= alloc_preg (cfg
);
1472 int stype
= alloc_preg (cfg
);
1474 if (klass
->idepth
> MONO_DEFAULT_SUPERTABLE_SIZE
) {
1475 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU2_MEMBASE
, idepth_reg
, klass_reg
, G_STRUCT_OFFSET (MonoClass
, idepth
));
1476 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, idepth_reg
, klass
->idepth
);
1477 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBLT_UN
, false_target
);
1479 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, stypes_reg
, klass_reg
, G_STRUCT_OFFSET (MonoClass
, supertypes
));
1480 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, stype
, stypes_reg
, ((klass
->idepth
- 1) * SIZEOF_VOID_P
));
1481 if (cfg
->compile_aot
) {
1482 int const_reg
= alloc_preg (cfg
);
1483 MONO_EMIT_NEW_CLASSCONST (cfg
, const_reg
, klass
);
1484 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, stype
, const_reg
);
1486 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, stype
, klass
);
1488 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBEQ
, true_target
);
1492 mini_emit_iface_cast (MonoCompile
*cfg
, int vtable_reg
, MonoClass
*klass
, MonoBasicBlock
*false_target
, MonoBasicBlock
*true_target
)
1494 int intf_reg
= alloc_preg (cfg
);
1496 mini_emit_max_iid_check_vtable (cfg
, vtable_reg
, klass
, false_target
);
1497 mini_emit_load_intf_bit_reg_vtable (cfg
, intf_reg
, vtable_reg
, klass
);
1498 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, intf_reg
, 0);
1500 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBNE_UN
, true_target
);
1502 MONO_EMIT_NEW_COND_EXC (cfg
, EQ
, "InvalidCastException");
1506 * Variant of the above that takes a register to the class, not the vtable.
1509 mini_emit_iface_class_cast (MonoCompile
*cfg
, int klass_reg
, MonoClass
*klass
, MonoBasicBlock
*false_target
, MonoBasicBlock
*true_target
)
1511 int intf_bit_reg
= alloc_preg (cfg
);
1513 mini_emit_max_iid_check_class (cfg
, klass_reg
, klass
, false_target
);
1514 mini_emit_load_intf_bit_reg_class (cfg
, intf_bit_reg
, klass_reg
, klass
);
1515 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, intf_bit_reg
, 0);
1517 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBNE_UN
, true_target
);
1519 MONO_EMIT_NEW_COND_EXC (cfg
, EQ
, "InvalidCastException");
1523 mini_emit_class_check (MonoCompile
*cfg
, int klass_reg
, MonoClass
*klass
)
1525 if (cfg
->compile_aot
) {
1526 int const_reg
= alloc_preg (cfg
);
1527 MONO_EMIT_NEW_CLASSCONST (cfg
, const_reg
, klass
);
1528 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, klass_reg
, const_reg
);
1530 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, klass_reg
, klass
);
1532 MONO_EMIT_NEW_COND_EXC (cfg
, NE_UN
, "InvalidCastException");
1536 mini_emit_class_check_branch (MonoCompile
*cfg
, int klass_reg
, MonoClass
*klass
, int branch_op
, MonoBasicBlock
*target
)
1538 if (cfg
->compile_aot
) {
1539 int const_reg
= alloc_preg (cfg
);
1540 MONO_EMIT_NEW_CLASSCONST (cfg
, const_reg
, klass
);
1541 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, klass_reg
, const_reg
);
1543 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, klass_reg
, klass
);
1545 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, branch_op
, target
);
1549 mini_emit_castclass (MonoCompile
*cfg
, int obj_reg
, int klass_reg
, MonoClass
*klass
, MonoBasicBlock
*object_is_null
)
1552 int rank_reg
= alloc_preg (cfg
);
1553 int eclass_reg
= alloc_preg (cfg
);
1555 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU1_MEMBASE
, rank_reg
, klass_reg
, G_STRUCT_OFFSET (MonoClass
, rank
));
1556 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, rank_reg
, klass
->rank
);
1557 MONO_EMIT_NEW_COND_EXC (cfg
, NE_UN
, "InvalidCastException");
1558 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1559 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, eclass_reg
, klass_reg
, G_STRUCT_OFFSET (MonoClass
, cast_class
));
1560 if (klass
->cast_class
== mono_defaults
.object_class
) {
1561 int parent_reg
= alloc_preg (cfg
);
1562 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, parent_reg
, eclass_reg
, G_STRUCT_OFFSET (MonoClass
, parent
));
1563 mini_emit_class_check_branch (cfg
, parent_reg
, mono_defaults
.enum_class
->parent
, OP_PBNE_UN
, object_is_null
);
1564 mini_emit_class_check (cfg
, eclass_reg
, mono_defaults
.enum_class
);
1565 } else if (klass
->cast_class
== mono_defaults
.enum_class
->parent
) {
1566 mini_emit_class_check_branch (cfg
, eclass_reg
, mono_defaults
.enum_class
->parent
, OP_PBEQ
, object_is_null
);
1567 mini_emit_class_check (cfg
, eclass_reg
, mono_defaults
.enum_class
);
1568 } else if (klass
->cast_class
== mono_defaults
.enum_class
) {
1569 mini_emit_class_check (cfg
, eclass_reg
, mono_defaults
.enum_class
);
1570 } else if (klass
->cast_class
->flags
& TYPE_ATTRIBUTE_INTERFACE
) {
1571 mini_emit_iface_class_cast (cfg
, eclass_reg
, klass
->cast_class
, NULL
, NULL
);
1573 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1574 mini_emit_castclass (cfg
, -1, eclass_reg
, klass
->cast_class
, object_is_null
);
1577 if ((klass
->rank
== 1) && (klass
->byval_arg
.type
== MONO_TYPE_SZARRAY
) && (obj_reg
!= -1)) {
1578 /* Check that the object is a vector too */
1579 int bounds_reg
= alloc_preg (cfg
);
1580 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, bounds_reg
, obj_reg
, G_STRUCT_OFFSET (MonoArray
, bounds
));
1581 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, bounds_reg
, 0);
1582 MONO_EMIT_NEW_COND_EXC (cfg
, NE_UN
, "InvalidCastException");
1585 int idepth_reg
= alloc_preg (cfg
);
1586 int stypes_reg
= alloc_preg (cfg
);
1587 int stype
= alloc_preg (cfg
);
1589 if (klass
->idepth
> MONO_DEFAULT_SUPERTABLE_SIZE
) {
1590 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU2_MEMBASE
, idepth_reg
, klass_reg
, G_STRUCT_OFFSET (MonoClass
, idepth
));
1591 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, idepth_reg
, klass
->idepth
);
1592 MONO_EMIT_NEW_COND_EXC (cfg
, LT_UN
, "InvalidCastException");
1594 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, stypes_reg
, klass_reg
, G_STRUCT_OFFSET (MonoClass
, supertypes
));
1595 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, stype
, stypes_reg
, ((klass
->idepth
- 1) * SIZEOF_VOID_P
));
1596 mini_emit_class_check (cfg
, stype
, klass
);
1601 mini_emit_memset (MonoCompile
*cfg
, int destreg
, int offset
, int size
, int val
, int align
)
1605 g_assert (val
== 0);
1610 if ((size
<= 4) && (size
<= align
)) {
1613 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STOREI1_MEMBASE_IMM
, destreg
, offset
, val
);
1616 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STOREI2_MEMBASE_IMM
, destreg
, offset
, val
);
1619 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STOREI4_MEMBASE_IMM
, destreg
, offset
, val
);
1621 #if SIZEOF_REGISTER == 8
1623 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STOREI8_MEMBASE_IMM
, destreg
, offset
, val
);
1629 val_reg
= alloc_preg (cfg
);
1631 if (SIZEOF_REGISTER
== 8)
1632 MONO_EMIT_NEW_I8CONST (cfg
, val_reg
, val
);
1634 MONO_EMIT_NEW_ICONST (cfg
, val_reg
, val
);
1637 /* This could be optimized further if neccesary */
1639 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI1_MEMBASE_REG
, destreg
, offset
, val_reg
);
1646 #if !NO_UNALIGNED_ACCESS
1647 if (SIZEOF_REGISTER
== 8) {
1649 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI4_MEMBASE_REG
, destreg
, offset
, val_reg
);
1654 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI8_MEMBASE_REG
, destreg
, offset
, val_reg
);
1662 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI4_MEMBASE_REG
, destreg
, offset
, val_reg
);
1667 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI2_MEMBASE_REG
, destreg
, offset
, val_reg
);
1672 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI1_MEMBASE_REG
, destreg
, offset
, val_reg
);
1678 #endif /* DISABLE_JIT */
1681 mini_emit_memcpy (MonoCompile
*cfg
, int destreg
, int doffset
, int srcreg
, int soffset
, int size
, int align
)
1689 /* This could be optimized further if neccesary */
1691 cur_reg
= alloc_preg (cfg
);
1692 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI1_MEMBASE
, cur_reg
, srcreg
, soffset
);
1693 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI1_MEMBASE_REG
, destreg
, doffset
, cur_reg
);
1700 #if !NO_UNALIGNED_ACCESS
1701 if (SIZEOF_REGISTER
== 8) {
1703 cur_reg
= alloc_preg (cfg
);
1704 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI8_MEMBASE
, cur_reg
, srcreg
, soffset
);
1705 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI8_MEMBASE_REG
, destreg
, doffset
, cur_reg
);
1714 cur_reg
= alloc_preg (cfg
);
1715 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI4_MEMBASE
, cur_reg
, srcreg
, soffset
);
1716 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI4_MEMBASE_REG
, destreg
, doffset
, cur_reg
);
1722 cur_reg
= alloc_preg (cfg
);
1723 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI2_MEMBASE
, cur_reg
, srcreg
, soffset
);
1724 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI2_MEMBASE_REG
, destreg
, doffset
, cur_reg
);
1730 cur_reg
= alloc_preg (cfg
);
1731 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI1_MEMBASE
, cur_reg
, srcreg
, soffset
);
1732 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI1_MEMBASE_REG
, destreg
, doffset
, cur_reg
);
1742 ret_type_to_call_opcode (MonoType
*type
, int calli
, int virt
, MonoGenericSharingContext
*gsctx
)
1745 return calli
? OP_CALL_REG
: virt
? OP_CALLVIRT
: OP_CALL
;
1748 type
= mini_get_basic_type_from_generic (gsctx
, type
);
1749 switch (type
->type
) {
1750 case MONO_TYPE_VOID
:
1751 return calli
? OP_VOIDCALL_REG
: virt
? OP_VOIDCALLVIRT
: OP_VOIDCALL
;
1754 case MONO_TYPE_BOOLEAN
:
1757 case MONO_TYPE_CHAR
:
1760 return calli
? OP_CALL_REG
: virt
? OP_CALLVIRT
: OP_CALL
;
1764 case MONO_TYPE_FNPTR
:
1765 return calli
? OP_CALL_REG
: virt
? OP_CALLVIRT
: OP_CALL
;
1766 case MONO_TYPE_CLASS
:
1767 case MONO_TYPE_STRING
:
1768 case MONO_TYPE_OBJECT
:
1769 case MONO_TYPE_SZARRAY
:
1770 case MONO_TYPE_ARRAY
:
1771 return calli
? OP_CALL_REG
: virt
? OP_CALLVIRT
: OP_CALL
;
1774 return calli
? OP_LCALL_REG
: virt
? OP_LCALLVIRT
: OP_LCALL
;
1777 return calli
? OP_FCALL_REG
: virt
? OP_FCALLVIRT
: OP_FCALL
;
1778 case MONO_TYPE_VALUETYPE
:
1779 if (type
->data
.klass
->enumtype
) {
1780 type
= mono_class_enum_basetype (type
->data
.klass
);
1783 return calli
? OP_VCALL_REG
: virt
? OP_VCALLVIRT
: OP_VCALL
;
1784 case MONO_TYPE_TYPEDBYREF
:
1785 return calli
? OP_VCALL_REG
: virt
? OP_VCALLVIRT
: OP_VCALL
;
1786 case MONO_TYPE_GENERICINST
:
1787 type
= &type
->data
.generic_class
->container_class
->byval_arg
;
1790 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type
->type
);
1796 * target_type_is_incompatible:
1797 * @cfg: MonoCompile context
1799 * Check that the item @arg on the evaluation stack can be stored
1800 * in the target type (can be a local, or field, etc).
1801 * The cfg arg can be used to check if we need verification or just
1804 * Returns: non-0 value if arg can't be stored on a target.
1807 target_type_is_incompatible (MonoCompile
*cfg
, MonoType
*target
, MonoInst
*arg
)
1809 MonoType
*simple_type
;
1812 if (target
->byref
) {
1813 /* FIXME: check that the pointed to types match */
1814 if (arg
->type
== STACK_MP
)
1815 return arg
->klass
!= mono_class_from_mono_type (target
);
1816 if (arg
->type
== STACK_PTR
)
1821 simple_type
= mono_type_get_underlying_type (target
);
1822 switch (simple_type
->type
) {
1823 case MONO_TYPE_VOID
:
1827 case MONO_TYPE_BOOLEAN
:
1830 case MONO_TYPE_CHAR
:
1833 if (arg
->type
!= STACK_I4
&& arg
->type
!= STACK_PTR
)
1837 /* STACK_MP is needed when setting pinned locals */
1838 if (arg
->type
!= STACK_I4
&& arg
->type
!= STACK_PTR
&& arg
->type
!= STACK_MP
)
1843 case MONO_TYPE_FNPTR
:
1844 if (arg
->type
!= STACK_I4
&& arg
->type
!= STACK_PTR
)
1847 case MONO_TYPE_CLASS
:
1848 case MONO_TYPE_STRING
:
1849 case MONO_TYPE_OBJECT
:
1850 case MONO_TYPE_SZARRAY
:
1851 case MONO_TYPE_ARRAY
:
1852 if (arg
->type
!= STACK_OBJ
)
1854 /* FIXME: check type compatibility */
1858 if (arg
->type
!= STACK_I8
)
1863 if (arg
->type
!= STACK_R8
)
1866 case MONO_TYPE_VALUETYPE
:
1867 if (arg
->type
!= STACK_VTYPE
)
1869 klass
= mono_class_from_mono_type (simple_type
);
1870 if (klass
!= arg
->klass
)
1873 case MONO_TYPE_TYPEDBYREF
:
1874 if (arg
->type
!= STACK_VTYPE
)
1876 klass
= mono_class_from_mono_type (simple_type
);
1877 if (klass
!= arg
->klass
)
1880 case MONO_TYPE_GENERICINST
:
1881 if (mono_type_generic_inst_is_valuetype (simple_type
)) {
1882 if (arg
->type
!= STACK_VTYPE
)
1884 klass
= mono_class_from_mono_type (simple_type
);
1885 if (klass
!= arg
->klass
)
1889 if (arg
->type
!= STACK_OBJ
)
1891 /* FIXME: check type compatibility */
1895 case MONO_TYPE_MVAR
:
1896 /* FIXME: all the arguments must be references for now,
1897 * later look inside cfg and see if the arg num is
1898 * really a reference
1900 g_assert (cfg
->generic_sharing_context
);
1901 if (arg
->type
!= STACK_OBJ
)
1905 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type
->type
);
1911 * Prepare arguments for passing to a function call.
1912 * Return a non-zero value if the arguments can't be passed to the given
1914 * The type checks are not yet complete and some conversions may need
1915 * casts on 32 or 64 bit architectures.
1917 * FIXME: implement this using target_type_is_incompatible ()
1920 check_call_signature (MonoCompile
*cfg
, MonoMethodSignature
*sig
, MonoInst
**args
)
1922 MonoType
*simple_type
;
1926 if (args
[0]->type
!= STACK_OBJ
&& args
[0]->type
!= STACK_MP
&& args
[0]->type
!= STACK_PTR
)
1930 for (i
= 0; i
< sig
->param_count
; ++i
) {
1931 if (sig
->params
[i
]->byref
) {
1932 if (args
[i
]->type
!= STACK_MP
&& args
[i
]->type
!= STACK_PTR
)
1936 simple_type
= sig
->params
[i
];
1937 simple_type
= mini_get_basic_type_from_generic (cfg
->generic_sharing_context
, simple_type
);
1939 switch (simple_type
->type
) {
1940 case MONO_TYPE_VOID
:
1945 case MONO_TYPE_BOOLEAN
:
1948 case MONO_TYPE_CHAR
:
1951 if (args
[i
]->type
!= STACK_I4
&& args
[i
]->type
!= STACK_PTR
)
1957 case MONO_TYPE_FNPTR
:
1958 if (args
[i
]->type
!= STACK_I4
&& args
[i
]->type
!= STACK_PTR
&& args
[i
]->type
!= STACK_MP
&& args
[i
]->type
!= STACK_OBJ
)
1961 case MONO_TYPE_CLASS
:
1962 case MONO_TYPE_STRING
:
1963 case MONO_TYPE_OBJECT
:
1964 case MONO_TYPE_SZARRAY
:
1965 case MONO_TYPE_ARRAY
:
1966 if (args
[i
]->type
!= STACK_OBJ
)
1971 if (args
[i
]->type
!= STACK_I8
)
1976 if (args
[i
]->type
!= STACK_R8
)
1979 case MONO_TYPE_VALUETYPE
:
1980 if (simple_type
->data
.klass
->enumtype
) {
1981 simple_type
= mono_class_enum_basetype (simple_type
->data
.klass
);
1984 if (args
[i
]->type
!= STACK_VTYPE
)
1987 case MONO_TYPE_TYPEDBYREF
:
1988 if (args
[i
]->type
!= STACK_VTYPE
)
1991 case MONO_TYPE_GENERICINST
:
1992 simple_type
= &simple_type
->data
.generic_class
->container_class
->byval_arg
;
1996 g_error ("unknown type 0x%02x in check_call_signature",
2004 callvirt_to_call (int opcode
)
2009 case OP_VOIDCALLVIRT
:
2018 g_assert_not_reached ();
2025 callvirt_to_call_membase (int opcode
)
2029 return OP_CALL_MEMBASE
;
2030 case OP_VOIDCALLVIRT
:
2031 return OP_VOIDCALL_MEMBASE
;
2033 return OP_FCALL_MEMBASE
;
2035 return OP_LCALL_MEMBASE
;
2037 return OP_VCALL_MEMBASE
;
2039 g_assert_not_reached ();
#ifdef MONO_ARCH_HAVE_IMT
/*
 * Emit the hidden IMT argument for an interface call: either the supplied
 * imt_arg, or the target method pointer (AOT constant or immediate).
 * On targets with a dedicated IMT register the value goes into
 * MONO_ARCH_IMT_REG; otherwise the backend hook is invoked.
 * NOTE(review): branch structure restored from the garbled source; the `if
 * (imt_arg)` guard is inferred from the surviving else-if chain.
 */
static void
emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
{
#ifdef MONO_ARCH_IMT_REG
	int method_reg = alloc_preg (cfg);

	if (imt_arg) {
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
	} else if (cfg->compile_aot) {
		MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
	} else {
		MonoInst *ins;
		MONO_INST_NEW (cfg, ins, OP_PCONST);
		ins->inst_p0 = call->method;
		ins->dreg = method_reg;
		MONO_ADD_INS (cfg->cbb, ins);
	}

	mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
#else
	mono_arch_emit_imt_argument (cfg, call, imt_arg);
#endif
}
#endif
2071 static MonoJumpInfo
*
2072 mono_patch_info_new (MonoMemPool
*mp
, int ip
, MonoJumpInfoType type
, gconstpointer target
)
2074 MonoJumpInfo
*ji
= mono_mempool_alloc (mp
, sizeof (MonoJumpInfo
));
2078 ji
->data
.target
= target
;
2083 inline static MonoInst
*
2084 mono_emit_jit_icall (MonoCompile
*cfg
, gconstpointer func
, MonoInst
**args
);
2086 inline static MonoCallInst
*
2087 mono_emit_call_args (MonoCompile
*cfg
, MonoMethodSignature
*sig
,
2088 MonoInst
**args
, int calli
, int virtual)
2091 #ifdef MONO_ARCH_SOFT_FLOAT
2095 MONO_INST_NEW_CALL (cfg
, call
, ret_type_to_call_opcode (sig
->ret
, calli
, virtual, cfg
->generic_sharing_context
));
2098 call
->signature
= sig
;
2100 type_to_eval_stack_type ((cfg
), sig
->ret
, &call
->inst
);
2102 if (MONO_TYPE_ISSTRUCT (sig
->ret
)) {
2103 MonoInst
*temp
= mono_compile_create_var (cfg
, sig
->ret
, OP_LOCAL
);
2106 temp
->backend
.is_pinvoke
= sig
->pinvoke
;
2109 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2110 * address of return value to increase optimization opportunities.
2111 * Before vtype decomposition, the dreg of the call ins itself represents the
2112 * fact the call modifies the return value. After decomposition, the call will
2113 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2114 * will be transformed into an LDADDR.
2116 MONO_INST_NEW (cfg
, loada
, OP_OUTARG_VTRETADDR
);
2117 loada
->dreg
= alloc_preg (cfg
);
2118 loada
->inst_p0
= temp
;
2119 /* We reference the call too since call->dreg could change during optimization */
2120 loada
->inst_p1
= call
;
2121 MONO_ADD_INS (cfg
->cbb
, loada
);
2123 call
->inst
.dreg
= temp
->dreg
;
2125 call
->vret_var
= loada
;
2126 } else if (!MONO_TYPE_IS_VOID (sig
->ret
))
2127 call
->inst
.dreg
= alloc_dreg (cfg
, call
->inst
.type
);
2129 #ifdef MONO_ARCH_SOFT_FLOAT
2131 * If the call has a float argument, we would need to do an r8->r4 conversion using
2132 * an icall, but that cannot be done during the call sequence since it would clobber
2133 * the call registers + the stack. So we do it before emitting the call.
2135 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
2137 MonoInst
*in
= call
->args
[i
];
2139 if (i
>= sig
->hasthis
)
2140 t
= sig
->params
[i
- sig
->hasthis
];
2142 t
= &mono_defaults
.int_class
->byval_arg
;
2143 t
= mono_type_get_underlying_type (t
);
2145 if (!t
->byref
&& t
->type
== MONO_TYPE_R4
) {
2146 MonoInst
*iargs
[1];
2150 conv
= mono_emit_jit_icall (cfg
, mono_fload_r4_arg
, iargs
);
2152 /* The result will be in an int vreg */
2153 call
->args
[i
] = conv
;
2158 mono_arch_emit_call (cfg
, call
);
2160 cfg
->param_area
= MAX (cfg
->param_area
, call
->stack_usage
);
2161 cfg
->flags
|= MONO_CFG_HAS_CALLS
;
2166 inline static MonoInst
*
2167 mono_emit_calli (MonoCompile
*cfg
, MonoMethodSignature
*sig
, MonoInst
**args
, MonoInst
*addr
)
2169 MonoCallInst
*call
= mono_emit_call_args (cfg
, sig
, args
, TRUE
, FALSE
);
2171 call
->inst
.sreg1
= addr
->dreg
;
2173 MONO_ADD_INS (cfg
->cbb
, (MonoInst
*)call
);
2175 return (MonoInst
*)call
;
2178 inline static MonoInst
*
2179 mono_emit_rgctx_calli (MonoCompile
*cfg
, MonoMethodSignature
*sig
, MonoInst
**args
, MonoInst
*addr
, MonoInst
*rgctx_arg
)
2181 #ifdef MONO_ARCH_RGCTX_REG
2186 rgctx_reg
= mono_alloc_preg (cfg
);
2187 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, rgctx_reg
, rgctx_arg
->dreg
);
2189 call
= (MonoCallInst
*)mono_emit_calli (cfg
, sig
, args
, addr
);
2191 mono_call_inst_add_outarg_reg (cfg
, call
, rgctx_reg
, MONO_ARCH_RGCTX_REG
, FALSE
);
2192 cfg
->uses_rgctx_reg
= TRUE
;
2194 return (MonoInst
*)call
;
2196 g_assert_not_reached ();
2202 mono_emit_method_call_full (MonoCompile
*cfg
, MonoMethod
*method
, MonoMethodSignature
*sig
,
2203 MonoInst
**args
, MonoInst
*this, MonoInst
*imt_arg
)
2205 gboolean
virtual = this != NULL
;
2206 gboolean enable_for_aot
= TRUE
;
2209 if (method
->string_ctor
) {
2210 /* Create the real signature */
2211 /* FIXME: Cache these */
2212 MonoMethodSignature
*ctor_sig
= mono_metadata_signature_dup_mempool (cfg
->mempool
, sig
);
2213 ctor_sig
->ret
= &mono_defaults
.string_class
->byval_arg
;
2218 call
= mono_emit_call_args (cfg
, sig
, args
, FALSE
, virtual);
2220 if (this && sig
->hasthis
&&
2221 (method
->klass
->marshalbyref
|| method
->klass
== mono_defaults
.object_class
) &&
2222 !(method
->flags
& METHOD_ATTRIBUTE_VIRTUAL
) && !MONO_CHECK_THIS (this)) {
2223 call
->method
= mono_marshal_get_remoting_invoke_with_check (method
);
2225 call
->method
= method
;
2227 call
->inst
.flags
|= MONO_INST_HAS_METHOD
;
2228 call
->inst
.inst_left
= this;
2231 int vtable_reg
, slot_reg
, this_reg
;
2233 this_reg
= this->dreg
;
2235 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
2236 if ((method
->klass
->parent
== mono_defaults
.multicastdelegate_class
) && (!strcmp (method
->name
, "Invoke"))) {
2237 /* Make a call to delegate->invoke_impl */
2238 call
->inst
.opcode
= callvirt_to_call_membase (call
->inst
.opcode
);
2239 call
->inst
.inst_basereg
= this_reg
;
2240 call
->inst
.inst_offset
= G_STRUCT_OFFSET (MonoDelegate
, invoke_impl
);
2241 MONO_ADD_INS (cfg
->cbb
, (MonoInst
*)call
);
2243 return (MonoInst
*)call
;
2247 if ((!cfg
->compile_aot
|| enable_for_aot
) &&
2248 (!(method
->flags
& METHOD_ATTRIBUTE_VIRTUAL
) ||
2249 (MONO_METHOD_IS_FINAL (method
) &&
2250 method
->wrapper_type
!= MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK
))) {
2252 * the method is not virtual, we just need to ensure this is not null
2253 * and then we can call the method directly.
2255 if (method
->klass
->marshalbyref
|| method
->klass
== mono_defaults
.object_class
) {
2256 method
= call
->method
= mono_marshal_get_remoting_invoke_with_check (method
);
2259 if (!method
->string_ctor
) {
2260 cfg
->flags
|= MONO_CFG_HAS_CHECK_THIS
;
2261 MONO_EMIT_NEW_UNALU (cfg
, OP_CHECK_THIS
, -1, this_reg
);
2262 MONO_EMIT_NEW_UNALU (cfg
, OP_NOT_NULL
, -1, this_reg
);
2265 call
->inst
.opcode
= callvirt_to_call (call
->inst
.opcode
);
2267 MONO_ADD_INS (cfg
->cbb
, (MonoInst
*)call
);
2269 return (MonoInst
*)call
;
2272 if ((method
->flags
& METHOD_ATTRIBUTE_VIRTUAL
) && MONO_METHOD_IS_FINAL (method
)) {
2274 * the method is virtual, but we can statically dispatch since either
2275 * it's class or the method itself are sealed.
2276 * But first we need to ensure it's not a null reference.
2278 cfg
->flags
|= MONO_CFG_HAS_CHECK_THIS
;
2279 MONO_EMIT_NEW_UNALU (cfg
, OP_CHECK_THIS
, -1, this_reg
);
2280 MONO_EMIT_NEW_UNALU (cfg
, OP_NOT_NULL
, -1, this_reg
);
2282 call
->inst
.opcode
= callvirt_to_call (call
->inst
.opcode
);
2283 MONO_ADD_INS (cfg
->cbb
, (MonoInst
*)call
);
2285 return (MonoInst
*)call
;
2288 call
->inst
.opcode
= callvirt_to_call_membase (call
->inst
.opcode
);
2290 vtable_reg
= alloc_preg (cfg
);
2291 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, vtable_reg
, this_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
2292 if (method
->klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
) {
2294 #ifdef MONO_ARCH_HAVE_IMT
2296 guint32 imt_slot
= mono_method_get_imt_slot (method
);
2297 emit_imt_argument (cfg
, call
, imt_arg
);
2298 slot_reg
= vtable_reg
;
2299 call
->inst
.inst_offset
= ((gint32
)imt_slot
- MONO_IMT_SIZE
) * SIZEOF_VOID_P
;
2302 if (slot_reg
== -1) {
2303 slot_reg
= alloc_preg (cfg
);
2304 mini_emit_load_intf_reg_vtable (cfg
, slot_reg
, vtable_reg
, method
->klass
);
2305 call
->inst
.inst_offset
= mono_method_get_vtable_index (method
) * SIZEOF_VOID_P
;
2308 slot_reg
= vtable_reg
;
2309 call
->inst
.inst_offset
= G_STRUCT_OFFSET (MonoVTable
, vtable
) +
2310 (mono_method_get_vtable_index (method
) * SIZEOF_VOID_P
);
2311 #ifdef MONO_ARCH_HAVE_IMT
2313 g_assert (mono_method_signature (method
)->generic_param_count
);
2314 emit_imt_argument (cfg
, call
, imt_arg
);
2319 call
->inst
.sreg1
= slot_reg
;
2320 call
->virtual = TRUE
;
2323 MONO_ADD_INS (cfg
->cbb
, (MonoInst
*)call
);
2325 return (MonoInst
*)call
;
2329 mono_emit_rgctx_method_call_full (MonoCompile
*cfg
, MonoMethod
*method
, MonoMethodSignature
*sig
,
2330 MonoInst
**args
, MonoInst
*this, MonoInst
*imt_arg
, MonoInst
*vtable_arg
)
2337 #ifdef MONO_ARCH_RGCTX_REG
2338 rgctx_reg
= mono_alloc_preg (cfg
);
2339 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, rgctx_reg
, vtable_arg
->dreg
);
2344 ins
= mono_emit_method_call_full (cfg
, method
, sig
, args
, this, imt_arg
);
2346 call
= (MonoCallInst
*)ins
;
2348 #ifdef MONO_ARCH_RGCTX_REG
2349 mono_call_inst_add_outarg_reg (cfg
, call
, rgctx_reg
, MONO_ARCH_RGCTX_REG
, FALSE
);
2350 cfg
->uses_rgctx_reg
= TRUE
;
2359 static inline MonoInst
*
2360 mono_emit_method_call (MonoCompile
*cfg
, MonoMethod
*method
, MonoInst
**args
, MonoInst
*this)
2362 return mono_emit_method_call_full (cfg
, method
, mono_method_signature (method
), args
, this, NULL
);
2366 mono_emit_native_call (MonoCompile
*cfg
, gconstpointer func
, MonoMethodSignature
*sig
,
2373 call
= mono_emit_call_args (cfg
, sig
, args
, FALSE
, FALSE
);
2376 MONO_ADD_INS (cfg
->cbb
, (MonoInst
*)call
);
2378 return (MonoInst
*)call
;
2381 inline static MonoInst
*
2382 mono_emit_jit_icall (MonoCompile
*cfg
, gconstpointer func
, MonoInst
**args
)
2384 MonoJitICallInfo
*info
= mono_find_jit_icall_by_addr (func
);
2388 return mono_emit_native_call (cfg
, mono_icall_get_wrapper (info
), info
->sig
, args
);
2392 * mono_emit_abs_call:
2394 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2396 inline static MonoInst
*
2397 mono_emit_abs_call (MonoCompile
*cfg
, MonoJumpInfoType patch_type
, gconstpointer data
,
2398 MonoMethodSignature
*sig
, MonoInst
**args
)
2400 MonoJumpInfo
*ji
= mono_patch_info_new (cfg
->mempool
, 0, patch_type
, data
);
2404 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2407 if (cfg
->abs_patches
== NULL
)
2408 cfg
->abs_patches
= g_hash_table_new (NULL
, NULL
);
2409 g_hash_table_insert (cfg
->abs_patches
, ji
, ji
);
2410 ins
= mono_emit_native_call (cfg
, ji
, sig
, args
);
2411 ((MonoCallInst
*)ins
)->fptr_is_patch
= TRUE
;
2416 get_memcpy_method (void)
2418 static MonoMethod
*memcpy_method
= NULL
;
2419 if (!memcpy_method
) {
2420 memcpy_method
= mono_class_get_method_from_name (mono_defaults
.string_class
, "memcpy", 3);
2422 g_error ("Old corlib found. Install a new one");
2424 return memcpy_method
;
2428 * Emit code to copy a valuetype of type @klass whose address is stored in
2429 * @src->dreg to memory whose address is stored at @dest->dreg.
2432 mini_emit_stobj (MonoCompile
*cfg
, MonoInst
*dest
, MonoInst
*src
, MonoClass
*klass
, gboolean native
)
2434 MonoInst
*iargs
[3];
2437 MonoMethod
*memcpy_method
;
2441 * This check breaks with spilled vars... need to handle it during verification anyway.
2442 * g_assert (klass && klass == src->klass && klass == dest->klass);
2446 n
= mono_class_native_size (klass
, &align
);
2448 n
= mono_class_value_size (klass
, &align
);
2450 #if HAVE_WRITE_BARRIERS
2451 /* if native is true there should be no references in the struct */
2452 if (klass
->has_references
&& !native
) {
2453 /* Avoid barriers when storing to the stack */
2454 if (!((dest
->opcode
== OP_ADD_IMM
&& dest
->sreg1
== cfg
->frame_reg
) ||
2455 (dest
->opcode
== OP_LDADDR
))) {
2458 EMIT_NEW_PCONST (cfg
, iargs
[2], klass
);
2460 mono_emit_jit_icall (cfg
, mono_value_copy
, iargs
);
2465 if ((cfg
->opt
& MONO_OPT_INTRINS
) && n
<= sizeof (gpointer
) * 5) {
2466 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2467 mini_emit_memcpy (cfg
, dest
->dreg
, 0, src
->dreg
, 0, n
, align
);
2471 EMIT_NEW_ICONST (cfg
, iargs
[2], n
);
2473 memcpy_method
= get_memcpy_method ();
2474 mono_emit_method_call (cfg
, memcpy_method
, iargs
, NULL
);
2479 get_memset_method (void)
2481 static MonoMethod
*memset_method
= NULL
;
2482 if (!memset_method
) {
2483 memset_method
= mono_class_get_method_from_name (mono_defaults
.string_class
, "memset", 3);
2485 g_error ("Old corlib found. Install a new one");
2487 return memset_method
;
2491 mini_emit_initobj (MonoCompile
*cfg
, MonoInst
*dest
, const guchar
*ip
, MonoClass
*klass
)
2493 MonoInst
*iargs
[3];
2496 MonoMethod
*memset_method
;
2498 /* FIXME: Optimize this for the case when dest is an LDADDR */
2500 mono_class_init (klass
);
2501 n
= mono_class_value_size (klass
, &align
);
2503 if (n
<= sizeof (gpointer
) * 5) {
2504 mini_emit_memset (cfg
, dest
->dreg
, 0, n
, 0, align
);
2507 memset_method
= get_memset_method ();
2509 EMIT_NEW_ICONST (cfg
, iargs
[1], 0);
2510 EMIT_NEW_ICONST (cfg
, iargs
[2], n
);
2511 mono_emit_method_call (cfg
, memset_method
, iargs
, NULL
);
2516 emit_get_rgctx (MonoCompile
*cfg
, MonoMethod
*method
, int context_used
)
2518 MonoInst
*this = NULL
;
2520 g_assert (cfg
->generic_sharing_context
);
2522 if (!(method
->flags
& METHOD_ATTRIBUTE_STATIC
) &&
2523 !(context_used
& MONO_GENERIC_CONTEXT_USED_METHOD
) &&
2524 !method
->klass
->valuetype
)
2525 EMIT_NEW_ARGLOAD (cfg
, this, 0);
2527 if (context_used
& MONO_GENERIC_CONTEXT_USED_METHOD
) {
2528 MonoInst
*mrgctx_loc
, *mrgctx_var
;
2531 g_assert (method
->is_inflated
&& mono_method_get_context (method
)->method_inst
);
2533 mrgctx_loc
= mono_get_vtable_var (cfg
);
2534 EMIT_NEW_TEMPLOAD (cfg
, mrgctx_var
, mrgctx_loc
->inst_c0
);
2537 } else if (method
->flags
& METHOD_ATTRIBUTE_STATIC
|| method
->klass
->valuetype
) {
2538 MonoInst
*vtable_loc
, *vtable_var
;
2542 vtable_loc
= mono_get_vtable_var (cfg
);
2543 EMIT_NEW_TEMPLOAD (cfg
, vtable_var
, vtable_loc
->inst_c0
);
2545 if (method
->is_inflated
&& mono_method_get_context (method
)->method_inst
) {
2546 MonoInst
*mrgctx_var
= vtable_var
;
2549 vtable_reg
= alloc_preg (cfg
);
2550 EMIT_NEW_LOAD_MEMBASE (cfg
, vtable_var
, OP_LOAD_MEMBASE
, vtable_reg
, mrgctx_var
->dreg
, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext
, class_vtable
));
2551 vtable_var
->type
= STACK_PTR
;
2557 int vtable_reg
, res_reg
;
2559 vtable_reg
= alloc_preg (cfg
);
2560 res_reg
= alloc_preg (cfg
);
2561 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOAD_MEMBASE
, vtable_reg
, this->dreg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
2566 static MonoJumpInfoRgctxEntry
*
2567 mono_patch_info_rgctx_entry_new (MonoMemPool
*mp
, MonoMethod
*method
, gboolean in_mrgctx
, MonoJumpInfoType patch_type
, gconstpointer patch_data
, int info_type
)
2569 MonoJumpInfoRgctxEntry
*res
= mono_mempool_alloc0 (mp
, sizeof (MonoJumpInfoRgctxEntry
));
2570 res
->method
= method
;
2571 res
->in_mrgctx
= in_mrgctx
;
2572 res
->data
= mono_mempool_alloc0 (mp
, sizeof (MonoJumpInfo
));
2573 res
->data
->type
= patch_type
;
2574 res
->data
->data
.target
= patch_data
;
2575 res
->info_type
= info_type
;
2580 static inline MonoInst
*
2581 emit_rgctx_fetch (MonoCompile
*cfg
, MonoInst
*rgctx
, MonoJumpInfoRgctxEntry
*entry
)
2583 return mono_emit_abs_call (cfg
, MONO_PATCH_INFO_RGCTX_FETCH
, entry
, helper_sig_rgctx_lazy_fetch_trampoline
, &rgctx
);
2587 emit_get_rgctx_klass (MonoCompile
*cfg
, int context_used
,
2588 MonoClass
*klass
, int rgctx_type
)
2590 MonoJumpInfoRgctxEntry
*entry
= mono_patch_info_rgctx_entry_new (cfg
->mempool
, cfg
->current_method
, context_used
& MONO_GENERIC_CONTEXT_USED_METHOD
, MONO_PATCH_INFO_CLASS
, klass
, rgctx_type
);
2591 MonoInst
*rgctx
= emit_get_rgctx (cfg
, cfg
->current_method
, context_used
);
2593 return emit_rgctx_fetch (cfg
, rgctx
, entry
);
2597 emit_get_rgctx_method (MonoCompile
*cfg
, int context_used
,
2598 MonoMethod
*cmethod
, int rgctx_type
)
2600 MonoJumpInfoRgctxEntry
*entry
= mono_patch_info_rgctx_entry_new (cfg
->mempool
, cfg
->current_method
, context_used
& MONO_GENERIC_CONTEXT_USED_METHOD
, MONO_PATCH_INFO_METHODCONST
, cmethod
, rgctx_type
);
2601 MonoInst
*rgctx
= emit_get_rgctx (cfg
, cfg
->current_method
, context_used
);
2603 return emit_rgctx_fetch (cfg
, rgctx
, entry
);
2607 emit_get_rgctx_field (MonoCompile
*cfg
, int context_used
,
2608 MonoClassField
*field
, int rgctx_type
)
2610 MonoJumpInfoRgctxEntry
*entry
= mono_patch_info_rgctx_entry_new (cfg
->mempool
, cfg
->current_method
, context_used
& MONO_GENERIC_CONTEXT_USED_METHOD
, MONO_PATCH_INFO_FIELD
, field
, rgctx_type
);
2611 MonoInst
*rgctx
= emit_get_rgctx (cfg
, cfg
->current_method
, context_used
);
2613 return emit_rgctx_fetch (cfg
, rgctx
, entry
);
2617 mini_emit_check_array_type (MonoCompile
*cfg
, MonoInst
*obj
, MonoClass
*array_class
)
2619 int vtable_reg
= alloc_preg (cfg
);
2620 int context_used
= 0;
2622 if (cfg
->generic_sharing_context
)
2623 context_used
= mono_class_check_context_used (array_class
);
2625 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, vtable_reg
, obj
->dreg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
2627 if (cfg
->opt
& MONO_OPT_SHARED
) {
2628 int class_reg
= alloc_preg (cfg
);
2629 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, class_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
2630 if (cfg
->compile_aot
) {
2631 int klass_reg
= alloc_preg (cfg
);
2632 MONO_EMIT_NEW_CLASSCONST (cfg
, klass_reg
, array_class
);
2633 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, class_reg
, klass_reg
);
2635 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, class_reg
, array_class
);
2637 } else if (context_used
) {
2638 MonoInst
*vtable_ins
;
2640 vtable_ins
= emit_get_rgctx_klass (cfg
, context_used
, array_class
, MONO_RGCTX_INFO_VTABLE
);
2641 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, vtable_reg
, vtable_ins
->dreg
);
2643 if (cfg
->compile_aot
) {
2644 int vt_reg
= alloc_preg (cfg
);
2645 MONO_EMIT_NEW_VTABLECONST (cfg
, vt_reg
, mono_class_vtable (cfg
->domain
, array_class
));
2646 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, vtable_reg
, vt_reg
);
2648 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, vtable_reg
, mono_class_vtable (cfg
->domain
, array_class
));
2652 MONO_EMIT_NEW_COND_EXC (cfg
, NE_UN
, "ArrayTypeMismatchException");
2656 save_cast_details (MonoCompile
*cfg
, MonoClass
*klass
, int obj_reg
)
2658 if (mini_get_debug_options ()->better_cast_details
) {
2659 int to_klass_reg
= alloc_preg (cfg
);
2660 int vtable_reg
= alloc_preg (cfg
);
2661 int klass_reg
= alloc_preg (cfg
);
2662 MonoInst
*tls_get
= mono_get_jit_tls_intrinsic (cfg
);
2665 fprintf (stderr
, "error: --debug=casts not supported on this platform.\n.");
2669 MONO_ADD_INS (cfg
->cbb
, tls_get
);
2670 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, vtable_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
2671 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
2673 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, tls_get
->dreg
, G_STRUCT_OFFSET (MonoJitTlsData
, class_cast_from
), klass_reg
);
2674 MONO_EMIT_NEW_PCONST (cfg
, to_klass_reg
, klass
);
2675 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, tls_get
->dreg
, G_STRUCT_OFFSET (MonoJitTlsData
, class_cast_to
), to_klass_reg
);
2680 reset_cast_details (MonoCompile
*cfg
)
2682 /* Reset the variables holding the cast details */
2683 if (mini_get_debug_options ()->better_cast_details
) {
2684 MonoInst
*tls_get
= mono_get_jit_tls_intrinsic (cfg
);
2686 MONO_ADD_INS (cfg
->cbb
, tls_get
);
2687 /* It is enough to reset the from field */
2688 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STORE_MEMBASE_IMM
, tls_get
->dreg
, G_STRUCT_OFFSET (MonoJitTlsData
, class_cast_from
), 0);
2693 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
2694 * generic code is generated.
2697 handle_unbox_nullable (MonoCompile
* cfg
, MonoInst
* val
, MonoClass
* klass
, int context_used
)
2699 MonoMethod
* method
= mono_class_get_method_from_name (klass
, "Unbox", 1);
2702 MonoInst
*rgctx
, *addr
;
2704 /* FIXME: What if the class is shared? We might not
2705 have to get the address of the method from the
2707 addr
= emit_get_rgctx_method (cfg
, context_used
, method
,
2708 MONO_RGCTX_INFO_GENERIC_METHOD_CODE
);
2710 rgctx
= emit_get_rgctx (cfg
, method
, context_used
);
2712 return mono_emit_rgctx_calli (cfg
, mono_method_signature (method
), &val
, addr
, rgctx
);
2714 return mono_emit_method_call (cfg
, method
, &val
, NULL
);
2719 handle_unbox (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
**sp
, int context_used
)
2723 int vtable_reg
= alloc_dreg (cfg
,STACK_PTR
);
2724 int klass_reg
= alloc_dreg (cfg
,STACK_PTR
);
2725 int eclass_reg
= alloc_dreg (cfg
,STACK_PTR
);
2726 int rank_reg
= alloc_dreg (cfg
,STACK_I4
);
2728 obj_reg
= sp
[0]->dreg
;
2729 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, vtable_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
2730 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU1_MEMBASE
, rank_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, rank
));
2732 /* FIXME: generics */
2733 g_assert (klass
->rank
== 0);
2736 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, rank_reg
, 0);
2737 MONO_EMIT_NEW_COND_EXC (cfg
, NE_UN
, "InvalidCastException");
2739 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
2740 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, eclass_reg
, klass_reg
, G_STRUCT_OFFSET (MonoClass
, element_class
));
2743 MonoInst
*element_class
;
2745 /* This assertion is from the unboxcast insn */
2746 g_assert (klass
->rank
== 0);
2748 element_class
= emit_get_rgctx_klass (cfg
, context_used
,
2749 klass
->element_class
, MONO_RGCTX_INFO_KLASS
);
2751 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, eclass_reg
, element_class
->dreg
);
2752 MONO_EMIT_NEW_COND_EXC (cfg
, NE_UN
, "InvalidCastException");
2754 save_cast_details (cfg
, klass
->element_class
, obj_reg
);
2755 mini_emit_class_check (cfg
, eclass_reg
, klass
->element_class
);
2756 reset_cast_details (cfg
);
2759 NEW_BIALU_IMM (cfg
, add
, OP_ADD_IMM
, alloc_dreg (cfg
, STACK_PTR
), obj_reg
, sizeof (MonoObject
));
2760 MONO_ADD_INS (cfg
->cbb
, add
);
2761 add
->type
= STACK_MP
;
2768 handle_alloc (MonoCompile
*cfg
, MonoClass
*klass
, gboolean for_box
)
2770 MonoInst
*iargs
[2];
2773 if (cfg
->opt
& MONO_OPT_SHARED
) {
2774 EMIT_NEW_DOMAINCONST (cfg
, iargs
[0]);
2775 EMIT_NEW_CLASSCONST (cfg
, iargs
[1], klass
);
2777 alloc_ftn
= mono_object_new
;
2778 } else if (cfg
->compile_aot
&& cfg
->cbb
->out_of_line
&& klass
->type_token
&& klass
->image
== mono_defaults
.corlib
&& !klass
->generic_class
) {
2779 /* This happens often in argument checking code, eg. throw new FooException... */
2780 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
2781 EMIT_NEW_ICONST (cfg
, iargs
[0], mono_metadata_token_index (klass
->type_token
));
2782 return mono_emit_jit_icall (cfg
, mono_helper_newobj_mscorlib
, iargs
);
2784 MonoVTable
*vtable
= mono_class_vtable (cfg
->domain
, klass
);
2785 MonoMethod
*managed_alloc
= mono_gc_get_managed_allocator (vtable
, for_box
);
2788 if (managed_alloc
) {
2789 EMIT_NEW_VTABLECONST (cfg
, iargs
[0], vtable
);
2790 return mono_emit_method_call (cfg
, managed_alloc
, iargs
, NULL
);
2792 alloc_ftn
= mono_class_get_allocation_ftn (vtable
, for_box
, &pass_lw
);
2794 guint32 lw
= vtable
->klass
->instance_size
;
2795 lw
= ((lw
+ (sizeof (gpointer
) - 1)) & ~(sizeof (gpointer
) - 1)) / sizeof (gpointer
);
2796 EMIT_NEW_ICONST (cfg
, iargs
[0], lw
);
2797 EMIT_NEW_VTABLECONST (cfg
, iargs
[1], vtable
);
2800 EMIT_NEW_VTABLECONST (cfg
, iargs
[0], vtable
);
2804 return mono_emit_jit_icall (cfg
, alloc_ftn
, iargs
);
2808 handle_alloc_from_inst (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
*data_inst
,
2811 MonoInst
*iargs
[2];
2812 MonoMethod
*managed_alloc
= NULL
;
2816 FIXME: we cannot get managed_alloc here because we can't get
2817 the class's vtable (because it's not a closed class)
2819 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2820 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
2823 if (cfg
->opt
& MONO_OPT_SHARED
) {
2824 EMIT_NEW_DOMAINCONST (cfg
, iargs
[0]);
2825 iargs
[1] = data_inst
;
2826 alloc_ftn
= mono_object_new
;
2828 if (managed_alloc
) {
2829 iargs
[0] = data_inst
;
2830 return mono_emit_method_call (cfg
, managed_alloc
, iargs
, NULL
);
2833 iargs
[0] = data_inst
;
2834 alloc_ftn
= mono_object_new_specific
;
2837 return mono_emit_jit_icall (cfg
, alloc_ftn
, iargs
);
2841 handle_box (MonoCompile
*cfg
, MonoInst
*val
, MonoClass
*klass
)
2843 MonoInst
*alloc
, *ins
;
2845 if (mono_class_is_nullable (klass
)) {
2846 MonoMethod
* method
= mono_class_get_method_from_name (klass
, "Box", 1);
2847 return mono_emit_method_call (cfg
, method
, &val
, NULL
);
2850 alloc
= handle_alloc (cfg
, klass
, TRUE
);
2852 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, alloc
->dreg
, sizeof (MonoObject
), val
->dreg
);
2858 handle_box_from_inst (MonoCompile
*cfg
, MonoInst
*val
, MonoClass
*klass
, int context_used
, MonoInst
*data_inst
)
2860 MonoInst
*alloc
, *ins
;
2862 if (mono_class_is_nullable (klass
)) {
2863 MonoMethod
* method
= mono_class_get_method_from_name (klass
, "Box", 1);
2864 /* FIXME: What if the class is shared? We might not
2865 have to get the method address from the RGCTX. */
2866 MonoInst
*addr
= emit_get_rgctx_method (cfg
, context_used
, method
,
2867 MONO_RGCTX_INFO_GENERIC_METHOD_CODE
);
2868 MonoInst
*rgctx
= emit_get_rgctx (cfg
, cfg
->current_method
, context_used
);
2870 return mono_emit_rgctx_calli (cfg
, mono_method_signature (method
), &val
, addr
, rgctx
);
2872 alloc
= handle_alloc_from_inst (cfg
, klass
, data_inst
, TRUE
);
2874 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, alloc
->dreg
, sizeof (MonoObject
), val
->dreg
);
2881 handle_castclass (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
*src
)
2883 MonoBasicBlock
*is_null_bb
;
2884 int obj_reg
= src
->dreg
;
2885 int vtable_reg
= alloc_preg (cfg
);
2887 NEW_BBLOCK (cfg
, is_null_bb
);
2889 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, obj_reg
, 0);
2890 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBEQ
, is_null_bb
);
2892 save_cast_details (cfg
, klass
, obj_reg
);
2894 if (klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
) {
2895 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, vtable_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
2896 mini_emit_iface_cast (cfg
, vtable_reg
, klass
, NULL
, NULL
);
2898 int klass_reg
= alloc_preg (cfg
);
2900 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, vtable_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
2902 if (!klass
->rank
&& !cfg
->compile_aot
&& !(cfg
->opt
& MONO_OPT_SHARED
) && (klass
->flags
& TYPE_ATTRIBUTE_SEALED
)) {
2903 /* the remoting code is broken, access the class for now */
2905 MonoVTable
*vt
= mono_class_vtable (cfg
->domain
, klass
);
2906 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, vtable_reg
, vt
);
2908 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
2909 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, klass_reg
, klass
);
2911 MONO_EMIT_NEW_COND_EXC (cfg
, NE_UN
, "InvalidCastException");
2913 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
2914 mini_emit_castclass (cfg
, obj_reg
, klass_reg
, klass
, is_null_bb
);
2918 MONO_START_BB (cfg
, is_null_bb
);
2920 reset_cast_details (cfg
);
2926 handle_isinst (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
*src
)
2929 MonoBasicBlock
*is_null_bb
, *false_bb
, *end_bb
;
2930 int obj_reg
= src
->dreg
;
2931 int vtable_reg
= alloc_preg (cfg
);
2932 int res_reg
= alloc_preg (cfg
);
2934 NEW_BBLOCK (cfg
, is_null_bb
);
2935 NEW_BBLOCK (cfg
, false_bb
);
2936 NEW_BBLOCK (cfg
, end_bb
);
2938 /* Do the assignment at the beginning, so the other assignment can be if converted */
2939 EMIT_NEW_UNALU (cfg
, ins
, OP_MOVE
, res_reg
, obj_reg
);
2940 ins
->type
= STACK_OBJ
;
2943 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, obj_reg
, 0);
2944 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_IBEQ
, is_null_bb
);
2946 if (klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
) {
2947 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, vtable_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
2948 /* the is_null_bb target simply copies the input register to the output */
2949 mini_emit_iface_cast (cfg
, vtable_reg
, klass
, false_bb
, is_null_bb
);
2951 int klass_reg
= alloc_preg (cfg
);
2953 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, vtable_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
2956 int rank_reg
= alloc_preg (cfg
);
2957 int eclass_reg
= alloc_preg (cfg
);
2959 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU1_MEMBASE
, rank_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, rank
));
2960 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, rank_reg
, klass
->rank
);
2961 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBNE_UN
, false_bb
);
2962 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
2963 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, eclass_reg
, klass_reg
, G_STRUCT_OFFSET (MonoClass
, cast_class
));
2964 if (klass
->cast_class
== mono_defaults
.object_class
) {
2965 int parent_reg
= alloc_preg (cfg
);
2966 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, parent_reg
, eclass_reg
, G_STRUCT_OFFSET (MonoClass
, parent
));
2967 mini_emit_class_check_branch (cfg
, parent_reg
, mono_defaults
.enum_class
->parent
, OP_PBNE_UN
, is_null_bb
);
2968 mini_emit_class_check_branch (cfg
, eclass_reg
, mono_defaults
.enum_class
, OP_PBEQ
, is_null_bb
);
2969 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, false_bb
);
2970 } else if (klass
->cast_class
== mono_defaults
.enum_class
->parent
) {
2971 mini_emit_class_check_branch (cfg
, eclass_reg
, mono_defaults
.enum_class
->parent
, OP_PBEQ
, is_null_bb
);
2972 mini_emit_class_check_branch (cfg
, eclass_reg
, mono_defaults
.enum_class
, OP_PBEQ
, is_null_bb
);
2973 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, false_bb
);
2974 } else if (klass
->cast_class
== mono_defaults
.enum_class
) {
2975 mini_emit_class_check_branch (cfg
, eclass_reg
, mono_defaults
.enum_class
, OP_PBEQ
, is_null_bb
);
2976 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, false_bb
);
2977 } else if (klass
->cast_class
->flags
& TYPE_ATTRIBUTE_INTERFACE
) {
2978 mini_emit_iface_class_cast (cfg
, eclass_reg
, klass
->cast_class
, false_bb
, is_null_bb
);
2980 if ((klass
->rank
== 1) && (klass
->byval_arg
.type
== MONO_TYPE_SZARRAY
)) {
2981 /* Check that the object is a vector too */
2982 int bounds_reg
= alloc_preg (cfg
);
2983 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, bounds_reg
, obj_reg
, G_STRUCT_OFFSET (MonoArray
, bounds
));
2984 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, bounds_reg
, 0);
2985 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBNE_UN
, false_bb
);
2988 /* the is_null_bb target simply copies the input register to the output */
2989 mini_emit_isninst_cast (cfg
, eclass_reg
, klass
->cast_class
, false_bb
, is_null_bb
);
2991 } else if (mono_class_is_nullable (klass
)) {
2992 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
2993 /* the is_null_bb target simply copies the input register to the output */
2994 mini_emit_isninst_cast (cfg
, klass_reg
, klass
->cast_class
, false_bb
, is_null_bb
);
2996 if (!cfg
->compile_aot
&& !(cfg
->opt
& MONO_OPT_SHARED
) && (klass
->flags
& TYPE_ATTRIBUTE_SEALED
)) {
2997 /* the remoting code is broken, access the class for now */
2999 MonoVTable
*vt
= mono_class_vtable (cfg
->domain
, klass
);
3000 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, vtable_reg
, vt
);
3002 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3003 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, klass_reg
, klass
);
3005 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBNE_UN
, false_bb
);
3006 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, is_null_bb
);
3008 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3009 /* the is_null_bb target simply copies the input register to the output */
3010 mini_emit_isninst_cast (cfg
, klass_reg
, klass
, false_bb
, is_null_bb
);
3015 MONO_START_BB (cfg
, false_bb
);
3017 MONO_EMIT_NEW_PCONST (cfg
, res_reg
, 0);
3018 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, end_bb
);
3020 MONO_START_BB (cfg
, is_null_bb
);
3022 MONO_START_BB (cfg
, end_bb
);
3028 handle_cisinst (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
*src
)
3030 /* This opcode takes as input an object reference and a class, and returns:
3031 0) if the object is an instance of the class,
3032 1) if the object is not instance of the class,
3033 2) if the object is a proxy whose type cannot be determined */
3036 MonoBasicBlock
*true_bb
, *false_bb
, *false2_bb
, *end_bb
, *no_proxy_bb
, *interface_fail_bb
;
3037 int obj_reg
= src
->dreg
;
3038 int dreg
= alloc_ireg (cfg
);
3040 int klass_reg
= alloc_preg (cfg
);
3042 NEW_BBLOCK (cfg
, true_bb
);
3043 NEW_BBLOCK (cfg
, false_bb
);
3044 NEW_BBLOCK (cfg
, false2_bb
);
3045 NEW_BBLOCK (cfg
, end_bb
);
3046 NEW_BBLOCK (cfg
, no_proxy_bb
);
3048 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, obj_reg
, 0);
3049 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBEQ
, false_bb
);
3051 if (klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
) {
3052 NEW_BBLOCK (cfg
, interface_fail_bb
);
3054 tmp_reg
= alloc_preg (cfg
);
3055 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
3056 mini_emit_iface_cast (cfg
, tmp_reg
, klass
, interface_fail_bb
, true_bb
);
3057 MONO_START_BB (cfg
, interface_fail_bb
);
3058 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, tmp_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3060 mini_emit_class_check_branch (cfg
, klass_reg
, mono_defaults
.transparent_proxy_class
, OP_PBNE_UN
, false_bb
);
3062 tmp_reg
= alloc_preg (cfg
);
3063 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoTransparentProxy
, custom_type_info
));
3064 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, tmp_reg
, 0);
3065 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBNE_UN
, false2_bb
);
3067 tmp_reg
= alloc_preg (cfg
);
3068 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
3069 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, tmp_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3071 mini_emit_class_check_branch (cfg
, klass_reg
, mono_defaults
.transparent_proxy_class
, OP_PBNE_UN
, no_proxy_bb
);
3072 tmp_reg
= alloc_preg (cfg
);
3073 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoTransparentProxy
, remote_class
));
3074 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, tmp_reg
, G_STRUCT_OFFSET (MonoRemoteClass
, proxy_class
));
3076 tmp_reg
= alloc_preg (cfg
);
3077 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoTransparentProxy
, custom_type_info
));
3078 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, tmp_reg
, 0);
3079 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBEQ
, no_proxy_bb
);
3081 mini_emit_isninst_cast (cfg
, klass_reg
, klass
, false2_bb
, true_bb
);
3082 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, false2_bb
);
3084 MONO_START_BB (cfg
, no_proxy_bb
);
3086 mini_emit_isninst_cast (cfg
, klass_reg
, klass
, false_bb
, true_bb
);
3089 MONO_START_BB (cfg
, false_bb
);
3091 MONO_EMIT_NEW_ICONST (cfg
, dreg
, 1);
3092 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, end_bb
);
3094 MONO_START_BB (cfg
, false2_bb
);
3096 MONO_EMIT_NEW_ICONST (cfg
, dreg
, 2);
3097 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, end_bb
);
3099 MONO_START_BB (cfg
, true_bb
);
3101 MONO_EMIT_NEW_ICONST (cfg
, dreg
, 0);
3103 MONO_START_BB (cfg
, end_bb
);
3106 MONO_INST_NEW (cfg
, ins
, OP_ICONST
);
3108 ins
->type
= STACK_I4
;
3114 handle_ccastclass (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
*src
)
3116 /* This opcode takes as input an object reference and a class, and returns:
3117 0) if the object is an instance of the class,
3118 1) if the object is a proxy whose type cannot be determined
3119 an InvalidCastException exception is thrown otherwhise*/
3122 MonoBasicBlock
*end_bb
, *ok_result_bb
, *no_proxy_bb
, *interface_fail_bb
, *fail_1_bb
;
3123 int obj_reg
= src
->dreg
;
3124 int dreg
= alloc_ireg (cfg
);
3125 int tmp_reg
= alloc_preg (cfg
);
3126 int klass_reg
= alloc_preg (cfg
);
3128 NEW_BBLOCK (cfg
, end_bb
);
3129 NEW_BBLOCK (cfg
, ok_result_bb
);
3131 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, obj_reg
, 0);
3132 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBEQ
, ok_result_bb
);
3134 if (klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
) {
3135 NEW_BBLOCK (cfg
, interface_fail_bb
);
3137 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
3138 mini_emit_iface_cast (cfg
, tmp_reg
, klass
, interface_fail_bb
, ok_result_bb
);
3139 MONO_START_BB (cfg
, interface_fail_bb
);
3140 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, tmp_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3142 mini_emit_class_check (cfg
, klass_reg
, mono_defaults
.transparent_proxy_class
);
3144 tmp_reg
= alloc_preg (cfg
);
3145 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoTransparentProxy
, custom_type_info
));
3146 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, tmp_reg
, 0);
3147 MONO_EMIT_NEW_COND_EXC (cfg
, EQ
, "InvalidCastException");
3149 MONO_EMIT_NEW_ICONST (cfg
, dreg
, 1);
3150 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, end_bb
);
3153 NEW_BBLOCK (cfg
, no_proxy_bb
);
3155 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
3156 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, tmp_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3157 mini_emit_class_check_branch (cfg
, klass_reg
, mono_defaults
.transparent_proxy_class
, OP_PBNE_UN
, no_proxy_bb
);
3159 tmp_reg
= alloc_preg (cfg
);
3160 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoTransparentProxy
, remote_class
));
3161 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, tmp_reg
, G_STRUCT_OFFSET (MonoRemoteClass
, proxy_class
));
3163 tmp_reg
= alloc_preg (cfg
);
3164 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoTransparentProxy
, custom_type_info
));
3165 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, tmp_reg
, 0);
3166 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBEQ
, no_proxy_bb
);
3168 NEW_BBLOCK (cfg
, fail_1_bb
);
3170 mini_emit_isninst_cast (cfg
, klass_reg
, klass
, fail_1_bb
, ok_result_bb
);
3172 MONO_START_BB (cfg
, fail_1_bb
);
3174 MONO_EMIT_NEW_ICONST (cfg
, dreg
, 1);
3175 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, end_bb
);
3177 MONO_START_BB (cfg
, no_proxy_bb
);
3179 mini_emit_castclass (cfg
, obj_reg
, klass_reg
, klass
, ok_result_bb
);
3182 MONO_START_BB (cfg
, ok_result_bb
);
3184 MONO_EMIT_NEW_ICONST (cfg
, dreg
, 0);
3186 MONO_START_BB (cfg
, end_bb
);
3189 MONO_INST_NEW (cfg
, ins
, OP_ICONST
);
3191 ins
->type
= STACK_I4
;
3196 static G_GNUC_UNUSED MonoInst
*
3197 handle_delegate_ctor (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
*target
, MonoMethod
*method
)
3199 gpointer
*trampoline
;
3200 MonoInst
*obj
, *method_ins
, *tramp_ins
;
3204 obj
= handle_alloc (cfg
, klass
, FALSE
);
3206 /* Inline the contents of mono_delegate_ctor */
3208 /* Set target field */
3209 /* Optimize away setting of NULL target */
3210 if (!(target
->opcode
== OP_PCONST
&& target
->inst_p0
== 0))
3211 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, obj
->dreg
, G_STRUCT_OFFSET (MonoDelegate
, target
), target
->dreg
);
3213 /* Set method field */
3214 EMIT_NEW_METHODCONST (cfg
, method_ins
, method
);
3215 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, obj
->dreg
, G_STRUCT_OFFSET (MonoDelegate
, method
), method_ins
->dreg
);
3218 * To avoid looking up the compiled code belonging to the target method
3219 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3220 * store it, and we fill it after the method has been compiled.
3222 if (!cfg
->compile_aot
&& !method
->dynamic
) {
3223 MonoInst
*code_slot_ins
;
3225 domain
= mono_domain_get ();
3226 mono_domain_lock (domain
);
3227 if (!domain_jit_info (domain
)->method_code_hash
)
3228 domain_jit_info (domain
)->method_code_hash
= g_hash_table_new (NULL
, NULL
);
3229 code_slot
= g_hash_table_lookup (domain_jit_info (domain
)->method_code_hash
, method
);
3231 code_slot
= mono_domain_alloc0 (domain
, sizeof (gpointer
));
3232 g_hash_table_insert (domain_jit_info (domain
)->method_code_hash
, method
, code_slot
);
3234 mono_domain_unlock (domain
);
3236 EMIT_NEW_PCONST (cfg
, code_slot_ins
, code_slot
);
3237 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, obj
->dreg
, G_STRUCT_OFFSET (MonoDelegate
, method_code
), code_slot_ins
->dreg
);
3240 /* Set invoke_impl field */
3241 if (cfg
->compile_aot
) {
3242 EMIT_NEW_AOTCONST (cfg
, tramp_ins
, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE
, klass
);
3244 trampoline
= mono_create_delegate_trampoline (klass
);
3245 EMIT_NEW_PCONST (cfg
, tramp_ins
, trampoline
);
3247 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, obj
->dreg
, G_STRUCT_OFFSET (MonoDelegate
, invoke_impl
), tramp_ins
->dreg
);
3249 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
3255 handle_array_new (MonoCompile
*cfg
, int rank
, MonoInst
**sp
, unsigned char *ip
)
3257 MonoJitICallInfo
*info
;
3259 /* Need to register the icall so it gets an icall wrapper */
3260 info
= mono_get_array_new_va_icall (rank
);
3262 cfg
->flags
|= MONO_CFG_HAS_VARARGS
;
3264 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3265 return mono_emit_native_call (cfg
, mono_icall_get_wrapper (info
), info
->sig
, sp
);
3269 mono_emit_load_got_addr (MonoCompile
*cfg
)
3271 MonoInst
*getaddr
, *dummy_use
;
3273 if (!cfg
->got_var
|| cfg
->got_var_allocated
)
3276 MONO_INST_NEW (cfg
, getaddr
, OP_LOAD_GOTADDR
);
3277 getaddr
->dreg
= cfg
->got_var
->dreg
;
3279 /* Add it to the start of the first bblock */
3280 if (cfg
->bb_entry
->code
) {
3281 getaddr
->next
= cfg
->bb_entry
->code
;
3282 cfg
->bb_entry
->code
= getaddr
;
3285 MONO_ADD_INS (cfg
->bb_entry
, getaddr
);
3287 cfg
->got_var_allocated
= TRUE
;
3290 * Add a dummy use to keep the got_var alive, since real uses might
3291 * only be generated by the back ends.
3292 * Add it to end_bblock, so the variable's lifetime covers the whole
3294 * It would be better to make the usage of the got var explicit in all
3295 * cases when the backend needs it (i.e. calls, throw etc.), so this
3296 * wouldn't be needed.
3298 NEW_DUMMY_USE (cfg
, dummy_use
, cfg
->got_var
);
3299 MONO_ADD_INS (cfg
->bb_exit
, dummy_use
);
3302 static int inline_limit
;
3303 static gboolean inline_limit_inited
;
3306 mono_method_check_inlining (MonoCompile
*cfg
, MonoMethod
*method
)
3308 MonoMethodHeader
*header
;
3310 #ifdef MONO_ARCH_SOFT_FLOAT
3311 MonoMethodSignature
*sig
= mono_method_signature (method
);
3315 if (cfg
->generic_sharing_context
)
3318 #ifdef MONO_ARCH_HAVE_LMF_OPS
3319 if (((method
->iflags
& METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL
) ||
3320 (method
->flags
& METHOD_ATTRIBUTE_PINVOKE_IMPL
)) &&
3321 !MONO_TYPE_ISSTRUCT (signature
->ret
) && !mini_class_is_system_array (method
->klass
))
3325 if (method
->is_inflated
)
3326 /* Avoid inflating the header */
3327 header
= mono_method_get_header (((MonoMethodInflated
*)method
)->declaring
);
3329 header
= mono_method_get_header (method
);
3331 if ((method
->iflags
& METHOD_IMPL_ATTRIBUTE_RUNTIME
) ||
3332 (method
->iflags
& METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL
) ||
3333 (method
->iflags
& METHOD_IMPL_ATTRIBUTE_NOINLINING
) ||
3334 (method
->iflags
& METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED
) ||
3335 (method
->flags
& METHOD_ATTRIBUTE_PINVOKE_IMPL
) ||
3336 (method
->klass
->marshalbyref
) ||
3337 !header
|| header
->num_clauses
)
3340 /* also consider num_locals? */
3341 /* Do the size check early to avoid creating vtables */
3342 if (!inline_limit_inited
) {
3343 if (getenv ("MONO_INLINELIMIT"))
3344 inline_limit
= atoi (getenv ("MONO_INLINELIMIT"));
3346 inline_limit
= INLINE_LENGTH_LIMIT
;
3347 inline_limit_inited
= TRUE
;
3349 if (header
->code_size
>= inline_limit
)
3353 * if we can initialize the class of the method right away, we do,
3354 * otherwise we don't allow inlining if the class needs initialization,
3355 * since it would mean inserting a call to mono_runtime_class_init()
3356 * inside the inlined code
3358 if (!(cfg
->opt
& MONO_OPT_SHARED
)) {
3359 if (method
->klass
->flags
& TYPE_ATTRIBUTE_BEFORE_FIELD_INIT
) {
3360 if (cfg
->run_cctors
&& method
->klass
->has_cctor
) {
3361 if (!method
->klass
->runtime_info
)
3362 /* No vtable created yet */
3364 vtable
= mono_class_vtable (cfg
->domain
, method
->klass
);
3367 /* This makes so that inline cannot trigger */
3368 /* .cctors: too many apps depend on them */
3369 /* running with a specific order... */
3370 if (! vtable
->initialized
)
3372 mono_runtime_class_init (vtable
);
3374 } else if (mono_class_needs_cctor_run (method
->klass
, NULL
)) {
3375 if (!method
->klass
->runtime_info
)
3376 /* No vtable created yet */
3378 vtable
= mono_class_vtable (cfg
->domain
, method
->klass
);
3381 if (!vtable
->initialized
)
3386 * If we're compiling for shared code
3387 * the cctor will need to be run at aot method load time, for example,
3388 * or at the end of the compilation of the inlining method.
3390 if (mono_class_needs_cctor_run (method
->klass
, NULL
) && !((method
->klass
->flags
& TYPE_ATTRIBUTE_BEFORE_FIELD_INIT
)))
3395 * CAS - do not inline methods with declarative security
3396 * Note: this has to be before any possible return TRUE;
3398 if (mono_method_has_declsec (method
))
3401 #ifdef MONO_ARCH_SOFT_FLOAT
3403 if (sig
->ret
&& sig
->ret
->type
== MONO_TYPE_R4
)
3405 for (i
= 0; i
< sig
->param_count
; ++i
)
3406 if (!sig
->params
[i
]->byref
&& sig
->params
[i
]->type
== MONO_TYPE_R4
)
3414 mini_field_access_needs_cctor_run (MonoCompile
*cfg
, MonoMethod
*method
, MonoVTable
*vtable
)
3416 if (vtable
->initialized
&& !cfg
->compile_aot
)
3419 if (vtable
->klass
->flags
& TYPE_ATTRIBUTE_BEFORE_FIELD_INIT
)
3422 if (!mono_class_needs_cctor_run (vtable
->klass
, method
))
3425 if (! (method
->flags
& METHOD_ATTRIBUTE_STATIC
) && (vtable
->klass
== method
->klass
))
3426 /* The initialization is already done before the method is called */
3433 mini_emit_ldelema_1_ins (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
*arr
, MonoInst
*index
)
3437 int mult_reg
, add_reg
, array_reg
, index_reg
, index2_reg
;
3439 mono_class_init (klass
);
3440 size
= mono_class_array_element_size (klass
);
3442 mult_reg
= alloc_preg (cfg
);
3443 array_reg
= arr
->dreg
;
3444 index_reg
= index
->dreg
;
3446 #if SIZEOF_REGISTER == 8
3447 /* The array reg is 64 bits but the index reg is only 32 */
3448 index2_reg
= alloc_preg (cfg
);
3449 MONO_EMIT_NEW_UNALU (cfg
, OP_SEXT_I4
, index2_reg
, index_reg
);
3451 if (index
->type
== STACK_I8
) {
3452 index2_reg
= alloc_preg (cfg
);
3453 MONO_EMIT_NEW_UNALU (cfg
, OP_LCONV_TO_I4
, index2_reg
, index_reg
);
3455 index2_reg
= index_reg
;
3459 MONO_EMIT_BOUNDS_CHECK (cfg
, array_reg
, MonoArray
, max_length
, index2_reg
);
3461 #if defined(__i386__) || defined(__x86_64__)
3462 if (size
== 1 || size
== 2 || size
== 4 || size
== 8) {
3463 static const int fast_log2
[] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
3465 EMIT_NEW_X86_LEA (cfg
, ins
, array_reg
, index2_reg
, fast_log2
[size
], G_STRUCT_OFFSET (MonoArray
, vector
));
3466 ins
->type
= STACK_PTR
;
3472 add_reg
= alloc_preg (cfg
);
3474 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_MUL_IMM
, mult_reg
, index2_reg
, size
);
3475 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, add_reg
, array_reg
, mult_reg
);
3476 NEW_BIALU_IMM (cfg
, ins
, OP_PADD_IMM
, add_reg
, add_reg
, G_STRUCT_OFFSET (MonoArray
, vector
));
3477 ins
->type
= STACK_PTR
;
3478 MONO_ADD_INS (cfg
->cbb
, ins
);
3483 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3485 mini_emit_ldelema_2_ins (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
*arr
, MonoInst
*index_ins1
, MonoInst
*index_ins2
)
3487 int bounds_reg
= alloc_preg (cfg
);
3488 int add_reg
= alloc_preg (cfg
);
3489 int mult_reg
= alloc_preg (cfg
);
3490 int mult2_reg
= alloc_preg (cfg
);
3491 int low1_reg
= alloc_preg (cfg
);
3492 int low2_reg
= alloc_preg (cfg
);
3493 int high1_reg
= alloc_preg (cfg
);
3494 int high2_reg
= alloc_preg (cfg
);
3495 int realidx1_reg
= alloc_preg (cfg
);
3496 int realidx2_reg
= alloc_preg (cfg
);
3497 int sum_reg
= alloc_preg (cfg
);
3502 mono_class_init (klass
);
3503 size
= mono_class_array_element_size (klass
);
3505 index1
= index_ins1
->dreg
;
3506 index2
= index_ins2
->dreg
;
3508 /* range checking */
3509 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, bounds_reg
,
3510 arr
->dreg
, G_STRUCT_OFFSET (MonoArray
, bounds
));
3512 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI4_MEMBASE
, low1_reg
,
3513 bounds_reg
, G_STRUCT_OFFSET (MonoArrayBounds
, lower_bound
));
3514 MONO_EMIT_NEW_BIALU (cfg
, OP_PSUB
, realidx1_reg
, index1
, low1_reg
);
3515 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI4_MEMBASE
, high1_reg
,
3516 bounds_reg
, G_STRUCT_OFFSET (MonoArrayBounds
, length
));
3517 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, high1_reg
, realidx1_reg
);
3518 MONO_EMIT_NEW_COND_EXC (cfg
, LE_UN
, "IndexOutOfRangeException");
3520 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI4_MEMBASE
, low2_reg
,
3521 bounds_reg
, sizeof (MonoArrayBounds
) + G_STRUCT_OFFSET (MonoArrayBounds
, lower_bound
));
3522 MONO_EMIT_NEW_BIALU (cfg
, OP_PSUB
, realidx2_reg
, index2
, low2_reg
);
3523 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI4_MEMBASE
, high2_reg
,
3524 bounds_reg
, sizeof (MonoArrayBounds
) + G_STRUCT_OFFSET (MonoArrayBounds
, length
));
3525 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, high2_reg
, realidx2_reg
);
3526 MONO_EMIT_NEW_COND_EXC (cfg
, LE_UN
, "IndexOutOfRangeException");
3528 MONO_EMIT_NEW_BIALU (cfg
, OP_PMUL
, mult_reg
, high2_reg
, realidx1_reg
);
3529 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, sum_reg
, mult_reg
, realidx2_reg
);
3530 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_PMUL_IMM
, mult2_reg
, sum_reg
, size
);
3531 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, add_reg
, mult2_reg
, arr
->dreg
);
3532 NEW_BIALU_IMM (cfg
, ins
, OP_PADD_IMM
, add_reg
, add_reg
, G_STRUCT_OFFSET (MonoArray
, vector
));
3534 ins
->type
= STACK_MP
;
3536 MONO_ADD_INS (cfg
->cbb
, ins
);
3543 mini_emit_ldelema_ins (MonoCompile
*cfg
, MonoMethod
*cmethod
, MonoInst
**sp
, unsigned char *ip
, gboolean is_set
)
3547 MonoMethod
*addr_method
;
3550 rank
= mono_method_signature (cmethod
)->param_count
- (is_set
? 1: 0);
3553 return mini_emit_ldelema_1_ins (cfg
, cmethod
->klass
->element_class
, sp
[0], sp
[1]);
3555 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3556 /* emit_ldelema_2 depends on OP_LMUL */
3557 if (rank
== 2 && (cfg
->opt
& MONO_OPT_INTRINS
)) {
3558 return mini_emit_ldelema_2_ins (cfg
, cmethod
->klass
->element_class
, sp
[0], sp
[1], sp
[2]);
3562 element_size
= mono_class_array_element_size (cmethod
->klass
->element_class
);
3563 addr_method
= mono_marshal_get_array_address (rank
, element_size
);
3564 addr
= mono_emit_method_call (cfg
, addr_method
, sp
, NULL
);
3570 mini_emit_inst_for_method (MonoCompile
*cfg
, MonoMethod
*cmethod
, MonoMethodSignature
*fsig
, MonoInst
**args
)
3572 MonoInst
*ins
= NULL
;
3574 static MonoClass
*runtime_helpers_class
= NULL
;
3575 if (! runtime_helpers_class
)
3576 runtime_helpers_class
= mono_class_from_name (mono_defaults
.corlib
,
3577 "System.Runtime.CompilerServices", "RuntimeHelpers");
3579 if (cmethod
->klass
== mono_defaults
.string_class
) {
3580 if (strcmp (cmethod
->name
, "get_Chars") == 0) {
3581 int dreg
= alloc_ireg (cfg
);
3582 int index_reg
= alloc_preg (cfg
);
3583 int mult_reg
= alloc_preg (cfg
);
3584 int add_reg
= alloc_preg (cfg
);
3586 #if SIZEOF_REGISTER == 8
3587 /* The array reg is 64 bits but the index reg is only 32 */
3588 MONO_EMIT_NEW_UNALU (cfg
, OP_SEXT_I4
, index_reg
, args
[1]->dreg
);
3590 index_reg
= args
[1]->dreg
;
3592 MONO_EMIT_BOUNDS_CHECK (cfg
, args
[0]->dreg
, MonoString
, length
, index_reg
);
3594 #if defined(__i386__) || defined(__x86_64__)
3595 EMIT_NEW_X86_LEA (cfg
, ins
, args
[0]->dreg
, index_reg
, 1, G_STRUCT_OFFSET (MonoString
, chars
));
3596 add_reg
= ins
->dreg
;
3597 /* Avoid a warning */
3599 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOADU2_MEMBASE
, dreg
,
3602 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHL_IMM
, mult_reg
, index_reg
, 1);
3603 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, add_reg
, mult_reg
, args
[0]->dreg
);
3604 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOADU2_MEMBASE
, dreg
,
3605 add_reg
, G_STRUCT_OFFSET (MonoString
, chars
));
3607 type_from_op (ins
, NULL
, NULL
);
3609 } else if (strcmp (cmethod
->name
, "get_Length") == 0) {
3610 int dreg
= alloc_ireg (cfg
);
3611 /* Decompose later to allow more optimizations */
3612 EMIT_NEW_UNALU (cfg
, ins
, OP_STRLEN
, dreg
, args
[0]->dreg
);
3613 ins
->type
= STACK_I4
;
3614 cfg
->cbb
->has_array_access
= TRUE
;
3615 cfg
->flags
|= MONO_CFG_HAS_ARRAY_ACCESS
;
3618 } else if (strcmp (cmethod
->name
, "InternalSetChar") == 0) {
3619 int mult_reg
= alloc_preg (cfg
);
3620 int add_reg
= alloc_preg (cfg
);
3622 /* The corlib functions check for oob already. */
3623 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHL_IMM
, mult_reg
, args
[1]->dreg
, 1);
3624 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, add_reg
, mult_reg
, args
[0]->dreg
);
3625 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI2_MEMBASE_REG
, add_reg
, G_STRUCT_OFFSET (MonoString
, chars
), args
[2]->dreg
);
3628 } else if (cmethod
->klass
== mono_defaults
.object_class
) {
3630 if (strcmp (cmethod
->name
, "GetType") == 0) {
3631 int dreg
= alloc_preg (cfg
);
3632 int vt_reg
= alloc_preg (cfg
);
3633 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, vt_reg
, args
[0]->dreg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
3634 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOAD_MEMBASE
, dreg
, vt_reg
, G_STRUCT_OFFSET (MonoVTable
, type
));
3635 type_from_op (ins
, NULL
, NULL
);
3638 #if !defined(MONO_ARCH_EMULATE_MUL_DIV) && !defined(HAVE_MOVING_COLLECTOR)
3639 } else if (strcmp (cmethod
->name
, "InternalGetHashCode") == 0) {
3640 int dreg
= alloc_ireg (cfg
);
3641 int t1
= alloc_ireg (cfg
);
3643 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHL_IMM
, t1
, args
[0]->dreg
, 3);
3644 EMIT_NEW_BIALU_IMM (cfg
, ins
, OP_MUL_IMM
, dreg
, t1
, 2654435761u);
3645 ins
->type
= STACK_I4
;
3649 } else if (strcmp (cmethod
->name
, ".ctor") == 0) {
3650 MONO_INST_NEW (cfg
, ins
, OP_NOP
);
3651 MONO_ADD_INS (cfg
->cbb
, ins
);
3655 } else if (cmethod
->klass
== mono_defaults
.array_class
) {
3656 if (cmethod
->name
[0] != 'g')
3659 if (strcmp (cmethod
->name
, "get_Rank") == 0) {
3660 int dreg
= alloc_ireg (cfg
);
3661 int vtable_reg
= alloc_preg (cfg
);
3662 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOAD_MEMBASE
, vtable_reg
,
3663 args
[0]->dreg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
3664 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOADU1_MEMBASE
, dreg
,
3665 vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, rank
));
3666 type_from_op (ins
, NULL
, NULL
);
3669 } else if (strcmp (cmethod
->name
, "get_Length") == 0) {
3670 int dreg
= alloc_ireg (cfg
);
3672 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOADI4_MEMBASE
, dreg
,
3673 args
[0]->dreg
, G_STRUCT_OFFSET (MonoArray
, max_length
));
3674 type_from_op (ins
, NULL
, NULL
);
3679 } else if (cmethod
->klass
== runtime_helpers_class
) {
3681 if (strcmp (cmethod
->name
, "get_OffsetToStringData") == 0) {
3682 EMIT_NEW_ICONST (cfg
, ins
, G_STRUCT_OFFSET (MonoString
, chars
));
3686 } else if (cmethod
->klass
== mono_defaults
.thread_class
) {
3687 if (strcmp (cmethod
->name
, "get_CurrentThread") == 0 && (ins
= mono_arch_get_thread_intrinsic (cfg
))) {
3688 ins
->dreg
= alloc_preg (cfg
);
3689 ins
->type
= STACK_OBJ
;
3690 MONO_ADD_INS (cfg
->cbb
, ins
);
3692 } else if (strcmp (cmethod
->name
, "SpinWait_nop") == 0) {
3693 MONO_INST_NEW (cfg
, ins
, OP_RELAXED_NOP
);
3694 MONO_ADD_INS (cfg
->cbb
, ins
);
3696 } else if (strcmp (cmethod
->name
, "MemoryBarrier") == 0) {
3697 MONO_INST_NEW (cfg
, ins
, OP_MEMORY_BARRIER
);
3698 MONO_ADD_INS (cfg
->cbb
, ins
);
3701 } else if (cmethod
->klass
== mono_defaults
.monitor_class
) {
3702 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
3703 if (strcmp (cmethod
->name
, "Enter") == 0) {
3706 call
= (MonoCallInst
*)mono_emit_abs_call (cfg
, MONO_PATCH_INFO_MONITOR_ENTER
,
3707 NULL
, helper_sig_monitor_enter_exit_trampoline
, NULL
);
3708 mono_call_inst_add_outarg_reg (cfg
, call
, args
[0]->dreg
,
3709 MONO_ARCH_MONITOR_OBJECT_REG
, FALSE
);
3711 return (MonoInst
*)call
;
3712 } else if (strcmp (cmethod
->name
, "Exit") == 0) {
3715 call
= (MonoCallInst
*)mono_emit_abs_call (cfg
, MONO_PATCH_INFO_MONITOR_EXIT
,
3716 NULL
, helper_sig_monitor_enter_exit_trampoline
, NULL
);
3717 mono_call_inst_add_outarg_reg (cfg
, call
, args
[0]->dreg
,
3718 MONO_ARCH_MONITOR_OBJECT_REG
, FALSE
);
3720 return (MonoInst
*)call
;
3722 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
3723 MonoMethod
*fast_method
= NULL
;
3725 /* Avoid infinite recursion */
3726 if (cfg
->method
->wrapper_type
== MONO_WRAPPER_UNKNOWN
&&
3727 (strcmp (cfg
->method
->name
, "FastMonitorEnter") == 0 ||
3728 strcmp (cfg
->method
->name
, "FastMonitorExit") == 0))
3731 if (strcmp (cmethod
->name
, "Enter") == 0 ||
3732 strcmp (cmethod
->name
, "Exit") == 0)
3733 fast_method
= mono_monitor_get_fast_path (cmethod
);
3737 return (MonoInst
*)mono_emit_method_call (cfg
, fast_method
, args
, NULL
);
3739 } else if (mini_class_is_system_array (cmethod
->klass
) &&
3740 strcmp (cmethod
->name
, "GetGenericValueImpl") == 0) {
3741 MonoInst
*addr
, *store
, *load
;
3742 MonoClass
*eklass
= mono_class_from_mono_type (fsig
->params
[1]);
3744 addr
= mini_emit_ldelema_1_ins (cfg
, eklass
, args
[0], args
[1]);
3745 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, load
, &eklass
->byval_arg
, addr
->dreg
, 0);
3746 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, store
, &eklass
->byval_arg
, args
[2]->dreg
, 0, load
->dreg
);
3748 } else if (cmethod
->klass
->image
== mono_defaults
.corlib
&&
3749 (strcmp (cmethod
->klass
->name_space
, "System.Threading") == 0) &&
3750 (strcmp (cmethod
->klass
->name
, "Interlocked") == 0)) {
3753 #if SIZEOF_REGISTER == 8
3754 if (strcmp (cmethod
->name
, "Read") == 0 && (fsig
->params
[0]->type
== MONO_TYPE_I8
)) {
3755 /* 64 bit reads are already atomic */
3756 MONO_INST_NEW (cfg
, ins
, OP_LOADI8_MEMBASE
);
3757 ins
->dreg
= mono_alloc_preg (cfg
);
3758 ins
->inst_basereg
= args
[0]->dreg
;
3759 ins
->inst_offset
= 0;
3760 MONO_ADD_INS (cfg
->cbb
, ins
);
3764 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
3765 if (strcmp (cmethod
->name
, "Increment") == 0) {
3766 MonoInst
*ins_iconst
;
3769 if (fsig
->params
[0]->type
== MONO_TYPE_I4
)
3770 opcode
= OP_ATOMIC_ADD_NEW_I4
;
3771 #if SIZEOF_REGISTER == 8
3772 else if (fsig
->params
[0]->type
== MONO_TYPE_I8
)
3773 opcode
= OP_ATOMIC_ADD_NEW_I8
;
3776 MONO_INST_NEW (cfg
, ins_iconst
, OP_ICONST
);
3777 ins_iconst
->inst_c0
= 1;
3778 ins_iconst
->dreg
= mono_alloc_ireg (cfg
);
3779 MONO_ADD_INS (cfg
->cbb
, ins_iconst
);
3781 MONO_INST_NEW (cfg
, ins
, opcode
);
3782 ins
->dreg
= mono_alloc_ireg (cfg
);
3783 ins
->inst_basereg
= args
[0]->dreg
;
3784 ins
->inst_offset
= 0;
3785 ins
->sreg2
= ins_iconst
->dreg
;
3786 ins
->type
= (opcode
== OP_ATOMIC_ADD_NEW_I4
) ? STACK_I4
: STACK_I8
;
3787 MONO_ADD_INS (cfg
->cbb
, ins
);
3789 } else if (strcmp (cmethod
->name
, "Decrement") == 0) {
3790 MonoInst
*ins_iconst
;
3793 if (fsig
->params
[0]->type
== MONO_TYPE_I4
)
3794 opcode
= OP_ATOMIC_ADD_NEW_I4
;
3795 #if SIZEOF_REGISTER == 8
3796 else if (fsig
->params
[0]->type
== MONO_TYPE_I8
)
3797 opcode
= OP_ATOMIC_ADD_NEW_I8
;
3800 MONO_INST_NEW (cfg
, ins_iconst
, OP_ICONST
);
3801 ins_iconst
->inst_c0
= -1;
3802 ins_iconst
->dreg
= mono_alloc_ireg (cfg
);
3803 MONO_ADD_INS (cfg
->cbb
, ins_iconst
);
3805 MONO_INST_NEW (cfg
, ins
, opcode
);
3806 ins
->dreg
= mono_alloc_ireg (cfg
);
3807 ins
->inst_basereg
= args
[0]->dreg
;
3808 ins
->inst_offset
= 0;
3809 ins
->sreg2
= ins_iconst
->dreg
;
3810 ins
->type
= (opcode
== OP_ATOMIC_ADD_NEW_I4
) ? STACK_I4
: STACK_I8
;
3811 MONO_ADD_INS (cfg
->cbb
, ins
);
3813 } else if (strcmp (cmethod
->name
, "Add") == 0) {
3816 if (fsig
->params
[0]->type
== MONO_TYPE_I4
)
3817 opcode
= OP_ATOMIC_ADD_NEW_I4
;
3818 #if SIZEOF_REGISTER == 8
3819 else if (fsig
->params
[0]->type
== MONO_TYPE_I8
)
3820 opcode
= OP_ATOMIC_ADD_NEW_I8
;
3824 MONO_INST_NEW (cfg
, ins
, opcode
);
3825 ins
->dreg
= mono_alloc_ireg (cfg
);
3826 ins
->inst_basereg
= args
[0]->dreg
;
3827 ins
->inst_offset
= 0;
3828 ins
->sreg2
= args
[1]->dreg
;
3829 ins
->type
= (opcode
== OP_ATOMIC_ADD_NEW_I4
) ? STACK_I4
: STACK_I8
;
3830 MONO_ADD_INS (cfg
->cbb
, ins
);
3833 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
3835 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
3836 if (strcmp (cmethod
->name
, "Exchange") == 0) {
3839 if (fsig
->params
[0]->type
== MONO_TYPE_I4
)
3840 opcode
= OP_ATOMIC_EXCHANGE_I4
;
3841 #if SIZEOF_REGISTER == 8
3842 else if ((fsig
->params
[0]->type
== MONO_TYPE_I8
) ||
3843 (fsig
->params
[0]->type
== MONO_TYPE_I
) ||
3844 (fsig
->params
[0]->type
== MONO_TYPE_OBJECT
))
3845 opcode
= OP_ATOMIC_EXCHANGE_I8
;
3847 else if ((fsig
->params
[0]->type
== MONO_TYPE_I
) ||
3848 (fsig
->params
[0]->type
== MONO_TYPE_OBJECT
))
3849 opcode
= OP_ATOMIC_EXCHANGE_I4
;
3854 MONO_INST_NEW (cfg
, ins
, opcode
);
3855 ins
->dreg
= mono_alloc_ireg (cfg
);
3856 ins
->inst_basereg
= args
[0]->dreg
;
3857 ins
->inst_offset
= 0;
3858 ins
->sreg2
= args
[1]->dreg
;
3859 MONO_ADD_INS (cfg
->cbb
, ins
);
3861 switch (fsig
->params
[0]->type
) {
3863 ins
->type
= STACK_I4
;
3867 ins
->type
= STACK_I8
;
3869 case MONO_TYPE_OBJECT
:
3870 ins
->type
= STACK_OBJ
;
3873 g_assert_not_reached ();
3876 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
3878 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS_IMM
3880 * Can't implement CompareExchange methods this way since they have
3881 * three arguments. We can implement one of the common cases, where the new
3882 * value is a constant.
3884 if ((strcmp (cmethod
->name
, "CompareExchange") == 0)) {
3885 if ((fsig
->params
[1]->type
== MONO_TYPE_I4
||
3886 (sizeof (gpointer
) == 4 && fsig
->params
[1]->type
== MONO_TYPE_I
))
3887 && args
[2]->opcode
== OP_ICONST
) {
3888 MONO_INST_NEW (cfg
, ins
, OP_ATOMIC_CAS_IMM_I4
);
3889 ins
->dreg
= alloc_ireg (cfg
);
3890 ins
->sreg1
= args
[0]->dreg
;
3891 ins
->sreg2
= args
[1]->dreg
;
3892 ins
->backend
.data
= GINT_TO_POINTER (args
[2]->inst_c0
);
3893 ins
->type
= STACK_I4
;
3894 MONO_ADD_INS (cfg
->cbb
, ins
);
3896 /* The I8 case is hard to detect, since the arg might be a conv.i8 (iconst) tree */
3898 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS_IMM */
3902 } else if (cmethod
->klass
->image
== mono_defaults
.corlib
) {
3903 if (cmethod
->name
[0] == 'B' && strcmp (cmethod
->name
, "Break") == 0
3904 && strcmp (cmethod
->klass
->name
, "Debugger") == 0) {
3905 MONO_INST_NEW (cfg
, ins
, OP_BREAK
);
3906 MONO_ADD_INS (cfg
->cbb
, ins
);
3909 if (cmethod
->name
[0] == 'g' && strcmp (cmethod
->name
, "get_IsRunningOnWindows") == 0
3910 && strcmp (cmethod
->klass
->name
, "Environment") == 0) {
3911 #ifdef PLATFORM_WIN32
3912 EMIT_NEW_ICONST (cfg
, ins
, 1);
3914 EMIT_NEW_ICONST (cfg
, ins
, 0);
3918 } else if (cmethod
->klass
== mono_defaults
.math_class
) {
3920 * There is general branches code for Min/Max, but it does not work for
3922 * http://everything2.com/?node_id=1051618
3926 #ifdef MONO_ARCH_SIMD_INTRINSICS
3927 if (cfg
->opt
& MONO_OPT_SIMD
) {
3928 ins
= mono_emit_simd_intrinsics (cfg
, cmethod
, fsig
, args
);
3934 return mono_arch_emit_inst_for_method (cfg
, cmethod
, fsig
, args
);
3938 * This entry point could be used later for arbitrary method
3941 inline static MonoInst
*
3942 mini_redirect_call (MonoCompile
*cfg
, MonoMethod
*method
,
3943 MonoMethodSignature
*signature
, MonoInst
**args
, MonoInst
*this)
3945 if (method
->klass
== mono_defaults
.string_class
) {
3946 /* managed string allocation support */
3947 if (strcmp (method
->name
, "InternalAllocateStr") == 0) {
3948 MonoInst
*iargs
[2];
3949 MonoVTable
*vtable
= mono_class_vtable (cfg
->domain
, method
->klass
);
3950 MonoMethod
*managed_alloc
= mono_gc_get_managed_allocator (vtable
, FALSE
);
3953 EMIT_NEW_VTABLECONST (cfg
, iargs
[0], vtable
);
3954 iargs
[1] = args
[0];
3955 return mono_emit_method_call (cfg
, managed_alloc
, iargs
, this);
3962 mono_save_args (MonoCompile
*cfg
, MonoMethodSignature
*sig
, MonoInst
**sp
)
3964 MonoInst
*store
, *temp
;
3967 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
3968 MonoType
*argtype
= (sig
->hasthis
&& (i
== 0)) ? type_from_stack_type (*sp
) : sig
->params
[i
- sig
->hasthis
];
3971 * FIXME: We should use *args++ = sp [0], but that would mean the arg
3972 * would be different than the MonoInst's used to represent arguments, and
3973 * the ldelema implementation can't deal with that.
3974 * Solution: When ldelema is used on an inline argument, create a var for
3975 * it, emit ldelema on that var, and emit the saving code below in
3976 * inline_method () if needed.
3978 temp
= mono_compile_create_var (cfg
, argtype
, OP_LOCAL
);
3979 cfg
->args
[i
] = temp
;
3980 /* This uses cfg->args [i] which is set by the preceeding line */
3981 EMIT_NEW_ARGSTORE (cfg
, store
, i
, *sp
);
3982 store
->cil_code
= sp
[0]->cil_code
;
3987 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
3988 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
3990 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
3992 check_inline_called_method_name_limit (MonoMethod
*called_method
)
3995 static char *limit
= NULL
;
3997 if (limit
== NULL
) {
3998 char *limit_string
= getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4000 if (limit_string
!= NULL
)
4001 limit
= limit_string
;
4003 limit
= (char *) "";
4006 if (limit
[0] != '\0') {
4007 char *called_method_name
= mono_method_full_name (called_method
, TRUE
);
4009 strncmp_result
= strncmp (called_method_name
, limit
, strlen (limit
));
4010 g_free (called_method_name
);
4012 //return (strncmp_result <= 0);
4013 return (strncmp_result
== 0);
#if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Return TRUE when CALLER_METHOD's full name starts with the (cached)
 * MONO_INLINE_CALLER_METHOD_NAME_LIMIT prefix, or when no limit is set.
 * NOTE(review): reconstructed from a garbled extraction; dropped
 * `return TRUE;`, braces and `#endif` restored — verify against upstream.
 */
static gboolean
check_inline_caller_method_name_limit (MonoMethod *caller_method)
{
	int strncmp_result;
	static char *limit = NULL;

	if (limit == NULL) {
		char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
		if (limit_string != NULL) {
			limit = limit_string;
		} else {
			limit = (char *) "";
		}
	}

	if (limit [0] != '\0') {
		char *caller_method_name = mono_method_full_name (caller_method, TRUE);

		strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
		g_free (caller_method_name);

		//return (strncmp_result <= 0);
		return (strncmp_result == 0);
	} else {
		return TRUE;
	}
}
#endif
4051 inline_method (MonoCompile
*cfg
, MonoMethod
*cmethod
, MonoMethodSignature
*fsig
, MonoInst
**sp
,
4052 guchar
*ip
, guint real_offset
, GList
*dont_inline
, gboolean inline_allways
)
4054 MonoInst
*ins
, *rvar
= NULL
;
4055 MonoMethodHeader
*cheader
;
4056 MonoBasicBlock
*ebblock
, *sbblock
;
4058 MonoMethod
*prev_inlined_method
;
4059 MonoInst
**prev_locals
, **prev_args
;
4060 MonoType
**prev_arg_types
;
4061 guint prev_real_offset
;
4062 GHashTable
*prev_cbb_hash
;
4063 MonoBasicBlock
**prev_cil_offset_to_bb
;
4064 MonoBasicBlock
*prev_cbb
;
4065 unsigned char* prev_cil_start
;
4066 guint32 prev_cil_offset_to_bb_len
;
4067 MonoMethod
*prev_current_method
;
4068 MonoGenericContext
*prev_generic_context
;
4069 gboolean ret_var_set
, prev_ret_var_set
;
4071 g_assert (cfg
->exception_type
== MONO_EXCEPTION_NONE
);
4073 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4074 if ((! inline_allways
) && ! check_inline_called_method_name_limit (cmethod
))
4077 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4078 if ((! inline_allways
) && ! check_inline_caller_method_name_limit (cfg
->method
))
4082 if (cfg
->verbose_level
> 2)
4083 printf ("INLINE START %p %s -> %s\n", cmethod
, mono_method_full_name (cfg
->method
, TRUE
), mono_method_full_name (cmethod
, TRUE
));
4085 if (!cmethod
->inline_info
) {
4086 mono_jit_stats
.inlineable_methods
++;
4087 cmethod
->inline_info
= 1;
4089 /* allocate space to store the return value */
4090 if (!MONO_TYPE_IS_VOID (fsig
->ret
)) {
4091 rvar
= mono_compile_create_var (cfg
, fsig
->ret
, OP_LOCAL
);
4094 /* allocate local variables */
4095 cheader
= mono_method_get_header (cmethod
);
4096 prev_locals
= cfg
->locals
;
4097 cfg
->locals
= mono_mempool_alloc0 (cfg
->mempool
, cheader
->num_locals
* sizeof (MonoInst
*));
4098 for (i
= 0; i
< cheader
->num_locals
; ++i
)
4099 cfg
->locals
[i
] = mono_compile_create_var (cfg
, cheader
->locals
[i
], OP_LOCAL
);
4101 /* allocate start and end blocks */
4102 /* This is needed so if the inline is aborted, we can clean up */
4103 NEW_BBLOCK (cfg
, sbblock
);
4104 sbblock
->real_offset
= real_offset
;
4106 NEW_BBLOCK (cfg
, ebblock
);
4107 ebblock
->block_num
= cfg
->num_bblocks
++;
4108 ebblock
->real_offset
= real_offset
;
4110 prev_args
= cfg
->args
;
4111 prev_arg_types
= cfg
->arg_types
;
4112 prev_inlined_method
= cfg
->inlined_method
;
4113 cfg
->inlined_method
= cmethod
;
4114 cfg
->ret_var_set
= FALSE
;
4115 prev_real_offset
= cfg
->real_offset
;
4116 prev_cbb_hash
= cfg
->cbb_hash
;
4117 prev_cil_offset_to_bb
= cfg
->cil_offset_to_bb
;
4118 prev_cil_offset_to_bb_len
= cfg
->cil_offset_to_bb_len
;
4119 prev_cil_start
= cfg
->cil_start
;
4120 prev_cbb
= cfg
->cbb
;
4121 prev_current_method
= cfg
->current_method
;
4122 prev_generic_context
= cfg
->generic_context
;
4123 prev_ret_var_set
= cfg
->ret_var_set
;
4125 costs
= mono_method_to_ir (cfg
, cmethod
, sbblock
, ebblock
, rvar
, dont_inline
, sp
, real_offset
, *ip
== CEE_CALLVIRT
);
4127 ret_var_set
= cfg
->ret_var_set
;
4129 cfg
->inlined_method
= prev_inlined_method
;
4130 cfg
->real_offset
= prev_real_offset
;
4131 cfg
->cbb_hash
= prev_cbb_hash
;
4132 cfg
->cil_offset_to_bb
= prev_cil_offset_to_bb
;
4133 cfg
->cil_offset_to_bb_len
= prev_cil_offset_to_bb_len
;
4134 cfg
->cil_start
= prev_cil_start
;
4135 cfg
->locals
= prev_locals
;
4136 cfg
->args
= prev_args
;
4137 cfg
->arg_types
= prev_arg_types
;
4138 cfg
->current_method
= prev_current_method
;
4139 cfg
->generic_context
= prev_generic_context
;
4140 cfg
->ret_var_set
= prev_ret_var_set
;
4142 if ((costs
>= 0 && costs
< 60) || inline_allways
) {
4143 if (cfg
->verbose_level
> 2)
4144 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg
->method
, TRUE
), mono_method_full_name (cmethod
, TRUE
));
4146 mono_jit_stats
.inlined_methods
++;
4148 /* always add some code to avoid block split failures */
4149 MONO_INST_NEW (cfg
, ins
, OP_NOP
);
4150 MONO_ADD_INS (prev_cbb
, ins
);
4152 prev_cbb
->next_bb
= sbblock
;
4153 link_bblock (cfg
, prev_cbb
, sbblock
);
4156 * Get rid of the begin and end bblocks if possible to aid local
4159 mono_merge_basic_blocks (cfg
, prev_cbb
, sbblock
);
4161 if ((prev_cbb
->out_count
== 1) && (prev_cbb
->out_bb
[0]->in_count
== 1) && (prev_cbb
->out_bb
[0] != ebblock
))
4162 mono_merge_basic_blocks (cfg
, prev_cbb
, prev_cbb
->out_bb
[0]);
4164 if ((ebblock
->in_count
== 1) && ebblock
->in_bb
[0]->out_count
== 1) {
4165 MonoBasicBlock
*prev
= ebblock
->in_bb
[0];
4166 mono_merge_basic_blocks (cfg
, prev
, ebblock
);
4168 if ((prev_cbb
->out_count
== 1) && (prev_cbb
->out_bb
[0]->in_count
== 1) && (prev_cbb
->out_bb
[0] == prev
)) {
4169 mono_merge_basic_blocks (cfg
, prev_cbb
, prev
);
4170 cfg
->cbb
= prev_cbb
;
4178 * If the inlined method contains only a throw, then the ret var is not
4179 * set, so set it to a dummy value.
4182 static double r8_0
= 0.0;
4184 switch (rvar
->type
) {
4186 MONO_EMIT_NEW_ICONST (cfg
, rvar
->dreg
, 0);
4189 MONO_EMIT_NEW_I8CONST (cfg
, rvar
->dreg
, 0);
4194 MONO_EMIT_NEW_PCONST (cfg
, rvar
->dreg
, 0);
4197 MONO_INST_NEW (cfg
, ins
, OP_R8CONST
);
4198 ins
->type
= STACK_R8
;
4199 ins
->inst_p0
= (void*)&r8_0
;
4200 ins
->dreg
= rvar
->dreg
;
4201 MONO_ADD_INS (cfg
->cbb
, ins
);
4204 MONO_EMIT_NEW_VZERO (cfg
, rvar
->dreg
, mono_class_from_mono_type (fsig
->ret
));
4207 g_assert_not_reached ();
4211 EMIT_NEW_TEMPLOAD (cfg
, ins
, rvar
->inst_c0
);
4216 if (cfg
->verbose_level
> 2)
4217 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod
, TRUE
));
4218 cfg
->exception_type
= MONO_EXCEPTION_NONE
;
4219 mono_loader_clear_error ();
4221 /* This gets rid of the newly added bblocks */
4222 cfg
->cbb
= prev_cbb
;
/*
 * Some of these comments may well be out-of-date.
 * Design decisions: we do a single pass over the IL code (and we do bblock
 * splitting/merging in the few cases when it's required: a back jump to an IL
 * address that was not already seen as bblock starting point).
 * Code is validated as we go (full verification is still better left to metadata/verify.c).
 * Complex operations are decomposed in simpler ones right away. We need to let the
 * arch-specific code peek and poke inside this process somehow (except when the
 * optimizations can take advantage of the full semantic info of coarse opcodes).
 * All the opcodes of the form opcode.s are 'normalized' to opcode.
 * MonoInst->opcode initially is the IL opcode or some simplification of that
 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
 * opcode with value bigger than OP_LAST.
 * At this point the IR can be handed over to an interpreter, a dumb code generator
 * or to the optimizing code generator that will translate it to SSA form.
 *
 * Profiling directed optimizations.
 * We may compile by default with few or no optimizations and instrument the code
 * or the user may indicate what methods to optimize the most either in a config file
 * or through repeated runs where the compiler applies offline the optimizations to
 * each method and then decides if it was worth it.
 */

/* Verification helpers used inside mono_method_to_ir (); they reference the
 * locals (sp, stack_start, header, num_args, ip, end) of that function and
 * jump to its UNVERIFIED / load_error labels. */
#define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
#define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
#define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
#define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
#define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
#define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
#define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
#define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}

/* offset from br.s -> br like opcodes */
#define BIG_BRANCH_OFFSET 13
4263 ip_in_bb (MonoCompile
*cfg
, MonoBasicBlock
*bb
, const guint8
* ip
)
4265 MonoBasicBlock
*b
= cfg
->cil_offset_to_bb
[ip
- cfg
->cil_start
];
4267 return b
== NULL
|| b
== bb
;
/*
 * get_basic_blocks:
 *
 *   Do a single decode pass over the IL between START and END, creating a
 * basic block (via GET_BBLOCK) at every branch/switch target and at the
 * address following each branch, so the main IL-to-IR loop can assume all
 * bblock boundaries are known in advance.  The bblock containing a 'throw'
 * is flagged out_of_line so it can be moved off the hot path.  On malformed
 * IL, *POS is set to the offending address and a non-zero value is returned.
 */
static int
get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
{
	unsigned char *ip = start;
	unsigned char *target;
	int i;
	guint cli_addr;
	MonoBasicBlock *bblock;
	const MonoOpcode *opcode;

	while (ip < end) {
		cli_addr = ip - start;
		i = mono_opcode_value ((const guint8 **)&ip, end);
		if (i < 0)
			UNVERIFIED;
		opcode = &mono_opcodes [i];
		/* Advance ip by the operand size; only branch operands create bblocks. */
		switch (opcode->argument) {
		case MonoInlineNone:
			ip++;
			break;
		case MonoInlineString:
		case MonoInlineType:
		case MonoInlineField:
		case MonoInlineMethod:
		case MonoInlineTok:
		case MonoInlineSig:
		case MonoShortInlineR:
		case MonoInlineI:
			ip += 5;
			break;
		case MonoInlineVar:
			ip += 3;
			break;
		case MonoShortInlineVar:
		case MonoShortInlineI:
			ip += 2;
			break;
		case MonoShortInlineBrTarget:
			/* 1-byte signed displacement, relative to the next instruction. */
			target = start + cli_addr + 2 + (signed char)ip [1];
			GET_BBLOCK (cfg, bblock, target);
			ip += 2;
			if (ip < end)
				GET_BBLOCK (cfg, bblock, ip);
			break;
		case MonoInlineBrTarget:
			/* 4-byte signed displacement, relative to the next instruction. */
			target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
			GET_BBLOCK (cfg, bblock, target);
			ip += 5;
			if (ip < end)
				GET_BBLOCK (cfg, bblock, ip);
			break;
		case MonoInlineSwitch: {
			guint32 n = read32 (ip + 1);
			guint32 j;
			ip += 5;
			/* Switch targets are relative to the end of the whole switch instruction. */
			cli_addr += 5 + 4 * n;
			target = start + cli_addr;
			GET_BBLOCK (cfg, bblock, target);

			for (j = 0; j < n; ++j) {
				target = start + cli_addr + (gint32)read32 (ip);
				GET_BBLOCK (cfg, bblock, target);
				ip += 4;
			}
			break;
		}
		case MonoInlineR:
		case MonoInlineI8:
			ip += 9;
			break;
		default:
			g_assert_not_reached ();
		}

		if (i == CEE_THROW) {
			unsigned char *bb_start = ip - 1;

			/* Find the start of the bblock containing the throw */
			bblock = NULL;
			while ((bb_start >= start) && !bblock) {
				bblock = cfg->cil_offset_to_bb [(bb_start) - start];
				bb_start --;
			}

			if (bblock)
				bblock->out_of_line = 1;
		}
	}
	return 0;
unverified:
	*pos = ip;
	return 1;
}
4363 static inline MonoMethod
*
4364 mini_get_method_allow_open (MonoMethod
*m
, guint32 token
, MonoClass
*klass
, MonoGenericContext
*context
)
4368 if (m
->wrapper_type
!= MONO_WRAPPER_NONE
)
4369 return mono_method_get_wrapper_data (m
, token
);
4371 method
= mono_get_method_full (m
->klass
->image
, token
, klass
, context
);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but when no generic sharing context
 * is active, return NULL for methods whose class is still an open
 * constructed type, since those cannot be compiled directly.
 */
static inline MonoMethod *
mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
{
	MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);

	if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
		method = NULL;

	return method;
}
/*
 * mini_get_class:
 *
 *   Resolve TOKEN to a MonoClass in the context of METHOD.  For wrapper
 * methods the token indexes the wrapper data instead of the metadata
 * tables.  The class is initialized before being returned; NULL on failure.
 */
static inline MonoClass*
mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
{
	MonoClass *klass;

	if (method->wrapper_type != MONO_WRAPPER_NONE)
		klass = mono_method_get_wrapper_data (method, token);
	else
		klass = mono_class_get_full (method->klass->image, token, context);
	if (klass)
		mono_class_init (klass);
	return klass;
}
/*
 * Returns TRUE if the JIT should abort inlining because "callee"
 * is influenced by security attributes.
 */
static
gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
{
	guint32 result;

	/* When inlining (cfg->method != caller), refuse to inline a callee with
	 * declarative security: the linkdemand must run against the real caller. */
	if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
		return TRUE;
	}

	result = mono_declsec_linkdemand (cfg->domain, caller, callee);
	if (result == MONO_JIT_SECURITY_OK)
		return FALSE;

	if (result == MONO_JIT_LINKDEMAND_ECMA) {
		/* Generate code to throw a SecurityException before the actual call/link */
		MonoSecurityManager *secman = mono_security_manager_get_methods ();
		MonoInst *args [2];

		NEW_ICONST (cfg, args [0], 4);
		NEW_METHODCONST (cfg, args [1], caller);
		mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
	} else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
		/* don't hide previous results */
		cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
		cfg->exception_data = result;
		return TRUE;
	}

	return FALSE;
}
/*
 * method_access_exception:
 *
 *   Return (and lazily cache) the managed SecurityManager helper that
 * throws a MethodAccessException; takes 2 arguments (caller and callee).
 */
static MonoMethod*
method_access_exception (void)
{
	static MonoMethod *method = NULL;

	if (!method) {
		MonoSecurityManager *secman = mono_security_manager_get_methods ();
		method = mono_class_get_method_from_name (secman->securitymanager,
							  "MethodAccessException", 2);
	}
	g_assert (method);
	return method;
}
/*
 * emit_throw_method_access_exception:
 *
 *   Emit a call to the managed helper which throws a MethodAccessException
 * for an illegal call from CALLER to CALLEE.  BBLOCK and IP are not used by
 * this helper; they are kept for symmetry with the other emit_throw_*
 * helpers.
 */
static void
emit_throw_method_access_exception (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
				    MonoBasicBlock *bblock, unsigned char *ip)
{
	MonoMethod *thrower = method_access_exception ();
	MonoInst *args [2];

	EMIT_NEW_METHODCONST (cfg, args [0], caller);
	EMIT_NEW_METHODCONST (cfg, args [1], callee);
	mono_emit_method_call (cfg, thrower, args, NULL);
}
/*
 * verification_exception:
 *
 *   Return (and lazily cache) the managed SecurityManager helper that
 * throws a VerificationException; takes no arguments.
 */
static MonoMethod*
verification_exception (void)
{
	static MonoMethod *method = NULL;

	if (!method) {
		MonoSecurityManager *secman = mono_security_manager_get_methods ();
		method = mono_class_get_method_from_name (secman->securitymanager,
							  "VerificationException", 0);
	}
	g_assert (method);
	return method;
}
/*
 * emit_throw_verification_exception:
 *
 *   Emit a call to the managed helper which throws a VerificationException.
 * BBLOCK and IP are not used by this helper.
 */
static void
emit_throw_verification_exception (MonoCompile *cfg, MonoBasicBlock *bblock, unsigned char *ip)
{
	MonoMethod *thrower = verification_exception ();

	mono_emit_method_call (cfg, thrower, NULL, NULL);
}
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check: a call is allowed when the caller's security
 * level is at least the callee's, or when either side is
 * [SecuritySafeCritical].  Otherwise, emit code that throws a
 * MethodAccessException at the call site.
 */
static void
ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
					 MonoBasicBlock *bblock, unsigned char *ip)
{
	MonoSecurityCoreCLRLevel caller_level = mono_security_core_clr_method_level (caller, TRUE);
	MonoSecurityCoreCLRLevel callee_level = mono_security_core_clr_method_level (callee, TRUE);
	gboolean is_safe = TRUE;

	if (!(caller_level >= callee_level ||
			caller_level == MONO_SECURITY_CORE_CLR_SAFE_CRITICAL ||
			callee_level == MONO_SECURITY_CORE_CLR_SAFE_CRITICAL)) {
		is_safe = FALSE;
	}

	if (!is_safe)
		emit_throw_method_access_exception (cfg, caller, callee, bblock, ip);
}
/*
 * method_is_safe:
 *
 *   Hook deciding whether METHOD may be compiled without emitting a
 * VerificationException.  The name-based check below looks like a debugging
 * aid kept commented out; every method is currently considered safe.
 * NOTE(review): the commented-out state of the strcmp check is inferred
 * from context — confirm against the original file.
 */
static gboolean
method_is_safe (MonoMethod *method)
{
	/*
	if (strcmp (method->name, "unsafeMethod") == 0)
		return FALSE;
	*/
	return TRUE;
}
/*
 * Check that the IL instructions at ip are the array initialization
 * sequence and return the pointer to the data and the size.
 *
 * Returns NULL when the pattern does not match or the element type cannot
 * be handled without byte swapping; for AOT compilation on non-dynamic
 * images the field RVA is returned instead of a data pointer so the lookup
 * can be redone at load time.
 */
static gpointer
initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
{
	/*
	 * newarr[System.Int32]
	 * dup
	 * ldtoken field valuetype ...
	 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
	 */
	if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
		guint32 token = read32 (ip + 7);
		guint32 field_token = read32 (ip + 2);
		guint32 field_index = field_token & 0xffffff;
		guint32 rva;
		const char *data_ptr;
		int size = 0;
		MonoMethod *cmethod;
		MonoClass *dummy_class;
		MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
		int dummy_align;

		if (!field)
			return NULL;

		*out_field_token = field_token;

		cmethod = mini_get_method (NULL, method, token, NULL, NULL);
		if (!cmethod)
			return NULL;
		/* Only the corlib RuntimeHelpers::InitializeArray call qualifies. */
		if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
			return NULL;
		switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
		case MONO_TYPE_BOOLEAN:
		case MONO_TYPE_I1:
		case MONO_TYPE_U1:
			size = 1; break;
		/* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
#if G_BYTE_ORDER == G_LITTLE_ENDIAN
		case MONO_TYPE_CHAR:
		case MONO_TYPE_I2:
		case MONO_TYPE_U2:
			size = 2; break;
		case MONO_TYPE_I4:
		case MONO_TYPE_U4:
		case MONO_TYPE_R4:
			size = 4; break;
		case MONO_TYPE_R8:
#ifdef ARM_FPU_FPA
			return NULL; /* stupid ARM FP swapped format */
#endif
		case MONO_TYPE_I8:
		case MONO_TYPE_U8:
			size = 8; break;
#endif
		default:
			return NULL;
		}
		size *= len;
		/* The RVA field must be large enough to hold the whole array. */
		if (size > mono_type_size (field->type, &dummy_align))
			return NULL;
		*out_size = size;
		/*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
		if (!method->klass->image->dynamic) {
			field_index = read32 (ip + 2) & 0xffffff;
			mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
			data_ptr = mono_image_rva_map (method->klass->image, rva);
			/*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
			/* for aot code we do the lookup on load */
			if (aot && data_ptr)
				return GUINT_TO_POINTER (rva);
		} else {
			/*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
			g_assert (!aot);
			data_ptr = mono_field_get_data (field);
		}
		return (gpointer)data_ptr;
	}
	return NULL;
}
/*
 * set_exception_type_from_invalid_il:
 *
 *   Abort compilation with an InvalidProgramException whose message embeds
 * the disassembly of the offending IL instruction at IP (or a note that the
 * method body is empty).
 */
static void
set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
{
	char *method_fname = mono_method_full_name (method, TRUE);
	char *method_code;

	if (mono_method_get_header (method)->code_size == 0)
		method_code = g_strdup ("method body is empty.");
	else
		method_code = mono_disasm_code_one (NULL, method, ip, NULL);
	cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
	cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
	g_free (method_fname);
	g_free (method_code);
}
/*
 * set_exception_object:
 *
 *   Record a pre-constructed managed exception on CFG so compilation is
 * aborted with it.  cfg->exception_ptr is registered as a GC root before
 * the assignment since it may hold the only reference to the object.
 */
static void
set_exception_object (MonoCompile *cfg, MonoException *exception)
{
	cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
	MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
	cfg->exception_ptr = exception;
}
4621 generic_class_is_reference_type (MonoCompile
*cfg
, MonoClass
*klass
)
4625 if (cfg
->generic_sharing_context
)
4626 type
= mini_get_basic_type_from_generic (cfg
->generic_sharing_context
, &klass
->byval_arg
);
4628 type
= &klass
->byval_arg
;
4629 return MONO_TYPE_IS_REFERENCE (type
);
/*
 * mono_decompose_array_access_opts:
 *
 *  Decompose array access opcodes.
 * This should be in decompose.c, but it emits calls so it has to stay here until
 * the old JIT is gone.
 */
void
mono_decompose_array_access_opts (MonoCompile *cfg)
{
	MonoBasicBlock *bb, *first_bb;

	/*
	 * Unlike decompose_long_opts, this pass does not alter the CFG of the method so it 
	 * can be executed anytime. It should be run before decompose_long
	 */

	/*
	 * Create a dummy bblock and emit code into it so we can use the normal
	 * code generation macros.
	 */
	cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
	first_bb = cfg->cbb;

	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
		MonoInst *ins;
		MonoInst *prev = NULL;
		MonoInst *dest;
		MonoInst *iargs [3];
		gboolean restart;

		/* Only bblocks flagged as containing array access opcodes need work. */
		if (!bb->has_array_access)
			continue;

		if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE DECOMPOSE-ARRAY-ACCESS-OPTS ");

		cfg->cbb->code = cfg->cbb->last_ins = NULL;
		restart = TRUE;

		while (restart) {
			restart = FALSE;

			for (ins = bb->code; ins; ins = ins->next) {
				switch (ins->opcode) {
				case OP_LDLEN:
					/* Array length is an i4 load from MonoArray->max_length. */
					NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg, ins->sreg1,
									  G_STRUCT_OFFSET (MonoArray, max_length));
					MONO_ADD_INS (cfg->cbb, dest);
					break;
				case OP_BOUNDS_CHECK:
					MONO_ARCH_EMIT_BOUNDS_CHECK (cfg, ins->sreg1, ins->inst_imm, ins->sreg2);
					break;
				case OP_NEWARR:
					if (cfg->opt & MONO_OPT_SHARED) {
						/* Shared code cannot burn a vtable pointer in, so call
						 * the (domain, class, len) icall instead. */
						EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
						EMIT_NEW_CLASSCONST (cfg, iargs [1], ins->inst_newa_class);
						MONO_INST_NEW (cfg, iargs [2], OP_MOVE);
						iargs [2]->dreg = ins->sreg1;

						dest = mono_emit_jit_icall (cfg, mono_array_new, iargs);
						dest->dreg = ins->dreg;
					} else {
						MonoVTable *vtable = mono_class_vtable (cfg->domain, mono_array_class_get (ins->inst_newa_class, 1));

						g_assert (vtable);
						NEW_VTABLECONST (cfg, iargs [0], vtable);
						MONO_ADD_INS (cfg->cbb, iargs [0]);
						MONO_INST_NEW (cfg, iargs [1], OP_MOVE);
						iargs [1]->dreg = ins->sreg1;

						dest = mono_emit_jit_icall (cfg, mono_array_new_specific, iargs);
						dest->dreg = ins->dreg;
					}
					break;
				case OP_STRLEN:
					/* String length is an i4 load from MonoString->length. */
					NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg,
									  ins->sreg1, G_STRUCT_OFFSET (MonoString, length));
					MONO_ADD_INS (cfg->cbb, dest);
					break;
				default:
					break;
				}

				g_assert (cfg->cbb == first_bb);

				if (cfg->cbb->code || (cfg->cbb != first_bb)) {
					/* Replace the original instruction with the new code sequence */

					mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
					first_bb->code = first_bb->last_ins = NULL;
					first_bb->in_count = first_bb->out_count = 0;
					cfg->cbb = first_bb;
				}
				else
					prev = ins;
			}
		}

		if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER DECOMPOSE-ARRAY-ACCESS-OPTS ");
	}
}
#ifdef MONO_ARCH_SOFT_FLOAT

/*
 * mono_decompose_soft_float:
 *
 *   Soft float support on ARM. We store each double value in a pair of integer vregs,
 * similar to long support on 32 bit platforms. 32 bit float values require special
 * handling when used as locals, arguments, and in calls.
 * One big problem with soft-float is that there are few r4 test cases in our test suite.
 */
void
mono_decompose_soft_float (MonoCompile *cfg)
{
	MonoBasicBlock *bb, *first_bb;

	/*
	 * This pass creates long opcodes, so it should be run before decompose_long_opts ().
	 */

	/*
	 * Create a dummy bblock and emit code into it so we can use the normal
	 * code generation macros.
	 */
	cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
	first_bb = cfg->cbb;

	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
		MonoInst *ins;
		MonoInst *prev = NULL;
		gboolean restart;

		if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE HANDLE-SOFT-FLOAT ");

		cfg->cbb->code = cfg->cbb->last_ins = NULL;
		restart = TRUE;

		while (restart) {
			restart = FALSE;

			for (ins = bb->code; ins; ins = ins->next) {
				const char *spec = INS_INFO (ins->opcode);

				/* Most fp operations are handled automatically by opcode emulation */

				switch (ins->opcode) {
				case OP_R8CONST: {
					DVal d;
					/* Reinterpret the double bits as an i8 constant. */
					d.vald = *(double*)ins->inst_p0;
					MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
					break;
				}
				case OP_R4CONST: {
					DVal d;
					/* We load the r8 value */
					d.vald = *(float*)ins->inst_p0;
					MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
					break;
				}
				case OP_FMOVE:
					/* fp values live in integer vreg pairs, so this is a long move. */
					ins->opcode = OP_LMOVE;
					break;
				case OP_FGETLOW32:
					/* Low word of the pair is at sreg1 + 1. */
					ins->opcode = OP_MOVE;
					ins->sreg1 = ins->sreg1 + 1;
					break;
				case OP_FGETHIGH32:
					/* High word of the pair is at sreg1 + 2. */
					ins->opcode = OP_MOVE;
					ins->sreg1 = ins->sreg1 + 2;
					break;
				case OP_SETFRET: {
					int reg = ins->sreg1;

					ins->opcode = OP_SETLRET;
					ins->dreg = -1;
					ins->sreg1 = reg + 1;
					ins->sreg2 = reg + 2;
					break;
				}
				case OP_LOADR8_MEMBASE:
					ins->opcode = OP_LOADI8_MEMBASE;
					break;
				case OP_STORER8_MEMBASE_REG:
					ins->opcode = OP_STOREI8_MEMBASE_REG;
					break;
				case OP_STORER4_MEMBASE_REG: {
					MonoInst *iargs [2];
					int addr_reg;

					/* Storing an r4 requires an r8->r4 conversion, done by an icall. */

					/* Arg 1 is the double value */
					MONO_INST_NEW (cfg, iargs [0], OP_ARG);
					iargs [0]->dreg = ins->sreg1;

					/* Arg 2 is the address to store to */
					addr_reg = mono_alloc_preg (cfg);
					EMIT_NEW_BIALU_IMM (cfg, iargs [1], OP_PADD_IMM, addr_reg, ins->inst_destbasereg, ins->inst_offset);
					mono_emit_jit_icall (cfg, mono_fstore_r4, iargs);
					restart = TRUE;
					break;
				}
				case OP_LOADR4_MEMBASE: {
					MonoInst *iargs [1];
					MonoInst *conv;
					int addr_reg;

					/* Loading an r4 requires an r4->r8 conversion, done by an icall. */
					addr_reg = mono_alloc_preg (cfg);
					EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, addr_reg, ins->inst_basereg, ins->inst_offset);
					conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
					conv->dreg = ins->dreg;
					break;
				}
				case OP_FCALL:
				case OP_FCALL_REG:
				case OP_FCALL_MEMBASE: {
					MonoCallInst *call = (MonoCallInst*)ins;
					if (call->signature->ret->type == MONO_TYPE_R4) {
						MonoCallInst *call2;
						MonoInst *iargs [1];
						MonoInst *conv;

						/* Convert the call into a call returning an int */
						MONO_INST_NEW_CALL (cfg, call2, OP_CALL);
						memcpy (call2, call, sizeof (MonoCallInst));
						switch (ins->opcode) {
						case OP_FCALL:
							call2->inst.opcode = OP_CALL;
							break;
						case OP_FCALL_REG:
							call2->inst.opcode = OP_CALL_REG;
							break;
						case OP_FCALL_MEMBASE:
							call2->inst.opcode = OP_CALL_MEMBASE;
							break;
						default:
							g_assert_not_reached ();
						}
						call2->inst.dreg = mono_alloc_ireg (cfg);
						MONO_ADD_INS (cfg->cbb, (MonoInst*)call2);

						/* FIXME: Optimize this */

						/* Emit an r4->r8 conversion */
						EMIT_NEW_VARLOADA_VREG (cfg, iargs [0], call2->inst.dreg, &mono_defaults.int32_class->byval_arg);
						conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
						conv->dreg = ins->dreg;
					} else {
						/* r8 returns become long (register pair) calls. */
						switch (ins->opcode) {
						case OP_FCALL:
							ins->opcode = OP_LCALL;
							break;
						case OP_FCALL_REG:
							ins->opcode = OP_LCALL_REG;
							break;
						case OP_FCALL_MEMBASE:
							ins->opcode = OP_LCALL_MEMBASE;
							break;
						default:
							g_assert_not_reached ();
						}
					}
					break;
				}
				case OP_FCOMPARE: {
					MonoJitICallInfo *info;
					MonoInst *iargs [2];
					MonoInst *call, *cmp, *br;

					/* Convert fcompare+fbcc to icall+icompare+beq */

					info = mono_find_jit_opcode_emulation (ins->next->opcode);
					g_assert (info);

					/* Create dummy MonoInst's for the arguments */
					MONO_INST_NEW (cfg, iargs [0], OP_ARG);
					iargs [0]->dreg = ins->sreg1;
					MONO_INST_NEW (cfg, iargs [1], OP_ARG);
					iargs [1]->dreg = ins->sreg2;

					call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);

					MONO_INST_NEW (cfg, cmp, OP_ICOMPARE_IMM);
					cmp->sreg1 = call->dreg;
					cmp->inst_imm = 0;
					MONO_ADD_INS (cfg->cbb, cmp);

					MONO_INST_NEW (cfg, br, OP_IBNE_UN);
					br->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * 2);
					br->inst_true_bb = ins->next->inst_true_bb;
					br->inst_false_bb = ins->next->inst_false_bb;
					MONO_ADD_INS (cfg->cbb, br);

					/* The call sequence might include fp ins */
					restart = TRUE;

					/* Skip fbcc or fccc */
					NULLIFY_INS (ins->next);
					break;
				}
				case OP_FCEQ:
				case OP_FCGT:
				case OP_FCGT_UN:
				case OP_FCLT:
				case OP_FCLT_UN: {
					MonoJitICallInfo *info;
					MonoInst *iargs [2];
					MonoInst *call;

					/* Convert fccc to icall+icompare+iceq */

					info = mono_find_jit_opcode_emulation (ins->opcode);
					g_assert (info);

					/* Create dummy MonoInst's for the arguments */
					MONO_INST_NEW (cfg, iargs [0], OP_ARG);
					iargs [0]->dreg = ins->sreg1;
					MONO_INST_NEW (cfg, iargs [1], OP_ARG);
					iargs [1]->dreg = ins->sreg2;

					call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);

					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, call->dreg, 1);
					MONO_EMIT_NEW_UNALU (cfg, OP_ICEQ, ins->dreg, -1);

					/* The call sequence might include fp ins */
					restart = TRUE;
					break;
				}
				case OP_CKFINITE: {
					MonoInst *iargs [2];
					MonoInst *call, *cmp;

					/* Convert to icall+icompare+cond_exc+move */

					/* Create dummy MonoInst's for the arguments */
					MONO_INST_NEW (cfg, iargs [0], OP_ARG);
					iargs [0]->dreg = ins->sreg1;

					call = mono_emit_jit_icall (cfg, mono_isfinite, iargs);

					MONO_INST_NEW (cfg, cmp, OP_ICOMPARE_IMM);
					cmp->sreg1 = call->dreg;
					cmp->inst_imm = 1;
					MONO_ADD_INS (cfg->cbb, cmp);

					MONO_EMIT_NEW_COND_EXC (cfg, INE_UN, "ArithmeticException");

					/* Do the assignment if the value is finite */
					MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, ins->dreg, ins->sreg1);

					restart = TRUE;
					break;
				}
				default:
					/* Any remaining fp use at this point is a bug in the pass. */
					if (spec [MONO_INST_SRC1] == 'f' || spec [MONO_INST_SRC2] == 'f' || spec [MONO_INST_DEST] == 'f') {
						mono_print_ins (ins);
						g_assert_not_reached ();
					}
					break;
				}

				g_assert (cfg->cbb == first_bb);

				if (cfg->cbb->code || (cfg->cbb != first_bb)) {
					/* Replace the original instruction with the new code sequence */

					mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
					first_bb->code = first_bb->last_ins = NULL;
					first_bb->in_count = first_bb->out_count = 0;
					cfg->cbb = first_bb;
				}
				else
					prev = ins;
			}
		}

		if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER HANDLE-SOFT-FLOAT ");
	}

	mono_decompose_long_opts (cfg);
}

#endif
/*
 * emit_stloc_ir:
 *
 *   Emit the store of the top-of-stack value *SP into local N.  When the
 * value was just produced by an OP_ICONST/OP_I8CONST which is still the
 * last instruction of the current bblock, retarget that instruction's dreg
 * to the local instead of emitting a separate move.
 */
static void
emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
{
	MonoInst *ins;
	guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
	if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
			((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
		/* Optimize reg-reg moves away */
		/*
		 * Can't optimize other opcodes, since sp[0] might point to
		 * the last ins of a decomposed opcode.
		 */
		sp [0]->dreg = (cfg)->locals [n]->dreg;
	} else {
		EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
	}
}
/*
 * ldloca inhibits many optimizations so try to get rid of it in common
 * cases.
 *
 * Returns the IL address just past the matched 'ldloca <n>; initobj <type>'
 * sequence when the pattern was replaced, NULL when the pattern did not
 * match (and nothing was emitted).
 */
static inline unsigned char *
emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
{
	int local, token;
	MonoClass *klass;

	/* SIZE selects between ldloca.s (1-byte index) and ldloca (2-byte index). */
	if (size == 1) {
		local = ip [1];
		ip += 2;
	} else {
		local = read16 (ip + 2);
		ip += 4;
	}

	/* Recognize 'ldloca <n>; initobj <type>' within the current bblock and
	 * replace it with a direct zeroing of the local. */
	if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
		gboolean skip = FALSE;

		/* From the INITOBJ case */
		token = read32 (ip + 2);
		klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
		CHECK_TYPELOAD (klass);
		if (generic_class_is_reference_type (cfg, klass)) {
			MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
		} else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
			MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
		} else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
			MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
		} else {
			skip = TRUE;
		}

		if (!skip)
			return ip + 6;
	}
load_error:
	return NULL;
}
5083 is_exception_class (MonoClass
*class)
5086 if (class == mono_defaults
.exception_class
)
5088 class = class->parent
;
5094 * mono_method_to_ir:
5096 * Translate the .net IL into linear IR.
5099 mono_method_to_ir (MonoCompile
*cfg
, MonoMethod
*method
, MonoBasicBlock
*start_bblock
, MonoBasicBlock
*end_bblock
,
5100 MonoInst
*return_var
, GList
*dont_inline
, MonoInst
**inline_args
,
5101 guint inline_offset
, gboolean is_virtual_call
)
5103 MonoInst
*ins
, **sp
, **stack_start
;
5104 MonoBasicBlock
*bblock
, *tblock
= NULL
, *init_localsbb
= NULL
;
5105 MonoMethod
*cmethod
, *method_definition
;
5106 MonoInst
**arg_array
;
5107 MonoMethodHeader
*header
;
5109 guint32 token
, ins_flag
;
5111 MonoClass
*constrained_call
= NULL
;
5112 unsigned char *ip
, *end
, *target
, *err_pos
;
5113 static double r8_0
= 0.0;
5114 MonoMethodSignature
*sig
;
5115 MonoGenericContext
*generic_context
= NULL
;
5116 MonoGenericContainer
*generic_container
= NULL
;
5117 MonoType
**param_types
;
5118 int i
, n
, start_new_bblock
, dreg
;
5119 int num_calls
= 0, inline_costs
= 0;
5120 int breakpoint_id
= 0;
5122 MonoBoolean security
, pinvoke
;
5123 MonoSecurityManager
* secman
= NULL
;
5124 MonoDeclSecurityActions actions
;
5125 GSList
*class_inits
= NULL
;
5126 gboolean dont_verify
, dont_verify_stloc
, readonly
= FALSE
;
5129 /* serialization and xdomain stuff may need access to private fields and methods */
5130 dont_verify
= method
->klass
->image
->assembly
->corlib_internal
? TRUE
: FALSE
;
5131 dont_verify
|= method
->wrapper_type
== MONO_WRAPPER_XDOMAIN_INVOKE
;
5132 dont_verify
|= method
->wrapper_type
== MONO_WRAPPER_XDOMAIN_DISPATCH
;
5133 dont_verify
|= method
->wrapper_type
== MONO_WRAPPER_MANAGED_TO_NATIVE
; /* bug #77896 */
5134 dont_verify
|= method
->wrapper_type
== MONO_WRAPPER_COMINTEROP
;
5135 dont_verify
|= method
->wrapper_type
== MONO_WRAPPER_COMINTEROP_INVOKE
;
5137 dont_verify
|= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK
;
5139 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5140 dont_verify_stloc
= method
->wrapper_type
== MONO_WRAPPER_MANAGED_TO_NATIVE
;
5141 dont_verify_stloc
|= method
->wrapper_type
== MONO_WRAPPER_UNKNOWN
;
5142 dont_verify_stloc
|= method
->wrapper_type
== MONO_WRAPPER_NATIVE_TO_MANAGED
;
5144 image
= method
->klass
->image
;
5145 header
= mono_method_get_header (method
);
5146 generic_container
= mono_method_get_generic_container (method
);
5147 sig
= mono_method_signature (method
);
5148 num_args
= sig
->hasthis
+ sig
->param_count
;
5149 ip
= (unsigned char*)header
->code
;
5150 cfg
->cil_start
= ip
;
5151 end
= ip
+ header
->code_size
;
5152 mono_jit_stats
.cil_code_size
+= header
->code_size
;
5154 method_definition
= method
;
5155 while (method_definition
->is_inflated
) {
5156 MonoMethodInflated
*imethod
= (MonoMethodInflated
*) method_definition
;
5157 method_definition
= imethod
->declaring
;
5160 /* SkipVerification is not allowed if core-clr is enabled */
5161 if (!dont_verify
&& mini_assembly_can_skip_verification (cfg
->domain
, method
)) {
5163 dont_verify_stloc
= TRUE
;
5166 if (!dont_verify
&& mini_method_verify (cfg
, method_definition
))
5167 goto exception_exit
;
5169 if (mono_debug_using_mono_debugger ())
5170 cfg
->keep_cil_nops
= TRUE
;
5172 if (sig
->is_inflated
)
5173 generic_context
= mono_method_get_context (method
);
5174 else if (generic_container
)
5175 generic_context
= &generic_container
->context
;
5176 cfg
->generic_context
= generic_context
;
5178 if (!cfg
->generic_sharing_context
)
5179 g_assert (!sig
->has_type_parameters
);
5181 if (sig
->generic_param_count
&& method
->wrapper_type
== MONO_WRAPPER_NONE
) {
5182 g_assert (method
->is_inflated
);
5183 g_assert (mono_method_get_context (method
)->method_inst
);
5185 if (method
->is_inflated
&& mono_method_get_context (method
)->method_inst
)
5186 g_assert (sig
->generic_param_count
);
5188 if (cfg
->method
== method
) {
5189 cfg
->real_offset
= 0;
5191 cfg
->real_offset
= inline_offset
;
5194 cfg
->cil_offset_to_bb
= mono_mempool_alloc0 (cfg
->mempool
, sizeof (MonoBasicBlock
*) * header
->code_size
);
5195 cfg
->cil_offset_to_bb_len
= header
->code_size
;
5197 cfg
->current_method
= method
;
5199 if (cfg
->verbose_level
> 2)
5200 printf ("method to IR %s\n", mono_method_full_name (method
, TRUE
));
5202 param_types
= mono_mempool_alloc (cfg
->mempool
, sizeof (MonoType
*) * num_args
);
5204 param_types
[0] = method
->klass
->valuetype
?&method
->klass
->this_arg
:&method
->klass
->byval_arg
;
5205 for (n
= 0; n
< sig
->param_count
; ++n
)
5206 param_types
[n
+ sig
->hasthis
] = sig
->params
[n
];
5207 cfg
->arg_types
= param_types
;
5209 dont_inline
= g_list_prepend (dont_inline
, method
);
5210 if (cfg
->method
== method
) {
5212 if (cfg
->prof_options
& MONO_PROFILE_INS_COVERAGE
)
5213 cfg
->coverage_info
= mono_profiler_coverage_alloc (cfg
->method
, header
->code_size
);
5216 NEW_BBLOCK (cfg
, start_bblock
);
5217 cfg
->bb_entry
= start_bblock
;
5218 start_bblock
->cil_code
= NULL
;
5219 start_bblock
->cil_length
= 0;
5222 NEW_BBLOCK (cfg
, end_bblock
);
5223 cfg
->bb_exit
= end_bblock
;
5224 end_bblock
->cil_code
= NULL
;
5225 end_bblock
->cil_length
= 0;
5226 g_assert (cfg
->num_bblocks
== 2);
5228 arg_array
= cfg
->args
;
5230 if (header
->num_clauses
) {
5231 cfg
->spvars
= g_hash_table_new (NULL
, NULL
);
5232 cfg
->exvars
= g_hash_table_new (NULL
, NULL
);
5234 /* handle exception clauses */
5235 for (i
= 0; i
< header
->num_clauses
; ++i
) {
5236 MonoBasicBlock
*try_bb
;
5237 MonoExceptionClause
*clause
= &header
->clauses
[i
];
5238 GET_BBLOCK (cfg
, try_bb
, ip
+ clause
->try_offset
);
5239 try_bb
->real_offset
= clause
->try_offset
;
5240 GET_BBLOCK (cfg
, tblock
, ip
+ clause
->handler_offset
);
5241 tblock
->real_offset
= clause
->handler_offset
;
5242 tblock
->flags
|= BB_EXCEPTION_HANDLER
;
5244 link_bblock (cfg
, try_bb
, tblock
);
5246 if (*(ip
+ clause
->handler_offset
) == CEE_POP
)
5247 tblock
->flags
|= BB_EXCEPTION_DEAD_OBJ
;
5249 if (clause
->flags
== MONO_EXCEPTION_CLAUSE_FINALLY
||
5250 clause
->flags
== MONO_EXCEPTION_CLAUSE_FILTER
||
5251 clause
->flags
== MONO_EXCEPTION_CLAUSE_FAULT
) {
5252 MONO_INST_NEW (cfg
, ins
, OP_START_HANDLER
);
5253 MONO_ADD_INS (tblock
, ins
);
5255 /* todo: is a fault block unsafe to optimize? */
5256 if (clause
->flags
== MONO_EXCEPTION_CLAUSE_FAULT
)
5257 tblock
->flags
|= BB_EXCEPTION_UNSAFE
;
5261 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5263 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5265 /* catch and filter blocks get the exception object on the stack */
5266 if (clause
->flags
== MONO_EXCEPTION_CLAUSE_NONE
||
5267 clause
->flags
== MONO_EXCEPTION_CLAUSE_FILTER
) {
5268 MonoInst
*dummy_use
;
5270 /* mostly like handle_stack_args (), but just sets the input args */
5271 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5272 tblock
->in_scount
= 1;
5273 tblock
->in_stack
= mono_mempool_alloc (cfg
->mempool
, sizeof (MonoInst
*));
5274 tblock
->in_stack
[0] = mono_create_exvar_for_offset (cfg
, clause
->handler_offset
);
5277 * Add a dummy use for the exvar so its liveness info will be
5281 EMIT_NEW_DUMMY_USE (cfg
, dummy_use
, tblock
->in_stack
[0]);
5283 if (clause
->flags
== MONO_EXCEPTION_CLAUSE_FILTER
) {
5284 GET_BBLOCK (cfg
, tblock
, ip
+ clause
->data
.filter_offset
);
5285 tblock
->flags
|= BB_EXCEPTION_HANDLER
;
5286 tblock
->real_offset
= clause
->data
.filter_offset
;
5287 tblock
->in_scount
= 1;
5288 tblock
->in_stack
= mono_mempool_alloc (cfg
->mempool
, sizeof (MonoInst
*));
5289 /* The filter block shares the exvar with the handler block */
5290 tblock
->in_stack
[0] = mono_create_exvar_for_offset (cfg
, clause
->handler_offset
);
5291 MONO_INST_NEW (cfg
, ins
, OP_START_HANDLER
);
5292 MONO_ADD_INS (tblock
, ins
);
5296 if (clause
->flags
!= MONO_EXCEPTION_CLAUSE_FILTER
&&
5297 clause
->data
.catch_class
&&
5298 cfg
->generic_sharing_context
&&
5299 mono_class_check_context_used (clause
->data
.catch_class
)) {
5301 * In shared generic code with catch
5302 * clauses containing type variables
5303 * the exception handling code has to
5304 * be able to get to the rgctx.
5305 * Therefore we have to make sure that
5306 * the vtable/mrgctx argument (for
5307 * static or generic methods) or the
5308 * "this" argument (for non-static
5309 * methods) are live.
5311 if ((method
->flags
& METHOD_ATTRIBUTE_STATIC
) ||
5312 mini_method_get_context (method
)->method_inst
||
5313 method
->klass
->valuetype
) {
5314 mono_get_vtable_var (cfg
);
5316 MonoInst
*dummy_use
;
5318 EMIT_NEW_DUMMY_USE (cfg
, dummy_use
, arg_array
[0]);
5323 arg_array
= (MonoInst
**) alloca (sizeof (MonoInst
*) * num_args
);
5324 cfg
->cbb
= start_bblock
;
5325 cfg
->args
= arg_array
;
5326 mono_save_args (cfg
, sig
, inline_args
);
5329 /* FIRST CODE BLOCK */
5330 NEW_BBLOCK (cfg
, bblock
);
5331 bblock
->cil_code
= ip
;
5335 ADD_BBLOCK (cfg
, bblock
);
5337 if (cfg
->method
== method
) {
5338 breakpoint_id
= mono_debugger_method_has_breakpoint (method
);
5339 if (breakpoint_id
&& (mono_debug_format
!= MONO_DEBUG_FORMAT_DEBUGGER
)) {
5340 MONO_INST_NEW (cfg
, ins
, OP_BREAK
);
5341 MONO_ADD_INS (bblock
, ins
);
5345 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS
)
5346 secman
= mono_security_manager_get_methods ();
5348 security
= (secman
&& mono_method_has_declsec (method
));
5349 /* at this point having security doesn't mean we have any code to generate */
5350 if (security
&& (cfg
->method
== method
)) {
5351 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5352 * And we do not want to enter the next section (with allocation) if we
5353 * have nothing to generate */
5354 security
= mono_declsec_get_demands (method
, &actions
);
5357 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5358 pinvoke
= (secman
&& (method
->wrapper_type
== MONO_WRAPPER_MANAGED_TO_NATIVE
));
5360 MonoMethod
*wrapped
= mono_marshal_method_from_wrapper (method
);
5361 if (wrapped
&& (wrapped
->flags
& METHOD_ATTRIBUTE_PINVOKE_IMPL
)) {
5362 MonoCustomAttrInfo
* custom
= mono_custom_attrs_from_method (wrapped
);
5364 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
5365 if (custom
&& mono_custom_attrs_has_attr (custom
, secman
->suppressunmanagedcodesecurity
)) {
5369 mono_custom_attrs_free (custom
);
5372 custom
= mono_custom_attrs_from_class (wrapped
->klass
);
5373 if (custom
&& mono_custom_attrs_has_attr (custom
, secman
->suppressunmanagedcodesecurity
)) {
5377 mono_custom_attrs_free (custom
);
5380 /* not a P/Invoke after all */
5385 if ((header
->init_locals
|| (cfg
->method
== method
&& (cfg
->opt
& MONO_OPT_SHARED
))) || cfg
->compile_aot
|| security
|| pinvoke
) {
5386 /* we use a separate basic block for the initialization code */
5387 NEW_BBLOCK (cfg
, init_localsbb
);
5388 cfg
->bb_init
= init_localsbb
;
5389 init_localsbb
->real_offset
= cfg
->real_offset
;
5390 start_bblock
->next_bb
= init_localsbb
;
5391 init_localsbb
->next_bb
= bblock
;
5392 link_bblock (cfg
, start_bblock
, init_localsbb
);
5393 link_bblock (cfg
, init_localsbb
, bblock
);
5395 cfg
->cbb
= init_localsbb
;
5397 start_bblock
->next_bb
= bblock
;
5398 link_bblock (cfg
, start_bblock
, bblock
);
5401 /* at this point we know, if security is TRUE, that some code needs to be generated */
5402 if (security
&& (cfg
->method
== method
)) {
5405 mono_jit_stats
.cas_demand_generation
++;
5407 if (actions
.demand
.blob
) {
5408 /* Add code for SecurityAction.Demand */
5409 EMIT_NEW_DECLSECCONST (cfg
, args
[0], image
, actions
.demand
);
5410 EMIT_NEW_ICONST (cfg
, args
[1], actions
.demand
.size
);
5411 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5412 mono_emit_method_call (cfg
, secman
->demand
, args
, NULL
);
5414 if (actions
.noncasdemand
.blob
) {
5415 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5416 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5417 EMIT_NEW_DECLSECCONST (cfg
, args
[0], image
, actions
.noncasdemand
);
5418 EMIT_NEW_ICONST (cfg
, args
[1], actions
.noncasdemand
.size
);
5419 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5420 mono_emit_method_call (cfg
, secman
->demand
, args
, NULL
);
5422 if (actions
.demandchoice
.blob
) {
5423 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5424 EMIT_NEW_DECLSECCONST (cfg
, args
[0], image
, actions
.demandchoice
);
5425 EMIT_NEW_ICONST (cfg
, args
[1], actions
.demandchoice
.size
);
5426 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5427 mono_emit_method_call (cfg
, secman
->demandchoice
, args
, NULL
);
5431 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5433 mono_emit_method_call (cfg
, secman
->demandunmanaged
, NULL
, NULL
);
5436 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR
) {
5437 if (method
->wrapper_type
== MONO_WRAPPER_MANAGED_TO_NATIVE
) {
5438 MonoMethod
*wrapped
= mono_marshal_method_from_wrapper (method
);
5439 if (wrapped
&& (wrapped
->flags
& METHOD_ATTRIBUTE_PINVOKE_IMPL
)) {
5440 if (!(method
->klass
&& method
->klass
->image
&&
5441 mono_security_core_clr_is_platform_image (method
->klass
->image
))) {
5442 emit_throw_method_access_exception (cfg
, method
, wrapped
, bblock
, ip
);
5446 if (!method_is_safe (method
))
5447 emit_throw_verification_exception (cfg
, bblock
, ip
);
5450 if (header
->code_size
== 0)
5453 if (get_basic_blocks (cfg
, header
, cfg
->real_offset
, ip
, end
, &err_pos
)) {
5458 if (cfg
->method
== method
)
5459 mono_debug_init_method (cfg
, bblock
, breakpoint_id
);
5461 for (n
= 0; n
< header
->num_locals
; ++n
) {
5462 if (header
->locals
[n
]->type
== MONO_TYPE_VOID
&& !header
->locals
[n
]->byref
)
5467 /* We force the vtable variable here for all shared methods
5468 for the possibility that they might show up in a stack
5469 trace where their exact instantiation is needed. */
5470 if (cfg
->generic_sharing_context
&& method
== cfg
->method
) {
5471 if ((method
->flags
& METHOD_ATTRIBUTE_STATIC
) ||
5472 mini_method_get_context (method
)->method_inst
||
5473 method
->klass
->valuetype
) {
5474 mono_get_vtable_var (cfg
);
5476 /* FIXME: Is there a better way to do this?
5477 We need the variable live for the duration
5478 of the whole method. */
5479 cfg
->args
[0]->flags
|= MONO_INST_INDIRECT
;
5483 /* add a check for this != NULL to inlined methods */
5484 if (is_virtual_call
) {
5487 NEW_ARGLOAD (cfg
, arg_ins
, 0);
5488 MONO_ADD_INS (cfg
->cbb
, arg_ins
);
5489 cfg
->flags
|= MONO_CFG_HAS_CHECK_THIS
;
5490 MONO_EMIT_NEW_UNALU (cfg
, OP_CHECK_THIS
, -1, arg_ins
->dreg
);
5491 MONO_EMIT_NEW_UNALU (cfg
, OP_NOT_NULL
, -1, arg_ins
->dreg
);
5494 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5495 stack_start
= sp
= mono_mempool_alloc0 (cfg
->mempool
, sizeof (MonoInst
*) * (header
->max_stack
+ 1));
5498 start_new_bblock
= 0;
5502 if (cfg
->method
== method
)
5503 cfg
->real_offset
= ip
- header
->code
;
5505 cfg
->real_offset
= inline_offset
;
5510 if (start_new_bblock
) {
5511 bblock
->cil_length
= ip
- bblock
->cil_code
;
5512 if (start_new_bblock
== 2) {
5513 g_assert (ip
== tblock
->cil_code
);
5515 GET_BBLOCK (cfg
, tblock
, ip
);
5517 bblock
->next_bb
= tblock
;
5520 start_new_bblock
= 0;
5521 for (i
= 0; i
< bblock
->in_scount
; ++i
) {
5522 if (cfg
->verbose_level
> 3)
5523 printf ("loading %d from temp %d\n", i
, (int)bblock
->in_stack
[i
]->inst_c0
);
5524 EMIT_NEW_TEMPLOAD (cfg
, ins
, bblock
->in_stack
[i
]->inst_c0
);
5528 g_slist_free (class_inits
);
5531 if ((tblock
= cfg
->cil_offset_to_bb
[ip
- cfg
->cil_start
]) && (tblock
!= bblock
)) {
5532 link_bblock (cfg
, bblock
, tblock
);
5533 if (sp
!= stack_start
) {
5534 handle_stack_args (cfg
, stack_start
, sp
- stack_start
);
5536 CHECK_UNVERIFIABLE (cfg
);
5538 bblock
->next_bb
= tblock
;
5541 for (i
= 0; i
< bblock
->in_scount
; ++i
) {
5542 if (cfg
->verbose_level
> 3)
5543 printf ("loading %d from temp %d\n", i
, (int)bblock
->in_stack
[i
]->inst_c0
);
5544 EMIT_NEW_TEMPLOAD (cfg
, ins
, bblock
->in_stack
[i
]->inst_c0
);
5547 g_slist_free (class_inits
);
5552 bblock
->real_offset
= cfg
->real_offset
;
5554 if ((cfg
->method
== method
) && cfg
->coverage_info
) {
5555 guint32 cil_offset
= ip
- header
->code
;
5556 cfg
->coverage_info
->data
[cil_offset
].cil_code
= ip
;
5558 /* TODO: Use an increment here */
5559 #if defined(__i386__)
5560 MONO_INST_NEW (cfg
, ins
, OP_STORE_MEM_IMM
);
5561 ins
->inst_p0
= &(cfg
->coverage_info
->data
[cil_offset
].count
);
5563 MONO_ADD_INS (cfg
->cbb
, ins
);
5565 EMIT_NEW_PCONST (cfg
, ins
, &(cfg
->coverage_info
->data
[cil_offset
].count
));
5566 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STORE_MEMBASE_IMM
, ins
->dreg
, 0, 1);
5570 if (cfg
->verbose_level
> 3)
5571 printf ("converting (in B%d: stack: %d) %s", bblock
->block_num
, (int)(sp
- stack_start
), mono_disasm_code_one (NULL
, method
, ip
, NULL
));
5575 if (cfg
->keep_cil_nops
)
5576 MONO_INST_NEW (cfg
, ins
, OP_HARD_NOP
);
5578 MONO_INST_NEW (cfg
, ins
, OP_NOP
);
5580 MONO_ADD_INS (bblock
, ins
);
5583 MONO_INST_NEW (cfg
, ins
, OP_BREAK
);
5585 MONO_ADD_INS (bblock
, ins
);
5591 CHECK_STACK_OVF (1);
5592 n
= (*ip
)-CEE_LDARG_0
;
5594 EMIT_NEW_ARGLOAD (cfg
, ins
, n
);
5602 CHECK_STACK_OVF (1);
5603 n
= (*ip
)-CEE_LDLOC_0
;
5605 EMIT_NEW_LOCLOAD (cfg
, ins
, n
);
5614 n
= (*ip
)-CEE_STLOC_0
;
5617 if (!dont_verify_stloc
&& target_type_is_incompatible (cfg
, header
->locals
[n
], *sp
))
5619 emit_stloc_ir (cfg
, sp
, header
, n
);
5626 CHECK_STACK_OVF (1);
5629 EMIT_NEW_ARGLOAD (cfg
, ins
, n
);
5635 CHECK_STACK_OVF (1);
5638 NEW_ARGLOADA (cfg
, ins
, n
);
5639 MONO_ADD_INS (cfg
->cbb
, ins
);
5649 if (!dont_verify_stloc
&& target_type_is_incompatible (cfg
, param_types
[ip
[1]], *sp
))
5651 EMIT_NEW_ARGSTORE (cfg
, ins
, n
, *sp
);
5656 CHECK_STACK_OVF (1);
5659 EMIT_NEW_LOCLOAD (cfg
, ins
, n
);
5663 case CEE_LDLOCA_S
: {
5664 unsigned char *tmp_ip
;
5666 CHECK_STACK_OVF (1);
5667 CHECK_LOCAL (ip
[1]);
5669 if ((tmp_ip
= emit_optimized_ldloca_ir (cfg
, ip
, end
, 1))) {
5675 EMIT_NEW_LOCLOADA (cfg
, ins
, ip
[1]);
5684 CHECK_LOCAL (ip
[1]);
5685 if (!dont_verify_stloc
&& target_type_is_incompatible (cfg
, header
->locals
[ip
[1]], *sp
))
5687 emit_stloc_ir (cfg
, sp
, header
, ip
[1]);
5692 CHECK_STACK_OVF (1);
5693 EMIT_NEW_PCONST (cfg
, ins
, NULL
);
5694 ins
->type
= STACK_OBJ
;
5699 CHECK_STACK_OVF (1);
5700 EMIT_NEW_ICONST (cfg
, ins
, -1);
5713 CHECK_STACK_OVF (1);
5714 EMIT_NEW_ICONST (cfg
, ins
, (*ip
) - CEE_LDC_I4_0
);
5720 CHECK_STACK_OVF (1);
5722 EMIT_NEW_ICONST (cfg
, ins
, *((signed char*)ip
));
5728 CHECK_STACK_OVF (1);
5729 EMIT_NEW_ICONST (cfg
, ins
, (gint32
)read32 (ip
+ 1));
5735 CHECK_STACK_OVF (1);
5736 MONO_INST_NEW (cfg
, ins
, OP_I8CONST
);
5737 ins
->type
= STACK_I8
;
5738 ins
->dreg
= alloc_dreg (cfg
, STACK_I8
);
5740 ins
->inst_l
= (gint64
)read64 (ip
);
5741 MONO_ADD_INS (bblock
, ins
);
5747 /* FIXME: we should really allocate this only late in the compilation process */
5748 f
= mono_domain_alloc (cfg
->domain
, sizeof (float));
5750 CHECK_STACK_OVF (1);
5751 MONO_INST_NEW (cfg
, ins
, OP_R4CONST
);
5752 ins
->type
= STACK_R8
;
5753 ins
->dreg
= alloc_dreg (cfg
, STACK_R8
);
5757 MONO_ADD_INS (bblock
, ins
);
5765 /* FIXME: we should really allocate this only late in the compilation process */
5766 d
= mono_domain_alloc (cfg
->domain
, sizeof (double));
5768 CHECK_STACK_OVF (1);
5769 MONO_INST_NEW (cfg
, ins
, OP_R8CONST
);
5770 ins
->type
= STACK_R8
;
5771 ins
->dreg
= alloc_dreg (cfg
, STACK_R8
);
5775 MONO_ADD_INS (bblock
, ins
);
5782 MonoInst
*temp
, *store
;
5784 CHECK_STACK_OVF (1);
5788 temp
= mono_compile_create_var (cfg
, type_from_stack_type (ins
), OP_LOCAL
);
5789 EMIT_NEW_TEMPSTORE (cfg
, store
, temp
->inst_c0
, ins
);
5791 EMIT_NEW_TEMPLOAD (cfg
, ins
, temp
->inst_c0
);
5794 EMIT_NEW_TEMPLOAD (cfg
, ins
, temp
->inst_c0
);
5807 if (sp
[0]->type
== STACK_R8
)
5808 /* we need to pop the value from the x86 FP stack */
5809 MONO_EMIT_NEW_UNALU (cfg
, OP_X86_FPOP
, -1, sp
[0]->dreg
);
5818 if (stack_start
!= sp
)
5820 token
= read32 (ip
+ 1);
5821 /* FIXME: check the signature matches */
5822 cmethod
= mini_get_method (cfg
, method
, token
, NULL
, generic_context
);
5827 if (cfg
->generic_sharing_context
&& mono_method_check_context_used (cmethod
))
5828 GENERIC_SHARING_FAILURE (CEE_JMP
);
5830 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS
)
5831 CHECK_CFG_EXCEPTION
;
5835 MonoMethodSignature
*fsig
= mono_method_signature (cmethod
);
5838 /* Handle tail calls similarly to calls */
5839 n
= fsig
->param_count
+ fsig
->hasthis
;
5841 MONO_INST_NEW_CALL (cfg
, call
, OP_TAILCALL
);
5842 call
->method
= cmethod
;
5843 call
->tail_call
= TRUE
;
5844 call
->signature
= mono_method_signature (cmethod
);
5845 call
->args
= mono_mempool_alloc (cfg
->mempool
, sizeof (MonoInst
*) * n
);
5846 call
->inst
.inst_p0
= cmethod
;
5847 for (i
= 0; i
< n
; ++i
)
5848 EMIT_NEW_ARGLOAD (cfg
, call
->args
[i
], i
);
5850 mono_arch_emit_call (cfg
, call
);
5851 MONO_ADD_INS (bblock
, (MonoInst
*)call
);
5854 for (i
= 0; i
< num_args
; ++i
)
5855 /* Prevent arguments from being optimized away */
5856 arg_array
[i
]->flags
|= MONO_INST_VOLATILE
;
5858 MONO_INST_NEW_CALL (cfg
, call
, OP_JMP
);
5859 ins
= (MonoInst
*)call
;
5860 ins
->inst_p0
= cmethod
;
5861 MONO_ADD_INS (bblock
, ins
);
5865 start_new_bblock
= 1;
5870 case CEE_CALLVIRT
: {
5871 MonoInst
*addr
= NULL
;
5872 MonoMethodSignature
*fsig
= NULL
;
5874 int virtual = *ip
== CEE_CALLVIRT
;
5875 int calli
= *ip
== CEE_CALLI
;
5876 gboolean pass_imt_from_rgctx
= FALSE
;
5877 MonoInst
*imt_arg
= NULL
;
5878 gboolean pass_vtable
= FALSE
;
5879 gboolean pass_mrgctx
= FALSE
;
5880 MonoInst
*vtable_arg
= NULL
;
5881 gboolean check_this
= FALSE
;
5884 token
= read32 (ip
+ 1);
5891 if (method
->wrapper_type
!= MONO_WRAPPER_NONE
)
5892 fsig
= (MonoMethodSignature
*)mono_method_get_wrapper_data (method
, token
);
5894 fsig
= mono_metadata_parse_signature (image
, token
);
5896 n
= fsig
->param_count
+ fsig
->hasthis
;
5898 MonoMethod
*cil_method
;
5900 if (method
->wrapper_type
!= MONO_WRAPPER_NONE
) {
5901 cmethod
= (MonoMethod
*)mono_method_get_wrapper_data (method
, token
);
5902 cil_method
= cmethod
;
5903 } else if (constrained_call
) {
5904 if ((constrained_call
->byval_arg
.type
== MONO_TYPE_VAR
|| constrained_call
->byval_arg
.type
== MONO_TYPE_MVAR
) && cfg
->generic_sharing_context
) {
5906 * This is needed since get_method_constrained can't find
5907 * the method in klass representing a type var.
5908 * The type var is guaranteed to be a reference type in this
5911 cmethod
= mini_get_method (cfg
, method
, token
, NULL
, generic_context
);
5912 cil_method
= cmethod
;
5913 g_assert (!cmethod
->klass
->valuetype
);
5915 cmethod
= mono_get_method_constrained (image
, token
, constrained_call
, generic_context
, &cil_method
);
5918 cmethod
= mini_get_method (cfg
, method
, token
, NULL
, generic_context
);
5919 cil_method
= cmethod
;
5924 if (!dont_verify
&& !cfg
->skip_visibility
) {
5925 MonoMethod
*target_method
= cil_method
;
5926 if (method
->is_inflated
) {
5927 target_method
= mini_get_method_allow_open (method
, token
, NULL
, &(mono_method_get_generic_container (method_definition
)->context
));
5929 if (!mono_method_can_access_method (method_definition
, target_method
) &&
5930 !mono_method_can_access_method (method
, cil_method
))
5931 METHOD_ACCESS_FAILURE
;
5934 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR
)
5935 ensure_method_is_allowed_to_call_method (cfg
, method
, cil_method
, bblock
, ip
);
5937 if (!virtual && (cmethod
->flags
& METHOD_ATTRIBUTE_ABSTRACT
))
5938 /* MS.NET seems to silently convert this to a callvirt */
5941 if (!cmethod
->klass
->inited
)
5942 if (!mono_class_init (cmethod
->klass
))
5945 if (cmethod
->iflags
& METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL
&&
5946 mini_class_is_system_array (cmethod
->klass
)) {
5947 array_rank
= cmethod
->klass
->rank
;
5948 fsig
= mono_method_signature (cmethod
);
5950 if (mono_method_signature (cmethod
)->pinvoke
) {
5951 MonoMethod
*wrapper
= mono_marshal_get_native_wrapper (cmethod
,
5952 check_for_pending_exc
, FALSE
);
5953 fsig
= mono_method_signature (wrapper
);
5954 } else if (constrained_call
) {
5955 fsig
= mono_method_signature (cmethod
);
5957 fsig
= mono_method_get_signature_full (cmethod
, image
, token
, generic_context
);
5961 mono_save_token_info (cfg
, image
, token
, cil_method
);
5963 n
= fsig
->param_count
+ fsig
->hasthis
;
5965 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS
) {
5966 if (check_linkdemand (cfg
, method
, cmethod
))
5968 CHECK_CFG_EXCEPTION
;
5971 if (cmethod
->string_ctor
&& method
->wrapper_type
!= MONO_WRAPPER_RUNTIME_INVOKE
)
5972 g_assert_not_reached ();
5975 if (!cfg
->generic_sharing_context
&& cmethod
&& cmethod
->klass
->generic_container
)
5978 if (!cfg
->generic_sharing_context
&& cmethod
)
5979 g_assert (!mono_method_check_context_used (cmethod
));
5983 //g_assert (!virtual || fsig->hasthis);
5987 if (constrained_call
) {
5989 * We have the `constrained.' prefix opcode.
5991 if (constrained_call
->valuetype
&& !cmethod
->klass
->valuetype
) {
5995 * The type parameter is instantiated as a valuetype,
5996 * but that type doesn't override the method we're
5997 * calling, so we need to box `this'.
5999 dreg
= alloc_dreg (cfg
, STACK_VTYPE
);
6000 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOADV_MEMBASE
, dreg
, sp
[0]->dreg
, 0);
6001 ins
->klass
= constrained_call
;
6002 sp
[0] = handle_box (cfg
, ins
, constrained_call
);
6003 } else if (!constrained_call
->valuetype
) {
6004 int dreg
= alloc_preg (cfg
);
6007 * The type parameter is instantiated as a reference
6008 * type. We have a managed pointer on the stack, so
6009 * we need to dereference it here.
6011 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOAD_MEMBASE
, dreg
, sp
[0]->dreg
, 0);
6012 ins
->type
= STACK_OBJ
;
6014 } else if (cmethod
->klass
->valuetype
)
6016 constrained_call
= NULL
;
6019 if (*ip
!= CEE_CALLI
&& check_call_signature (cfg
, fsig
, sp
))
6023 if (cmethod
&& ((cmethod
->flags
& METHOD_ATTRIBUTE_STATIC
) || cmethod
->klass
->valuetype
) &&
6024 (cmethod
->klass
->generic_class
|| cmethod
->klass
->generic_container
)) {
6025 gboolean sharing_enabled
= mono_class_generic_sharing_enabled (cmethod
->klass
);
6026 MonoGenericContext
*context
= mini_class_get_context (cmethod
->klass
);
6027 gboolean context_sharable
= mono_generic_context_is_sharable (context
, TRUE
);
6030 * Pass vtable iff target method might
6031 * be shared, which means that sharing
6032 * is enabled for its class and its
6033 * context is sharable (and it's not a
6036 if (sharing_enabled
&& context_sharable
&&
6037 !(mini_method_get_context (cmethod
) && mini_method_get_context (cmethod
)->method_inst
))
6041 if (cmethod
&& mini_method_get_context (cmethod
) &&
6042 mini_method_get_context (cmethod
)->method_inst
) {
6043 gboolean sharing_enabled
= mono_class_generic_sharing_enabled (cmethod
->klass
);
6044 MonoGenericContext
*context
= mini_method_get_context (cmethod
);
6045 gboolean context_sharable
= mono_generic_context_is_sharable (context
, TRUE
);
6047 g_assert (!pass_vtable
);
6049 if (sharing_enabled
&& context_sharable
)
6053 if (cfg
->generic_sharing_context
&& cmethod
) {
6054 MonoGenericContext
*cmethod_context
= mono_method_get_context (cmethod
);
6056 context_used
= mono_method_check_context_used (cmethod
);
6058 if (context_used
&& (cmethod
->klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
)) {
6059 /* Generic method interface
6060 calls are resolved via a
6061 helper function and don't
6063 if (!cmethod_context
|| !cmethod_context
->method_inst
)
6064 pass_imt_from_rgctx
= TRUE
;
6068 * If a shared method calls another
6069 * shared method then the caller must
6070 * have a generic sharing context
6071 * because the magic trampoline
6072 * requires it. FIXME: We shouldn't
6073 * have to force the vtable/mrgctx
6074 * variable here. Instead there
6075 * should be a flag in the cfg to
6076 * request a generic sharing context.
6079 ((method
->flags
& METHOD_ATTRIBUTE_STATIC
) || method
->klass
->valuetype
))
6080 mono_get_vtable_var (cfg
);
6085 vtable_arg
= emit_get_rgctx_klass (cfg
, context_used
, cmethod
->klass
, MONO_RGCTX_INFO_VTABLE
);
6087 MonoVTable
*vtable
= mono_class_vtable (cfg
->domain
, cmethod
->klass
);
6089 CHECK_TYPELOAD (cmethod
->klass
);
6090 EMIT_NEW_VTABLECONST (cfg
, vtable_arg
, vtable
);
6095 g_assert (!vtable_arg
);
6098 vtable_arg
= emit_get_rgctx_method (cfg
, context_used
, cmethod
, MONO_RGCTX_INFO_METHOD_RGCTX
);
6100 EMIT_NEW_METHOD_RGCTX_CONST (cfg
, vtable_arg
, cmethod
);
6103 if (!(cmethod
->flags
& METHOD_ATTRIBUTE_VIRTUAL
) ||
6104 MONO_METHOD_IS_FINAL (cmethod
)) {
6111 if (pass_imt_from_rgctx
) {
6112 g_assert (!pass_vtable
);
6115 imt_arg
= emit_get_rgctx_method (cfg
, context_used
,
6116 cmethod
, MONO_RGCTX_INFO_METHOD
);
6122 MONO_INST_NEW (cfg
, check
, OP_CHECK_THIS
);
6123 check
->sreg1
= sp
[0]->dreg
;
6124 MONO_ADD_INS (cfg
->cbb
, check
);
6127 /* Calling virtual generic methods */
6128 if (cmethod
&& virtual &&
6129 (cmethod
->flags
& METHOD_ATTRIBUTE_VIRTUAL
) &&
6130 !(MONO_METHOD_IS_FINAL (cmethod
) &&
6131 cmethod
->wrapper_type
!= MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK
) &&
6132 mono_method_signature (cmethod
)->generic_param_count
) {
6133 MonoInst
*this_temp
, *this_arg_temp
, *store
;
6134 MonoInst
*iargs
[4];
6136 g_assert (mono_method_signature (cmethod
)->is_inflated
);
6138 /* Prevent inlining of methods that contain indirect calls */
6141 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK
6142 if (cmethod
->wrapper_type
== MONO_WRAPPER_NONE
) {
6143 g_assert (!imt_arg
);
6145 imt_arg
= emit_get_rgctx_method (cfg
, context_used
,
6146 cmethod
, MONO_RGCTX_INFO_METHOD
);
6149 g_assert (cmethod
->is_inflated
);
6150 EMIT_NEW_METHODCONST (cfg
, imt_arg
, cmethod
);
6152 ins
= mono_emit_method_call_full (cfg
, cmethod
, fsig
, sp
, sp
[0], imt_arg
);
6156 this_temp
= mono_compile_create_var (cfg
, type_from_stack_type (sp
[0]), OP_LOCAL
);
6157 NEW_TEMPSTORE (cfg
, store
, this_temp
->inst_c0
, sp
[0]);
6158 MONO_ADD_INS (bblock
, store
);
6160 /* FIXME: This should be a managed pointer */
6161 this_arg_temp
= mono_compile_create_var (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_LOCAL
);
6163 EMIT_NEW_TEMPLOAD (cfg
, iargs
[0], this_temp
->inst_c0
);
6165 iargs
[1] = emit_get_rgctx_method (cfg
, context_used
,
6166 cmethod
, MONO_RGCTX_INFO_METHOD
);
6167 EMIT_NEW_TEMPLOADA (cfg
, iargs
[2], this_arg_temp
->inst_c0
);
6168 addr
= mono_emit_jit_icall (cfg
,
6169 mono_helper_compile_generic_method
, iargs
);
6171 EMIT_NEW_METHODCONST (cfg
, iargs
[1], cmethod
);
6172 EMIT_NEW_TEMPLOADA (cfg
, iargs
[2], this_arg_temp
->inst_c0
);
6173 addr
= mono_emit_jit_icall (cfg
, mono_helper_compile_generic_method
, iargs
);
6176 EMIT_NEW_TEMPLOAD (cfg
, sp
[0], this_arg_temp
->inst_c0
);
6178 ins
= (MonoInst
*)mono_emit_calli (cfg
, fsig
, sp
, addr
);
6181 if (!MONO_TYPE_IS_VOID (fsig
->ret
))
6190 /* FIXME: runtime generic context pointer for jumps? */
6191 /* FIXME: handle this for generic sharing eventually */
6192 if ((ins_flag
& MONO_INST_TAILCALL
) && !cfg
->generic_sharing_context
&& !vtable_arg
&& cmethod
&& (*ip
== CEE_CALL
) &&
6193 (mono_metadata_signature_equal (mono_method_signature (method
), mono_method_signature (cmethod
)))) {
6196 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6199 MONO_INST_NEW_CALL (cfg
, call
, OP_JMP
);
6200 call
->tail_call
= TRUE
;
6201 call
->method
= cmethod
;
6202 call
->signature
= mono_method_signature (cmethod
);
6205 /* Handle tail calls similarly to calls */
6206 call
->inst
.opcode
= OP_TAILCALL
;
6208 mono_arch_emit_call (cfg
, call
);
6211 * We implement tail calls by storing the actual arguments into the
6212 * argument variables, then emitting a CEE_JMP.
6214 for (i
= 0; i
< n
; ++i
) {
6215 /* Prevent argument from being register allocated */
6216 arg_array
[i
]->flags
|= MONO_INST_VOLATILE
;
6217 EMIT_NEW_ARGSTORE (cfg
, ins
, i
, sp
[i
]);
6221 ins
= (MonoInst
*)call
;
6222 ins
->inst_p0
= cmethod
;
6223 ins
->inst_p1
= arg_array
[0];
6224 MONO_ADD_INS (bblock
, ins
);
6225 link_bblock (cfg
, bblock
, end_bblock
);
6226 start_new_bblock
= 1;
6227 /* skip CEE_RET as well */
6233 /* Conversion to a JIT intrinsic */
6234 if (cmethod
&& (cfg
->opt
& MONO_OPT_INTRINS
) && (ins
= mini_emit_inst_for_method (cfg
, cmethod
, fsig
, sp
))) {
6235 if (!MONO_TYPE_IS_VOID (fsig
->ret
)) {
6236 type_to_eval_stack_type ((cfg
), fsig
->ret
, ins
);
6247 if ((cfg
->opt
& MONO_OPT_INLINE
) && cmethod
&&
6248 (!virtual || !(cmethod
->flags
& METHOD_ATTRIBUTE_VIRTUAL
) || MONO_METHOD_IS_FINAL (cmethod
)) &&
6249 mono_method_check_inlining (cfg
, cmethod
) &&
6250 !g_list_find (dont_inline
, cmethod
)) {
6252 gboolean allways
= FALSE
;
6254 if ((cmethod
->iflags
& METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL
) ||
6255 (cmethod
->flags
& METHOD_ATTRIBUTE_PINVOKE_IMPL
)) {
6256 /* Prevent inlining of methods that call wrappers */
6258 cmethod
= mono_marshal_get_native_wrapper (cmethod
, check_for_pending_exc
, FALSE
);
6262 if ((costs
= inline_method (cfg
, cmethod
, fsig
, sp
, ip
, cfg
->real_offset
, dont_inline
, allways
))) {
6264 cfg
->real_offset
+= 5;
6267 if (!MONO_TYPE_IS_VOID (fsig
->ret
))
6268 /* *sp is already set by inline_method */
6271 inline_costs
+= costs
;
6277 inline_costs
+= 10 * num_calls
++;
6279 /* Tail recursion elimination */
6280 if ((cfg
->opt
& MONO_OPT_TAILC
) && *ip
== CEE_CALL
&& cmethod
== method
&& ip
[5] == CEE_RET
&& !vtable_arg
) {
6281 gboolean has_vtargs
= FALSE
;
6284 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6287 /* keep it simple */
6288 for (i
= fsig
->param_count
- 1; i
>= 0; i
--) {
6289 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod
)->params
[i
]))
6294 for (i
= 0; i
< n
; ++i
)
6295 EMIT_NEW_ARGSTORE (cfg
, ins
, i
, sp
[i
]);
6296 MONO_INST_NEW (cfg
, ins
, OP_BR
);
6297 MONO_ADD_INS (bblock
, ins
);
6298 tblock
= start_bblock
->out_bb
[0];
6299 link_bblock (cfg
, bblock
, tblock
);
6300 ins
->inst_target_bb
= tblock
;
6301 start_new_bblock
= 1;
6303 /* skip the CEE_RET, too */
6304 if (ip_in_bb (cfg
, bblock
, ip
+ 5))
6314 /* Generic sharing */
6315 /* FIXME: only do this for generic methods if
6316 they are not shared! */
6317 if (context_used
&& !imt_arg
&& !array_rank
&&
6318 (!mono_method_is_generic_sharable_impl (cmethod
, TRUE
) ||
6319 !mono_class_generic_sharing_enabled (cmethod
->klass
)) &&
6320 (!virtual || MONO_METHOD_IS_FINAL (cmethod
) ||
6321 !(cmethod
->flags
& METHOD_ATTRIBUTE_VIRTUAL
))) {
6324 g_assert (cfg
->generic_sharing_context
&& cmethod
);
6328 * We are compiling a call to a
6329 * generic method from shared code,
6330 * which means that we have to look up
6331 * the method in the rgctx and do an
6334 addr
= emit_get_rgctx_method (cfg
, context_used
, cmethod
, MONO_RGCTX_INFO_GENERIC_METHOD_CODE
);
6337 /* Indirect calls */
6339 g_assert (!imt_arg
);
6341 if (*ip
== CEE_CALL
)
6342 g_assert (context_used
);
6343 else if (*ip
== CEE_CALLI
)
6344 g_assert (!vtable_arg
);
6346 /* FIXME: what the hell is this??? */
6347 g_assert (cmethod
->flags
& METHOD_ATTRIBUTE_FINAL
||
6348 !(cmethod
->flags
& METHOD_ATTRIBUTE_FINAL
));
6350 /* Prevent inlining of methods with indirect calls */
6354 #ifdef MONO_ARCH_RGCTX_REG
6356 int rgctx_reg
= mono_alloc_preg (cfg
);
6358 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, rgctx_reg
, vtable_arg
->dreg
);
6359 ins
= (MonoInst
*)mono_emit_calli (cfg
, fsig
, sp
, addr
);
6360 call
= (MonoCallInst
*)ins
;
6361 mono_call_inst_add_outarg_reg (cfg
, call
, rgctx_reg
, MONO_ARCH_RGCTX_REG
, FALSE
);
6362 cfg
->uses_rgctx_reg
= TRUE
;
6367 if (addr
->opcode
== OP_AOTCONST
&& addr
->inst_c1
== MONO_PATCH_INFO_ICALL_ADDR
) {
6369 * Instead of emitting an indirect call, emit a direct call
6370 * with the contents of the aotconst as the patch info.
6372 ins
= (MonoInst
*)mono_emit_abs_call (cfg
, MONO_PATCH_INFO_ICALL_ADDR
, addr
->inst_p0
, fsig
, sp
);
6375 ins
= (MonoInst
*)mono_emit_calli (cfg
, fsig
, sp
, addr
);
6378 if (!MONO_TYPE_IS_VOID (fsig
->ret
)) {
6379 if (fsig
->pinvoke
&& !fsig
->ret
->byref
) {
6383 * Native code might return non register sized integers
6384 * without initializing the upper bits.
6386 switch (mono_type_to_load_membase (cfg
, fsig
->ret
)) {
6387 case OP_LOADI1_MEMBASE
:
6388 widen_op
= OP_ICONV_TO_I1
;
6390 case OP_LOADU1_MEMBASE
:
6391 widen_op
= OP_ICONV_TO_U1
;
6393 case OP_LOADI2_MEMBASE
:
6394 widen_op
= OP_ICONV_TO_I2
;
6396 case OP_LOADU2_MEMBASE
:
6397 widen_op
= OP_ICONV_TO_U2
;
6403 if (widen_op
!= -1) {
6404 int dreg
= alloc_preg (cfg
);
6407 EMIT_NEW_UNALU (cfg
, widen
, widen_op
, dreg
, ins
->dreg
);
6408 widen
->type
= ins
->type
;
6425 if (strcmp (cmethod
->name
, "Set") == 0) { /* array Set */
6426 if (sp
[fsig
->param_count
]->type
== STACK_OBJ
) {
6427 MonoInst
*iargs
[2];
6430 iargs
[1] = sp
[fsig
->param_count
];
6432 mono_emit_jit_icall (cfg
, mono_helper_stelem_ref_check
, iargs
);
6435 addr
= mini_emit_ldelema_ins (cfg
, cmethod
, sp
, ip
, TRUE
);
6436 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, ins
, fsig
->params
[fsig
->param_count
- 1], addr
->dreg
, 0, sp
[fsig
->param_count
]->dreg
);
6437 } else if (strcmp (cmethod
->name
, "Get") == 0) { /* array Get */
6438 addr
= mini_emit_ldelema_ins (cfg
, cmethod
, sp
, ip
, FALSE
);
6440 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, fsig
->ret
, addr
->dreg
, 0);
6443 } else if (strcmp (cmethod
->name
, "Address") == 0) { /* array Address */
6444 if (!cmethod
->klass
->element_class
->valuetype
&& !readonly
)
6445 mini_emit_check_array_type (cfg
, sp
[0], cmethod
->klass
);
6448 addr
= mini_emit_ldelema_ins (cfg
, cmethod
, sp
, ip
, FALSE
);
6451 g_assert_not_reached ();
6459 ins
= mini_redirect_call (cfg
, cmethod
, fsig
, sp
, virtual ? sp
[0] : NULL
);
6461 if (!MONO_TYPE_IS_VOID (fsig
->ret
))
6472 ins
= mono_emit_rgctx_method_call_full (cfg
, cmethod
, fsig
, sp
, virtual ? sp
[0] : NULL
,
6474 } else if (imt_arg
) {
6475 ins
= (MonoInst
*)mono_emit_method_call_full (cfg
, cmethod
, fsig
, sp
, virtual ? sp
[0] : NULL
, imt_arg
);
6477 ins
= (MonoInst
*)mono_emit_method_call_full (cfg
, cmethod
, fsig
, sp
, virtual ? sp
[0] : NULL
, NULL
);
6480 if (!MONO_TYPE_IS_VOID (fsig
->ret
))
6488 if (cfg
->method
!= method
) {
6489 /* return from inlined method */
6491 * If in_count == 0, that means the ret is unreachable due to
6492 * being preceeded by a throw. In that case, inline_method () will
6493 * handle setting the return value
6494 * (test case: test_0_inline_throw ()).
6496 if (return_var
&& cfg
->cbb
->in_count
) {
6500 //g_assert (returnvar != -1);
6501 EMIT_NEW_TEMPSTORE (cfg
, store
, return_var
->inst_c0
, *sp
);
6502 cfg
->ret_var_set
= TRUE
;
6506 MonoType
*ret_type
= mono_method_signature (method
)->ret
;
6508 g_assert (!return_var
);
6511 if (mini_type_to_stind (cfg
, ret_type
) == CEE_STOBJ
) {
6514 if (!cfg
->vret_addr
) {
6517 EMIT_NEW_VARSTORE (cfg
, ins
, cfg
->ret
, ret_type
, (*sp
));
6519 EMIT_NEW_RETLOADA (cfg
, ret_addr
);
6521 EMIT_NEW_STORE_MEMBASE (cfg
, ins
, OP_STOREV_MEMBASE
, ret_addr
->dreg
, 0, (*sp
)->dreg
);
6522 ins
->klass
= mono_class_from_mono_type (ret_type
);
6525 #ifdef MONO_ARCH_SOFT_FLOAT
6526 if (!ret_type
->byref
&& ret_type
->type
== MONO_TYPE_R4
) {
6527 MonoInst
*iargs
[1];
6531 conv
= mono_emit_jit_icall (cfg
, mono_fload_r4_arg
, iargs
);
6532 mono_arch_emit_setret (cfg
, method
, conv
);
6534 mono_arch_emit_setret (cfg
, method
, *sp
);
6537 mono_arch_emit_setret (cfg
, method
, *sp
);
6542 if (sp
!= stack_start
)
6544 MONO_INST_NEW (cfg
, ins
, OP_BR
);
6546 ins
->inst_target_bb
= end_bblock
;
6547 MONO_ADD_INS (bblock
, ins
);
6548 link_bblock (cfg
, bblock
, end_bblock
);
6549 start_new_bblock
= 1;
6553 MONO_INST_NEW (cfg
, ins
, OP_BR
);
6555 target
= ip
+ 1 + (signed char)(*ip
);
6557 GET_BBLOCK (cfg
, tblock
, target
);
6558 link_bblock (cfg
, bblock
, tblock
);
6559 ins
->inst_target_bb
= tblock
;
6560 if (sp
!= stack_start
) {
6561 handle_stack_args (cfg
, stack_start
, sp
- stack_start
);
6563 CHECK_UNVERIFIABLE (cfg
);
6565 MONO_ADD_INS (bblock
, ins
);
6566 start_new_bblock
= 1;
6567 inline_costs
+= BRANCH_COST
;
6581 MONO_INST_NEW (cfg
, ins
, *ip
+ BIG_BRANCH_OFFSET
);
6583 target
= ip
+ 1 + *(signed char*)ip
;
6589 inline_costs
+= BRANCH_COST
;
6593 MONO_INST_NEW (cfg
, ins
, OP_BR
);
6596 target
= ip
+ 4 + (gint32
)read32(ip
);
6598 GET_BBLOCK (cfg
, tblock
, target
);
6599 link_bblock (cfg
, bblock
, tblock
);
6600 ins
->inst_target_bb
= tblock
;
6601 if (sp
!= stack_start
) {
6602 handle_stack_args (cfg
, stack_start
, sp
- stack_start
);
6604 CHECK_UNVERIFIABLE (cfg
);
6607 MONO_ADD_INS (bblock
, ins
);
6609 start_new_bblock
= 1;
6610 inline_costs
+= BRANCH_COST
;
6617 gboolean is_short
= ((*ip
) == CEE_BRFALSE_S
) || ((*ip
) == CEE_BRTRUE_S
);
6618 gboolean is_true
= ((*ip
) == CEE_BRTRUE_S
) || ((*ip
) == CEE_BRTRUE
);
6619 guint32 opsize
= is_short
? 1 : 4;
6621 CHECK_OPSIZE (opsize
);
6623 if (sp
[-1]->type
== STACK_VTYPE
|| sp
[-1]->type
== STACK_R8
)
6626 target
= ip
+ opsize
+ (is_short
? *(signed char*)ip
: (gint32
)read32(ip
));
6631 GET_BBLOCK (cfg
, tblock
, target
);
6632 link_bblock (cfg
, bblock
, tblock
);
6633 GET_BBLOCK (cfg
, tblock
, ip
);
6634 link_bblock (cfg
, bblock
, tblock
);
6636 if (sp
!= stack_start
) {
6637 handle_stack_args (cfg
, stack_start
, sp
- stack_start
);
6638 CHECK_UNVERIFIABLE (cfg
);
6641 MONO_INST_NEW(cfg
, cmp
, OP_ICOMPARE_IMM
);
6642 cmp
->sreg1
= sp
[0]->dreg
;
6643 type_from_op (cmp
, sp
[0], NULL
);
6646 #if SIZEOF_REGISTER == 4
6647 if (cmp
->opcode
== OP_LCOMPARE_IMM
) {
6648 /* Convert it to OP_LCOMPARE */
6649 MONO_INST_NEW (cfg
, ins
, OP_I8CONST
);
6650 ins
->type
= STACK_I8
;
6651 ins
->dreg
= alloc_dreg (cfg
, STACK_I8
);
6653 MONO_ADD_INS (bblock
, ins
);
6654 cmp
->opcode
= OP_LCOMPARE
;
6655 cmp
->sreg2
= ins
->dreg
;
6658 MONO_ADD_INS (bblock
, cmp
);
6660 MONO_INST_NEW (cfg
, ins
, is_true
? CEE_BNE_UN
: CEE_BEQ
);
6661 type_from_op (ins
, sp
[0], NULL
);
6662 MONO_ADD_INS (bblock
, ins
);
6663 ins
->inst_many_bb
= mono_mempool_alloc (cfg
->mempool
, sizeof(gpointer
)*2);
6664 GET_BBLOCK (cfg
, tblock
, target
);
6665 ins
->inst_true_bb
= tblock
;
6666 GET_BBLOCK (cfg
, tblock
, ip
);
6667 ins
->inst_false_bb
= tblock
;
6668 start_new_bblock
= 2;
6671 inline_costs
+= BRANCH_COST
;
6686 MONO_INST_NEW (cfg
, ins
, *ip
);
6688 target
= ip
+ 4 + (gint32
)read32(ip
);
6694 inline_costs
+= BRANCH_COST
;
6698 MonoBasicBlock
**targets
;
6699 MonoBasicBlock
*default_bblock
;
6700 MonoJumpInfoBBTable
*table
;
6701 int offset_reg
= alloc_preg (cfg
);
6702 int target_reg
= alloc_preg (cfg
);
6703 int table_reg
= alloc_preg (cfg
);
6704 int sum_reg
= alloc_preg (cfg
);
6705 gboolean use_op_switch
;
6709 n
= read32 (ip
+ 1);
6712 if ((src1
->type
!= STACK_I4
) && (src1
->type
!= STACK_PTR
))
6716 CHECK_OPSIZE (n
* sizeof (guint32
));
6717 target
= ip
+ n
* sizeof (guint32
);
6719 GET_BBLOCK (cfg
, default_bblock
, target
);
6721 targets
= mono_mempool_alloc (cfg
->mempool
, sizeof (MonoBasicBlock
*) * n
);
6722 for (i
= 0; i
< n
; ++i
) {
6723 GET_BBLOCK (cfg
, tblock
, target
+ (gint32
)read32(ip
));
6724 targets
[i
] = tblock
;
6728 if (sp
!= stack_start
) {
6730 * Link the current bb with the targets as well, so handle_stack_args
6731 * will set their in_stack correctly.
6733 link_bblock (cfg
, bblock
, default_bblock
);
6734 for (i
= 0; i
< n
; ++i
)
6735 link_bblock (cfg
, bblock
, targets
[i
]);
6737 handle_stack_args (cfg
, stack_start
, sp
- stack_start
);
6739 CHECK_UNVERIFIABLE (cfg
);
6742 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_ICOMPARE_IMM
, -1, src1
->dreg
, n
);
6743 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_IBGE_UN
, default_bblock
);
6746 for (i
= 0; i
< n
; ++i
)
6747 link_bblock (cfg
, bblock
, targets
[i
]);
6749 table
= mono_mempool_alloc (cfg
->mempool
, sizeof (MonoJumpInfoBBTable
));
6750 table
->table
= targets
;
6751 table
->table_size
= n
;
6753 use_op_switch
= FALSE
;
6755 /* ARM implements SWITCH statements differently */
6756 /* FIXME: Make it use the generic implementation */
6757 if (!cfg
->compile_aot
)
6758 use_op_switch
= TRUE
;
6761 if (use_op_switch
) {
6762 MONO_INST_NEW (cfg
, ins
, OP_SWITCH
);
6763 ins
->sreg1
= src1
->dreg
;
6764 ins
->inst_p0
= table
;
6765 ins
->inst_many_bb
= targets
;
6766 ins
->klass
= GUINT_TO_POINTER (n
);
6767 MONO_ADD_INS (cfg
->cbb
, ins
);
6769 if (sizeof (gpointer
) == 8)
6770 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHL_IMM
, offset_reg
, src1
->dreg
, 3);
6772 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHL_IMM
, offset_reg
, src1
->dreg
, 2);
6774 #if SIZEOF_REGISTER == 8
6775 /* The upper word might not be zero, and we add it to a 64 bit address later */
6776 MONO_EMIT_NEW_UNALU (cfg
, OP_ZEXT_I4
, offset_reg
, offset_reg
);
6779 if (cfg
->compile_aot
) {
6780 MONO_EMIT_NEW_AOTCONST (cfg
, table_reg
, table
, MONO_PATCH_INFO_SWITCH
);
6782 MONO_INST_NEW (cfg
, ins
, OP_JUMP_TABLE
);
6783 ins
->inst_c1
= MONO_PATCH_INFO_SWITCH
;
6784 ins
->inst_p0
= table
;
6785 ins
->dreg
= table_reg
;
6786 MONO_ADD_INS (cfg
->cbb
, ins
);
6789 /* FIXME: Use load_memindex */
6790 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, sum_reg
, table_reg
, offset_reg
);
6791 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, target_reg
, sum_reg
, 0);
6792 MONO_EMIT_NEW_UNALU (cfg
, OP_BR_REG
, -1, target_reg
);
6794 start_new_bblock
= 1;
6795 inline_costs
+= (BRANCH_COST
* 2);
6815 dreg
= alloc_freg (cfg
);
6818 dreg
= alloc_lreg (cfg
);
6821 dreg
= alloc_preg (cfg
);
6824 NEW_LOAD_MEMBASE (cfg
, ins
, ldind_to_load_membase (*ip
), dreg
, sp
[0]->dreg
, 0);
6825 ins
->type
= ldind_type
[*ip
- CEE_LDIND_I1
];
6826 ins
->flags
|= ins_flag
;
6828 MONO_ADD_INS (bblock
, ins
);
6843 #if HAVE_WRITE_BARRIERS
6844 if (*ip
== CEE_STIND_REF
&& method
->wrapper_type
!= MONO_WRAPPER_WRITE_BARRIER
&& !((sp
[1]->opcode
== OP_PCONST
) && (sp
[1]->inst_p0
== 0))) {
6845 /* insert call to write barrier */
6846 MonoMethod
*write_barrier
= mono_gc_get_write_barrier ();
6847 mono_emit_method_call (cfg
, write_barrier
, sp
, NULL
);
6854 NEW_STORE_MEMBASE (cfg
, ins
, stind_to_store_membase (*ip
), sp
[0]->dreg
, 0, sp
[1]->dreg
);
6855 ins
->flags
|= ins_flag
;
6857 MONO_ADD_INS (bblock
, ins
);
6865 MONO_INST_NEW (cfg
, ins
, (*ip
));
6867 ins
->sreg1
= sp
[0]->dreg
;
6868 ins
->sreg2
= sp
[1]->dreg
;
6869 type_from_op (ins
, sp
[0], sp
[1]);
6871 ins
->dreg
= alloc_dreg ((cfg
), (ins
)->type
);
6873 /* Use the immediate opcodes if possible */
6874 if ((sp
[1]->opcode
== OP_ICONST
) && mono_arch_is_inst_imm (sp
[1]->inst_c0
)) {
6875 int imm_opcode
= mono_op_to_op_imm (ins
->opcode
);
6876 if (imm_opcode
!= -1) {
6877 ins
->opcode
= imm_opcode
;
6878 ins
->inst_p1
= (gpointer
)(gssize
)(sp
[1]->inst_c0
);
6881 sp
[1]->opcode
= OP_NOP
;
6885 MONO_ADD_INS ((cfg
)->cbb
, (ins
));
6888 mono_decompose_opcode (cfg
, ins
);
6905 MONO_INST_NEW (cfg
, ins
, (*ip
));
6907 ins
->sreg1
= sp
[0]->dreg
;
6908 ins
->sreg2
= sp
[1]->dreg
;
6909 type_from_op (ins
, sp
[0], sp
[1]);
6911 ADD_WIDEN_OP (ins
, sp
[0], sp
[1]);
6912 ins
->dreg
= alloc_dreg ((cfg
), (ins
)->type
);
6914 /* FIXME: Pass opcode to is_inst_imm */
6916 /* Use the immediate opcodes if possible */
6917 if (((sp
[1]->opcode
== OP_ICONST
) || (sp
[1]->opcode
== OP_I8CONST
)) && mono_arch_is_inst_imm (sp
[1]->opcode
== OP_ICONST
? sp
[1]->inst_c0
: sp
[1]->inst_l
)) {
6920 imm_opcode
= mono_op_to_op_imm_noemul (ins
->opcode
);
6921 if (imm_opcode
!= -1) {
6922 ins
->opcode
= imm_opcode
;
6923 if (sp
[1]->opcode
== OP_I8CONST
) {
6924 #if SIZEOF_REGISTER == 8
6925 ins
->inst_imm
= sp
[1]->inst_l
;
6927 ins
->inst_ls_word
= sp
[1]->inst_ls_word
;
6928 ins
->inst_ms_word
= sp
[1]->inst_ms_word
;
6932 ins
->inst_p1
= (gpointer
)(gssize
)(sp
[1]->inst_c0
);
6935 /* Might be followed by an instruction added by ADD_WIDEN_OP */
6936 if (sp
[1]->next
== NULL
)
6937 sp
[1]->opcode
= OP_NOP
;
6940 MONO_ADD_INS ((cfg
)->cbb
, (ins
));
6943 mono_decompose_opcode (cfg
, ins
);
6956 case CEE_CONV_OVF_I8
:
6957 case CEE_CONV_OVF_U8
:
6961 /* Special case this earlier so we have long constants in the IR */
6962 if ((((*ip
) == CEE_CONV_I8
) || ((*ip
) == CEE_CONV_U8
)) && (sp
[-1]->opcode
== OP_ICONST
)) {
6963 int data
= sp
[-1]->inst_c0
;
6964 sp
[-1]->opcode
= OP_I8CONST
;
6965 sp
[-1]->type
= STACK_I8
;
6966 #if SIZEOF_REGISTER == 8
6967 if ((*ip
) == CEE_CONV_U8
)
6968 sp
[-1]->inst_c0
= (guint32
)data
;
6970 sp
[-1]->inst_c0
= data
;
6972 sp
[-1]->inst_ls_word
= data
;
6973 if ((*ip
) == CEE_CONV_U8
)
6974 sp
[-1]->inst_ms_word
= 0;
6976 sp
[-1]->inst_ms_word
= (data
< 0) ? -1 : 0;
6978 sp
[-1]->dreg
= alloc_dreg (cfg
, STACK_I8
);
6985 case CEE_CONV_OVF_I4
:
6986 case CEE_CONV_OVF_I1
:
6987 case CEE_CONV_OVF_I2
:
6988 case CEE_CONV_OVF_I
:
6989 case CEE_CONV_OVF_U
:
6992 if (sp
[-1]->type
== STACK_R8
) {
6993 ADD_UNOP (CEE_CONV_OVF_I8
);
7000 case CEE_CONV_OVF_U1
:
7001 case CEE_CONV_OVF_U2
:
7002 case CEE_CONV_OVF_U4
:
7005 if (sp
[-1]->type
== STACK_R8
) {
7006 ADD_UNOP (CEE_CONV_OVF_U8
);
7013 case CEE_CONV_OVF_I1_UN
:
7014 case CEE_CONV_OVF_I2_UN
:
7015 case CEE_CONV_OVF_I4_UN
:
7016 case CEE_CONV_OVF_I8_UN
:
7017 case CEE_CONV_OVF_U1_UN
:
7018 case CEE_CONV_OVF_U2_UN
:
7019 case CEE_CONV_OVF_U4_UN
:
7020 case CEE_CONV_OVF_U8_UN
:
7021 case CEE_CONV_OVF_I_UN
:
7022 case CEE_CONV_OVF_U_UN
:
7032 case CEE_ADD_OVF_UN
:
7034 case CEE_MUL_OVF_UN
:
7036 case CEE_SUB_OVF_UN
:
7044 token
= read32 (ip
+ 1);
7045 klass
= mini_get_class (method
, token
, generic_context
);
7046 CHECK_TYPELOAD (klass
);
7048 if (generic_class_is_reference_type (cfg
, klass
)) {
7049 MonoInst
*store
, *load
;
7050 int dreg
= alloc_preg (cfg
);
7052 NEW_LOAD_MEMBASE (cfg
, load
, OP_LOAD_MEMBASE
, dreg
, sp
[1]->dreg
, 0);
7053 load
->flags
|= ins_flag
;
7054 MONO_ADD_INS (cfg
->cbb
, load
);
7056 NEW_STORE_MEMBASE (cfg
, store
, OP_STORE_MEMBASE_REG
, sp
[0]->dreg
, 0, dreg
);
7057 store
->flags
|= ins_flag
;
7058 MONO_ADD_INS (cfg
->cbb
, store
);
7060 mini_emit_stobj (cfg
, sp
[0], sp
[1], klass
, FALSE
);
7072 token
= read32 (ip
+ 1);
7073 klass
= mini_get_class (method
, token
, generic_context
);
7074 CHECK_TYPELOAD (klass
);
7076 /* Optimize the common ldobj+stloc combination */
7086 loc_index
= ip
[5] - CEE_STLOC_0
;
7093 if ((loc_index
!= -1) && ip_in_bb (cfg
, bblock
, ip
+ 5)) {
7094 CHECK_LOCAL (loc_index
);
7096 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, sp
[0]->dreg
, 0);
7097 ins
->dreg
= cfg
->locals
[loc_index
]->dreg
;
7103 /* Optimize the ldobj+stobj combination */
7104 /* The reference case ends up being a load+store anyway */
7105 if (((ip
[5] == CEE_STOBJ
) && ip_in_bb (cfg
, bblock
, ip
+ 9) && read32 (ip
+ 6) == token
) && !generic_class_is_reference_type (cfg
, klass
)) {
7110 mini_emit_stobj (cfg
, sp
[0], sp
[1], klass
, FALSE
);
7117 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, sp
[0]->dreg
, 0);
7126 CHECK_STACK_OVF (1);
7128 n
= read32 (ip
+ 1);
7130 if (method
->wrapper_type
== MONO_WRAPPER_DYNAMIC_METHOD
) {
7131 EMIT_NEW_PCONST (cfg
, ins
, mono_method_get_wrapper_data (method
, n
));
7132 ins
->type
= STACK_OBJ
;
7135 else if (method
->wrapper_type
!= MONO_WRAPPER_NONE
) {
7136 MonoInst
*iargs
[1];
7138 EMIT_NEW_PCONST (cfg
, iargs
[0], mono_method_get_wrapper_data (method
, n
));
7139 *sp
= mono_emit_jit_icall (cfg
, mono_string_new_wrapper
, iargs
);
7141 if (cfg
->opt
& MONO_OPT_SHARED
) {
7142 MonoInst
*iargs
[3];
7144 if (cfg
->compile_aot
) {
7145 cfg
->ldstr_list
= g_list_prepend (cfg
->ldstr_list
, GINT_TO_POINTER (n
));
7147 EMIT_NEW_DOMAINCONST (cfg
, iargs
[0]);
7148 EMIT_NEW_IMAGECONST (cfg
, iargs
[1], image
);
7149 EMIT_NEW_ICONST (cfg
, iargs
[2], mono_metadata_token_index (n
));
7150 *sp
= mono_emit_jit_icall (cfg
, mono_ldstr
, iargs
);
7151 mono_ldstr (cfg
->domain
, image
, mono_metadata_token_index (n
));
7153 if (bblock
->out_of_line
) {
7154 MonoInst
*iargs
[2];
7156 if (image
== mono_defaults
.corlib
) {
7158 * Avoid relocations in AOT and save some space by using a
7159 * version of helper_ldstr specialized to mscorlib.
7161 EMIT_NEW_ICONST (cfg
, iargs
[0], mono_metadata_token_index (n
));
7162 *sp
= mono_emit_jit_icall (cfg
, mono_helper_ldstr_mscorlib
, iargs
);
7164 /* Avoid creating the string object */
7165 EMIT_NEW_IMAGECONST (cfg
, iargs
[0], image
);
7166 EMIT_NEW_ICONST (cfg
, iargs
[1], mono_metadata_token_index (n
));
7167 *sp
= mono_emit_jit_icall (cfg
, mono_helper_ldstr
, iargs
);
7171 if (cfg
->compile_aot
) {
7172 NEW_LDSTRCONST (cfg
, ins
, image
, n
);
7174 MONO_ADD_INS (bblock
, ins
);
7177 NEW_PCONST (cfg
, ins
, NULL
);
7178 ins
->type
= STACK_OBJ
;
7179 ins
->inst_p0
= mono_ldstr (cfg
->domain
, image
, mono_metadata_token_index (n
));
7181 MONO_ADD_INS (bblock
, ins
);
7190 MonoInst
*iargs
[2];
7191 MonoMethodSignature
*fsig
;
7194 MonoInst
*vtable_arg
= NULL
;
7197 token
= read32 (ip
+ 1);
7198 cmethod
= mini_get_method (cfg
, method
, token
, NULL
, generic_context
);
7201 fsig
= mono_method_get_signature (cmethod
, image
, token
);
7203 mono_save_token_info (cfg
, image
, token
, cmethod
);
7205 if (!mono_class_init (cmethod
->klass
))
7208 if (cfg
->generic_sharing_context
)
7209 context_used
= mono_method_check_context_used (cmethod
);
7211 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS
) {
7212 if (check_linkdemand (cfg
, method
, cmethod
))
7214 CHECK_CFG_EXCEPTION
;
7215 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR
) {
7216 ensure_method_is_allowed_to_call_method (cfg
, method
, cmethod
, bblock
, ip
);
7219 if (cmethod
->klass
->valuetype
&& mono_class_generic_sharing_enabled (cmethod
->klass
) &&
7220 mono_method_is_generic_sharable_impl (cmethod
, TRUE
)) {
7221 if (cmethod
->is_inflated
&& mono_method_get_context (cmethod
)->method_inst
) {
7223 vtable_arg
= emit_get_rgctx_method (cfg
, context_used
,
7224 cmethod
, MONO_RGCTX_INFO_METHOD_RGCTX
);
7226 EMIT_NEW_METHOD_RGCTX_CONST (cfg
, vtable_arg
, cmethod
);
7230 vtable_arg
= emit_get_rgctx_klass (cfg
, context_used
,
7231 cmethod
->klass
, MONO_RGCTX_INFO_VTABLE
);
7233 MonoVTable
*vtable
= mono_class_vtable (cfg
->domain
, cmethod
->klass
);
7235 CHECK_TYPELOAD (cmethod
->klass
);
7236 EMIT_NEW_VTABLECONST (cfg
, vtable_arg
, vtable
);
7241 n
= fsig
->param_count
;
7245 * Generate smaller code for the common newobj <exception> instruction in
7246 * argument checking code.
7248 if (bblock
->out_of_line
&& cmethod
->klass
->image
== mono_defaults
.corlib
&&
7249 is_exception_class (cmethod
->klass
) && n
<= 2 &&
7250 ((n
< 1) || (!fsig
->params
[0]->byref
&& fsig
->params
[0]->type
== MONO_TYPE_STRING
)) &&
7251 ((n
< 2) || (!fsig
->params
[1]->byref
&& fsig
->params
[1]->type
== MONO_TYPE_STRING
))) {
7252 MonoInst
*iargs
[3];
7254 g_assert (!vtable_arg
);
7258 EMIT_NEW_ICONST (cfg
, iargs
[0], cmethod
->klass
->type_token
);
7261 *sp
++ = mono_emit_jit_icall (cfg
, mono_create_corlib_exception_0
, iargs
);
7265 *sp
++ = mono_emit_jit_icall (cfg
, mono_create_corlib_exception_1
, iargs
);
7270 *sp
++ = mono_emit_jit_icall (cfg
, mono_create_corlib_exception_2
, iargs
);
7273 g_assert_not_reached ();
7281 /* move the args to allow room for 'this' in the first position */
7287 /* check_call_signature () requires sp[0] to be set */
7288 this_ins
.type
= STACK_OBJ
;
7290 if (check_call_signature (cfg
, fsig
, sp
))
7295 if (mini_class_is_system_array (cmethod
->klass
)) {
7296 g_assert (!vtable_arg
);
7299 *sp
= emit_get_rgctx_method (cfg
, context_used
,
7300 cmethod
, MONO_RGCTX_INFO_METHOD
);
7302 EMIT_NEW_METHODCONST (cfg
, *sp
, cmethod
);
7305 /* Avoid varargs in the common case */
7306 if (fsig
->param_count
== 1)
7307 alloc
= mono_emit_jit_icall (cfg
, mono_array_new_1
, sp
);
7308 else if (fsig
->param_count
== 2)
7309 alloc
= mono_emit_jit_icall (cfg
, mono_array_new_2
, sp
);
7311 alloc
= handle_array_new (cfg
, fsig
->param_count
, sp
, ip
);
7312 } else if (cmethod
->string_ctor
) {
7313 g_assert (!context_used
);
7314 g_assert (!vtable_arg
);
7315 /* we simply pass a null pointer */
7316 EMIT_NEW_PCONST (cfg
, *sp
, NULL
);
7317 /* now call the string ctor */
7318 alloc
= mono_emit_method_call_full (cfg
, cmethod
, fsig
, sp
, NULL
, NULL
);
7320 MonoInst
* callvirt_this_arg
= NULL
;
7322 if (cmethod
->klass
->valuetype
) {
7323 iargs
[0] = mono_compile_create_var (cfg
, &cmethod
->klass
->byval_arg
, OP_LOCAL
);
7324 MONO_EMIT_NEW_VZERO (cfg
, iargs
[0]->dreg
, cmethod
->klass
);
7325 EMIT_NEW_TEMPLOADA (cfg
, *sp
, iargs
[0]->inst_c0
);
7330 * The code generated by mini_emit_virtual_call () expects
7331 * iargs [0] to be a boxed instance, but luckily the vcall
7332 * will be transformed into a normal call there.
7334 } else if (context_used
) {
7338 if (cfg
->opt
& MONO_OPT_SHARED
)
7339 rgctx_info
= MONO_RGCTX_INFO_KLASS
;
7341 rgctx_info
= MONO_RGCTX_INFO_VTABLE
;
7342 data
= emit_get_rgctx_klass (cfg
, context_used
, cmethod
->klass
, rgctx_info
);
7344 alloc
= handle_alloc_from_inst (cfg
, cmethod
->klass
, data
, FALSE
);
7347 MonoVTable
*vtable
= mono_class_vtable (cfg
->domain
, cmethod
->klass
);
7349 CHECK_TYPELOAD (cmethod
->klass
);
7352 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7353 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7354 * As a workaround, we call class cctors before allocating objects.
7356 if (mini_field_access_needs_cctor_run (cfg
, method
, vtable
) && !(g_slist_find (class_inits
, vtable
))) {
7357 mono_emit_abs_call (cfg
, MONO_PATCH_INFO_CLASS_INIT
, vtable
->klass
, helper_sig_class_init_trampoline
, NULL
);
7358 if (cfg
->verbose_level
> 2)
7359 printf ("class %s.%s needs init call for ctor\n", cmethod
->klass
->name_space
, cmethod
->klass
->name
);
7360 class_inits
= g_slist_prepend (class_inits
, vtable
);
7363 alloc
= handle_alloc (cfg
, cmethod
->klass
, FALSE
);
7368 MONO_EMIT_NEW_UNALU (cfg
, OP_NOT_NULL
, -1, alloc
->dreg
);
7370 /* Now call the actual ctor */
7371 /* Avoid virtual calls to ctors if possible */
7372 if (cmethod
->klass
->marshalbyref
)
7373 callvirt_this_arg
= sp
[0];
7375 if ((cfg
->opt
& MONO_OPT_INLINE
) && cmethod
&& !context_used
&& !vtable_arg
&&
7376 mono_method_check_inlining (cfg
, cmethod
) &&
7377 !mono_class_is_subclass_of (cmethod
->klass
, mono_defaults
.exception_class
, FALSE
) &&
7378 !g_list_find (dont_inline
, cmethod
)) {
7381 if ((costs
= inline_method (cfg
, cmethod
, fsig
, sp
, ip
, cfg
->real_offset
, dont_inline
, FALSE
))) {
7382 cfg
->real_offset
+= 5;
7385 inline_costs
+= costs
- 5;
7388 mono_emit_method_call_full (cfg
, cmethod
, fsig
, sp
, callvirt_this_arg
, NULL
);
7390 } else if (context_used
&&
7391 (!mono_method_is_generic_sharable_impl (cmethod
, TRUE
) ||
7392 !mono_class_generic_sharing_enabled (cmethod
->klass
))) {
7393 MonoInst
*cmethod_addr
;
7395 cmethod_addr
= emit_get_rgctx_method (cfg
, context_used
,
7396 cmethod
, MONO_RGCTX_INFO_GENERIC_METHOD_CODE
);
7398 mono_emit_rgctx_calli (cfg
, fsig
, sp
, cmethod_addr
, vtable_arg
);
7401 ins
= mono_emit_rgctx_method_call_full (cfg
, cmethod
, fsig
, sp
,
7402 callvirt_this_arg
, NULL
, vtable_arg
);
7403 if (mono_method_is_generic_sharable_impl (cmethod
, TRUE
) && ((MonoCallInst
*)ins
)->method
->wrapper_type
== MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK
)
7404 GENERIC_SHARING_FAILURE (*ip
);
7408 if (alloc
== NULL
) {
7410 EMIT_NEW_TEMPLOAD (cfg
, ins
, iargs
[0]->inst_c0
);
7411 type_to_eval_stack_type (cfg
, &ins
->klass
->byval_arg
, ins
);
7425 token
= read32 (ip
+ 1);
7426 klass
= mini_get_class (method
, token
, generic_context
);
7427 CHECK_TYPELOAD (klass
);
7428 if (sp
[0]->type
!= STACK_OBJ
)
7431 if (cfg
->generic_sharing_context
)
7432 context_used
= mono_class_check_context_used (klass
);
7441 args
[1] = emit_get_rgctx_klass (cfg
, context_used
,
7442 klass
, MONO_RGCTX_INFO_KLASS
);
7444 ins
= mono_emit_jit_icall (cfg
, mono_object_castclass
, args
);
7448 } else if (klass
->marshalbyref
|| klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
) {
7449 MonoMethod
*mono_castclass
;
7450 MonoInst
*iargs
[1];
7453 mono_castclass
= mono_marshal_get_castclass (klass
);
7456 costs
= inline_method (cfg
, mono_castclass
, mono_method_signature (mono_castclass
),
7457 iargs
, ip
, cfg
->real_offset
, dont_inline
, TRUE
);
7458 g_assert (costs
> 0);
7461 cfg
->real_offset
+= 5;
7466 inline_costs
+= costs
;
7469 ins
= handle_castclass (cfg
, klass
, *sp
);
7479 token
= read32 (ip
+ 1);
7480 klass
= mini_get_class (method
, token
, generic_context
);
7481 CHECK_TYPELOAD (klass
);
7482 if (sp
[0]->type
!= STACK_OBJ
)
7485 if (cfg
->generic_sharing_context
)
7486 context_used
= mono_class_check_context_used (klass
);
7495 args
[1] = emit_get_rgctx_klass (cfg
, context_used
, klass
, MONO_RGCTX_INFO_KLASS
);
7497 *sp
= mono_emit_jit_icall (cfg
, mono_object_isinst
, args
);
7501 } else if (klass
->marshalbyref
|| klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
) {
7502 MonoMethod
*mono_isinst
;
7503 MonoInst
*iargs
[1];
7506 mono_isinst
= mono_marshal_get_isinst (klass
);
7509 costs
= inline_method (cfg
, mono_isinst
, mono_method_signature (mono_isinst
),
7510 iargs
, ip
, cfg
->real_offset
, dont_inline
, TRUE
);
7511 g_assert (costs
> 0);
7514 cfg
->real_offset
+= 5;
7519 inline_costs
+= costs
;
7522 ins
= handle_isinst (cfg
, klass
, *sp
);
7529 case CEE_UNBOX_ANY
: {
7533 token
= read32 (ip
+ 1);
7534 klass
= mini_get_class (method
, token
, generic_context
);
7535 CHECK_TYPELOAD (klass
);
7537 mono_save_token_info (cfg
, image
, token
, klass
);
7539 if (cfg
->generic_sharing_context
)
7540 context_used
= mono_class_check_context_used (klass
);
7542 if (generic_class_is_reference_type (cfg
, klass
)) {
7545 MonoInst
*iargs
[2];
7550 iargs
[1] = emit_get_rgctx_klass (cfg
, context_used
, klass
, MONO_RGCTX_INFO_KLASS
);
7551 ins
= mono_emit_jit_icall (cfg
, mono_object_castclass
, iargs
);
7555 } else if (klass
->marshalbyref
|| klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
) {
7556 MonoMethod
*mono_castclass
;
7557 MonoInst
*iargs
[1];
7560 mono_castclass
= mono_marshal_get_castclass (klass
);
7563 costs
= inline_method (cfg
, mono_castclass
, mono_method_signature (mono_castclass
),
7564 iargs
, ip
, cfg
->real_offset
, dont_inline
, TRUE
);
7566 g_assert (costs
> 0);
7569 cfg
->real_offset
+= 5;
7573 inline_costs
+= costs
;
7575 ins
= handle_castclass (cfg
, klass
, *sp
);
7583 if (mono_class_is_nullable (klass
)) {
7584 ins
= handle_unbox_nullable (cfg
, *sp
, klass
, context_used
);
7591 ins
= handle_unbox (cfg
, klass
, sp
, context_used
);
7597 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, sp
[0]->dreg
, 0);
7610 token
= read32 (ip
+ 1);
7611 klass
= mini_get_class (method
, token
, generic_context
);
7612 CHECK_TYPELOAD (klass
);
7614 mono_save_token_info (cfg
, image
, token
, klass
);
7616 if (cfg
->generic_sharing_context
)
7617 context_used
= mono_class_check_context_used (klass
);
7619 if (generic_class_is_reference_type (cfg
, klass
)) {
7625 if (klass
== mono_defaults
.void_class
)
7627 if (target_type_is_incompatible (cfg
, &klass
->byval_arg
, *sp
))
7629 /* frequent check in generic code: box (struct), brtrue */
7630 if (!mono_class_is_nullable (klass
) &&
7631 ip
+ 5 < end
&& ip_in_bb (cfg
, bblock
, ip
+ 5) && (ip
[5] == CEE_BRTRUE
|| ip
[5] == CEE_BRTRUE_S
)) {
7632 /*printf ("box-brtrue opt at 0x%04x in %s\n", real_offset, method->name);*/
7634 MONO_INST_NEW (cfg
, ins
, OP_BR
);
7635 if (*ip
== CEE_BRTRUE_S
) {
7638 target
= ip
+ 1 + (signed char)(*ip
);
7643 target
= ip
+ 4 + (gint
)(read32 (ip
));
7646 GET_BBLOCK (cfg
, tblock
, target
);
7647 link_bblock (cfg
, bblock
, tblock
);
7648 ins
->inst_target_bb
= tblock
;
7649 GET_BBLOCK (cfg
, tblock
, ip
);
7651 * This leads to some inconsistency, since the two bblocks are
7652 * not really connected, but it is needed for handling stack
7653 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
7654 * FIXME: This should only be needed if sp != stack_start, but that
7655 * doesn't work for some reason (test failure in mcs/tests on x86).
7657 link_bblock (cfg
, bblock
, tblock
);
7658 if (sp
!= stack_start
) {
7659 handle_stack_args (cfg
, stack_start
, sp
- stack_start
);
7661 CHECK_UNVERIFIABLE (cfg
);
7663 MONO_ADD_INS (bblock
, ins
);
7664 start_new_bblock
= 1;
7672 if (cfg
->opt
& MONO_OPT_SHARED
)
7673 rgctx_info
= MONO_RGCTX_INFO_KLASS
;
7675 rgctx_info
= MONO_RGCTX_INFO_VTABLE
;
7676 data
= emit_get_rgctx_klass (cfg
, context_used
, klass
, rgctx_info
);
7677 *sp
++ = handle_box_from_inst (cfg
, val
, klass
, context_used
, data
);
7679 *sp
++ = handle_box (cfg
, val
, klass
);
7690 token
= read32 (ip
+ 1);
7691 klass
= mini_get_class (method
, token
, generic_context
);
7692 CHECK_TYPELOAD (klass
);
7694 mono_save_token_info (cfg
, image
, token
, klass
);
7696 if (cfg
->generic_sharing_context
)
7697 context_used
= mono_class_check_context_used (klass
);
7699 if (mono_class_is_nullable (klass
)) {
7702 val
= handle_unbox_nullable (cfg
, *sp
, klass
, context_used
);
7703 EMIT_NEW_VARLOADA (cfg
, ins
, get_vreg_to_inst (cfg
, val
->dreg
), &val
->klass
->byval_arg
);
7707 ins
= handle_unbox (cfg
, klass
, sp
, context_used
);
7717 MonoClassField
*field
;
7721 if (*ip
== CEE_STFLD
) {
7728 if (sp
[0]->type
== STACK_I4
|| sp
[0]->type
== STACK_I8
|| sp
[0]->type
== STACK_R8
)
7730 if (*ip
!= CEE_LDFLD
&& sp
[0]->type
== STACK_VTYPE
)
7733 token
= read32 (ip
+ 1);
7734 if (method
->wrapper_type
!= MONO_WRAPPER_NONE
) {
7735 field
= mono_method_get_wrapper_data (method
, token
);
7736 klass
= field
->parent
;
7739 field
= mono_field_from_token (image
, token
, &klass
, generic_context
);
7743 if (!dont_verify
&& !cfg
->skip_visibility
&& !mono_method_can_access_field (method
, field
))
7744 FIELD_ACCESS_FAILURE
;
7745 mono_class_init (klass
);
7747 foffset
= klass
->valuetype
? field
->offset
- sizeof (MonoObject
): field
->offset
;
7748 if (*ip
== CEE_STFLD
) {
7749 if (target_type_is_incompatible (cfg
, field
->type
, sp
[1]))
7751 if ((klass
->marshalbyref
&& !MONO_CHECK_THIS (sp
[0])) || klass
->contextbound
|| klass
== mono_defaults
.marshalbyrefobject_class
) {
7752 MonoMethod
*stfld_wrapper
= mono_marshal_get_stfld_wrapper (field
->type
);
7753 MonoInst
*iargs
[5];
7756 EMIT_NEW_CLASSCONST (cfg
, iargs
[1], klass
);
7757 EMIT_NEW_FIELDCONST (cfg
, iargs
[2], field
);
7758 EMIT_NEW_ICONST (cfg
, iargs
[3], klass
->valuetype
? field
->offset
- sizeof (MonoObject
) :
7762 if (cfg
->opt
& MONO_OPT_INLINE
) {
7763 costs
= inline_method (cfg
, stfld_wrapper
, mono_method_signature (stfld_wrapper
),
7764 iargs
, ip
, cfg
->real_offset
, dont_inline
, TRUE
);
7765 g_assert (costs
> 0);
7767 cfg
->real_offset
+= 5;
7770 inline_costs
+= costs
;
7772 mono_emit_method_call (cfg
, stfld_wrapper
, iargs
, NULL
);
7777 #if HAVE_WRITE_BARRIERS
7778 if (mini_type_to_stind (cfg
, field
->type
) == CEE_STIND_REF
&& !(sp
[1]->opcode
== OP_PCONST
&& sp
[1]->inst_c0
== 0)) {
7779 /* insert call to write barrier */
7780 MonoMethod
*write_barrier
= mono_gc_get_write_barrier ();
7781 MonoInst
*iargs
[2];
7784 dreg
= alloc_preg (cfg
);
7785 EMIT_NEW_BIALU_IMM (cfg
, iargs
[0], OP_PADD_IMM
, dreg
, sp
[0]->dreg
, foffset
);
7787 mono_emit_method_call (cfg
, write_barrier
, iargs
, NULL
);
7791 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, store
, field
->type
, sp
[0]->dreg
, foffset
, sp
[1]->dreg
);
7793 store
->flags
|= ins_flag
;
7800 if ((klass
->marshalbyref
&& !MONO_CHECK_THIS (sp
[0])) || klass
->contextbound
|| klass
== mono_defaults
.marshalbyrefobject_class
) {
7801 MonoMethod
*wrapper
= (*ip
== CEE_LDFLDA
) ? mono_marshal_get_ldflda_wrapper (field
->type
) : mono_marshal_get_ldfld_wrapper (field
->type
);
7802 MonoInst
*iargs
[4];
7805 EMIT_NEW_CLASSCONST (cfg
, iargs
[1], klass
);
7806 EMIT_NEW_FIELDCONST (cfg
, iargs
[2], field
);
7807 EMIT_NEW_ICONST (cfg
, iargs
[3], klass
->valuetype
? field
->offset
- sizeof (MonoObject
) : field
->offset
);
7808 if ((cfg
->opt
& MONO_OPT_INLINE
) && !MONO_TYPE_ISSTRUCT (mono_method_signature (wrapper
)->ret
)) {
7809 costs
= inline_method (cfg
, wrapper
, mono_method_signature (wrapper
),
7810 iargs
, ip
, cfg
->real_offset
, dont_inline
, TRUE
);
7812 g_assert (costs
> 0);
7814 cfg
->real_offset
+= 5;
7818 inline_costs
+= costs
;
7820 ins
= mono_emit_method_call (cfg
, wrapper
, iargs
, NULL
);
7824 if (sp
[0]->type
== STACK_VTYPE
) {
7827 /* Have to compute the address of the variable */
7829 var
= get_vreg_to_inst (cfg
, sp
[0]->dreg
);
7831 var
= mono_compile_create_var_for_vreg (cfg
, &klass
->byval_arg
, OP_LOCAL
, sp
[0]->dreg
);
7833 g_assert (var
->klass
== klass
);
7835 EMIT_NEW_VARLOADA (cfg
, ins
, var
, &var
->klass
->byval_arg
);
7839 if (*ip
== CEE_LDFLDA
) {
7840 dreg
= alloc_preg (cfg
);
7842 EMIT_NEW_BIALU_IMM (cfg
, ins
, OP_PADD_IMM
, dreg
, sp
[0]->dreg
, foffset
);
7843 ins
->klass
= mono_class_from_mono_type (field
->type
);
7844 ins
->type
= STACK_MP
;
7849 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, load
, field
->type
, sp
[0]->dreg
, foffset
);
7850 load
->flags
|= ins_flag
;
7861 MonoClassField
*field
;
7862 gpointer addr
= NULL
;
7863 gboolean is_special_static
;
7866 token
= read32 (ip
+ 1);
7868 if (method
->wrapper_type
!= MONO_WRAPPER_NONE
) {
7869 field
= mono_method_get_wrapper_data (method
, token
);
7870 klass
= field
->parent
;
7873 field
= mono_field_from_token (image
, token
, &klass
, generic_context
);
7876 mono_class_init (klass
);
7877 if (!dont_verify
&& !cfg
->skip_visibility
&& !mono_method_can_access_field (method
, field
))
7878 FIELD_ACCESS_FAILURE
;
7881 * We can only support shared generic static
7882 * field access on architectures where the
7883 * trampoline code has been extended to handle
7884 * the generic class init.
7886 #ifndef MONO_ARCH_VTABLE_REG
7887 GENERIC_SHARING_FAILURE (*ip
);
7890 if (cfg
->generic_sharing_context
)
7891 context_used
= mono_class_check_context_used (klass
);
7893 g_assert (!(field
->type
->attrs
& FIELD_ATTRIBUTE_LITERAL
));
7895 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
7896 * to be called here.
7898 if (!context_used
&& !(cfg
->opt
& MONO_OPT_SHARED
)) {
7899 mono_class_vtable (cfg
->domain
, klass
);
7900 CHECK_TYPELOAD (klass
);
7902 mono_domain_lock (cfg
->domain
);
7903 if (cfg
->domain
->special_static_fields
)
7904 addr
= g_hash_table_lookup (cfg
->domain
->special_static_fields
, field
);
7905 mono_domain_unlock (cfg
->domain
);
7907 is_special_static
= mono_class_field_is_special_static (field
);
7909 /* Generate IR to compute the field address */
7911 if ((cfg
->opt
& MONO_OPT_SHARED
) ||
7912 (cfg
->compile_aot
&& is_special_static
) ||
7913 (context_used
&& is_special_static
)) {
7914 MonoInst
*iargs
[2];
7916 g_assert (field
->parent
);
7917 EMIT_NEW_DOMAINCONST (cfg
, iargs
[0]);
7919 iargs
[1] = emit_get_rgctx_field (cfg
, context_used
,
7920 field
, MONO_RGCTX_INFO_CLASS_FIELD
);
7922 EMIT_NEW_FIELDCONST (cfg
, iargs
[1], field
);
7924 ins
= mono_emit_jit_icall (cfg
, mono_class_static_field_address
, iargs
);
7925 } else if (context_used
) {
7926 MonoInst
*static_data
;
7929 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
7930 method->klass->name_space, method->klass->name, method->name,
7931 depth, field->offset);
7934 if (mono_class_needs_cctor_run (klass
, method
)) {
7938 vtable
= emit_get_rgctx_klass (cfg
, context_used
,
7939 klass
, MONO_RGCTX_INFO_VTABLE
);
7941 // FIXME: This doesn't work since it tries to pass the argument
7942 // in the normal way, instead of using MONO_ARCH_VTABLE_REG
7944 * The vtable pointer is always passed in a register regardless of
7945 * the calling convention, so assign it manually, and make a call
7946 * using a signature without parameters.
7948 call
= (MonoCallInst
*)mono_emit_abs_call (cfg
, MONO_PATCH_INFO_GENERIC_CLASS_INIT
, NULL
, helper_sig_generic_class_init_trampoline
, &vtable
);
7949 #ifdef MONO_ARCH_VTABLE_REG
7950 mono_call_inst_add_outarg_reg (cfg
, call
, vtable
->dreg
, MONO_ARCH_VTABLE_REG
, FALSE
);
7951 cfg
->uses_vtable_reg
= TRUE
;
7958 * The pointer we're computing here is
7960 * super_info.static_data + field->offset
7962 static_data
= emit_get_rgctx_klass (cfg
, context_used
,
7963 klass
, MONO_RGCTX_INFO_STATIC_DATA
);
7965 if (field
->offset
== 0) {
7968 int addr_reg
= mono_alloc_preg (cfg
);
7969 EMIT_NEW_BIALU_IMM (cfg
, ins
, OP_PADD_IMM
, addr_reg
, static_data
->dreg
, field
->offset
);
7971 } else if ((cfg
->opt
& MONO_OPT_SHARED
) || (cfg
->compile_aot
&& addr
)) {
7972 MonoInst
*iargs
[2];
7974 g_assert (field
->parent
);
7975 EMIT_NEW_DOMAINCONST (cfg
, iargs
[0]);
7976 EMIT_NEW_FIELDCONST (cfg
, iargs
[1], field
);
7977 ins
= mono_emit_jit_icall (cfg
, mono_class_static_field_address
, iargs
);
7979 MonoVTable
*vtable
= mono_class_vtable (cfg
->domain
, klass
);
7981 CHECK_TYPELOAD (klass
);
7983 if (mini_field_access_needs_cctor_run (cfg
, method
, vtable
) && !(g_slist_find (class_inits
, vtable
))) {
7984 mono_emit_abs_call (cfg
, MONO_PATCH_INFO_CLASS_INIT
, vtable
->klass
, helper_sig_class_init_trampoline
, NULL
);
7985 if (cfg
->verbose_level
> 2)
7986 printf ("class %s.%s needs init call for %s\n", klass
->name_space
, klass
->name
, mono_field_get_name (field
));
7987 class_inits
= g_slist_prepend (class_inits
, vtable
);
7989 if (cfg
->run_cctors
) {
7991 /* This makes so that inline cannot trigger */
7992 /* .cctors: too many apps depend on them */
7993 /* running with a specific order... */
7994 if (! vtable
->initialized
)
7996 ex
= mono_runtime_class_init_full (vtable
, FALSE
);
7998 set_exception_object (cfg
, ex
);
7999 goto exception_exit
;
8003 addr
= (char*)vtable
->data
+ field
->offset
;
8005 if (cfg
->compile_aot
)
8006 EMIT_NEW_SFLDACONST (cfg
, ins
, field
);
8008 EMIT_NEW_PCONST (cfg
, ins
, addr
);
8011 * insert call to mono_threads_get_static_data (GPOINTER_TO_UINT (addr))
8012 * This could be later optimized to do just a couple of
8013 * memory dereferences with constant offsets.
8015 MonoInst
*iargs
[1];
8016 EMIT_NEW_ICONST (cfg
, iargs
[0], GPOINTER_TO_UINT (addr
));
8017 ins
= mono_emit_jit_icall (cfg
, mono_get_special_static_data
, iargs
);
8021 /* Generate IR to do the actual load/store operation */
8023 if (*ip
== CEE_LDSFLDA
) {
8024 ins
->klass
= mono_class_from_mono_type (field
->type
);
8025 ins
->type
= STACK_PTR
;
8027 } else if (*ip
== CEE_STSFLD
) {
8032 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, store
, field
->type
, ins
->dreg
, 0, sp
[0]->dreg
);
8033 store
->flags
|= ins_flag
;
8035 gboolean is_const
= FALSE
;
8036 MonoVTable
*vtable
= NULL
;
8038 if (!context_used
) {
8039 vtable
= mono_class_vtable (cfg
->domain
, klass
);
8040 CHECK_TYPELOAD (klass
);
8042 if (!context_used
&& !((cfg
->opt
& MONO_OPT_SHARED
) || cfg
->compile_aot
) &&
8043 vtable
->initialized
&& (field
->type
->attrs
& FIELD_ATTRIBUTE_INIT_ONLY
)) {
8044 gpointer addr
= (char*)vtable
->data
+ field
->offset
;
8045 int ro_type
= field
->type
->type
;
8046 if (ro_type
== MONO_TYPE_VALUETYPE
&& field
->type
->data
.klass
->enumtype
) {
8047 ro_type
= mono_class_enum_basetype (field
->type
->data
.klass
)->type
;
8049 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8052 case MONO_TYPE_BOOLEAN
:
8054 EMIT_NEW_ICONST (cfg
, *sp
, *((guint8
*)addr
));
8058 EMIT_NEW_ICONST (cfg
, *sp
, *((gint8
*)addr
));
8061 case MONO_TYPE_CHAR
:
8063 EMIT_NEW_ICONST (cfg
, *sp
, *((guint16
*)addr
));
8067 EMIT_NEW_ICONST (cfg
, *sp
, *((gint16
*)addr
));
8072 EMIT_NEW_ICONST (cfg
, *sp
, *((gint32
*)addr
));
8076 EMIT_NEW_ICONST (cfg
, *sp
, *((guint32
*)addr
));
8079 #ifndef HAVE_MOVING_COLLECTOR
8082 case MONO_TYPE_STRING
:
8083 case MONO_TYPE_OBJECT
:
8084 case MONO_TYPE_CLASS
:
8085 case MONO_TYPE_SZARRAY
:
8087 case MONO_TYPE_FNPTR
:
8088 case MONO_TYPE_ARRAY
:
8089 EMIT_NEW_PCONST (cfg
, *sp
, *((gpointer
*)addr
));
8090 type_to_eval_stack_type ((cfg
), field
->type
, *sp
);
8096 EMIT_NEW_I8CONST (cfg
, *sp
, *((gint64
*)addr
));
8101 case MONO_TYPE_VALUETYPE
:
8111 CHECK_STACK_OVF (1);
8113 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, load
, field
->type
, ins
->dreg
, 0);
8114 load
->flags
|= ins_flag
;
8127 token
= read32 (ip
+ 1);
8128 klass
= mini_get_class (method
, token
, generic_context
);
8129 CHECK_TYPELOAD (klass
);
8130 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8131 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, sp
[0]->dreg
, 0, sp
[1]->dreg
);
8142 const char *data_ptr
;
8144 guint32 field_token
;
8150 token
= read32 (ip
+ 1);
8152 klass
= mini_get_class (method
, token
, generic_context
);
8153 CHECK_TYPELOAD (klass
);
8155 if (cfg
->generic_sharing_context
)
8156 context_used
= mono_class_check_context_used (klass
);
8161 /* FIXME: Decompose later to help abcrem */
8164 args
[0] = emit_get_rgctx_klass (cfg
, context_used
,
8165 mono_array_class_get (klass
, 1), MONO_RGCTX_INFO_VTABLE
);
8170 ins
= mono_emit_jit_icall (cfg
, mono_array_new_specific
, args
);
8172 if (cfg
->opt
& MONO_OPT_SHARED
) {
8173 /* Decompose now to avoid problems with references to the domainvar */
8174 MonoInst
*iargs
[3];
8176 EMIT_NEW_DOMAINCONST (cfg
, iargs
[0]);
8177 EMIT_NEW_CLASSCONST (cfg
, iargs
[1], klass
);
8180 ins
= mono_emit_jit_icall (cfg
, mono_array_new
, iargs
);
8182 /* Decompose later since it is needed by abcrem */
8183 MONO_INST_NEW (cfg
, ins
, OP_NEWARR
);
8184 ins
->dreg
= alloc_preg (cfg
);
8185 ins
->sreg1
= sp
[0]->dreg
;
8186 ins
->inst_newa_class
= klass
;
8187 ins
->type
= STACK_OBJ
;
8189 MONO_ADD_INS (cfg
->cbb
, ins
);
8190 cfg
->flags
|= MONO_CFG_HAS_ARRAY_ACCESS
;
8191 cfg
->cbb
->has_array_access
= TRUE
;
8193 /* Needed so mono_emit_load_get_addr () gets called */
8194 mono_get_got_var (cfg
);
8204 * we inline/optimize the initialization sequence if possible.
8205 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8206 * for small sizes open code the memcpy
8207 * ensure the rva field is big enough
8209 if ((cfg
->opt
& MONO_OPT_INTRINS
) && ip
+ 6 < end
&& ip_in_bb (cfg
, bblock
, ip
+ 6) && (len_ins
->opcode
== OP_ICONST
) && (data_ptr
= initialize_array_data (method
, cfg
->compile_aot
, ip
, klass
, len_ins
->inst_c0
, &data_size
, &field_token
))) {
8210 MonoMethod
*memcpy_method
= get_memcpy_method ();
8211 MonoInst
*iargs
[3];
8212 int add_reg
= alloc_preg (cfg
);
8214 EMIT_NEW_BIALU_IMM (cfg
, iargs
[0], OP_PADD_IMM
, add_reg
, ins
->dreg
, G_STRUCT_OFFSET (MonoArray
, vector
));
8215 if (cfg
->compile_aot
) {
8216 EMIT_NEW_AOTCONST_TOKEN (cfg
, iargs
[1], MONO_PATCH_INFO_RVA
, method
->klass
->image
, GPOINTER_TO_UINT(field_token
), STACK_PTR
, NULL
);
8218 EMIT_NEW_PCONST (cfg
, iargs
[1], (char*)data_ptr
);
8220 EMIT_NEW_ICONST (cfg
, iargs
[2], data_size
);
8221 mono_emit_method_call (cfg
, memcpy_method
, iargs
, NULL
);
8230 if (sp
[0]->type
!= STACK_OBJ
)
8233 dreg
= alloc_preg (cfg
);
8234 MONO_INST_NEW (cfg
, ins
, OP_LDLEN
);
8235 ins
->dreg
= alloc_preg (cfg
);
8236 ins
->sreg1
= sp
[0]->dreg
;
8237 ins
->type
= STACK_I4
;
8238 MONO_ADD_INS (cfg
->cbb
, ins
);
8239 cfg
->flags
|= MONO_CFG_HAS_ARRAY_ACCESS
;
8240 cfg
->cbb
->has_array_access
= TRUE
;
8248 if (sp
[0]->type
!= STACK_OBJ
)
8251 cfg
->flags
|= MONO_CFG_HAS_LDELEMA
;
8253 klass
= mini_get_class (method
, read32 (ip
+ 1), generic_context
);
8254 CHECK_TYPELOAD (klass
);
8255 /* we need to make sure that this array is exactly the type it needs
8256 * to be for correctness. the wrappers are lax with their usage
8257 * so we need to ignore them here
8259 if (!klass
->valuetype
&& method
->wrapper_type
== MONO_WRAPPER_NONE
&& !readonly
)
8260 mini_emit_check_array_type (cfg
, sp
[0], mono_array_class_get (klass
, 1));
8263 ins
= mini_emit_ldelema_1_ins (cfg
, klass
, sp
[0], sp
[1]);
8267 case CEE_LDELEM_ANY
:
8278 case CEE_LDELEM_REF
: {
8284 if (*ip
== CEE_LDELEM_ANY
) {
8286 token
= read32 (ip
+ 1);
8287 klass
= mini_get_class (method
, token
, generic_context
);
8288 CHECK_TYPELOAD (klass
);
8289 mono_class_init (klass
);
8292 klass
= array_access_to_klass (*ip
);
8294 if (sp
[0]->type
!= STACK_OBJ
)
8297 cfg
->flags
|= MONO_CFG_HAS_LDELEMA
;
8299 if (sp
[1]->opcode
== OP_ICONST
) {
8300 int array_reg
= sp
[0]->dreg
;
8301 int index_reg
= sp
[1]->dreg
;
8302 int offset
= (mono_class_array_element_size (klass
) * sp
[1]->inst_c0
) + G_STRUCT_OFFSET (MonoArray
, vector
);
8304 MONO_EMIT_BOUNDS_CHECK (cfg
, array_reg
, MonoArray
, max_length
, index_reg
);
8305 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, array_reg
, offset
);
8307 addr
= mini_emit_ldelema_1_ins (cfg
, klass
, sp
[0], sp
[1]);
8308 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, addr
->dreg
, 0);
8311 if (*ip
== CEE_LDELEM_ANY
)
8324 case CEE_STELEM_REF
:
8325 case CEE_STELEM_ANY
: {
8331 cfg
->flags
|= MONO_CFG_HAS_LDELEMA
;
8333 if (*ip
== CEE_STELEM_ANY
) {
8335 token
= read32 (ip
+ 1);
8336 klass
= mini_get_class (method
, token
, generic_context
);
8337 CHECK_TYPELOAD (klass
);
8338 mono_class_init (klass
);
8341 klass
= array_access_to_klass (*ip
);
8343 if (sp
[0]->type
!= STACK_OBJ
)
8346 /* storing a NULL doesn't need any of the complex checks in stelemref */
8347 if (generic_class_is_reference_type (cfg
, klass
) &&
8348 !(sp
[2]->opcode
== OP_PCONST
&& sp
[2]->inst_p0
== NULL
)) {
8349 MonoMethod
* helper
= mono_marshal_get_stelemref ();
8350 MonoInst
*iargs
[3];
8352 if (sp
[0]->type
!= STACK_OBJ
)
8354 if (sp
[2]->type
!= STACK_OBJ
)
8361 mono_emit_method_call (cfg
, helper
, iargs
, NULL
);
8363 if (sp
[1]->opcode
== OP_ICONST
) {
8364 int array_reg
= sp
[0]->dreg
;
8365 int index_reg
= sp
[1]->dreg
;
8366 int offset
= (mono_class_array_element_size (klass
) * sp
[1]->inst_c0
) + G_STRUCT_OFFSET (MonoArray
, vector
);
8368 MONO_EMIT_BOUNDS_CHECK (cfg
, array_reg
, MonoArray
, max_length
, index_reg
);
8369 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, array_reg
, offset
, sp
[2]->dreg
);
8371 addr
= mini_emit_ldelema_1_ins (cfg
, klass
, sp
[0], sp
[1]);
8372 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, addr
->dreg
, 0, sp
[2]->dreg
);
8376 if (*ip
== CEE_STELEM_ANY
)
8383 case CEE_CKFINITE
: {
8387 MONO_INST_NEW (cfg
, ins
, OP_CKFINITE
);
8388 ins
->sreg1
= sp
[0]->dreg
;
8389 ins
->dreg
= alloc_freg (cfg
);
8390 ins
->type
= STACK_R8
;
8391 MONO_ADD_INS (bblock
, ins
);
8394 mono_decompose_opcode (cfg
, ins
);
8399 case CEE_REFANYVAL
: {
8400 MonoInst
*src_var
, *src
;
8402 int klass_reg
= alloc_preg (cfg
);
8403 int dreg
= alloc_preg (cfg
);
8406 MONO_INST_NEW (cfg
, ins
, *ip
);
8409 klass
= mono_class_get_full (image
, read32 (ip
+ 1), generic_context
);
8410 CHECK_TYPELOAD (klass
);
8411 mono_class_init (klass
);
8413 if (cfg
->generic_sharing_context
)
8414 context_used
= mono_class_check_context_used (klass
);
8417 src_var
= get_vreg_to_inst (cfg
, sp
[0]->dreg
);
8419 src_var
= mono_compile_create_var_for_vreg (cfg
, &mono_defaults
.typed_reference_class
->byval_arg
, OP_LOCAL
, sp
[0]->dreg
);
8420 EMIT_NEW_VARLOADA (cfg
, src
, src_var
, src_var
->inst_vtype
);
8421 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, src
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, klass
));
8424 MonoInst
*klass_ins
;
8426 klass_ins
= emit_get_rgctx_klass (cfg
, context_used
,
8427 klass
, MONO_RGCTX_INFO_KLASS
);
8430 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, klass_reg
, klass_ins
->dreg
);
8431 MONO_EMIT_NEW_COND_EXC (cfg
, NE_UN
, "InvalidCastException");
8433 mini_emit_class_check (cfg
, klass_reg
, klass
);
8435 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOAD_MEMBASE
, dreg
, src
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, value
));
8436 ins
->type
= STACK_MP
;
8441 case CEE_MKREFANY
: {
8442 MonoInst
*loc
, *addr
;
8445 MONO_INST_NEW (cfg
, ins
, *ip
);
8448 klass
= mono_class_get_full (image
, read32 (ip
+ 1), generic_context
);
8449 CHECK_TYPELOAD (klass
);
8450 mono_class_init (klass
);
8452 if (cfg
->generic_sharing_context
)
8453 context_used
= mono_class_check_context_used (klass
);
8455 loc
= mono_compile_create_var (cfg
, &mono_defaults
.typed_reference_class
->byval_arg
, OP_LOCAL
);
8456 EMIT_NEW_TEMPLOADA (cfg
, addr
, loc
->inst_c0
);
8459 MonoInst
*const_ins
;
8460 int type_reg
= alloc_preg (cfg
);
8462 const_ins
= emit_get_rgctx_klass (cfg
, context_used
, klass
, MONO_RGCTX_INFO_KLASS
);
8463 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREP_MEMBASE_REG
, addr
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, klass
), const_ins
->dreg
);
8464 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_ADD_IMM
, type_reg
, const_ins
->dreg
, G_STRUCT_OFFSET (MonoClass
, byval_arg
));
8465 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREP_MEMBASE_REG
, addr
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, type
), type_reg
);
8466 } else if (cfg
->compile_aot
) {
8467 int const_reg
= alloc_preg (cfg
);
8468 int type_reg
= alloc_preg (cfg
);
8470 MONO_EMIT_NEW_CLASSCONST (cfg
, const_reg
, klass
);
8471 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREP_MEMBASE_REG
, addr
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, klass
), const_reg
);
8472 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_ADD_IMM
, type_reg
, const_reg
, G_STRUCT_OFFSET (MonoClass
, byval_arg
));
8473 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREP_MEMBASE_REG
, addr
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, type
), type_reg
);
8475 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STOREP_MEMBASE_IMM
, addr
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, type
), &klass
->byval_arg
);
8476 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STOREP_MEMBASE_IMM
, addr
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, klass
), klass
);
8478 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREP_MEMBASE_REG
, addr
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, value
), sp
[0]->dreg
);
8480 EMIT_NEW_TEMPLOAD (cfg
, ins
, loc
->inst_c0
);
8481 ins
->type
= STACK_VTYPE
;
8482 ins
->klass
= mono_defaults
.typed_reference_class
;
8489 MonoClass
*handle_class
;
8491 CHECK_STACK_OVF (1);
8494 n
= read32 (ip
+ 1);
8496 if (method
->wrapper_type
== MONO_WRAPPER_DYNAMIC_METHOD
||
8497 method
->wrapper_type
== MONO_WRAPPER_SYNCHRONIZED
) {
8498 handle
= mono_method_get_wrapper_data (method
, n
);
8499 handle_class
= mono_method_get_wrapper_data (method
, n
+ 1);
8500 if (handle_class
== mono_defaults
.typehandle_class
)
8501 handle
= &((MonoClass
*)handle
)->byval_arg
;
8504 handle
= mono_ldtoken (image
, n
, &handle_class
, generic_context
);
8508 mono_class_init (handle_class
);
8509 if (cfg
->generic_sharing_context
) {
8510 if (mono_metadata_token_table (n
) == MONO_TABLE_TYPEDEF
||
8511 mono_metadata_token_table (n
) == MONO_TABLE_TYPEREF
) {
8512 /* This case handles ldtoken
8513 of an open type, like for
8516 } else if (handle_class
== mono_defaults
.typehandle_class
) {
8517 /* If we get a MONO_TYPE_CLASS
8518 then we need to provide the
8520 instantiation of it. */
8521 if (mono_type_get_type (handle
) == MONO_TYPE_CLASS
)
8524 context_used
= mono_class_check_context_used (mono_class_from_mono_type (handle
));
8525 } else if (handle_class
== mono_defaults
.fieldhandle_class
)
8526 context_used
= mono_class_check_context_used (((MonoClassField
*)handle
)->parent
);
8527 else if (handle_class
== mono_defaults
.methodhandle_class
)
8528 context_used
= mono_method_check_context_used (handle
);
8530 g_assert_not_reached ();
8533 if ((cfg
->opt
& MONO_OPT_SHARED
) &&
8534 method
->wrapper_type
!= MONO_WRAPPER_DYNAMIC_METHOD
&&
8535 method
->wrapper_type
!= MONO_WRAPPER_SYNCHRONIZED
) {
8536 MonoInst
*addr
, *vtvar
, *iargs
[3];
8537 int method_context_used
;
8539 if (cfg
->generic_sharing_context
)
8540 method_context_used
= mono_method_check_context_used (method
);
8542 method_context_used
= 0;
8544 vtvar
= mono_compile_create_var (cfg
, &handle_class
->byval_arg
, OP_LOCAL
);
8546 EMIT_NEW_IMAGECONST (cfg
, iargs
[0], image
);
8547 EMIT_NEW_ICONST (cfg
, iargs
[1], n
);
8548 if (method_context_used
) {
8549 iargs
[2] = emit_get_rgctx_method (cfg
, method_context_used
,
8550 method
, MONO_RGCTX_INFO_METHOD
);
8551 ins
= mono_emit_jit_icall (cfg
, mono_ldtoken_wrapper_generic_shared
, iargs
);
8553 EMIT_NEW_PCONST (cfg
, iargs
[2], generic_context
);
8554 ins
= mono_emit_jit_icall (cfg
, mono_ldtoken_wrapper
, iargs
);
8556 EMIT_NEW_TEMPLOADA (cfg
, addr
, vtvar
->inst_c0
);
8558 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, addr
->dreg
, 0, ins
->dreg
);
8560 EMIT_NEW_TEMPLOAD (cfg
, ins
, vtvar
->inst_c0
);
8562 if ((ip
+ 5 < end
) && ip_in_bb (cfg
, bblock
, ip
+ 5) &&
8563 ((ip
[5] == CEE_CALL
) || (ip
[5] == CEE_CALLVIRT
)) &&
8564 (cmethod
= mini_get_method (cfg
, method
, read32 (ip
+ 6), NULL
, generic_context
)) &&
8565 (cmethod
->klass
== mono_defaults
.monotype_class
->parent
) &&
8566 (strcmp (cmethod
->name
, "GetTypeFromHandle") == 0)) {
8567 MonoClass
*tclass
= mono_class_from_mono_type (handle
);
8569 mono_class_init (tclass
);
8571 ins
= emit_get_rgctx_klass (cfg
, context_used
,
8572 tclass
, MONO_RGCTX_INFO_REFLECTION_TYPE
);
8573 } else if (cfg
->compile_aot
) {
8574 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg
, ins
, image
, n
, generic_context
);
8576 EMIT_NEW_PCONST (cfg
, ins
, mono_type_get_object (cfg
->domain
, handle
));
8578 ins
->type
= STACK_OBJ
;
8579 ins
->klass
= cmethod
->klass
;
8582 MonoInst
*addr
, *vtvar
;
8584 vtvar
= mono_compile_create_var (cfg
, &handle_class
->byval_arg
, OP_LOCAL
);
8587 if (handle_class
== mono_defaults
.typehandle_class
) {
8588 ins
= emit_get_rgctx_klass (cfg
, context_used
,
8589 mono_class_from_mono_type (handle
),
8590 MONO_RGCTX_INFO_TYPE
);
8591 } else if (handle_class
== mono_defaults
.methodhandle_class
) {
8592 ins
= emit_get_rgctx_method (cfg
, context_used
,
8593 handle
, MONO_RGCTX_INFO_METHOD
);
8594 } else if (handle_class
== mono_defaults
.fieldhandle_class
) {
8595 ins
= emit_get_rgctx_field (cfg
, context_used
,
8596 handle
, MONO_RGCTX_INFO_CLASS_FIELD
);
8598 g_assert_not_reached ();
8600 } else if (cfg
->compile_aot
) {
8601 EMIT_NEW_LDTOKENCONST (cfg
, ins
, image
, n
);
8603 EMIT_NEW_PCONST (cfg
, ins
, handle
);
8605 EMIT_NEW_TEMPLOADA (cfg
, addr
, vtvar
->inst_c0
);
8606 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, addr
->dreg
, 0, ins
->dreg
);
8607 EMIT_NEW_TEMPLOAD (cfg
, ins
, vtvar
->inst_c0
);
8617 MONO_INST_NEW (cfg
, ins
, OP_THROW
);
8619 ins
->sreg1
= sp
[0]->dreg
;
8621 bblock
->out_of_line
= TRUE
;
8622 MONO_ADD_INS (bblock
, ins
);
8623 MONO_INST_NEW (cfg
, ins
, OP_NOT_REACHED
);
8624 MONO_ADD_INS (bblock
, ins
);
8627 link_bblock (cfg
, bblock
, end_bblock
);
8628 start_new_bblock
= 1;
8630 case CEE_ENDFINALLY
:
8631 MONO_INST_NEW (cfg
, ins
, OP_ENDFINALLY
);
8632 MONO_ADD_INS (bblock
, ins
);
8634 start_new_bblock
= 1;
8637 * Control will leave the method so empty the stack, otherwise
8638 * the next basic block will start with a nonempty stack.
8640 while (sp
!= stack_start
) {
8648 if (*ip
== CEE_LEAVE
) {
8650 target
= ip
+ 5 + (gint32
)read32(ip
+ 1);
8653 target
= ip
+ 2 + (signed char)(ip
[1]);
8656 /* empty the stack */
8657 while (sp
!= stack_start
) {
8662 * If this leave statement is in a catch block, check for a
8663 * pending exception, and rethrow it if necessary.
8665 for (i
= 0; i
< header
->num_clauses
; ++i
) {
8666 MonoExceptionClause
*clause
= &header
->clauses
[i
];
8669 * Use <= in the final comparison to handle clauses with multiple
8670 * leave statements, like in bug #78024.
8671 * The ordering of the exception clauses guarantees that we find the
8674 if (MONO_OFFSET_IN_HANDLER (clause
, ip
- header
->code
) && (clause
->flags
== MONO_EXCEPTION_CLAUSE_NONE
) && (ip
- header
->code
+ ((*ip
== CEE_LEAVE
) ? 5 : 2)) <= (clause
->handler_offset
+ clause
->handler_len
)) {
8676 MonoBasicBlock
*dont_throw
;
8681 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
8684 exc_ins
= mono_emit_jit_icall (cfg
, mono_thread_get_undeniable_exception
, NULL
);
8686 NEW_BBLOCK (cfg
, dont_throw
);
8689 * Currently, we always rethrow the abort exception, despite the
8690 * fact that this is not correct. See thread6.cs for an example.
8691 * But propagating the abort exception is more important than
8692 * getting the semantics right.
8694 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, exc_ins
->dreg
, 0);
8695 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBEQ
, dont_throw
);
8696 MONO_EMIT_NEW_UNALU (cfg
, OP_THROW
, -1, exc_ins
->dreg
);
8698 MONO_START_BB (cfg
, dont_throw
);
8703 if ((handlers
= mono_find_final_block (cfg
, ip
, target
, MONO_EXCEPTION_CLAUSE_FINALLY
))) {
8705 for (tmp
= handlers
; tmp
; tmp
= tmp
->next
) {
8707 link_bblock (cfg
, bblock
, tblock
);
8708 MONO_INST_NEW (cfg
, ins
, OP_CALL_HANDLER
);
8709 ins
->inst_target_bb
= tblock
;
8710 MONO_ADD_INS (bblock
, ins
);
8712 g_list_free (handlers
);
8715 MONO_INST_NEW (cfg
, ins
, OP_BR
);
8716 MONO_ADD_INS (bblock
, ins
);
8717 GET_BBLOCK (cfg
, tblock
, target
);
8718 link_bblock (cfg
, bblock
, tblock
);
8719 ins
->inst_target_bb
= tblock
;
8720 start_new_bblock
= 1;
8722 if (*ip
== CEE_LEAVE
)
8731 * Mono specific opcodes
8733 case MONO_CUSTOM_PREFIX
: {
8735 g_assert (method
->wrapper_type
!= MONO_WRAPPER_NONE
);
8739 case CEE_MONO_ICALL
: {
8741 MonoJitICallInfo
*info
;
8743 token
= read32 (ip
+ 2);
8744 func
= mono_method_get_wrapper_data (method
, token
);
8745 info
= mono_find_jit_icall_by_addr (func
);
8748 CHECK_STACK (info
->sig
->param_count
);
8749 sp
-= info
->sig
->param_count
;
8751 ins
= mono_emit_jit_icall (cfg
, info
->func
, sp
);
8752 if (!MONO_TYPE_IS_VOID (info
->sig
->ret
))
8756 inline_costs
+= 10 * num_calls
++;
8760 case CEE_MONO_LDPTR
: {
8763 CHECK_STACK_OVF (1);
8765 token
= read32 (ip
+ 2);
8767 ptr
= mono_method_get_wrapper_data (method
, token
);
8768 if (cfg
->compile_aot
&& (method
->wrapper_type
== MONO_WRAPPER_MANAGED_TO_NATIVE
) && (strstr (method
->name
, "__icall_wrapper_") == method
->name
)) {
8769 MonoJitICallInfo
*callinfo
;
8770 const char *icall_name
;
8772 icall_name
= method
->name
+ strlen ("__icall_wrapper_");
8773 g_assert (icall_name
);
8774 callinfo
= mono_find_jit_icall_by_name (icall_name
);
8775 g_assert (callinfo
);
8777 if (ptr
== callinfo
->func
) {
8778 /* Will be transformed into an AOTCONST later */
8779 EMIT_NEW_PCONST (cfg
, ins
, ptr
);
8785 /* FIXME: Generalize this */
8786 if (cfg
->compile_aot
&& ptr
== mono_thread_interruption_request_flag ()) {
8787 EMIT_NEW_AOTCONST (cfg
, ins
, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG
, NULL
);
8792 EMIT_NEW_PCONST (cfg
, ins
, ptr
);
8795 inline_costs
+= 10 * num_calls
++;
8796 /* Can't embed random pointers into AOT code */
8797 cfg
->disable_aot
= 1;
8800 case CEE_MONO_ICALL_ADDR
: {
8801 MonoMethod
*cmethod
;
8804 CHECK_STACK_OVF (1);
8806 token
= read32 (ip
+ 2);
8808 cmethod
= mono_method_get_wrapper_data (method
, token
);
8810 if (cfg
->compile_aot
) {
8811 EMIT_NEW_AOTCONST (cfg
, ins
, MONO_PATCH_INFO_ICALL_ADDR
, cmethod
);
8813 ptr
= mono_lookup_internal_call (cmethod
);
8815 EMIT_NEW_PCONST (cfg
, ins
, ptr
);
8821 case CEE_MONO_VTADDR
: {
8822 MonoInst
*src_var
, *src
;
8828 src_var
= get_vreg_to_inst (cfg
, sp
[0]->dreg
);
8829 EMIT_NEW_VARLOADA ((cfg
), (src
), src_var
, src_var
->inst_vtype
);
8834 case CEE_MONO_NEWOBJ
: {
8835 MonoInst
*iargs
[2];
8837 CHECK_STACK_OVF (1);
8839 token
= read32 (ip
+ 2);
8840 klass
= (MonoClass
*)mono_method_get_wrapper_data (method
, token
);
8841 mono_class_init (klass
);
8842 NEW_DOMAINCONST (cfg
, iargs
[0]);
8843 MONO_ADD_INS (cfg
->cbb
, iargs
[0]);
8844 NEW_CLASSCONST (cfg
, iargs
[1], klass
);
8845 MONO_ADD_INS (cfg
->cbb
, iargs
[1]);
8846 *sp
++ = mono_emit_jit_icall (cfg
, mono_object_new
, iargs
);
8848 inline_costs
+= 10 * num_calls
++;
8851 case CEE_MONO_OBJADDR
:
8854 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
8855 ins
->dreg
= alloc_preg (cfg
);
8856 ins
->sreg1
= sp
[0]->dreg
;
8857 ins
->type
= STACK_MP
;
8858 MONO_ADD_INS (cfg
->cbb
, ins
);
8862 case CEE_MONO_LDNATIVEOBJ
:
8864 * Similar to LDOBJ, but instead load the unmanaged
8865 * representation of the vtype to the stack.
8870 token
= read32 (ip
+ 2);
8871 klass
= mono_method_get_wrapper_data (method
, token
);
8872 g_assert (klass
->valuetype
);
8873 mono_class_init (klass
);
8876 MonoInst
*src
, *dest
, *temp
;
8879 temp
= mono_compile_create_var (cfg
, &klass
->byval_arg
, OP_LOCAL
);
8880 temp
->backend
.is_pinvoke
= 1;
8881 EMIT_NEW_TEMPLOADA (cfg
, dest
, temp
->inst_c0
);
8882 mini_emit_stobj (cfg
, dest
, src
, klass
, TRUE
);
8884 EMIT_NEW_TEMPLOAD (cfg
, dest
, temp
->inst_c0
);
8885 dest
->type
= STACK_VTYPE
;
8886 dest
->klass
= klass
;
8892 case CEE_MONO_RETOBJ
: {
8894 * Same as RET, but return the native representation of a vtype
8897 g_assert (cfg
->ret
);
8898 g_assert (mono_method_signature (method
)->pinvoke
);
8903 token
= read32 (ip
+ 2);
8904 klass
= (MonoClass
*)mono_method_get_wrapper_data (method
, token
);
8906 if (!cfg
->vret_addr
) {
8907 g_assert (cfg
->ret_var_is_local
);
8909 EMIT_NEW_VARLOADA (cfg
, ins
, cfg
->ret
, cfg
->ret
->inst_vtype
);
8911 EMIT_NEW_RETLOADA (cfg
, ins
);
8913 mini_emit_stobj (cfg
, ins
, sp
[0], klass
, TRUE
);
8915 if (sp
!= stack_start
)
8918 MONO_INST_NEW (cfg
, ins
, OP_BR
);
8919 ins
->inst_target_bb
= end_bblock
;
8920 MONO_ADD_INS (bblock
, ins
);
8921 link_bblock (cfg
, bblock
, end_bblock
);
8922 start_new_bblock
= 1;
8926 case CEE_MONO_CISINST
:
8927 case CEE_MONO_CCASTCLASS
: {
8932 token
= read32 (ip
+ 2);
8933 klass
= (MonoClass
*)mono_method_get_wrapper_data (method
, token
);
8934 if (ip
[1] == CEE_MONO_CISINST
)
8935 ins
= handle_cisinst (cfg
, klass
, sp
[0]);
8937 ins
= handle_ccastclass (cfg
, klass
, sp
[0]);
8943 case CEE_MONO_SAVE_LMF
:
8944 case CEE_MONO_RESTORE_LMF
:
8945 #ifdef MONO_ARCH_HAVE_LMF_OPS
8946 MONO_INST_NEW (cfg
, ins
, (ip
[1] == CEE_MONO_SAVE_LMF
) ? OP_SAVE_LMF
: OP_RESTORE_LMF
);
8947 MONO_ADD_INS (bblock
, ins
);
8948 cfg
->need_lmf_area
= TRUE
;
8952 case CEE_MONO_CLASSCONST
:
8953 CHECK_STACK_OVF (1);
8955 token
= read32 (ip
+ 2);
8956 EMIT_NEW_CLASSCONST (cfg
, ins
, mono_method_get_wrapper_data (method
, token
));
8959 inline_costs
+= 10 * num_calls
++;
8961 case CEE_MONO_NOT_TAKEN
:
8962 bblock
->out_of_line
= TRUE
;
8966 CHECK_STACK_OVF (1);
8968 MONO_INST_NEW (cfg
, ins
, OP_TLS_GET
);
8969 ins
->dreg
= alloc_preg (cfg
);
8970 ins
->inst_offset
= (gint32
)read32 (ip
+ 2);
8971 ins
->type
= STACK_PTR
;
8972 MONO_ADD_INS (bblock
, ins
);
8977 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX
, ip
[1]);
8987 /* somewhat similar to LDTOKEN */
8988 MonoInst
*addr
, *vtvar
;
8989 CHECK_STACK_OVF (1);
8990 vtvar
= mono_compile_create_var (cfg
, &mono_defaults
.argumenthandle_class
->byval_arg
, OP_LOCAL
);
8992 EMIT_NEW_TEMPLOADA (cfg
, addr
, vtvar
->inst_c0
);
8993 EMIT_NEW_UNALU (cfg
, ins
, OP_ARGLIST
, -1, addr
->dreg
);
8995 EMIT_NEW_TEMPLOAD (cfg
, ins
, vtvar
->inst_c0
);
8996 ins
->type
= STACK_VTYPE
;
8997 ins
->klass
= mono_defaults
.argumenthandle_class
;
9010 * The following transforms:
9011 * CEE_CEQ into OP_CEQ
9012 * CEE_CGT into OP_CGT
9013 * CEE_CGT_UN into OP_CGT_UN
9014 * CEE_CLT into OP_CLT
9015 * CEE_CLT_UN into OP_CLT_UN
9017 MONO_INST_NEW (cfg
, cmp
, (OP_CEQ
- CEE_CEQ
) + ip
[1]);
9019 MONO_INST_NEW (cfg
, ins
, cmp
->opcode
);
9021 cmp
->sreg1
= sp
[0]->dreg
;
9022 cmp
->sreg2
= sp
[1]->dreg
;
9023 type_from_op (cmp
, sp
[0], sp
[1]);
9025 if ((sp
[0]->type
== STACK_I8
) || ((SIZEOF_REGISTER
== 8) && ((sp
[0]->type
== STACK_PTR
) || (sp
[0]->type
== STACK_OBJ
) || (sp
[0]->type
== STACK_MP
))))
9026 cmp
->opcode
= OP_LCOMPARE
;
9027 else if (sp
[0]->type
== STACK_R8
)
9028 cmp
->opcode
= OP_FCOMPARE
;
9030 cmp
->opcode
= OP_ICOMPARE
;
9031 MONO_ADD_INS (bblock
, cmp
);
9032 ins
->type
= STACK_I4
;
9033 ins
->dreg
= alloc_dreg (cfg
, ins
->type
);
9034 type_from_op (ins
, sp
[0], sp
[1]);
9036 if (cmp
->opcode
== OP_FCOMPARE
) {
9038 * The backends expect the fceq opcodes to do the
9041 cmp
->opcode
= OP_NOP
;
9042 ins
->sreg1
= cmp
->sreg1
;
9043 ins
->sreg2
= cmp
->sreg2
;
9045 MONO_ADD_INS (bblock
, ins
);
9052 MonoMethod
*cil_method
;
9053 gboolean needs_static_rgctx_invoke
;
9055 CHECK_STACK_OVF (1);
9057 n
= read32 (ip
+ 2);
9058 cmethod
= mini_get_method (cfg
, method
, n
, NULL
, generic_context
);
9061 mono_class_init (cmethod
->klass
);
9063 mono_save_token_info (cfg
, image
, n
, cmethod
);
9065 if (cfg
->generic_sharing_context
)
9066 context_used
= mono_method_check_context_used (cmethod
);
9068 needs_static_rgctx_invoke
= mono_method_needs_static_rgctx_invoke (cmethod
, TRUE
);
9070 cil_method
= cmethod
;
9071 if (!dont_verify
&& !cfg
->skip_visibility
&& !mono_method_can_access_method (method
, cmethod
))
9072 METHOD_ACCESS_FAILURE
;
9074 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS
) {
9075 if (check_linkdemand (cfg
, method
, cmethod
))
9077 CHECK_CFG_EXCEPTION
;
9078 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR
) {
9079 ensure_method_is_allowed_to_call_method (cfg
, method
, cmethod
, bblock
, ip
);
9083 * Optimize the common case of ldftn+delegate creation
9085 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE) && !defined(HAVE_WRITE_BARRIERS)
9086 /* FIXME: SGEN support */
9087 /* FIXME: handle shared static generic methods */
9088 /* FIXME: handle this in shared code */
9089 if (!needs_static_rgctx_invoke
&& !context_used
&& (sp
> stack_start
) && (ip
+ 6 + 5 < end
) && ip_in_bb (cfg
, bblock
, ip
+ 6) && (ip
[6] == CEE_NEWOBJ
)) {
9090 MonoMethod
*ctor_method
= mini_get_method (cfg
, method
, read32 (ip
+ 7), NULL
, generic_context
);
9091 if (ctor_method
&& (ctor_method
->klass
->parent
== mono_defaults
.multicastdelegate_class
)) {
9092 MonoInst
*target_ins
;
9095 invoke
= mono_get_delegate_invoke (ctor_method
->klass
);
9096 if (!invoke
|| !mono_method_signature (invoke
))
9100 if (cfg
->verbose_level
> 3)
9101 g_print ("converting (in B%d: stack: %d) %s", bblock
->block_num
, (int)(sp
- stack_start
), mono_disasm_code_one (NULL
, method
, ip
, NULL
));
9102 target_ins
= sp
[-1];
9104 *sp
= handle_delegate_ctor (cfg
, ctor_method
->klass
, target_ins
, cmethod
);
9113 if (needs_static_rgctx_invoke
)
9114 cmethod
= mono_marshal_get_static_rgctx_invoke (cmethod
);
9116 argconst
= emit_get_rgctx_method (cfg
, context_used
, cmethod
, MONO_RGCTX_INFO_METHOD
);
9117 } else if (needs_static_rgctx_invoke
) {
9118 EMIT_NEW_METHODCONST (cfg
, argconst
, mono_marshal_get_static_rgctx_invoke (cmethod
));
9120 EMIT_NEW_METHODCONST (cfg
, argconst
, cmethod
);
9122 ins
= mono_emit_jit_icall (cfg
, mono_ldftn
, &argconst
);
9126 inline_costs
+= 10 * num_calls
++;
9129 case CEE_LDVIRTFTN
: {
9134 n
= read32 (ip
+ 2);
9135 cmethod
= mini_get_method (cfg
, method
, n
, NULL
, generic_context
);
9138 mono_class_init (cmethod
->klass
);
9140 if (cfg
->generic_sharing_context
)
9141 context_used
= mono_method_check_context_used (cmethod
);
9143 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS
) {
9144 if (check_linkdemand (cfg
, method
, cmethod
))
9146 CHECK_CFG_EXCEPTION
;
9147 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR
) {
9148 ensure_method_is_allowed_to_call_method (cfg
, method
, cmethod
, bblock
, ip
);
9155 args
[1] = emit_get_rgctx_method (cfg
, context_used
,
9156 cmethod
, MONO_RGCTX_INFO_METHOD
);
9157 *sp
++ = mono_emit_jit_icall (cfg
, mono_ldvirtfn_gshared
, args
);
9159 EMIT_NEW_METHODCONST (cfg
, args
[1], cmethod
);
9160 *sp
++ = mono_emit_jit_icall (cfg
, mono_ldvirtfn
, args
);
9164 inline_costs
+= 10 * num_calls
++;
9168 CHECK_STACK_OVF (1);
9170 n
= read16 (ip
+ 2);
9172 EMIT_NEW_ARGLOAD (cfg
, ins
, n
);
9177 CHECK_STACK_OVF (1);
9179 n
= read16 (ip
+ 2);
9181 NEW_ARGLOADA (cfg
, ins
, n
);
9182 MONO_ADD_INS (cfg
->cbb
, ins
);
9190 n
= read16 (ip
+ 2);
9192 if (!dont_verify_stloc
&& target_type_is_incompatible (cfg
, param_types
[n
], *sp
))
9194 EMIT_NEW_ARGSTORE (cfg
, ins
, n
, *sp
);
9198 CHECK_STACK_OVF (1);
9200 n
= read16 (ip
+ 2);
9202 EMIT_NEW_LOCLOAD (cfg
, ins
, n
);
9207 unsigned char *tmp_ip
;
9208 CHECK_STACK_OVF (1);
9210 n
= read16 (ip
+ 2);
9213 if ((tmp_ip
= emit_optimized_ldloca_ir (cfg
, ip
, end
, 2))) {
9219 EMIT_NEW_LOCLOADA (cfg
, ins
, n
);
9228 n
= read16 (ip
+ 2);
9230 if (!dont_verify_stloc
&& target_type_is_incompatible (cfg
, header
->locals
[n
], *sp
))
9232 emit_stloc_ir (cfg
, sp
, header
, n
);
9239 if (sp
!= stack_start
)
9241 if (cfg
->method
!= method
)
9243 * Inlining this into a loop in a parent could lead to
9244 * stack overflows which is different behavior than the
9245 * non-inlined case, thus disable inlining in this case.
9247 goto inline_failure
;
9249 MONO_INST_NEW (cfg
, ins
, OP_LOCALLOC
);
9250 ins
->dreg
= alloc_preg (cfg
);
9251 ins
->sreg1
= sp
[0]->dreg
;
9252 ins
->type
= STACK_PTR
;
9253 MONO_ADD_INS (cfg
->cbb
, ins
);
9255 cfg
->flags
|= MONO_CFG_HAS_ALLOCA
;
9256 if (header
->init_locals
)
9257 ins
->flags
|= MONO_INST_INIT
;
9262 case CEE_ENDFILTER
: {
9263 MonoExceptionClause
*clause
, *nearest
;
9264 int cc
, nearest_num
;
9268 if ((sp
!= stack_start
) || (sp
[0]->type
!= STACK_I4
))
9270 MONO_INST_NEW (cfg
, ins
, OP_ENDFILTER
);
9271 ins
->sreg1
= (*sp
)->dreg
;
9272 MONO_ADD_INS (bblock
, ins
);
9273 start_new_bblock
= 1;
9278 for (cc
= 0; cc
< header
->num_clauses
; ++cc
) {
9279 clause
= &header
->clauses
[cc
];
9280 if ((clause
->flags
& MONO_EXCEPTION_CLAUSE_FILTER
) &&
9281 ((ip
- header
->code
) > clause
->data
.filter_offset
&& (ip
- header
->code
) <= clause
->handler_offset
) &&
9282 (!nearest
|| (clause
->data
.filter_offset
< nearest
->data
.filter_offset
))) {
9288 if ((ip
- header
->code
) != nearest
->handler_offset
)
9293 case CEE_UNALIGNED_
:
9294 ins_flag
|= MONO_INST_UNALIGNED
;
9295 /* FIXME: record alignment? we can assume 1 for now */
9300 ins_flag
|= MONO_INST_VOLATILE
;
9304 ins_flag
|= MONO_INST_TAILCALL
;
9305 cfg
->flags
|= MONO_CFG_HAS_TAIL
;
9306 /* Can't inline tail calls at this time */
9307 inline_costs
+= 100000;
9314 token
= read32 (ip
+ 2);
9315 klass
= mini_get_class (method
, token
, generic_context
);
9316 CHECK_TYPELOAD (klass
);
9317 if (generic_class_is_reference_type (cfg
, klass
))
9318 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STORE_MEMBASE_IMM
, sp
[0]->dreg
, 0, 0);
9320 mini_emit_initobj (cfg
, *sp
, NULL
, klass
);
9324 case CEE_CONSTRAINED_
:
9326 token
= read32 (ip
+ 2);
9327 constrained_call
= mono_class_get_full (image
, token
, generic_context
);
9328 CHECK_TYPELOAD (constrained_call
);
9333 MonoInst
*iargs
[3];
9337 if ((ip
[1] == CEE_CPBLK
) && (cfg
->opt
& MONO_OPT_INTRINS
) && (sp
[2]->opcode
== OP_ICONST
) && ((n
= sp
[2]->inst_c0
) <= sizeof (gpointer
) * 5)) {
9338 mini_emit_memcpy (cfg
, sp
[0]->dreg
, 0, sp
[1]->dreg
, 0, sp
[2]->inst_c0
, 0);
9339 } else if ((ip
[1] == CEE_INITBLK
) && (cfg
->opt
& MONO_OPT_INTRINS
) && (sp
[2]->opcode
== OP_ICONST
) && ((n
= sp
[2]->inst_c0
) <= sizeof (gpointer
) * 5) && (sp
[1]->opcode
== OP_ICONST
) && (sp
[1]->inst_c0
== 0)) {
9340 /* emit_memset only works when val == 0 */
9341 mini_emit_memset (cfg
, sp
[0]->dreg
, 0, sp
[2]->inst_c0
, sp
[1]->inst_c0
, 0);
9346 if (ip
[1] == CEE_CPBLK
) {
9347 MonoMethod
*memcpy_method
= get_memcpy_method ();
9348 mono_emit_method_call (cfg
, memcpy_method
, iargs
, NULL
);
9350 MonoMethod
*memset_method
= get_memset_method ();
9351 mono_emit_method_call (cfg
, memset_method
, iargs
, NULL
);
9361 ins_flag
|= MONO_INST_NOTYPECHECK
;
9363 ins_flag
|= MONO_INST_NORANGECHECK
;
9364 /* we ignore the no-nullcheck for now since we
9365 * really do it explicitly only when doing callvirt->call
9371 int handler_offset
= -1;
9373 for (i
= 0; i
< header
->num_clauses
; ++i
) {
9374 MonoExceptionClause
*clause
= &header
->clauses
[i
];
9375 if (MONO_OFFSET_IN_HANDLER (clause
, ip
- header
->code
) && !(clause
->flags
& MONO_EXCEPTION_CLAUSE_FINALLY
)) {
9376 handler_offset
= clause
->handler_offset
;
9381 bblock
->flags
|= BB_EXCEPTION_UNSAFE
;
9383 g_assert (handler_offset
!= -1);
9385 EMIT_NEW_TEMPLOAD (cfg
, load
, mono_find_exvar_for_offset (cfg
, handler_offset
)->inst_c0
);
9386 MONO_INST_NEW (cfg
, ins
, OP_RETHROW
);
9387 ins
->sreg1
= load
->dreg
;
9388 MONO_ADD_INS (bblock
, ins
);
9390 link_bblock (cfg
, bblock
, end_bblock
);
9391 start_new_bblock
= 1;
9399 CHECK_STACK_OVF (1);
9401 token
= read32 (ip
+ 2);
9402 if (mono_metadata_token_table (token
) == MONO_TABLE_TYPESPEC
) {
9403 MonoType
*type
= mono_type_create_from_typespec (image
, token
);
9404 token
= mono_type_size (type
, &ialign
);
9406 MonoClass
*klass
= mono_class_get_full (image
, token
, generic_context
);
9407 CHECK_TYPELOAD (klass
);
9408 mono_class_init (klass
);
9409 token
= mono_class_value_size (klass
, &align
);
9411 EMIT_NEW_ICONST (cfg
, ins
, token
);
9416 case CEE_REFANYTYPE
: {
9417 MonoInst
*src_var
, *src
;
9423 src_var
= get_vreg_to_inst (cfg
, sp
[0]->dreg
);
9425 src_var
= mono_compile_create_var_for_vreg (cfg
, &mono_defaults
.typed_reference_class
->byval_arg
, OP_LOCAL
, sp
[0]->dreg
);
9426 EMIT_NEW_VARLOADA (cfg
, src
, src_var
, src_var
->inst_vtype
);
9427 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, &mono_defaults
.typehandle_class
->byval_arg
, src
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, type
));
9437 g_error ("opcode 0xfe 0x%02x not handled", ip
[1]);
9442 g_error ("opcode 0x%02x not handled", *ip
);
9445 if (start_new_bblock
!= 1)
9448 bblock
->cil_length
= ip
- bblock
->cil_code
;
9449 bblock
->next_bb
= end_bblock
;
9451 if (cfg
->method
== method
&& cfg
->domainvar
) {
9453 MonoInst
*get_domain
;
9455 cfg
->cbb
= init_localsbb
;
9457 if (! (get_domain
= mono_arch_get_domain_intrinsic (cfg
))) {
9458 get_domain
= mono_emit_jit_icall (cfg
, mono_domain_get
, NULL
);
9461 get_domain
->dreg
= alloc_preg (cfg
);
9462 MONO_ADD_INS (cfg
->cbb
, get_domain
);
9464 NEW_TEMPSTORE (cfg
, store
, cfg
->domainvar
->inst_c0
, get_domain
);
9465 MONO_ADD_INS (cfg
->cbb
, store
);
9468 if (cfg
->method
== method
&& cfg
->got_var
)
9469 mono_emit_load_got_addr (cfg
);
9471 if (header
->init_locals
) {
9474 cfg
->cbb
= init_localsbb
;
9476 for (i
= 0; i
< header
->num_locals
; ++i
) {
9477 MonoType
*ptype
= header
->locals
[i
];
9478 int t
= ptype
->type
;
9479 dreg
= cfg
->locals
[i
]->dreg
;
9481 if (t
== MONO_TYPE_VALUETYPE
&& ptype
->data
.klass
->enumtype
)
9482 t
= mono_class_enum_basetype (ptype
->data
.klass
)->type
;
9484 MONO_EMIT_NEW_PCONST (cfg
, dreg
, NULL
);
9485 } else if (t
>= MONO_TYPE_BOOLEAN
&& t
<= MONO_TYPE_U4
) {
9486 MONO_EMIT_NEW_ICONST (cfg
, cfg
->locals
[i
]->dreg
, 0);
9487 } else if (t
== MONO_TYPE_I8
|| t
== MONO_TYPE_U8
) {
9488 MONO_EMIT_NEW_I8CONST (cfg
, cfg
->locals
[i
]->dreg
, 0);
9489 } else if (t
== MONO_TYPE_R4
|| t
== MONO_TYPE_R8
) {
9490 MONO_INST_NEW (cfg
, ins
, OP_R8CONST
);
9491 ins
->type
= STACK_R8
;
9492 ins
->inst_p0
= (void*)&r8_0
;
9493 ins
->dreg
= alloc_dreg (cfg
, STACK_R8
);
9494 MONO_ADD_INS (init_localsbb
, ins
);
9495 EMIT_NEW_LOCSTORE (cfg
, store
, i
, ins
);
9496 } else if ((t
== MONO_TYPE_VALUETYPE
) || (t
== MONO_TYPE_TYPEDBYREF
) ||
9497 ((t
== MONO_TYPE_GENERICINST
) && mono_type_generic_inst_is_valuetype (ptype
))) {
9498 MONO_EMIT_NEW_VZERO (cfg
, dreg
, mono_class_from_mono_type (ptype
));
9500 MONO_EMIT_NEW_PCONST (cfg
, dreg
, NULL
);
9507 if (cfg
->method
== method
) {
9509 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
) {
9510 bb
->region
= mono_find_block_region (cfg
, bb
->real_offset
);
9512 mono_create_spvar_for_region (cfg
, bb
->region
);
9513 if (cfg
->verbose_level
> 2)
9514 printf ("REGION BB%d IL_%04x ID_%08X\n", bb
->block_num
, bb
->real_offset
, bb
->region
);
9518 g_slist_free (class_inits
);
9519 dont_inline
= g_list_remove (dont_inline
, method
);
9521 if (inline_costs
< 0) {
9524 /* Method is too large */
9525 mname
= mono_method_full_name (method
, TRUE
);
9526 cfg
->exception_type
= MONO_EXCEPTION_INVALID_PROGRAM
;
9527 cfg
->exception_message
= g_strdup_printf ("Method %s is too complex.", mname
);
9532 if ((cfg
->verbose_level
> 2) && (cfg
->method
== method
))
9533 mono_print_code (cfg
, "AFTER METHOD-TO-IR");
9535 return inline_costs
;
9538 g_assert (cfg
->exception_type
!= MONO_EXCEPTION_NONE
);
9539 g_slist_free (class_inits
);
9540 dont_inline
= g_list_remove (dont_inline
, method
);
9544 g_slist_free (class_inits
);
9545 dont_inline
= g_list_remove (dont_inline
, method
);
9549 g_slist_free (class_inits
);
9550 dont_inline
= g_list_remove (dont_inline
, method
);
9551 cfg
->exception_type
= MONO_EXCEPTION_TYPE_LOAD
;
9555 g_slist_free (class_inits
);
9556 dont_inline
= g_list_remove (dont_inline
, method
);
9557 set_exception_type_from_invalid_il (cfg
, method
, ip
);
9562 store_membase_reg_to_store_membase_imm (int opcode
)
9565 case OP_STORE_MEMBASE_REG
:
9566 return OP_STORE_MEMBASE_IMM
;
9567 case OP_STOREI1_MEMBASE_REG
:
9568 return OP_STOREI1_MEMBASE_IMM
;
9569 case OP_STOREI2_MEMBASE_REG
:
9570 return OP_STOREI2_MEMBASE_IMM
;
9571 case OP_STOREI4_MEMBASE_REG
:
9572 return OP_STOREI4_MEMBASE_IMM
;
9573 case OP_STOREI8_MEMBASE_REG
:
9574 return OP_STOREI8_MEMBASE_IMM
;
9576 g_assert_not_reached ();
9582 #endif /* DISABLE_JIT */
/*
 * mono_op_to_op_imm:
 *
 *   Return the immediate-operand variant of OPCODE (e.g.
 * OP_STORE_MEMBASE_REG -> OP_STORE_MEMBASE_IMM), or -1 if there is
 * none -- TODO confirm the failure value; the default arm is not
 * visible in this fragment.
 *
 * NOTE(review): this extraction fragment is lossy -- the enclosing
 * switch statement and most of the `case` labels were dropped, so only
 * some of the `return` arms survive below.  Restore from the original
 * file before editing.
 */
9585 mono_op_to_op_imm (int opcode
)
/* Integer / long division, remainder and shift arms (labels dropped). */
9595 return OP_IDIV_UN_IMM
;
9599 return OP_IREM_UN_IMM
;
9613 return OP_ISHR_UN_IMM
;
9630 return OP_LSHR_UN_IMM
;
/* Compare arms. */
9633 return OP_COMPARE_IMM
;
9635 return OP_ICOMPARE_IMM
;
9637 return OP_LCOMPARE_IMM
;
/* Store-to-membase arms (these labels survived). */
9639 case OP_STORE_MEMBASE_REG
:
9640 return OP_STORE_MEMBASE_IMM
;
9641 case OP_STOREI1_MEMBASE_REG
:
9642 return OP_STOREI1_MEMBASE_IMM
;
9643 case OP_STOREI2_MEMBASE_REG
:
9644 return OP_STOREI2_MEMBASE_IMM
;
9645 case OP_STOREI4_MEMBASE_REG
:
9646 return OP_STOREI4_MEMBASE_IMM
;
/* x86/amd64-only arms, guarded by the architecture #ifs below. */
9648 #if defined(__i386__) || defined (__x86_64__)
9650 return OP_X86_PUSH_IMM
;
9651 case OP_X86_COMPARE_MEMBASE_REG
:
9652 return OP_X86_COMPARE_MEMBASE_IMM
;
9654 #if defined(__x86_64__)
9655 case OP_AMD64_ICOMPARE_MEMBASE_REG
:
9656 return OP_AMD64_ICOMPARE_MEMBASE_IMM
;
9658 case OP_VOIDCALL_REG
:
9667 return OP_LOCALLOC_IMM
;
9674 ldind_to_load_membase (int opcode
)
9678 return OP_LOADI1_MEMBASE
;
9680 return OP_LOADU1_MEMBASE
;
9682 return OP_LOADI2_MEMBASE
;
9684 return OP_LOADU2_MEMBASE
;
9686 return OP_LOADI4_MEMBASE
;
9688 return OP_LOADU4_MEMBASE
;
9690 return OP_LOAD_MEMBASE
;
9692 return OP_LOAD_MEMBASE
;
9694 return OP_LOADI8_MEMBASE
;
9696 return OP_LOADR4_MEMBASE
;
9698 return OP_LOADR8_MEMBASE
;
9700 g_assert_not_reached ();
9707 stind_to_store_membase (int opcode
)
9711 return OP_STOREI1_MEMBASE_REG
;
9713 return OP_STOREI2_MEMBASE_REG
;
9715 return OP_STOREI4_MEMBASE_REG
;
9718 return OP_STORE_MEMBASE_REG
;
9720 return OP_STOREI8_MEMBASE_REG
;
9722 return OP_STORER4_MEMBASE_REG
;
9724 return OP_STORER8_MEMBASE_REG
;
9726 g_assert_not_reached ();
9733 mono_load_membase_to_load_mem (int opcode
)
9735 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
9736 #if defined(__i386__) || defined(__x86_64__)
9738 case OP_LOAD_MEMBASE
:
9740 case OP_LOADU1_MEMBASE
:
9741 return OP_LOADU1_MEM
;
9742 case OP_LOADU2_MEMBASE
:
9743 return OP_LOADU2_MEM
;
9744 case OP_LOADI4_MEMBASE
:
9745 return OP_LOADI4_MEM
;
9746 case OP_LOADU4_MEMBASE
:
9747 return OP_LOADU4_MEM
;
9748 #if SIZEOF_REGISTER == 8
9749 case OP_LOADI8_MEMBASE
:
9750 return OP_LOADI8_MEM
;
/*
 * op_to_op_dest_membase:
 *
 *   Given a store opcode STORE_OPCODE and an ALU opcode OPCODE whose
 * destination is about to be stored, return a fused x86/amd64
 * read-modify-write *_MEMBASE opcode, so a load+op+store sequence can be
 * collapsed into one instruction.  The guards below reject store widths
 * the fused forms cannot handle.
 *
 * NOTE(review): lossy extraction fragment -- the switch header, `case`
 * labels (presumably OP_IADD/OP_ISUB/... and their _IMM forms) and the
 * failure `return -1` were dropped; only the guard conditions and the
 * `return` arms survive.  Restore from the original file before editing.
 */
9759 op_to_op_dest_membase (int store_opcode
, int opcode
)
/* 32-bit x86: only pointer-sized / i4 stores can be fused. */
9761 #if defined(__i386__)
9762 if (!((store_opcode
== OP_STORE_MEMBASE_REG
) || (store_opcode
== OP_STOREI4_MEMBASE_REG
)))
/* Register-source fused arms. */
9767 return OP_X86_ADD_MEMBASE_REG
;
9769 return OP_X86_SUB_MEMBASE_REG
;
9771 return OP_X86_AND_MEMBASE_REG
;
9773 return OP_X86_OR_MEMBASE_REG
;
9775 return OP_X86_XOR_MEMBASE_REG
;
/* Immediate-source fused arms. */
9778 return OP_X86_ADD_MEMBASE_IMM
;
9781 return OP_X86_SUB_MEMBASE_IMM
;
9784 return OP_X86_AND_MEMBASE_IMM
;
9787 return OP_X86_OR_MEMBASE_IMM
;
9790 return OP_X86_XOR_MEMBASE_IMM
;
/* amd64: i8 stores can be fused as well. */
9796 #if defined(__x86_64__)
9797 if (!((store_opcode
== OP_STORE_MEMBASE_REG
) || (store_opcode
== OP_STOREI4_MEMBASE_REG
) || (store_opcode
== OP_STOREI8_MEMBASE_REG
)))
/* 32-bit ALU arms (reuse the OP_X86_* opcodes). */
9802 return OP_X86_ADD_MEMBASE_REG
;
9804 return OP_X86_SUB_MEMBASE_REG
;
9806 return OP_X86_AND_MEMBASE_REG
;
9808 return OP_X86_OR_MEMBASE_REG
;
9810 return OP_X86_XOR_MEMBASE_REG
;
9812 return OP_X86_ADD_MEMBASE_IMM
;
9814 return OP_X86_SUB_MEMBASE_IMM
;
9816 return OP_X86_AND_MEMBASE_IMM
;
9818 return OP_X86_OR_MEMBASE_IMM
;
9820 return OP_X86_XOR_MEMBASE_IMM
;
/* 64-bit ALU arms. */
9822 return OP_AMD64_ADD_MEMBASE_REG
;
9824 return OP_AMD64_SUB_MEMBASE_REG
;
9826 return OP_AMD64_AND_MEMBASE_REG
;
9828 return OP_AMD64_OR_MEMBASE_REG
;
9830 return OP_AMD64_XOR_MEMBASE_REG
;
9833 return OP_AMD64_ADD_MEMBASE_IMM
;
9836 return OP_AMD64_SUB_MEMBASE_IMM
;
9839 return OP_AMD64_AND_MEMBASE_IMM
;
9842 return OP_AMD64_OR_MEMBASE_IMM
;
9845 return OP_AMD64_XOR_MEMBASE_IMM
;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a compare-result opcode with the byte store that follows it
 * into an x86 SET<cc>-to-memory opcode.  Only valid when the store is
 * OP_STOREI1_MEMBASE_REG, since SETcc writes a single byte.
 *
 * NOTE(review): lossy extraction fragment -- the switch header, the
 * `case` labels selecting between the SETEQ/SETNE arms, and the
 * failure `return -1` were dropped.  Restore from the original file
 * before editing.
 */
9855 op_to_op_store_membase (int store_opcode
, int opcode
)
9857 #if defined(__i386__) || defined(__x86_64__)
9860 if (store_opcode
== OP_STOREI1_MEMBASE_REG
)
9861 return OP_X86_SETEQ_MEMBASE
;
9863 if (store_opcode
== OP_STOREI1_MEMBASE_REG
)
9864 return OP_X86_SETNE_MEMBASE
;
/*
 * op_to_op_src1_membase:
 *
 *   Given a load opcode LOAD_OPCODE feeding the first source operand of
 * OPCODE, return a fused opcode which reads that operand directly from
 * memory, or -1 -- TODO confirm the failure value; the final return was
 * dropped from this fragment.
 *
 * NOTE(review): lossy extraction fragment -- the arch #if split, switch
 * headers and several `case` labels were dropped; some complete lines
 * survived intact (the OP_LOADU1_MEMBASE special cases below).
 */
9872 op_to_op_src1_membase (int load_opcode
, int opcode
)
/* Byte compares can be fused regardless of the guard below. */
9875 /* FIXME: This has sign extension issues */
9877 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
9878 return OP_X86_COMPARE_MEMBASE8_IMM;
/* x86: only pointer / i4 / u4 loads can be fused. */
9881 if (!((load_opcode
== OP_LOAD_MEMBASE
) || (load_opcode
== OP_LOADI4_MEMBASE
) || (load_opcode
== OP_LOADU4_MEMBASE
)))
9886 return OP_X86_PUSH_MEMBASE
;
9887 case OP_COMPARE_IMM
:
9888 case OP_ICOMPARE_IMM
:
9889 return OP_X86_COMPARE_MEMBASE_IMM
;
9892 return OP_X86_COMPARE_MEMBASE_REG
;
/* amd64 variant of the same fusion. */
9897 /* FIXME: This has sign extension issues */
9899 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
9900 return OP_X86_COMPARE_MEMBASE8_IMM;
9905 if ((load_opcode
== OP_LOAD_MEMBASE
) || (load_opcode
== OP_LOADI8_MEMBASE
))
9906 return OP_X86_PUSH_MEMBASE
;
/* Intentionally disabled upstream (64-bit immediates do not fit). */
9908 /* FIXME: This only works for 32 bit immediates
9909 case OP_COMPARE_IMM:
9910 case OP_LCOMPARE_IMM:
9911 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9912 return OP_AMD64_COMPARE_MEMBASE_IMM;
9914 case OP_ICOMPARE_IMM
:
9915 if ((load_opcode
== OP_LOADI4_MEMBASE
) || (load_opcode
== OP_LOADU4_MEMBASE
))
9916 return OP_AMD64_ICOMPARE_MEMBASE_IMM
;
9920 if ((load_opcode
== OP_LOAD_MEMBASE
) || (load_opcode
== OP_LOADI8_MEMBASE
))
9921 return OP_AMD64_COMPARE_MEMBASE_REG
;
9924 if ((load_opcode
== OP_LOADI4_MEMBASE
) || (load_opcode
== OP_LOADU4_MEMBASE
))
9925 return OP_AMD64_ICOMPARE_MEMBASE_REG
;
/*
 * op_to_op_src2_membase:
 *
 *   Given a load opcode LOAD_OPCODE feeding the second source operand
 * of OPCODE, return a fused REG_MEMBASE opcode which reads that operand
 * directly from memory, or -1 -- TODO confirm the failure value; the
 * final return was dropped from this fragment.
 *
 * NOTE(review): lossy extraction fragment -- the arch #if split, switch
 * headers and the `case` labels (presumably the compare and ALU
 * opcodes) were dropped; only the load-width guards and `return` arms
 * survive.  Restore from the original file before editing.
 */
9934 op_to_op_src2_membase (int load_opcode
, int opcode
)
/* x86: only pointer / i4 / u4 loads can be fused. */
9937 if (!((load_opcode
== OP_LOAD_MEMBASE
) || (load_opcode
== OP_LOADI4_MEMBASE
) || (load_opcode
== OP_LOADU4_MEMBASE
)))
9943 return OP_X86_COMPARE_REG_MEMBASE
;
9945 return OP_X86_ADD_REG_MEMBASE
;
9947 return OP_X86_SUB_REG_MEMBASE
;
9949 return OP_X86_AND_REG_MEMBASE
;
9951 return OP_X86_OR_REG_MEMBASE
;
9953 return OP_X86_XOR_REG_MEMBASE
;
/* amd64: each arm picks the 32- or 64-bit fused opcode by load width. */
9960 if ((load_opcode
== OP_LOADI4_MEMBASE
) || (load_opcode
== OP_LOADU4_MEMBASE
))
9961 return OP_AMD64_ICOMPARE_REG_MEMBASE
;
9965 if ((load_opcode
== OP_LOADI8_MEMBASE
) || (load_opcode
== OP_LOAD_MEMBASE
))
9966 return OP_AMD64_COMPARE_REG_MEMBASE
;
9969 if ((load_opcode
== OP_LOADI4_MEMBASE
) || (load_opcode
== OP_LOADU4_MEMBASE
))
9970 return OP_X86_ADD_REG_MEMBASE
;
9972 if ((load_opcode
== OP_LOADI4_MEMBASE
) || (load_opcode
== OP_LOADU4_MEMBASE
))
9973 return OP_X86_SUB_REG_MEMBASE
;
9975 if ((load_opcode
== OP_LOADI4_MEMBASE
) || (load_opcode
== OP_LOADU4_MEMBASE
))
9976 return OP_X86_AND_REG_MEMBASE
;
9978 if ((load_opcode
== OP_LOADI4_MEMBASE
) || (load_opcode
== OP_LOADU4_MEMBASE
))
9979 return OP_X86_OR_REG_MEMBASE
;
9981 if ((load_opcode
== OP_LOADI4_MEMBASE
) || (load_opcode
== OP_LOADU4_MEMBASE
))
9982 return OP_X86_XOR_REG_MEMBASE
;
9984 if ((load_opcode
== OP_LOADI8_MEMBASE
) || (load_opcode
== OP_LOAD_MEMBASE
))
9985 return OP_AMD64_ADD_REG_MEMBASE
;
9987 if ((load_opcode
== OP_LOADI8_MEMBASE
) || (load_opcode
== OP_LOAD_MEMBASE
))
9988 return OP_AMD64_SUB_REG_MEMBASE
;
9990 if ((load_opcode
== OP_LOADI8_MEMBASE
) || (load_opcode
== OP_LOAD_MEMBASE
))
9991 return OP_AMD64_AND_REG_MEMBASE
;
9993 if ((load_opcode
== OP_LOADI8_MEMBASE
) || (load_opcode
== OP_LOAD_MEMBASE
))
9994 return OP_AMD64_OR_REG_MEMBASE
;
9996 if ((load_opcode
== OP_LOADI8_MEMBASE
) || (load_opcode
== OP_LOAD_MEMBASE
))
9997 return OP_AMD64_XOR_REG_MEMBASE
;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (to which it falls through below), but --
 * judging by the emulation #if guards -- presumably refuses to convert
 * opcodes that are software-emulated on this target (long shifts on
 * 32-bit registers, mul/div).  TODO confirm: the guarded bodies were
 * dropped from this extraction fragment.
 */
10005 mono_op_to_op_imm_noemul (int opcode
)
10008 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
10013 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
10021 return mono_op_to_op_imm (opcode
);
10025 #ifndef DISABLE_JIT
10028 * mono_handle_global_vregs:
10030 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
10034 mono_handle_global_vregs (MonoCompile
*cfg
)
10036 gint32
*vreg_to_bb
;
10037 MonoBasicBlock
*bb
;
10040 vreg_to_bb
= mono_mempool_alloc0 (cfg
->mempool
, sizeof (gint32
*) * cfg
->next_vreg
+ 1);
10042 #ifdef MONO_ARCH_SIMD_INTRINSICS
10043 if (cfg
->uses_simd_intrinsics
)
10044 mono_simd_simplify_indirection (cfg
);
10047 /* Find local vregs used in more than one bb */
10048 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
) {
10049 MonoInst
*ins
= bb
->code
;
10050 int block_num
= bb
->block_num
;
10052 if (cfg
->verbose_level
> 2)
10053 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb
->block_num
);
10056 for (; ins
; ins
= ins
->next
) {
10057 const char *spec
= INS_INFO (ins
->opcode
);
10058 int regtype
, regindex
;
10061 if (G_UNLIKELY (cfg
->verbose_level
> 2))
10062 mono_print_ins (ins
);
10064 g_assert (ins
->opcode
>= MONO_CEE_LAST
);
10066 for (regindex
= 0; regindex
< 3; regindex
++) {
10069 if (regindex
== 0) {
10070 regtype
= spec
[MONO_INST_DEST
];
10071 if (regtype
== ' ')
10074 } else if (regindex
== 1) {
10075 regtype
= spec
[MONO_INST_SRC1
];
10076 if (regtype
== ' ')
10080 regtype
= spec
[MONO_INST_SRC2
];
10081 if (regtype
== ' ')
10086 #if SIZEOF_REGISTER == 4
10087 if (regtype
== 'l') {
10089 * Since some instructions reference the original long vreg,
10090 * and some reference the two component vregs, it is quite hard
10091 * to determine when it needs to be global. So be conservative.
10093 if (!get_vreg_to_inst (cfg
, vreg
)) {
10094 mono_compile_create_var_for_vreg (cfg
, &mono_defaults
.int64_class
->byval_arg
, OP_LOCAL
, vreg
);
10096 if (cfg
->verbose_level
> 2)
10097 printf ("LONG VREG R%d made global.\n", vreg
);
10101 * Make the component vregs volatile since the optimizations can
10102 * get confused otherwise.
10104 get_vreg_to_inst (cfg
, vreg
+ 1)->flags
|= MONO_INST_VOLATILE
;
10105 get_vreg_to_inst (cfg
, vreg
+ 2)->flags
|= MONO_INST_VOLATILE
;
10109 g_assert (vreg
!= -1);
10111 prev_bb
= vreg_to_bb
[vreg
];
10112 if (prev_bb
== 0) {
10113 /* 0 is a valid block num */
10114 vreg_to_bb
[vreg
] = block_num
+ 1;
10115 } else if ((prev_bb
!= block_num
+ 1) && (prev_bb
!= -1)) {
10116 if (((regtype
== 'i' && (vreg
< MONO_MAX_IREGS
))) || (regtype
== 'f' && (vreg
< MONO_MAX_FREGS
)))
10119 if (!get_vreg_to_inst (cfg
, vreg
)) {
10120 if (G_UNLIKELY (cfg
->verbose_level
> 2))
10121 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg
, vreg_to_bb
[vreg
], block_num
);
10125 mono_compile_create_var_for_vreg (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_LOCAL
, vreg
);
10128 mono_compile_create_var_for_vreg (cfg
, &mono_defaults
.double_class
->byval_arg
, OP_LOCAL
, vreg
);
10131 mono_compile_create_var_for_vreg (cfg
, &ins
->klass
->byval_arg
, OP_LOCAL
, vreg
);
10134 g_assert_not_reached ();
10138 /* Flag as having been used in more than one bb */
10139 vreg_to_bb
[vreg
] = -1;
10145 /* If a variable is used in only one bblock, convert it into a local vreg */
10146 for (i
= 0; i
< cfg
->num_varinfo
; i
++) {
10147 MonoInst
*var
= cfg
->varinfo
[i
];
10148 MonoMethodVar
*vmv
= MONO_VARINFO (cfg
, i
);
10150 switch (var
->type
) {
10156 #if SIZEOF_REGISTER == 8
10159 #if !defined(__i386__) && !defined(MONO_ARCH_SOFT_FLOAT)
10160 /* Enabling this screws up the fp stack on x86 */
10163 /* Arguments are implicitly global */
10164 /* Putting R4 vars into registers doesn't work currently */
10165 if ((var
->opcode
!= OP_ARG
) && (var
!= cfg
->ret
) && !(var
->flags
& (MONO_INST_VOLATILE
|MONO_INST_INDIRECT
)) && (vreg_to_bb
[var
->dreg
] != -1) && (var
->klass
->byval_arg
.type
!= MONO_TYPE_R4
) && !cfg
->disable_vreg_to_lvreg
) {
10167 * Make that the variable's liveness interval doesn't contain a call, since
10168 * that would cause the lvreg to be spilled, making the whole optimization
10171 /* This is too slow for JIT compilation */
10173 if (cfg
->compile_aot
&& vreg_to_bb
[var
->dreg
]) {
10175 int def_index
, call_index
, ins_index
;
10176 gboolean spilled
= FALSE
;
10181 for (ins
= vreg_to_bb
[var
->dreg
]->code
; ins
; ins
= ins
->next
) {
10182 const char *spec
= INS_INFO (ins
->opcode
);
10184 if ((spec
[MONO_INST_DEST
] != ' ') && (ins
->dreg
== var
->dreg
))
10185 def_index
= ins_index
;
10187 if (((spec
[MONO_INST_SRC1
] != ' ') && (ins
->sreg1
== var
->dreg
)) ||
10188 ((spec
[MONO_INST_SRC1
] != ' ') && (ins
->sreg1
== var
->dreg
))) {
10189 if (call_index
> def_index
) {
10195 if (MONO_IS_CALL (ins
))
10196 call_index
= ins_index
;
10206 if (G_UNLIKELY (cfg
->verbose_level
> 2))
10207 printf ("CONVERTED R%d(%d) TO VREG.\n", var
->dreg
, vmv
->idx
);
10208 var
->flags
|= MONO_INST_IS_DEAD
;
10209 cfg
->vreg_to_inst
[var
->dreg
] = NULL
;
10216 * Compress the varinfo and vars tables so the liveness computation is faster and
10217 * takes up less space.
10220 for (i
= 0; i
< cfg
->num_varinfo
; ++i
) {
10221 MonoInst
*var
= cfg
->varinfo
[i
];
10222 if (pos
< i
&& cfg
->locals_start
== i
)
10223 cfg
->locals_start
= pos
;
10224 if (!(var
->flags
& MONO_INST_IS_DEAD
)) {
10226 cfg
->varinfo
[pos
] = cfg
->varinfo
[i
];
10227 cfg
->varinfo
[pos
]->inst_c0
= pos
;
10228 memcpy (&cfg
->vars
[pos
], &cfg
->vars
[i
], sizeof (MonoMethodVar
));
10229 cfg
->vars
[pos
].idx
= pos
;
10230 #if SIZEOF_REGISTER == 4
10231 if (cfg
->varinfo
[pos
]->type
== STACK_I8
) {
10232 /* Modify the two component vars too */
10235 var1
= get_vreg_to_inst (cfg
, cfg
->varinfo
[pos
]->dreg
+ 1);
10236 var1
->inst_c0
= pos
;
10237 var1
= get_vreg_to_inst (cfg
, cfg
->varinfo
[pos
]->dreg
+ 2);
10238 var1
->inst_c0
= pos
;
10245 cfg
->num_varinfo
= pos
;
10246 if (cfg
->locals_start
> cfg
->num_varinfo
)
10247 cfg
->locals_start
= cfg
->num_varinfo
;
10251 * mono_spill_global_vars:
10253 * Generate spill code for variables which are not allocated to registers,
10254 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
10255 * code is generated which could be optimized by the local optimization passes.
10258 mono_spill_global_vars (MonoCompile
*cfg
, gboolean
*need_local_opts
)
10260 MonoBasicBlock
*bb
;
10262 int orig_next_vreg
;
10263 guint32
*vreg_to_lvreg
;
10265 guint32 i
, lvregs_len
;
10266 gboolean dest_has_lvreg
= FALSE
;
10267 guint32 stacktypes
[128];
10268 MonoInst
**live_range_start
, **live_range_end
;
10269 MonoBasicBlock
**live_range_start_bb
, **live_range_end_bb
;
10271 *need_local_opts
= FALSE
;
10273 memset (spec2
, 0, sizeof (spec2
));
10275 /* FIXME: Move this function to mini.c */
10276 stacktypes
['i'] = STACK_PTR
;
10277 stacktypes
['l'] = STACK_I8
;
10278 stacktypes
['f'] = STACK_R8
;
10279 #ifdef MONO_ARCH_SIMD_INTRINSICS
10280 stacktypes
['x'] = STACK_VTYPE
;
10283 #if SIZEOF_REGISTER == 4
10284 /* Create MonoInsts for longs */
10285 for (i
= 0; i
< cfg
->num_varinfo
; i
++) {
10286 MonoInst
*ins
= cfg
->varinfo
[i
];
10288 if ((ins
->opcode
!= OP_REGVAR
) && !(ins
->flags
& MONO_INST_IS_DEAD
)) {
10289 switch (ins
->type
) {
10290 #ifdef MONO_ARCH_SOFT_FLOAT
10296 g_assert (ins
->opcode
== OP_REGOFFSET
);
10298 tree
= get_vreg_to_inst (cfg
, ins
->dreg
+ 1);
10300 tree
->opcode
= OP_REGOFFSET
;
10301 tree
->inst_basereg
= ins
->inst_basereg
;
10302 tree
->inst_offset
= ins
->inst_offset
+ MINI_LS_WORD_OFFSET
;
10304 tree
= get_vreg_to_inst (cfg
, ins
->dreg
+ 2);
10306 tree
->opcode
= OP_REGOFFSET
;
10307 tree
->inst_basereg
= ins
->inst_basereg
;
10308 tree
->inst_offset
= ins
->inst_offset
+ MINI_MS_WORD_OFFSET
;
10318 /* FIXME: widening and truncation */
10321 * As an optimization, when a variable allocated to the stack is first loaded into
10322 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10323 * the variable again.
10325 orig_next_vreg
= cfg
->next_vreg
;
10326 vreg_to_lvreg
= mono_mempool_alloc0 (cfg
->mempool
, sizeof (guint32
) * cfg
->next_vreg
);
10327 lvregs
= mono_mempool_alloc (cfg
->mempool
, sizeof (guint32
) * 1024);
10331 * These arrays contain the first and last instructions accessing a given
10333 * Since we emit bblocks in the same order we process them here, and we
10334 * don't split live ranges, these will precisely describe the live range of
10335 * the variable, i.e. the instruction range where a valid value can be found
10336 * in the variables location.
10338 /* FIXME: Only do this if debugging info is requested */
10339 live_range_start
= g_new0 (MonoInst
*, cfg
->next_vreg
);
10340 live_range_end
= g_new0 (MonoInst
*, cfg
->next_vreg
);
10341 live_range_start_bb
= g_new (MonoBasicBlock
*, cfg
->next_vreg
);
10342 live_range_end_bb
= g_new (MonoBasicBlock
*, cfg
->next_vreg
);
10344 /* Add spill loads/stores */
10345 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
) {
10348 if (cfg
->verbose_level
> 2)
10349 printf ("\nSPILL BLOCK %d:\n", bb
->block_num
);
10351 /* Clear vreg_to_lvreg array */
10352 for (i
= 0; i
< lvregs_len
; i
++)
10353 vreg_to_lvreg
[lvregs
[i
]] = 0;
10357 MONO_BB_FOR_EACH_INS (bb
, ins
) {
10358 const char *spec
= INS_INFO (ins
->opcode
);
10359 int regtype
, srcindex
, sreg
, tmp_reg
, prev_dreg
;
10360 gboolean store
, no_lvreg
;
10362 if (G_UNLIKELY (cfg
->verbose_level
> 2))
10363 mono_print_ins (ins
);
10365 if (ins
->opcode
== OP_NOP
)
10369 * We handle LDADDR here as well, since it can only be decomposed
10370 * when variable addresses are known.
10372 if (ins
->opcode
== OP_LDADDR
) {
10373 MonoInst
*var
= ins
->inst_p0
;
10375 if (var
->opcode
== OP_VTARG_ADDR
) {
10376 /* Happens on SPARC/S390 where vtypes are passed by reference */
10377 MonoInst
*vtaddr
= var
->inst_left
;
10378 if (vtaddr
->opcode
== OP_REGVAR
) {
10379 ins
->opcode
= OP_MOVE
;
10380 ins
->sreg1
= vtaddr
->dreg
;
10382 else if (var
->inst_left
->opcode
== OP_REGOFFSET
) {
10383 ins
->opcode
= OP_LOAD_MEMBASE
;
10384 ins
->inst_basereg
= vtaddr
->inst_basereg
;
10385 ins
->inst_offset
= vtaddr
->inst_offset
;
10389 g_assert (var
->opcode
== OP_REGOFFSET
);
10391 ins
->opcode
= OP_ADD_IMM
;
10392 ins
->sreg1
= var
->inst_basereg
;
10393 ins
->inst_imm
= var
->inst_offset
;
10396 *need_local_opts
= TRUE
;
10397 spec
= INS_INFO (ins
->opcode
);
10400 if (ins
->opcode
< MONO_CEE_LAST
) {
10401 mono_print_ins (ins
);
10402 g_assert_not_reached ();
10406 * Store opcodes have destbasereg in the dreg, but in reality, it is an
10410 if (MONO_IS_STORE_MEMBASE (ins
)) {
10411 tmp_reg
= ins
->dreg
;
10412 ins
->dreg
= ins
->sreg2
;
10413 ins
->sreg2
= tmp_reg
;
10416 spec2
[MONO_INST_DEST
] = ' ';
10417 spec2
[MONO_INST_SRC1
] = spec
[MONO_INST_SRC1
];
10418 spec2
[MONO_INST_SRC2
] = spec
[MONO_INST_DEST
];
10420 } else if (MONO_IS_STORE_MEMINDEX (ins
))
10421 g_assert_not_reached ();
10426 if (G_UNLIKELY (cfg
->verbose_level
> 2))
10427 printf ("\t %.3s %d %d %d\n", spec
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
10432 regtype
= spec
[MONO_INST_DEST
];
10433 g_assert (((ins
->dreg
== -1) && (regtype
== ' ')) || ((ins
->dreg
!= -1) && (regtype
!= ' ')));
10436 if ((ins
->dreg
!= -1) && get_vreg_to_inst (cfg
, ins
->dreg
)) {
10437 MonoInst
*var
= get_vreg_to_inst (cfg
, ins
->dreg
);
10438 MonoInst
*store_ins
;
10440 MonoInst
*def_ins
= ins
;
10441 int dreg
= ins
->dreg
; /* The original vreg */
10443 store_opcode
= mono_type_to_store_membase (cfg
, var
->inst_vtype
);
10445 if (var
->opcode
== OP_REGVAR
) {
10446 ins
->dreg
= var
->dreg
;
10447 } else if ((ins
->dreg
== ins
->sreg1
) && (spec
[MONO_INST_DEST
] == 'i') && (spec
[MONO_INST_SRC1
] == 'i') && !vreg_to_lvreg
[ins
->dreg
] && (op_to_op_dest_membase (store_opcode
, ins
->opcode
) != -1)) {
10449 * Instead of emitting a load+store, use a _membase opcode.
10451 g_assert (var
->opcode
== OP_REGOFFSET
);
10452 if (ins
->opcode
== OP_MOVE
) {
10456 ins
->opcode
= op_to_op_dest_membase (store_opcode
, ins
->opcode
);
10457 ins
->inst_basereg
= var
->inst_basereg
;
10458 ins
->inst_offset
= var
->inst_offset
;
10461 spec
= INS_INFO (ins
->opcode
);
10465 g_assert (var
->opcode
== OP_REGOFFSET
);
10467 prev_dreg
= ins
->dreg
;
10469 /* Invalidate any previous lvreg for this vreg */
10470 vreg_to_lvreg
[ins
->dreg
] = 0;
10474 #ifdef MONO_ARCH_SOFT_FLOAT
10475 if (store_opcode
== OP_STORER8_MEMBASE_REG
) {
10477 store_opcode
= OP_STOREI8_MEMBASE_REG
;
10481 ins
->dreg
= alloc_dreg (cfg
, stacktypes
[regtype
]);
10483 if (regtype
== 'l') {
10484 NEW_STORE_MEMBASE (cfg
, store_ins
, OP_STOREI4_MEMBASE_REG
, var
->inst_basereg
, var
->inst_offset
+ MINI_LS_WORD_OFFSET
, ins
->dreg
+ 1);
10485 mono_bblock_insert_after_ins (bb
, ins
, store_ins
);
10486 NEW_STORE_MEMBASE (cfg
, store_ins
, OP_STOREI4_MEMBASE_REG
, var
->inst_basereg
, var
->inst_offset
+ MINI_MS_WORD_OFFSET
, ins
->dreg
+ 2);
10487 mono_bblock_insert_after_ins (bb
, ins
, store_ins
);
10488 def_ins
= store_ins
;
10491 g_assert (store_opcode
!= OP_STOREV_MEMBASE
);
10493 /* Try to fuse the store into the instruction itself */
10494 /* FIXME: Add more instructions */
10495 if (!lvreg
&& ((ins
->opcode
== OP_ICONST
) || ((ins
->opcode
== OP_I8CONST
) && (ins
->inst_c0
== 0)))) {
10496 ins
->opcode
= store_membase_reg_to_store_membase_imm (store_opcode
);
10497 ins
->inst_imm
= ins
->inst_c0
;
10498 ins
->inst_destbasereg
= var
->inst_basereg
;
10499 ins
->inst_offset
= var
->inst_offset
;
10500 } else if (!lvreg
&& ((ins
->opcode
== OP_MOVE
) || (ins
->opcode
== OP_FMOVE
) || (ins
->opcode
== OP_LMOVE
))) {
10501 ins
->opcode
= store_opcode
;
10502 ins
->inst_destbasereg
= var
->inst_basereg
;
10503 ins
->inst_offset
= var
->inst_offset
;
10507 tmp_reg
= ins
->dreg
;
10508 ins
->dreg
= ins
->sreg2
;
10509 ins
->sreg2
= tmp_reg
;
10512 spec2
[MONO_INST_DEST
] = ' ';
10513 spec2
[MONO_INST_SRC1
] = spec
[MONO_INST_SRC1
];
10514 spec2
[MONO_INST_SRC2
] = spec
[MONO_INST_DEST
];
10516 } else if (!lvreg
&& (op_to_op_store_membase (store_opcode
, ins
->opcode
) != -1)) {
10517 // FIXME: The backends expect the base reg to be in inst_basereg
10518 ins
->opcode
= op_to_op_store_membase (store_opcode
, ins
->opcode
);
10520 ins
->inst_basereg
= var
->inst_basereg
;
10521 ins
->inst_offset
= var
->inst_offset
;
10522 spec
= INS_INFO (ins
->opcode
);
10524 /* printf ("INS: "); mono_print_ins (ins); */
10525 /* Create a store instruction */
10526 NEW_STORE_MEMBASE (cfg
, store_ins
, store_opcode
, var
->inst_basereg
, var
->inst_offset
, ins
->dreg
);
10528 /* Insert it after the instruction */
10529 mono_bblock_insert_after_ins (bb
, ins
, store_ins
);
10531 def_ins
= store_ins
;
10534 * We can't assign ins->dreg to var->dreg here, since the
10535 * sregs could use it. So set a flag, and do it after
10538 if ((!MONO_ARCH_USE_FPSTACK
|| ((store_opcode
!= OP_STORER8_MEMBASE_REG
) && (store_opcode
!= OP_STORER4_MEMBASE_REG
))) && !((var
)->flags
& (MONO_INST_VOLATILE
|MONO_INST_INDIRECT
)))
10539 dest_has_lvreg
= TRUE
;
10544 if (def_ins
&& !live_range_start
[dreg
]) {
10545 live_range_start
[dreg
] = def_ins
;
10546 live_range_start_bb
[dreg
] = bb
;
10553 for (srcindex
= 0; srcindex
< 2; ++srcindex
) {
10554 regtype
= spec
[(srcindex
== 0) ? MONO_INST_SRC1
: MONO_INST_SRC2
];
10555 sreg
= srcindex
== 0 ? ins
->sreg1
: ins
->sreg2
;
10557 g_assert (((sreg
== -1) && (regtype
== ' ')) || ((sreg
!= -1) && (regtype
!= ' ')));
10558 if ((sreg
!= -1) && get_vreg_to_inst (cfg
, sreg
)) {
10559 MonoInst
*var
= get_vreg_to_inst (cfg
, sreg
);
10560 MonoInst
*use_ins
= ins
;
10561 MonoInst
*load_ins
;
10562 guint32 load_opcode
;
10564 if (var
->opcode
== OP_REGVAR
) {
10566 ins
->sreg1
= var
->dreg
;
10568 ins
->sreg2
= var
->dreg
;
10569 live_range_end
[var
->dreg
] = use_ins
;
10570 live_range_end_bb
[var
->dreg
] = bb
;
10574 g_assert (var
->opcode
== OP_REGOFFSET
);
10576 load_opcode
= mono_type_to_load_membase (cfg
, var
->inst_vtype
);
10578 g_assert (load_opcode
!= OP_LOADV_MEMBASE
);
10580 if (vreg_to_lvreg
[sreg
]) {
10581 /* The variable is already loaded to an lvreg */
10582 if (G_UNLIKELY (cfg
->verbose_level
> 2))
10583 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg
[sreg
], sreg
);
10585 ins
->sreg1
= vreg_to_lvreg
[sreg
];
10587 ins
->sreg2
= vreg_to_lvreg
[sreg
];
10591 /* Try to fuse the load into the instruction */
10592 if ((srcindex
== 0) && (op_to_op_src1_membase (load_opcode
, ins
->opcode
) != -1)) {
10593 ins
->opcode
= op_to_op_src1_membase (load_opcode
, ins
->opcode
);
10594 ins
->inst_basereg
= var
->inst_basereg
;
10595 ins
->inst_offset
= var
->inst_offset
;
10596 } else if ((srcindex
== 1) && (op_to_op_src2_membase (load_opcode
, ins
->opcode
) != -1)) {
10597 ins
->opcode
= op_to_op_src2_membase (load_opcode
, ins
->opcode
);
10598 ins
->sreg2
= var
->inst_basereg
;
10599 ins
->inst_offset
= var
->inst_offset
;
10601 if (MONO_IS_REAL_MOVE (ins
)) {
10602 ins
->opcode
= OP_NOP
;
10605 //printf ("%d ", srcindex); mono_print_ins (ins);
10607 sreg
= alloc_dreg (cfg
, stacktypes
[regtype
]);
10609 if ((!MONO_ARCH_USE_FPSTACK
|| ((load_opcode
!= OP_LOADR8_MEMBASE
) && (load_opcode
!= OP_LOADR4_MEMBASE
))) && !((var
)->flags
& (MONO_INST_VOLATILE
|MONO_INST_INDIRECT
)) && !no_lvreg
) {
10610 if (var
->dreg
== prev_dreg
) {
10612 * sreg refers to the value loaded by the load
10613 * emitted below, but we need to use ins->dreg
10614 * since it refers to the store emitted earlier.
10618 vreg_to_lvreg
[var
->dreg
] = sreg
;
10619 g_assert (lvregs_len
< 1024);
10620 lvregs
[lvregs_len
++] = var
->dreg
;
10629 if (regtype
== 'l') {
10630 NEW_LOAD_MEMBASE (cfg
, load_ins
, OP_LOADI4_MEMBASE
, sreg
+ 2, var
->inst_basereg
, var
->inst_offset
+ MINI_MS_WORD_OFFSET
);
10631 mono_bblock_insert_before_ins (bb
, ins
, load_ins
);
10632 NEW_LOAD_MEMBASE (cfg
, load_ins
, OP_LOADI4_MEMBASE
, sreg
+ 1, var
->inst_basereg
, var
->inst_offset
+ MINI_LS_WORD_OFFSET
);
10633 mono_bblock_insert_before_ins (bb
, ins
, load_ins
);
10634 use_ins
= load_ins
;
10637 #if SIZEOF_REGISTER == 4
10638 g_assert (load_opcode
!= OP_LOADI8_MEMBASE
);
10640 NEW_LOAD_MEMBASE (cfg
, load_ins
, load_opcode
, sreg
, var
->inst_basereg
, var
->inst_offset
);
10641 mono_bblock_insert_before_ins (bb
, ins
, load_ins
);
10642 use_ins
= load_ins
;
10646 if (var
->dreg
< orig_next_vreg
) {
10647 live_range_end
[var
->dreg
] = use_ins
;
10648 live_range_end_bb
[var
->dreg
] = bb
;
10653 if (dest_has_lvreg
) {
10654 vreg_to_lvreg
[prev_dreg
] = ins
->dreg
;
10655 g_assert (lvregs_len
< 1024);
10656 lvregs
[lvregs_len
++] = prev_dreg
;
10657 dest_has_lvreg
= FALSE
;
10661 tmp_reg
= ins
->dreg
;
10662 ins
->dreg
= ins
->sreg2
;
10663 ins
->sreg2
= tmp_reg
;
10666 if (MONO_IS_CALL (ins
)) {
10667 /* Clear vreg_to_lvreg array */
10668 for (i
= 0; i
< lvregs_len
; i
++)
10669 vreg_to_lvreg
[lvregs
[i
]] = 0;
10673 if (cfg
->verbose_level
> 2)
10674 mono_print_ins_index (1, ins
);
10678 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
10680 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
10681 * by storing the current native offset into MonoMethodVar->live_range_start/end.
10683 for (i
= 0; i
< cfg
->num_varinfo
; ++i
) {
10684 int vreg
= MONO_VARINFO (cfg
, i
)->vreg
;
10687 if (live_range_start
[vreg
]) {
10688 MONO_INST_NEW (cfg
, ins
, OP_LIVERANGE_START
);
10690 mono_bblock_insert_after_ins (live_range_start_bb
[vreg
], live_range_start
[vreg
], ins
);
10692 if (live_range_end
[vreg
]) {
10693 MONO_INST_NEW (cfg
, ins
, OP_LIVERANGE_END
);
10695 mono_bblock_insert_after_ins (live_range_end_bb
[vreg
], live_range_end
[vreg
], ins
);
10700 g_free (live_range_start
);
10701 g_free (live_range_end
);
10702 g_free (live_range_start_bb
);
10703 g_free (live_range_end_bb
);
10708 * - use 'iadd' instead of 'int_add'
10709 * - handling ovf opcodes: decompose in method_to_ir.
10710 * - unify iregs/fregs
10711 * -> partly done, the missing parts are:
10712 * - a more complete unification would involve unifying the hregs as well, so
10713 * code wouldn't need if (fp) all over the place. but that would mean the hregs
10714 * would no longer map to the machine hregs, so the code generators would need to
10715 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
10716 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
10717 * fp/non-fp branches speeds it up by about 15%.
10718 * - use sext/zext opcodes instead of shifts
10720 * - get rid of TEMPLOADs if possible and use vregs instead
10721 * - clean up usage of OP_P/OP_ opcodes
10722 * - cleanup usage of DUMMY_USE
10723 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
10725 * - set the stack type and allocate a dreg in the EMIT_NEW macros
10726 * - get rid of all the <foo>2 stuff when the new JIT is ready.
10727 * - make sure handle_stack_args () is called before the branch is emitted
10728 * - when the new IR is done, get rid of all unused stuff
10729 * - COMPARE/BEQ as separate instructions or unify them ?
10730 * - keeping them separate allows specialized compare instructions like
10731 * compare_imm, compare_membase
10732 * - most back ends unify fp compare+branch, fp compare+ceq
10733 * - integrate mono_save_args into inline_method
10734 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
10735 * - handle long shift opts on 32 bit platforms somehow: they require
10736 * 3 sregs (2 for arg1 and 1 for arg2)
10737 * - make byref a 'normal' type.
10738 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
10739 * variable if needed.
10740 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
10741 * like inline_method.
10742 * - remove inlining restrictions
10743 * - fix LNEG and enable cfold of INEG
10744 * - generalize x86 optimizations like ldelema as a peephole optimization
10745 * - add store_mem_imm for amd64
10746 * - optimize the loading of the interruption flag in the managed->native wrappers
10747 * - avoid special handling of OP_NOP in passes
10748 * - move code inserting instructions into one function/macro.
10749 * - try a coalescing phase after liveness analysis
10750 * - add float -> vreg conversion + local optimizations on !x86
10751 * - figure out how to handle decomposed branches during optimizations, ie.
10752 * compare+branch, op_jump_table+op_br etc.
10753 * - promote RuntimeXHandles to vregs
10754 * - vtype cleanups:
10755 * - add a NEW_VARLOADA_VREG macro
10756 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
10757 * accessing vtype fields.
10758 * - get rid of I8CONST on 64 bit platforms
10759 * - dealing with the increase in code size due to branches created during opcode
10761 * - use extended basic blocks
10762 * - all parts of the JIT
10763 * - handle_global_vregs () && local regalloc
10764 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
10765 * - sources of increase in code size:
10768 * - isinst and castclass
10769 * - lvregs not allocated to global registers even if used multiple times
10770 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
10772 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
10773 * - add all micro optimizations from the old JIT
10774 * - put tree optimizations into the deadce pass
10775 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
10776 * specific function.
10777 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
10778 * fcompare + branchCC.
10779 * - create a helper function for allocating a stack slot, taking into account
10780 * MONO_CFG_HAS_SPILLUP.
10782 * - merge the ia64 switch changes.
10783 * - optimize mono_regstate2_alloc_int/float.
10784 * - fix the pessimistic handling of variables accessed in exception handler blocks.
10785 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
10786 * parts of the tree could be separated by other instructions, killing the tree
10787 * arguments, or stores killing loads etc. Also, should we fold loads into other
10788 * instructions if the result of the load is used multiple times ?
10789 * - make the REM_IMM optimization in mini-x86.c arch-independent.
10790 * - LAST MERGE: 108395.
10791 * - when returning vtypes in registers, generate IR and append it to the end of the
10792 * last bb instead of doing it in the epilog.
10793 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
10801 - When to decompose opcodes:
10802 - earlier: this makes some optimizations hard to implement, since the low level IR
10803 no longer contains the necessary information. But it is easier to do.
10804 - later: harder to implement, enables more optimizations.
10805 - Branches inside bblocks:
10806 - created when decomposing complex opcodes.
10807 - branches to another bblock: harmless, but not tracked by the branch
10808 optimizations, so need to branch to a label at the start of the bblock.
10809 - branches to inside the same bblock: very problematic, trips up the local
10810 reg allocator. Can be fixed by splitting the current bblock, but that is a
10811 complex operation, since some local vregs can become global vregs etc.
10812 - Local/global vregs:
10813 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
10814 local register allocator.
10815 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
10816 structure, created by mono_create_var (). Assigned to hregs or the stack by
10817 the global register allocator.
10818 - When to do optimizations like alu->alu_imm:
10819 - earlier -> saves work later on since the IR will be smaller/simpler
10820 - later -> can work on more instructions
10821 - Handling of valuetypes:
10822 - When a vtype is pushed on the stack, a new temporary is created, an
10823 instruction computing its address (LDADDR) is emitted and pushed on
10824 the stack. Need to optimize cases when the vtype is used immediately as in
10825 argument passing, stloc etc.
10826 - Instead of the to_end stuff in the old JIT, simply call the function handling
10827 the values on the stack before emitting the last instruction of the bb.
10830 #endif /* DISABLE_JIT */