Make sure x86 ATOMIC_CAS doesn't overwrite its own operands.
[mono-debugger.git] / mono / mini / method-to-ir.c
blob695d52a8376ece949ad46dd5dd942a7088068cd6
1 /*
2 * method-to-ir.c: Convert CIL to the JIT internal representation
4 * Author:
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 */
11 #include <config.h>
12 #include <signal.h>
14 #ifdef HAVE_UNISTD_H
15 #include <unistd.h>
16 #endif
18 #include <math.h>
19 #include <string.h>
20 #include <ctype.h>
22 #ifdef HAVE_SYS_TIME_H
23 #include <sys/time.h>
24 #endif
26 #ifdef HAVE_ALLOCA_H
27 #include <alloca.h>
28 #endif
30 #ifdef HAVE_VALGRIND_MEMCHECK_H
31 #include <valgrind/memcheck.h>
32 #endif
34 #include <mono/metadata/assembly.h>
35 #include <mono/metadata/loader.h>
36 #include <mono/metadata/tabledefs.h>
37 #include <mono/metadata/class.h>
38 #include <mono/metadata/object.h>
39 #include <mono/metadata/exception.h>
40 #include <mono/metadata/opcodes.h>
41 #include <mono/metadata/mono-endian.h>
42 #include <mono/metadata/tokentype.h>
43 #include <mono/metadata/tabledefs.h>
44 #include <mono/metadata/marshal.h>
45 #include <mono/metadata/debug-helpers.h>
46 #include <mono/metadata/mono-debug.h>
47 #include <mono/metadata/gc-internal.h>
48 #include <mono/metadata/security-manager.h>
49 #include <mono/metadata/threads-types.h>
50 #include <mono/metadata/security-core-clr.h>
51 #include <mono/metadata/monitor.h>
52 #include <mono/utils/mono-compiler.h>
54 #include "mini.h"
55 #include "trace.h"
57 #include "ir-emit.h"
59 #include "jit-icalls.h"
/* Inlining heuristics: relative cost of a branch and the largest IL body we inline. */
#define BRANCH_COST 100
#define INLINE_LENGTH_LIMIT 20

/* Abort inlining of the current callee (only meaningful while inlining). */
#define INLINE_FAILURE do {\
		if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
			goto inline_failure;\
	} while (0)

/* Bail out of IR construction if a compile-time exception was recorded on cfg. */
#define CHECK_CFG_EXCEPTION do {\
		if (cfg->exception_type != MONO_EXCEPTION_NONE)\
			goto exception_exit;\
	} while (0)

/* Record a MethodAccessException on cfg and abort compilation. */
#define METHOD_ACCESS_FAILURE do {	\
		char *method_fname = mono_method_full_name (method, TRUE);	\
		char *cil_method_fname = mono_method_full_name (cil_method, TRUE);	\
		cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS;	\
		cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname);	\
		g_free (method_fname);	\
		g_free (cil_method_fname);	\
		goto exception_exit;	\
	} while (0)

/* Record a FieldAccessException on cfg and abort compilation. */
#define FIELD_ACCESS_FAILURE do {	\
		char *method_fname = mono_method_full_name (method, TRUE);	\
		char *field_fname = mono_field_full_name (field);	\
		cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS;	\
		cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);	\
		g_free (method_fname);	\
		g_free (field_fname);	\
		goto exception_exit;	\
	} while (0)

/* Give up on generic code sharing for the given opcode when sharing is active. */
#define GENERIC_SHARING_FAILURE(opcode) do {		\
		if (cfg->generic_sharing_context) {	\
			if (cfg->verbose_level > 2)	\
				printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
			cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED;	\
			goto exception_exit;	\
		}	\
	} while (0)
98 /* Determine whenever 'ins' represents a load of the 'this' argument */
99 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
101 static int ldind_to_load_membase (int opcode);
102 static int stind_to_store_membase (int opcode);
104 int mono_op_to_op_imm (int opcode);
105 int mono_op_to_op_imm_noemul (int opcode);
107 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
108 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
109 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
111 /* helper methods signature */
112 extern MonoMethodSignature *helper_sig_class_init_trampoline;
113 extern MonoMethodSignature *helper_sig_domain_get;
114 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
115 extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
116 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
119 * Instruction metadata
121 #ifdef MINI_OP
122 #undef MINI_OP
123 #endif
124 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2,
125 #define NONE ' '
126 #define IREG 'i'
127 #define FREG 'f'
128 #define VREG 'v'
129 #define XREG 'x'
130 #if SIZEOF_REGISTER == 8
131 #define LREG IREG
132 #else
133 #define LREG 'l'
134 #endif
135 /* keep in sync with the enum in mini.h */
136 const char
137 ins_info[] = {
138 #include "mini-ops.h"
140 #undef MINI_OP
142 extern GHashTable *jit_icall_name_hash;
144 #define MONO_INIT_VARINFO(vi,id) do { \
145 (vi)->range.first_use.pos.bid = 0xffff; \
146 (vi)->reg = -1; \
147 (vi)->idx = (id); \
148 } while (0)
150 guint32
151 mono_alloc_ireg (MonoCompile *cfg)
153 return alloc_ireg (cfg);
156 guint32
157 mono_alloc_freg (MonoCompile *cfg)
159 return alloc_freg (cfg);
162 guint32
163 mono_alloc_preg (MonoCompile *cfg)
165 return alloc_preg (cfg);
168 guint32
169 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
171 return alloc_dreg (cfg, stack_type);
174 guint
175 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
177 if (type->byref)
178 return OP_MOVE;
180 handle_enum:
181 switch (type->type) {
182 case MONO_TYPE_I1:
183 case MONO_TYPE_U1:
184 case MONO_TYPE_BOOLEAN:
185 return OP_MOVE;
186 case MONO_TYPE_I2:
187 case MONO_TYPE_U2:
188 case MONO_TYPE_CHAR:
189 return OP_MOVE;
190 case MONO_TYPE_I4:
191 case MONO_TYPE_U4:
192 return OP_MOVE;
193 case MONO_TYPE_I:
194 case MONO_TYPE_U:
195 case MONO_TYPE_PTR:
196 case MONO_TYPE_FNPTR:
197 return OP_MOVE;
198 case MONO_TYPE_CLASS:
199 case MONO_TYPE_STRING:
200 case MONO_TYPE_OBJECT:
201 case MONO_TYPE_SZARRAY:
202 case MONO_TYPE_ARRAY:
203 return OP_MOVE;
204 case MONO_TYPE_I8:
205 case MONO_TYPE_U8:
206 #if SIZEOF_REGISTER == 8
207 return OP_MOVE;
208 #else
209 return OP_LMOVE;
210 #endif
211 case MONO_TYPE_R4:
212 return OP_FMOVE;
213 case MONO_TYPE_R8:
214 return OP_FMOVE;
215 case MONO_TYPE_VALUETYPE:
216 if (type->data.klass->enumtype) {
217 type = mono_class_enum_basetype (type->data.klass);
218 goto handle_enum;
220 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
221 return OP_XMOVE;
222 return OP_VMOVE;
223 case MONO_TYPE_TYPEDBYREF:
224 return OP_VMOVE;
225 case MONO_TYPE_GENERICINST:
226 type = &type->data.generic_class->container_class->byval_arg;
227 goto handle_enum;
228 case MONO_TYPE_VAR:
229 case MONO_TYPE_MVAR:
230 g_assert (cfg->generic_sharing_context);
231 return OP_MOVE;
232 default:
233 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
235 return -1;
238 void
239 mono_print_bb (MonoBasicBlock *bb, const char *msg)
241 int i;
242 MonoInst *tree;
244 printf ("\n%s %d: [IN: ", msg, bb->block_num);
245 for (i = 0; i < bb->in_count; ++i)
246 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
247 printf (", OUT: ");
248 for (i = 0; i < bb->out_count; ++i)
249 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
250 printf (" ]\n");
251 for (tree = bb->code; tree; tree = tree->next)
252 mono_print_ins_index (-1, tree);
256 * Can't put this at the beginning, since other files reference stuff from this
257 * file.
259 #ifndef DISABLE_JIT
261 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
263 #define GET_BBLOCK(cfg,tblock,ip) do { \
264 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
265 if (!(tblock)) { \
266 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
267 NEW_BBLOCK (cfg, (tblock)); \
268 (tblock)->cil_code = (ip); \
269 ADD_BBLOCK (cfg, (tblock)); \
271 } while (0)
#if defined(__i386__) || defined(__x86_64__)
/* Emit an x86 LEA computing sr1 + (sr2 << shift) + imm into a new preg. */
#define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
		MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
		(dest)->dreg = alloc_preg ((cfg)); \
		(dest)->sreg1 = (sr1); \
		(dest)->sreg2 = (sr2); \
		(dest)->inst_imm = (imm); \
		(dest)->backend.shift_amount = (shift); \
		MONO_ADD_INS ((cfg)->cbb, (dest)); \
	} while (0)
#endif
#if SIZEOF_REGISTER == 8
/* On 64 bit, sign-extend an I4 operand when mixed with a pointer operand. */
#define ADD_WIDEN_OP(ins, arg1, arg2) do { \
		/* FIXME: Need to add many more cases */ \
		if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
			MonoInst *widen; \
			int dr = alloc_preg (cfg); \
			EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
			(ins)->sreg2 = widen->dreg; \
		} \
	} while (0)
#else
/* 32 bit targets never need the widening fixup. */
#define ADD_WIDEN_OP(ins, arg1, arg2)
#endif
/* Pop two values, emit the type-specialized binary op, push the result. */
#define ADD_BINOP(op) do {	\
		MONO_INST_NEW (cfg, ins, (op));	\
		sp -= 2;	\
		ins->sreg1 = sp [0]->dreg;	\
		ins->sreg2 = sp [1]->dreg;	\
		type_from_op (ins, sp [0], sp [1]);	\
		CHECK_TYPE (ins);	\
		/* Have to insert a widening op */		 \
		ADD_WIDEN_OP (ins, sp [0], sp [1]); \
		ins->dreg = alloc_dreg ((cfg), (ins)->type); \
		MONO_ADD_INS ((cfg)->cbb, (ins)); \
		*sp++ = ins;	\
		mono_decompose_opcode ((cfg), (ins)); \
	} while (0)
/* Pop one value, emit the type-specialized unary op, push the result. */
#define ADD_UNOP(op) do {	\
		MONO_INST_NEW (cfg, ins, (op));	\
		sp--;	\
		ins->sreg1 = sp [0]->dreg;	\
		type_from_op (ins, sp [0], NULL);	\
		CHECK_TYPE (ins);	\
		(ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
		MONO_ADD_INS ((cfg)->cbb, (ins)); \
		*sp++ = ins;	\
		mono_decompose_opcode (cfg, ins); \
	} while (0)
/* Pop two values and emit a compare + conditional branch; wires up both CFG edges. */
#define ADD_BINCOND(next_block) do {	\
		MonoInst *cmp;	\
		sp -= 2;		\
		MONO_INST_NEW(cfg, cmp, OP_COMPARE);	\
		cmp->sreg1 = sp [0]->dreg;	\
		cmp->sreg2 = sp [1]->dreg;	\
		type_from_op (cmp, sp [0], sp [1]);	\
		CHECK_TYPE (cmp);	\
		type_from_op (ins, sp [0], sp [1]);	\
		ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);	\
		GET_BBLOCK (cfg, tblock, target);		\
		link_bblock (cfg, bblock, tblock);	\
		ins->inst_true_bb = tblock;	\
		if ((next_block)) {	\
			/* Fall-through block supplied by the caller. */ \
			link_bblock (cfg, bblock, (next_block));	\
			ins->inst_false_bb = (next_block);	\
			start_new_bblock = 1;	\
		} else {	\
			GET_BBLOCK (cfg, tblock, ip);		\
			link_bblock (cfg, bblock, tblock);	\
			ins->inst_false_bb = tblock;	\
			start_new_bblock = 2;	\
		}	\
		if (sp != stack_start) {	\
			handle_stack_args (cfg, stack_start, sp - stack_start); \
			CHECK_UNVERIFIABLE (cfg); \
		} \
		MONO_ADD_INS (bblock, cmp); \
		MONO_ADD_INS (bblock, ins);	\
	} while (0)
357 /* *
358 * link_bblock: Links two basic blocks
360 * links two basic blocks in the control flow graph, the 'from'
361 * argument is the starting block and the 'to' argument is the block
362 * the control flow ends to after 'from'.
364 static void
365 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
367 MonoBasicBlock **newa;
368 int i, found;
370 #if 0
371 if (from->cil_code) {
372 if (to->cil_code)
373 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
374 else
375 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
376 } else {
377 if (to->cil_code)
378 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
379 else
380 printf ("edge from entry to exit\n");
382 #endif
384 found = FALSE;
385 for (i = 0; i < from->out_count; ++i) {
386 if (to == from->out_bb [i]) {
387 found = TRUE;
388 break;
391 if (!found) {
392 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
393 for (i = 0; i < from->out_count; ++i) {
394 newa [i] = from->out_bb [i];
396 newa [i] = to;
397 from->out_count++;
398 from->out_bb = newa;
401 found = FALSE;
402 for (i = 0; i < to->in_count; ++i) {
403 if (from == to->in_bb [i]) {
404 found = TRUE;
405 break;
408 if (!found) {
409 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
410 for (i = 0; i < to->in_count; ++i) {
411 newa [i] = to->in_bb [i];
413 newa [i] = from;
414 to->in_count++;
415 to->in_bb = newa;
419 void
420 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
422 link_bblock (cfg, from, to);
426 * mono_find_block_region:
428 * We mark each basic block with a region ID. We use that to avoid BB
429 * optimizations when blocks are in different regions.
431 * Returns:
432 * A region token that encodes where this region is, and information
433 * about the clause owner for this block.
435 * The region encodes the try/catch/filter clause that owns this block
436 * as well as the type. -1 is a special value that represents a block
437 * that is in none of try/catch/filter.
439 static int
440 mono_find_block_region (MonoCompile *cfg, int offset)
442 MonoMethod *method = cfg->method;
443 MonoMethodHeader *header = mono_method_get_header (method);
444 MonoExceptionClause *clause;
445 int i;
447 /* first search for handlers and filters */
448 for (i = 0; i < header->num_clauses; ++i) {
449 clause = &header->clauses [i];
450 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
451 (offset < (clause->handler_offset)))
452 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
454 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
455 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
456 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
457 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
458 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
459 else
460 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
464 /* search the try blocks */
465 for (i = 0; i < header->num_clauses; ++i) {
466 clause = &header->clauses [i];
467 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
468 return ((i + 1) << 8) | clause->flags;
471 return -1;
474 static GList*
475 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
477 MonoMethod *method = cfg->method;
478 MonoMethodHeader *header = mono_method_get_header (method);
479 MonoExceptionClause *clause;
480 MonoBasicBlock *handler;
481 int i;
482 GList *res = NULL;
484 for (i = 0; i < header->num_clauses; ++i) {
485 clause = &header->clauses [i];
486 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
487 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
488 if (clause->flags == type) {
489 handler = cfg->cil_offset_to_bb [clause->handler_offset];
490 g_assert (handler);
491 res = g_list_append (res, handler);
495 return res;
498 static void
499 mono_create_spvar_for_region (MonoCompile *cfg, int region)
501 MonoInst *var;
503 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
504 if (var)
505 return;
507 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
508 /* prevent it from being register allocated */
509 var->flags |= MONO_INST_INDIRECT;
511 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
514 static MonoInst *
515 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
517 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
520 static MonoInst*
521 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
523 MonoInst *var;
525 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
526 if (var)
527 return var;
529 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
530 /* prevent it from being register allocated */
531 var->flags |= MONO_INST_INDIRECT;
533 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
535 return var;
539 * Returns the type used in the eval stack when @type is loaded.
540 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
542 void
543 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
545 MonoClass *klass;
547 inst->klass = klass = mono_class_from_mono_type (type);
548 if (type->byref) {
549 inst->type = STACK_MP;
550 return;
553 handle_enum:
554 switch (type->type) {
555 case MONO_TYPE_VOID:
556 inst->type = STACK_INV;
557 return;
558 case MONO_TYPE_I1:
559 case MONO_TYPE_U1:
560 case MONO_TYPE_BOOLEAN:
561 case MONO_TYPE_I2:
562 case MONO_TYPE_U2:
563 case MONO_TYPE_CHAR:
564 case MONO_TYPE_I4:
565 case MONO_TYPE_U4:
566 inst->type = STACK_I4;
567 return;
568 case MONO_TYPE_I:
569 case MONO_TYPE_U:
570 case MONO_TYPE_PTR:
571 case MONO_TYPE_FNPTR:
572 inst->type = STACK_PTR;
573 return;
574 case MONO_TYPE_CLASS:
575 case MONO_TYPE_STRING:
576 case MONO_TYPE_OBJECT:
577 case MONO_TYPE_SZARRAY:
578 case MONO_TYPE_ARRAY:
579 inst->type = STACK_OBJ;
580 return;
581 case MONO_TYPE_I8:
582 case MONO_TYPE_U8:
583 inst->type = STACK_I8;
584 return;
585 case MONO_TYPE_R4:
586 case MONO_TYPE_R8:
587 inst->type = STACK_R8;
588 return;
589 case MONO_TYPE_VALUETYPE:
590 if (type->data.klass->enumtype) {
591 type = mono_class_enum_basetype (type->data.klass);
592 goto handle_enum;
593 } else {
594 inst->klass = klass;
595 inst->type = STACK_VTYPE;
596 return;
598 case MONO_TYPE_TYPEDBYREF:
599 inst->klass = mono_defaults.typed_reference_class;
600 inst->type = STACK_VTYPE;
601 return;
602 case MONO_TYPE_GENERICINST:
603 type = &type->data.generic_class->container_class->byval_arg;
604 goto handle_enum;
605 case MONO_TYPE_VAR :
606 case MONO_TYPE_MVAR :
607 /* FIXME: all the arguments must be references for now,
608 * later look inside cfg and see if the arg num is
609 * really a reference
611 g_assert (cfg->generic_sharing_context);
612 inst->type = STACK_OBJ;
613 return;
614 default:
615 g_error ("unknown type 0x%02x in eval stack type", type->type);
620 * The following tables are used to quickly validate the IL code in type_from_op ().
622 static const char
623 bin_num_table [STACK_MAX] [STACK_MAX] = {
624 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
625 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
626 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
627 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
628 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
629 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
630 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
631 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
634 static const char
635 neg_table [] = {
636 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
639 /* reduce the size of this table */
640 static const char
641 bin_int_table [STACK_MAX] [STACK_MAX] = {
642 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
643 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
644 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
645 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
646 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
647 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
648 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
649 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
652 static const char
653 bin_comp_table [STACK_MAX] [STACK_MAX] = {
654 /* Inv i L p F & O vt */
655 {0},
656 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
657 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
658 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
659 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
660 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
661 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
662 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
665 /* reduce the size of this table */
666 static const char
667 shift_table [STACK_MAX] [STACK_MAX] = {
668 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
669 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
670 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
671 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
672 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
673 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
674 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
675 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
679 * Tables to map from the non-specific opcode to the matching
680 * type-specific opcode.
682 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
683 static const guint16
684 binops_op_map [STACK_MAX] = {
685 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
688 /* handles from CEE_NEG to CEE_CONV_U8 */
689 static const guint16
690 unops_op_map [STACK_MAX] = {
691 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
694 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
695 static const guint16
696 ovfops_op_map [STACK_MAX] = {
697 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
700 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
701 static const guint16
702 ovf2ops_op_map [STACK_MAX] = {
703 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
706 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
707 static const guint16
708 ovf3ops_op_map [STACK_MAX] = {
709 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
712 /* handles from CEE_BEQ to CEE_BLT_UN */
713 static const guint16
714 beqops_op_map [STACK_MAX] = {
715 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
718 /* handles from CEE_CEQ to CEE_CLT_UN */
719 static const guint16
720 ceqops_op_map [STACK_MAX] = {
721 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
725 * Sets ins->type (the type on the eval stack) according to the
726 * type of the opcode and the arguments to it.
727 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
729 * FIXME: this function sets ins->type unconditionally in some cases, but
730 * it should set it to invalid for some types (a conv.x on an object)
732 static void
733 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
735 switch (ins->opcode) {
736 /* binops */
737 case CEE_ADD:
738 case CEE_SUB:
739 case CEE_MUL:
740 case CEE_DIV:
741 case CEE_REM:
742 /* FIXME: check unverifiable args for STACK_MP */
743 ins->type = bin_num_table [src1->type] [src2->type];
744 ins->opcode += binops_op_map [ins->type];
745 break;
746 case CEE_DIV_UN:
747 case CEE_REM_UN:
748 case CEE_AND:
749 case CEE_OR:
750 case CEE_XOR:
751 ins->type = bin_int_table [src1->type] [src2->type];
752 ins->opcode += binops_op_map [ins->type];
753 break;
754 case CEE_SHL:
755 case CEE_SHR:
756 case CEE_SHR_UN:
757 ins->type = shift_table [src1->type] [src2->type];
758 ins->opcode += binops_op_map [ins->type];
759 break;
760 case OP_COMPARE:
761 case OP_LCOMPARE:
762 case OP_ICOMPARE:
763 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
764 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
765 ins->opcode = OP_LCOMPARE;
766 else if (src1->type == STACK_R8)
767 ins->opcode = OP_FCOMPARE;
768 else
769 ins->opcode = OP_ICOMPARE;
770 break;
771 case OP_ICOMPARE_IMM:
772 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
773 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
774 ins->opcode = OP_LCOMPARE_IMM;
775 break;
776 case CEE_BEQ:
777 case CEE_BGE:
778 case CEE_BGT:
779 case CEE_BLE:
780 case CEE_BLT:
781 case CEE_BNE_UN:
782 case CEE_BGE_UN:
783 case CEE_BGT_UN:
784 case CEE_BLE_UN:
785 case CEE_BLT_UN:
786 ins->opcode += beqops_op_map [src1->type];
787 break;
788 case OP_CEQ:
789 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
790 ins->opcode += ceqops_op_map [src1->type];
791 break;
792 case OP_CGT:
793 case OP_CGT_UN:
794 case OP_CLT:
795 case OP_CLT_UN:
796 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
797 ins->opcode += ceqops_op_map [src1->type];
798 break;
799 /* unops */
800 case CEE_NEG:
801 ins->type = neg_table [src1->type];
802 ins->opcode += unops_op_map [ins->type];
803 break;
804 case CEE_NOT:
805 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
806 ins->type = src1->type;
807 else
808 ins->type = STACK_INV;
809 ins->opcode += unops_op_map [ins->type];
810 break;
811 case CEE_CONV_I1:
812 case CEE_CONV_I2:
813 case CEE_CONV_I4:
814 case CEE_CONV_U4:
815 ins->type = STACK_I4;
816 ins->opcode += unops_op_map [src1->type];
817 break;
818 case CEE_CONV_R_UN:
819 ins->type = STACK_R8;
820 switch (src1->type) {
821 case STACK_I4:
822 case STACK_PTR:
823 ins->opcode = OP_ICONV_TO_R_UN;
824 break;
825 case STACK_I8:
826 ins->opcode = OP_LCONV_TO_R_UN;
827 break;
829 break;
830 case CEE_CONV_OVF_I1:
831 case CEE_CONV_OVF_U1:
832 case CEE_CONV_OVF_I2:
833 case CEE_CONV_OVF_U2:
834 case CEE_CONV_OVF_I4:
835 case CEE_CONV_OVF_U4:
836 ins->type = STACK_I4;
837 ins->opcode += ovf3ops_op_map [src1->type];
838 break;
839 case CEE_CONV_OVF_I_UN:
840 case CEE_CONV_OVF_U_UN:
841 ins->type = STACK_PTR;
842 ins->opcode += ovf2ops_op_map [src1->type];
843 break;
844 case CEE_CONV_OVF_I1_UN:
845 case CEE_CONV_OVF_I2_UN:
846 case CEE_CONV_OVF_I4_UN:
847 case CEE_CONV_OVF_U1_UN:
848 case CEE_CONV_OVF_U2_UN:
849 case CEE_CONV_OVF_U4_UN:
850 ins->type = STACK_I4;
851 ins->opcode += ovf2ops_op_map [src1->type];
852 break;
853 case CEE_CONV_U:
854 ins->type = STACK_PTR;
855 switch (src1->type) {
856 case STACK_I4:
857 ins->opcode = OP_ICONV_TO_U;
858 break;
859 case STACK_PTR:
860 case STACK_MP:
861 #if SIZEOF_REGISTER == 8
862 ins->opcode = OP_LCONV_TO_U;
863 #else
864 ins->opcode = OP_MOVE;
865 #endif
866 break;
867 case STACK_I8:
868 ins->opcode = OP_LCONV_TO_U;
869 break;
870 case STACK_R8:
871 ins->opcode = OP_FCONV_TO_U;
872 break;
874 break;
875 case CEE_CONV_I8:
876 case CEE_CONV_U8:
877 ins->type = STACK_I8;
878 ins->opcode += unops_op_map [src1->type];
879 break;
880 case CEE_CONV_OVF_I8:
881 case CEE_CONV_OVF_U8:
882 ins->type = STACK_I8;
883 ins->opcode += ovf3ops_op_map [src1->type];
884 break;
885 case CEE_CONV_OVF_U8_UN:
886 case CEE_CONV_OVF_I8_UN:
887 ins->type = STACK_I8;
888 ins->opcode += ovf2ops_op_map [src1->type];
889 break;
890 case CEE_CONV_R4:
891 case CEE_CONV_R8:
892 ins->type = STACK_R8;
893 ins->opcode += unops_op_map [src1->type];
894 break;
895 case OP_CKFINITE:
896 ins->type = STACK_R8;
897 break;
898 case CEE_CONV_U2:
899 case CEE_CONV_U1:
900 ins->type = STACK_I4;
901 ins->opcode += ovfops_op_map [src1->type];
902 break;
903 case CEE_CONV_I:
904 case CEE_CONV_OVF_I:
905 case CEE_CONV_OVF_U:
906 ins->type = STACK_PTR;
907 ins->opcode += ovfops_op_map [src1->type];
908 break;
909 case CEE_ADD_OVF:
910 case CEE_ADD_OVF_UN:
911 case CEE_MUL_OVF:
912 case CEE_MUL_OVF_UN:
913 case CEE_SUB_OVF:
914 case CEE_SUB_OVF_UN:
915 ins->type = bin_num_table [src1->type] [src2->type];
916 ins->opcode += ovfops_op_map [src1->type];
917 if (ins->type == STACK_R8)
918 ins->type = STACK_INV;
919 break;
920 case OP_LOAD_MEMBASE:
921 ins->type = STACK_PTR;
922 break;
923 case OP_LOADI1_MEMBASE:
924 case OP_LOADU1_MEMBASE:
925 case OP_LOADI2_MEMBASE:
926 case OP_LOADU2_MEMBASE:
927 case OP_LOADI4_MEMBASE:
928 case OP_LOADU4_MEMBASE:
929 ins->type = STACK_PTR;
930 break;
931 case OP_LOADI8_MEMBASE:
932 ins->type = STACK_I8;
933 break;
934 case OP_LOADR4_MEMBASE:
935 case OP_LOADR8_MEMBASE:
936 ins->type = STACK_R8;
937 break;
938 default:
939 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
940 break;
943 if (ins->type == STACK_MP)
944 ins->klass = mono_defaults.object_class;
947 static const char
948 ldind_type [] = {
949 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
#if 0

static const char
param_table [STACK_MAX] [STACK_MAX] = {
	{0},
};

/* (dead code) Validate that the eval-stack values in 'args' are compatible with 'sig'. */
static int
check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
	int i;

	if (sig->hasthis) {
		switch (args->type) {
		case STACK_I4:
		case STACK_I8:
		case STACK_R8:
		case STACK_VTYPE:
		case STACK_INV:
			return 0;
		}
		args++;
	}
	for (i = 0; i < sig->param_count; ++i) {
		switch (args [i].type) {
		case STACK_INV:
			return 0;
		case STACK_MP:
			if (!sig->params [i]->byref)
				return 0;
			continue;
		case STACK_OBJ:
			if (sig->params [i]->byref)
				return 0;
			switch (sig->params [i]->type) {
			case MONO_TYPE_CLASS:
			case MONO_TYPE_STRING:
			case MONO_TYPE_OBJECT:
			case MONO_TYPE_SZARRAY:
			case MONO_TYPE_ARRAY:
				break;
			default:
				return 0;
			}
			continue;
		case STACK_R8:
			if (sig->params [i]->byref)
				return 0;
			if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
				return 0;
			continue;
		case STACK_PTR:
		case STACK_I4:
		case STACK_I8:
		case STACK_VTYPE:
			break;
		}
		/*if (!param_table [args [i].type] [sig->params [i]->type])
			return 0;*/
	}
	return 1;
}
#endif
1016 * When we need a pointer to the current domain many times in a method, we
1017 * call mono_domain_get() once and we store the result in a local variable.
1018 * This function returns the variable that represents the MonoDomain*.
1020 inline static MonoInst *
1021 mono_get_domainvar (MonoCompile *cfg)
1023 if (!cfg->domainvar)
1024 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1025 return cfg->domainvar;
1029 * The got_var contains the address of the Global Offset Table when AOT
1030 * compiling.
1032 inline static MonoInst *
1033 mono_get_got_var (MonoCompile *cfg)
1035 #ifdef MONO_ARCH_NEED_GOT_VAR
1036 if (!cfg->compile_aot)
1037 return NULL;
1038 if (!cfg->got_var) {
1039 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1041 return cfg->got_var;
1042 #else
1043 return NULL;
1044 #endif
1047 static MonoInst *
1048 mono_get_vtable_var (MonoCompile *cfg)
1050 g_assert (cfg->generic_sharing_context);
1052 if (!cfg->rgctx_var) {
1053 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1054 /* force the var to be stack allocated */
1055 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1058 return cfg->rgctx_var;
1061 static MonoType*
1062 type_from_stack_type (MonoInst *ins) {
1063 switch (ins->type) {
1064 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1065 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1066 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1067 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1068 case STACK_MP:
1069 return &ins->klass->this_arg;
1070 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1071 case STACK_VTYPE: return &ins->klass->byval_arg;
1072 default:
1073 g_error ("stack type %d to monotype not handled\n", ins->type);
1075 return NULL;
/*
 * type_to_stack_type:
 *
 *   Map a MonoType to the STACK_* evaluation stack type it occupies.
 * Enums and other wrapped types are reduced first via
 * mono_type_get_underlying_type ().
 */
static G_GNUC_UNUSED int
type_to_stack_type (MonoType *t)
{
	switch (mono_type_get_underlying_type (t)->type) {
	case MONO_TYPE_I1:
	case MONO_TYPE_U1:
	case MONO_TYPE_BOOLEAN:
	case MONO_TYPE_I2:
	case MONO_TYPE_U2:
	case MONO_TYPE_CHAR:
	case MONO_TYPE_I4:
	case MONO_TYPE_U4:
		return STACK_I4;
	case MONO_TYPE_I:
	case MONO_TYPE_U:
	case MONO_TYPE_PTR:
	case MONO_TYPE_FNPTR:
		return STACK_PTR;
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
		return STACK_OBJ;
	case MONO_TYPE_I8:
	case MONO_TYPE_U8:
		return STACK_I8;
	case MONO_TYPE_R4:
	case MONO_TYPE_R8:
		return STACK_R8;
	case MONO_TYPE_VALUETYPE:
	case MONO_TYPE_TYPEDBYREF:
		return STACK_VTYPE;
	case MONO_TYPE_GENERICINST:
		/* Generic instances are either valuetypes or reference types. */
		if (mono_type_generic_inst_is_valuetype (t))
			return STACK_VTYPE;
		else
			return STACK_OBJ;
		break;
	default:
		g_assert_not_reached ();
	}
	return -1;
}
/*
 * array_access_to_klass:
 *
 *   Return the element class implied by a CIL ldelem.*/stelem.* opcode.
 */
static MonoClass*
array_access_to_klass (int opcode)
{
	switch (opcode) {
	case CEE_LDELEM_U1:
		return mono_defaults.byte_class;
	case CEE_LDELEM_U2:
		return mono_defaults.uint16_class;
	case CEE_LDELEM_I:
	case CEE_STELEM_I:
		return mono_defaults.int_class;
	case CEE_LDELEM_I1:
	case CEE_STELEM_I1:
		return mono_defaults.sbyte_class;
	case CEE_LDELEM_I2:
	case CEE_STELEM_I2:
		return mono_defaults.int16_class;
	case CEE_LDELEM_I4:
	case CEE_STELEM_I4:
		return mono_defaults.int32_class;
	case CEE_LDELEM_U4:
		return mono_defaults.uint32_class;
	case CEE_LDELEM_I8:
	case CEE_STELEM_I8:
		return mono_defaults.int64_class;
	case CEE_LDELEM_R4:
	case CEE_STELEM_R4:
		return mono_defaults.single_class;
	case CEE_LDELEM_R8:
	case CEE_STELEM_R8:
		return mono_defaults.double_class;
	case CEE_LDELEM_REF:
	case CEE_STELEM_REF:
		return mono_defaults.object_class;
	default:
		g_assert_not_reached ();
	}
	return NULL;
}
/*
 * mono_compile_get_interface_var:
 *
 *   Return a local variable for stack slot SLOT holding a value of the
 * stack type of INS. We try to share variables when possible: one shared
 * local per (stack slot, stack type) pair, cached in cfg->intvars.
 */
static MonoInst *
mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
{
	MonoInst *res;
	int pos, vnum;

	/* inlining can result in deeper stacks */
	if (slot >= mono_method_get_header (cfg->method)->max_stack)
		return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);

	/* Index into the (slot x stack-type) sharing table. */
	pos = ins->type - 1 + slot * STACK_MAX;

	switch (ins->type) {
	case STACK_I4:
	case STACK_I8:
	case STACK_R8:
	case STACK_PTR:
	case STACK_MP:
	case STACK_OBJ:
		/* Reuse the cached variable for this slot/type if one exists. */
		if ((vnum = cfg->intvars [pos]))
			return cfg->varinfo [vnum];
		res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
		cfg->intvars [pos] = res->inst_c0;
		break;
	default:
		/* Valuetypes etc. are never shared: each gets a fresh local. */
		res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
	}
	return res;
}
/*
 * mono_save_token_info:
 *
 *   Record the image/token pair for KEY so the AOT compiler can emit a
 * reference to it later.
 */
static void
mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
{
	/*
	 * Don't use this if a generic_context is set, since that means AOT can't
	 * look up the method using just the image+token.
	 * table == 0 means this is a reference made from a wrapper.
	 */
	if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
		MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
		jump_info_token->image = image;
		jump_info_token->token = token;
		g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
	}
}
/*
 * handle_stack_args:
 *
 *   This function is called to handle items that are left on the evaluation
 * stack at basic block boundaries. What happens is that we save the values to
 * local variables and we reload them later when first entering the target
 * basic block (with the handle_loaded_temps () function).
 * A single joint point will use the same variables (stored in the array
 * bb->out_stack or bb->in_stack, if the basic block is before or after the
 * joint point).
 *
 * This function needs to be called _before_ emitting the last instruction of
 * the bb (i.e. before emitting a branch).
 * If the stack merge fails at a join point, cfg->unverifiable is set.
 */
static void
handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
{
	int i, bindex;
	MonoBasicBlock *bb = cfg->cbb;
	MonoBasicBlock *outb;
	MonoInst *inst, **locals;
	gboolean found;

	if (!count)
		return;
	if (cfg->verbose_level > 3)
		printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
	if (!bb->out_scount) {
		bb->out_scount = count;
		//printf ("bblock %d has out:", bb->block_num);
		/* First, try to adopt the in_stack of some successor block. */
		found = FALSE;
		for (i = 0; i < bb->out_count; ++i) {
			outb = bb->out_bb [i];
			/* exception handlers are linked, but they should not be considered for stack args */
			if (outb->flags & BB_EXCEPTION_HANDLER)
				continue;
			//printf (" %d", outb->block_num);
			if (outb->in_stack) {
				found = TRUE;
				bb->out_stack = outb->in_stack;
				break;
			}
		}
		//printf ("\n");
		if (!found) {
			bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
			for (i = 0; i < count; ++i) {
				/*
				 * try to reuse temps already allocated for this purpouse, if they occupy the same
				 * stack slot and if they are of the same type.
				 * This won't cause conflicts since if 'local' is used to
				 * store one of the values in the in_stack of a bblock, then
				 * the same variable will be used for the same outgoing stack
				 * slot as well.
				 * This doesn't work when inlining methods, since the bblocks
				 * in the inlined methods do not inherit their in_stack from
				 * the bblock they are inlined to. See bug #58863 for an
				 * example.
				 */
				if (cfg->inlined_method)
					bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
				else
					bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
			}
		}
	}

	/* Propagate our out_stack to successors which have no in_stack yet;
	 * a depth mismatch at a join point makes the method unverifiable. */
	for (i = 0; i < bb->out_count; ++i) {
		outb = bb->out_bb [i];
		/* exception handlers are linked, but they should not be considered for stack args */
		if (outb->flags & BB_EXCEPTION_HANDLER)
			continue;
		if (outb->in_scount) {
			if (outb->in_scount != bb->out_scount) {
				cfg->unverifiable = TRUE;
				return;
			}
			continue; /* check they are the same locals */
		}
		outb->in_scount = count;
		outb->in_stack = bb->out_stack;
	}

	/* Spill the stack items into the chosen temporaries. */
	locals = bb->out_stack;
	cfg->cbb = bb;
	for (i = 0; i < count; ++i) {
		EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
		inst->cil_code = sp [i]->cil_code;
		sp [i] = locals [i];
		if (cfg->verbose_level > 3)
			printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
	}

	/*
	 * It is possible that the out bblocks already have in_stack assigned, and
	 * the in_stacks differ. In this case, we will store to all the different
	 * in_stacks.
	 */
	found = TRUE;
	bindex = 0;
	while (found) {
		/* Find a bblock which has a different in_stack */
		found = FALSE;
		while (bindex < bb->out_count) {
			outb = bb->out_bb [bindex];
			/* exception handlers are linked, but they should not be considered for stack args */
			if (outb->flags & BB_EXCEPTION_HANDLER) {
				bindex++;
				continue;
			}
			if (outb->in_stack != locals) {
				for (i = 0; i < count; ++i) {
					EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
					inst->cil_code = sp [i]->cil_code;
					sp [i] = locals [i];
					if (cfg->verbose_level > 3)
						printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
				}
				locals = outb->in_stack;
				found = TRUE;
				break;
			}
			bindex ++;
		}
	}
}
/* Emit code which loads interface_offsets [klass->interface_id]
 * The array is stored in memory before vtable.
 */
static void
mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
{
	if (cfg->compile_aot) {
		/* The interface id is not known at compile time: patch it in as
		 * an already-adjusted (negative, scaled) offset. */
		int ioffset_reg = alloc_preg (cfg);
		int iid_reg = alloc_preg (cfg);

		MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
		MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
	}
	else {
		/* Offsets are laid out at negative offsets from the vtable pointer. */
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
	}
}
/*
 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
 * stored in "klass_reg" implements the interface "klass".
 */
static void
mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
{
	int ibitmap_reg = alloc_preg (cfg);
	int ibitmap_byte_reg = alloc_preg (cfg);

	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap));

	if (cfg->compile_aot) {
		/* The interface id is only known at runtime, so compute
		 * bitmap [iid >> 3] & (1 << (iid & 7)) with emitted code. */
		int iid_reg = alloc_preg (cfg);
		int shifted_iid_reg = alloc_preg (cfg);
		int ibitmap_byte_address_reg = alloc_preg (cfg);
		int masked_iid_reg = alloc_preg (cfg);
		int iid_one_bit_reg = alloc_preg (cfg);
		int iid_bit_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
		MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
		MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
		MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
		MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
	} else {
		/* The interface id is a JIT-time constant: fold the byte index
		 * and bit mask into immediates. */
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
	}
}
/*
 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
 * stored in "vtable_reg" implements the interface "klass".
 */
static void
mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
{
	int ibitmap_reg = alloc_preg (cfg);
	int ibitmap_byte_reg = alloc_preg (cfg);

	/* Same as mini_emit_load_intf_bit_reg_class (), but the bitmap is
	 * read from the vtable instead of the class. */
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap));

	if (cfg->compile_aot) {
		/* Runtime interface id: compute bitmap [iid >> 3] & (1 << (iid & 7)). */
		int iid_reg = alloc_preg (cfg);
		int shifted_iid_reg = alloc_preg (cfg);
		int ibitmap_byte_address_reg = alloc_preg (cfg);
		int masked_iid_reg = alloc_preg (cfg);
		int iid_one_bit_reg = alloc_preg (cfg);
		int iid_bit_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, shifted_iid_reg, iid_reg, 3);
		MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, masked_iid_reg, iid_reg, 7);
		MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
		MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
		MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
	} else {
		/* JIT-time constant interface id: immediates suffice. */
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
	}
}
/*
 * Emit code which checks whenever the interface id of @klass is smaller than
 * than the value given by max_iid_reg.
 * Branches to false_target on failure, or throws InvalidCastException when
 * no false_target is given.
 */
static void
mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
						 MonoBasicBlock *false_target)
{
	if (cfg->compile_aot) {
		/* Interface id not known until runtime: compare against a patched constant. */
		int iid_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
	}
	else
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
	if (false_target)
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
	else
		MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
}
/* Same as above, but obtains max_iid from a vtable */
static void
mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
								 MonoBasicBlock *false_target)
{
	int max_iid_reg = alloc_preg (cfg);

	/* Load MonoVTable.max_interface_id, then delegate to the generic check. */
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
	mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
}
/* Same as above, but obtains max_iid from a klass */
static void
mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
							   MonoBasicBlock *false_target)
{
	int max_iid_reg = alloc_preg (cfg);

	/* Load MonoClass.max_interface_id, then delegate to the generic check. */
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
	mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
}
/*
 * mini_emit_isninst_cast:
 *
 *   Emit an isinst-style subclass check on the MonoClass in klass_reg using
 * the supertypes table: branch to true_target when the class at the right
 * inheritance depth equals KLASS, to false_target when the depth check
 * already rules it out. Falls through when neither branch is taken.
 */
static void
mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
{
	int idepth_reg = alloc_preg (cfg);
	int stypes_reg = alloc_preg (cfg);
	int stype = alloc_preg (cfg);

	/* Only classes deeper than the default supertable size need an
	 * explicit depth check before indexing the supertypes array. */
	if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
	}
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
	if (cfg->compile_aot) {
		/* The class pointer must be patched in when AOT compiling. */
		int const_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
	} else {
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
	}
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
}
/*
 * mini_emit_iface_cast:
 *
 *   Emit an interface implementation check for the vtable in vtable_reg:
 * branch to true_target when KLASS is implemented, or throw
 * InvalidCastException when no true_target is given.
 */
static void
mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
{
	int intf_reg = alloc_preg (cfg);

	mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
	mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
	/* A nonzero bit means the interface is implemented. */
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
	if (true_target)
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
	else
		MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
}
/*
 * Variant of the above that takes a register to the class, not the vtable.
 */
static void
mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
{
	int intf_bit_reg = alloc_preg (cfg);

	mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
	mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
	/* A nonzero bit means the interface is implemented. */
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
	if (true_target)
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
	else
		MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
}
/*
 * mini_emit_class_check:
 *
 *   Emit code which throws InvalidCastException unless the MonoClass in
 * klass_reg is exactly KLASS.
 */
static inline void
mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
{
	if (cfg->compile_aot) {
		/* The class pointer must be patched in when AOT compiling. */
		int const_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
	} else {
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
	}
	MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
}
/*
 * mini_emit_class_check_branch:
 *
 *   Compare the MonoClass in klass_reg against KLASS and branch to TARGET
 * using BRANCH_OP.
 */
static inline void
mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
{
	if (cfg->compile_aot) {
		/* The class pointer must be patched in when AOT compiling. */
		int const_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
	} else {
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
	}
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
}
/*
 * mini_emit_castclass:
 *
 *   Emit a castclass check of the class in klass_reg against KLASS, throwing
 * InvalidCastException on failure. For array classes the element types are
 * checked recursively (with obj_reg == -1 to suppress the vector check on
 * inner levels); otherwise the supertypes table is consulted.
 */
static void
mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
{
	if (klass->rank) {
		int rank_reg = alloc_preg (cfg);
		int eclass_reg = alloc_preg (cfg);

		/* Ranks must match exactly. */
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
		MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
		//		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
		if (klass->cast_class == mono_defaults.object_class) {
			int parent_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
			mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
			mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
		} else if (klass->cast_class == mono_defaults.enum_class->parent) {
			mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
			mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
		} else if (klass->cast_class == mono_defaults.enum_class) {
			mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
		} else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
			mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
		} else {
			// Pass -1 as obj_reg to skip the check below for arrays of arrays
			mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
		}

		if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
			/* Check that the object is a vector too */
			int bounds_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
			MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
		}
	} else {
		int idepth_reg = alloc_preg (cfg);
		int stypes_reg = alloc_preg (cfg);
		int stype = alloc_preg (cfg);

		/* Depth check needed only beyond the default supertable size. */
		if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
			MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
		}
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
		mini_emit_class_check (cfg, stype, klass);
	}
}
/*
 * mini_emit_memset:
 *
 *   Emit inlined code which stores VAL into SIZE bytes at destreg+offset.
 * Only zeroing is currently supported (asserted below). Small aligned sizes
 * become a single store-immediate; otherwise the area is filled with the
 * widest stores the alignment allows.
 */
static void
mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
{
	int val_reg;

	/* Only val == 0 is supported by this helper. */
	g_assert (val == 0);

	if (align == 0)
		align = 4;

	/* Single store-immediate fast path for small, sufficiently aligned sizes. */
	if ((size <= 4) && (size <= align)) {
		switch (size) {
		case 1:
			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
			return;
		case 2:
			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
			return;
		case 4:
			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
			return;
#if SIZEOF_REGISTER == 8
		case 8:
			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
			return;
#endif
		}
	}

	val_reg = alloc_preg (cfg);

	if (SIZEOF_REGISTER == 8)
		MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
	else
		MONO_EMIT_NEW_ICONST (cfg, val_reg, val);

	if (align < 4) {
		/* This could be optimized further if neccesary */
		while (size >= 1) {
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
			offset += 1;
			size -= 1;
		}
		return;
	}

#if !NO_UNALIGNED_ACCESS
	if (SIZEOF_REGISTER == 8) {
		/* Align to 8 bytes first, then use 8-byte stores. */
		if (offset % 8) {
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
			offset += 4;
			size -= 4;
		}
		while (size >= 8) {
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
			offset += 8;
			size -= 8;
		}
	}
#endif

	/* Fill the remainder with progressively narrower stores. */
	while (size >= 4) {
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
		offset += 4;
		size -= 4;
	}
	while (size >= 2) {
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
		offset += 2;
		size -= 2;
	}
	while (size >= 1) {
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
		offset += 1;
		size -= 1;
	}
}
1678 #endif /* DISABLE_JIT */
/*
 * mini_emit_memcpy:
 *
 *   Emit inlined code which copies SIZE bytes from srcreg+soffset to
 * destreg+doffset, using the widest loads/stores the alignment allows.
 */
void
mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
{
	int cur_reg;

	if (align == 0)
		align = 4;

	if (align < 4) {
		/* This could be optimized further if neccesary */
		while (size >= 1) {
			cur_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
			doffset += 1;
			soffset += 1;
			size -= 1;
		}
	}

#if !NO_UNALIGNED_ACCESS
	if (SIZEOF_REGISTER == 8) {
		/* 8-byte chunks on 64-bit hosts. */
		while (size >= 8) {
			cur_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
			doffset += 8;
			soffset += 8;
			size -= 8;
		}
	}
#endif

	/* Copy the remainder with progressively narrower loads/stores. */
	while (size >= 4) {
		cur_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
		doffset += 4;
		soffset += 4;
		size -= 4;
	}
	while (size >= 2) {
		cur_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
		doffset += 2;
		soffset += 2;
		size -= 2;
	}
	while (size >= 1) {
		cur_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
		doffset += 1;
		soffset += 1;
		size -= 1;
	}
}
1739 #ifndef DISABLE_JIT
/*
 * ret_type_to_call_opcode:
 *
 *   Return the call opcode (direct / indirect / virtual variant, selected by
 * CALLI and VIRT) appropriate for a call returning TYPE.
 */
static int
ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
{
	if (type->byref)
		return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;

handle_enum:
	type = mini_get_basic_type_from_generic (gsctx, type);
	switch (type->type) {
	case MONO_TYPE_VOID:
		return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
	case MONO_TYPE_I1:
	case MONO_TYPE_U1:
	case MONO_TYPE_BOOLEAN:
	case MONO_TYPE_I2:
	case MONO_TYPE_U2:
	case MONO_TYPE_CHAR:
	case MONO_TYPE_I4:
	case MONO_TYPE_U4:
		return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
	case MONO_TYPE_I:
	case MONO_TYPE_U:
	case MONO_TYPE_PTR:
	case MONO_TYPE_FNPTR:
		return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
		return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
	case MONO_TYPE_I8:
	case MONO_TYPE_U8:
		return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
	case MONO_TYPE_R4:
	case MONO_TYPE_R8:
		return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
	case MONO_TYPE_VALUETYPE:
		/* Enums are handled as their underlying integral type. */
		if (type->data.klass->enumtype) {
			type = mono_class_enum_basetype (type->data.klass);
			goto handle_enum;
		} else
			return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
	case MONO_TYPE_TYPEDBYREF:
		return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
	case MONO_TYPE_GENERICINST:
		/* Re-dispatch on the generic type definition. */
		type = &type->data.generic_class->container_class->byval_arg;
		goto handle_enum;
	default:
		g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
	}
	return -1;
}
/*
 * target_type_is_incompatible:
 * @cfg: MonoCompile context
 *
 * Check that the item @arg on the evaluation stack can be stored
 * in the target type (can be a local, or field, etc).
 * The cfg arg can be used to check if we need verification or just
 * validity checks.
 *
 * Returns: non-0 value if arg can't be stored on a target.
 */
static int
target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
{
	MonoType *simple_type;
	MonoClass *klass;

	if (target->byref) {
		/* FIXME: check that the pointed to types match */
		if (arg->type == STACK_MP)
			return arg->klass != mono_class_from_mono_type (target);
		if (arg->type == STACK_PTR)
			return 0;
		return 1;
	}

	simple_type = mono_type_get_underlying_type (target);
	switch (simple_type->type) {
	case MONO_TYPE_VOID:
		/* Nothing can be stored into a void target. */
		return 1;
	case MONO_TYPE_I1:
	case MONO_TYPE_U1:
	case MONO_TYPE_BOOLEAN:
	case MONO_TYPE_I2:
	case MONO_TYPE_U2:
	case MONO_TYPE_CHAR:
	case MONO_TYPE_I4:
	case MONO_TYPE_U4:
		if (arg->type != STACK_I4 && arg->type != STACK_PTR)
			return 1;
		return 0;
	case MONO_TYPE_PTR:
		/* STACK_MP is needed when setting pinned locals */
		if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
			return 1;
		return 0;
	case MONO_TYPE_I:
	case MONO_TYPE_U:
	case MONO_TYPE_FNPTR:
		if (arg->type != STACK_I4 && arg->type != STACK_PTR)
			return 1;
		return 0;
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
		if (arg->type != STACK_OBJ)
			return 1;
		/* FIXME: check type compatibility */
		return 0;
	case MONO_TYPE_I8:
	case MONO_TYPE_U8:
		if (arg->type != STACK_I8)
			return 1;
		return 0;
	case MONO_TYPE_R4:
	case MONO_TYPE_R8:
		if (arg->type != STACK_R8)
			return 1;
		return 0;
	case MONO_TYPE_VALUETYPE:
		/* Valuetype stores require an exact class match. */
		if (arg->type != STACK_VTYPE)
			return 1;
		klass = mono_class_from_mono_type (simple_type);
		if (klass != arg->klass)
			return 1;
		return 0;
	case MONO_TYPE_TYPEDBYREF:
		if (arg->type != STACK_VTYPE)
			return 1;
		klass = mono_class_from_mono_type (simple_type);
		if (klass != arg->klass)
			return 1;
		return 0;
	case MONO_TYPE_GENERICINST:
		if (mono_type_generic_inst_is_valuetype (simple_type)) {
			if (arg->type != STACK_VTYPE)
				return 1;
			klass = mono_class_from_mono_type (simple_type);
			if (klass != arg->klass)
				return 1;
			return 0;
		} else {
			if (arg->type != STACK_OBJ)
				return 1;
			/* FIXME: check type compatibility */
			return 0;
		}
	case MONO_TYPE_VAR:
	case MONO_TYPE_MVAR:
		/* FIXME: all the arguments must be references for now,
		 * later look inside cfg and see if the arg num is
		 * really a reference
		 */
		g_assert (cfg->generic_sharing_context);
		if (arg->type != STACK_OBJ)
			return 1;
		return 0;
	default:
		g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
	}
	return 1;
}
1911 * Prepare arguments for passing to a function call.
1912 * Return a non-zero value if the arguments can't be passed to the given
1913 * signature.
1914 * The type checks are not yet complete and some conversions may need
1915 * casts on 32 or 64 bit architectures.
1917 * FIXME: implement this using target_type_is_incompatible ()
1919 static int
1920 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1922 MonoType *simple_type;
1923 int i;
1925 if (sig->hasthis) {
1926 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1927 return 1;
1928 args++;
1930 for (i = 0; i < sig->param_count; ++i) {
1931 if (sig->params [i]->byref) {
1932 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
1933 return 1;
1934 continue;
1936 simple_type = sig->params [i];
1937 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
1938 handle_enum:
1939 switch (simple_type->type) {
1940 case MONO_TYPE_VOID:
1941 return 1;
1942 continue;
1943 case MONO_TYPE_I1:
1944 case MONO_TYPE_U1:
1945 case MONO_TYPE_BOOLEAN:
1946 case MONO_TYPE_I2:
1947 case MONO_TYPE_U2:
1948 case MONO_TYPE_CHAR:
1949 case MONO_TYPE_I4:
1950 case MONO_TYPE_U4:
1951 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
1952 return 1;
1953 continue;
1954 case MONO_TYPE_I:
1955 case MONO_TYPE_U:
1956 case MONO_TYPE_PTR:
1957 case MONO_TYPE_FNPTR:
1958 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
1959 return 1;
1960 continue;
1961 case MONO_TYPE_CLASS:
1962 case MONO_TYPE_STRING:
1963 case MONO_TYPE_OBJECT:
1964 case MONO_TYPE_SZARRAY:
1965 case MONO_TYPE_ARRAY:
1966 if (args [i]->type != STACK_OBJ)
1967 return 1;
1968 continue;
1969 case MONO_TYPE_I8:
1970 case MONO_TYPE_U8:
1971 if (args [i]->type != STACK_I8)
1972 return 1;
1973 continue;
1974 case MONO_TYPE_R4:
1975 case MONO_TYPE_R8:
1976 if (args [i]->type != STACK_R8)
1977 return 1;
1978 continue;
1979 case MONO_TYPE_VALUETYPE:
1980 if (simple_type->data.klass->enumtype) {
1981 simple_type = mono_class_enum_basetype (simple_type->data.klass);
1982 goto handle_enum;
1984 if (args [i]->type != STACK_VTYPE)
1985 return 1;
1986 continue;
1987 case MONO_TYPE_TYPEDBYREF:
1988 if (args [i]->type != STACK_VTYPE)
1989 return 1;
1990 continue;
1991 case MONO_TYPE_GENERICINST:
1992 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
1993 goto handle_enum;
1995 default:
1996 g_error ("unknown type 0x%02x in check_call_signature",
1997 simple_type->type);
2000 return 0;
2003 static int
2004 callvirt_to_call (int opcode)
2006 switch (opcode) {
2007 case OP_CALLVIRT:
2008 return OP_CALL;
2009 case OP_VOIDCALLVIRT:
2010 return OP_VOIDCALL;
2011 case OP_FCALLVIRT:
2012 return OP_FCALL;
2013 case OP_VCALLVIRT:
2014 return OP_VCALL;
2015 case OP_LCALLVIRT:
2016 return OP_LCALL;
2017 default:
2018 g_assert_not_reached ();
2021 return -1;
/*
 * callvirt_to_call_membase:
 *
 *   Map a CALLVIRT-family opcode to the corresponding *_MEMBASE call
 * opcode, i.e. an indirect call through [basereg + offset] (vtable/IMT
 * slot dispatch). Unknown opcodes abort; return -1 is unreachable and
 * only silences compiler warnings.
 */
2024 static int
2025 callvirt_to_call_membase (int opcode)
2027 switch (opcode) {
2028 case OP_CALLVIRT:
2029 return OP_CALL_MEMBASE;
2030 case OP_VOIDCALLVIRT:
2031 return OP_VOIDCALL_MEMBASE;
2032 case OP_FCALLVIRT:
2033 return OP_FCALL_MEMBASE;
2034 case OP_LCALLVIRT:
2035 return OP_LCALL_MEMBASE;
2036 case OP_VCALLVIRT:
2037 return OP_VCALL_MEMBASE;
2038 default:
2039 g_assert_not_reached ();
2042 return -1;
2045 #ifdef MONO_ARCH_HAVE_IMT
/*
 * emit_imt_argument:
 *
 *   Load the IMT "method identification" value for CALL into the
 * architecture's dedicated IMT register (MONO_ARCH_IMT_REG). The value is
 * IMT_ARG's vreg when the caller supplies one, otherwise a constant
 * pointing at call->method (emitted as an AOT patch under --aot so no raw
 * runtime pointer is baked into the code). On architectures without a
 * dedicated IMT register, the per-arch hook takes over instead.
 */
2046 static void
2047 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2049 #ifdef MONO_ARCH_IMT_REG
2050 int method_reg = alloc_preg (cfg);
2052 if (imt_arg) {
2053 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2054 } else if (cfg->compile_aot) {
2055 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2056 } else {
2057 MonoInst *ins;
2058 MONO_INST_NEW (cfg, ins, OP_PCONST);
2059 ins->inst_p0 = call->method;
2060 ins->dreg = method_reg;
2061 MONO_ADD_INS (cfg->cbb, ins);
/* Record method_reg as an extra out-argument pinned to the IMT register */
2064 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2065 #else
2066 mono_arch_emit_imt_argument (cfg, call, imt_arg);
2067 #endif
2069 #endif
/*
 * mono_patch_info_new:
 *
 *   Allocate a MonoJumpInfo patch descriptor from the mempool MP and fill
 * in the IL offset, patch type and target. NOTE(review): mono_mempool_alloc
 * does not zero the allocation, so fields not set here (e.g. ->next) are
 * presumably initialized by callers — confirm before relying on them.
 */
2071 static MonoJumpInfo *
2072 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2074 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2076 ji->ip.i = ip;
2077 ji->type = type;
2078 ji->data.target = target;
2080 return ji;
2083 inline static MonoInst*
2084 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args);
/*
 * mono_emit_call_args:
 *
 *   Build a MonoCallInst for a call with signature SIG and arguments ARGS,
 * but do NOT add it to the current basic block — callers set the target
 * (fptr / sreg1 / membase) and emit it themselves. Handles:
 *   - picking the call opcode from the return type (CALLI vs direct vs
 *     virtual variants);
 *   - valuetype returns: a temp local receives the value and an
 *     OP_OUTARG_VTRETADDR pseudo-op carries its address (see comment below);
 *   - soft-float targets: converting r4 arguments before the call sequence.
 * Also accounts the call's stack usage into cfg->param_area and flags the
 * method as containing calls.
 */
2086 inline static MonoCallInst *
2087 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2088 MonoInst **args, int calli, int virtual)
2090 MonoCallInst *call;
2091 #ifdef MONO_ARCH_SOFT_FLOAT
2092 int i;
2093 #endif
2095 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2097 call->args = args;
2098 call->signature = sig;
2100 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
/* Valuetype return: route the result through a compiler-created temp. */
2102 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2103 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2104 MonoInst *loada;
2106 temp->backend.is_pinvoke = sig->pinvoke;
2109 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2110 * address of return value to increase optimization opportunities.
2111 * Before vtype decomposition, the dreg of the call ins itself represents the
2112 * fact the call modifies the return value. After decomposition, the call will
2113 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2114 * will be transformed into an LDADDR.
2116 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2117 loada->dreg = alloc_preg (cfg);
2118 loada->inst_p0 = temp;
2119 /* We reference the call too since call->dreg could change during optimization */
2120 loada->inst_p1 = call;
2121 MONO_ADD_INS (cfg->cbb, loada);
2123 call->inst.dreg = temp->dreg;
2125 call->vret_var = loada;
2126 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2127 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2129 #ifdef MONO_ARCH_SOFT_FLOAT
2131 * If the call has a float argument, we would need to do an r8->r4 conversion using
2132 * an icall, but that cannot be done during the call sequence since it would clobber
2133 * the call registers + the stack. So we do it before emitting the call.
2135 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2136 MonoType *t;
2137 MonoInst *in = call->args [i];
/* Index 0 is the implicit 'this' (pointer-sized) when sig->hasthis. */
2139 if (i >= sig->hasthis)
2140 t = sig->params [i - sig->hasthis];
2141 else
2142 t = &mono_defaults.int_class->byval_arg;
2143 t = mono_type_get_underlying_type (t);
2145 if (!t->byref && t->type == MONO_TYPE_R4) {
2146 MonoInst *iargs [1];
2147 MonoInst *conv;
2149 iargs [0] = in;
2150 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2152 /* The result will be in an int vreg */
2153 call->args [i] = conv;
2156 #endif
2158 mono_arch_emit_call (cfg, call);
2160 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2161 cfg->flags |= MONO_CFG_HAS_CALLS;
2163 return call;
/*
 * mono_emit_calli:
 *
 *   Emit an indirect call through the address in ADDR's vreg (CIL calli).
 * The address goes in sreg1 of the call instruction; the call is appended
 * to the current basic block and returned.
 */
2166 inline static MonoInst*
2167 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2169 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE);
2171 call->inst.sreg1 = addr->dreg;
2173 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2175 return (MonoInst*)call;
/*
 * mono_emit_rgctx_calli:
 *
 *   Like mono_emit_calli, but additionally passes RGCTX_ARG (the runtime
 * generic context) in the architecture's dedicated RGCTX register. The
 * value is copied to a fresh vreg *before* the call is built, then pinned
 * to MONO_ARCH_RGCTX_REG as an extra out-argument. Only compiled on
 * architectures that define MONO_ARCH_RGCTX_REG; elsewhere it aborts.
 */
2178 inline static MonoInst*
2179 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2181 #ifdef MONO_ARCH_RGCTX_REG
2182 MonoCallInst *call;
2183 int rgctx_reg = -1;
2185 if (rgctx_arg) {
2186 rgctx_reg = mono_alloc_preg (cfg);
2187 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2189 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2190 if (rgctx_arg) {
2191 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2192 cfg->uses_rgctx_reg = TRUE;
2194 return (MonoInst*)call;
2195 #else
2196 g_assert_not_reached ();
2197 return NULL;
2198 #endif
/*
 * mono_emit_method_call_full:
 *
 *   Emit a call to METHOD with signature SIG and arguments ARGS. THIS being
 * non-NULL marks the call as virtual. Dispatch strategy, in order:
 *   1. string ctors get a patched signature returning string;
 *   2. delegate Invoke calls go through delegate->invoke_impl;
 *   3. non-virtual or sealed/final methods are devirtualized into direct
 *      calls (with a null check on 'this'); remoting proxies are routed
 *      through the remoting-invoke-with-check wrapper;
 *   4. interface methods dispatch through the IMT (when enabled) or the
 *      interface slot table; everything else through the vtable slot.
 * Returns the call instruction, already added to the current bblock.
 */
2201 static MonoInst*
2202 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2203 MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2205 gboolean virtual = this != NULL;
2206 gboolean enable_for_aot = TRUE;
2207 MonoCallInst *call;
2209 if (method->string_ctor) {
2210 /* Create the real signature */
2211 /* FIXME: Cache these */
2212 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2213 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2215 sig = ctor_sig;
2218 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual);
/* Possibly-remote receiver on a non-virtual call: go through the
 * remoting wrapper which checks for a transparent proxy at runtime. */
2220 if (this && sig->hasthis &&
2221 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2222 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this)) {
2223 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2224 } else {
2225 call->method = method;
2227 call->inst.flags |= MONO_INST_HAS_METHOD;
2228 call->inst.inst_left = this;
2230 if (virtual) {
2231 int vtable_reg, slot_reg, this_reg;
2233 this_reg = this->dreg;
2235 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
2236 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2237 /* Make a call to delegate->invoke_impl */
2238 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2239 call->inst.inst_basereg = this_reg;
2240 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2241 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2243 return (MonoInst*)call;
2245 #endif
2247 if ((!cfg->compile_aot || enable_for_aot) &&
2248 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2249 (MONO_METHOD_IS_FINAL (method) &&
2250 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK))) {
2252 * the method is not virtual, we just need to ensure this is not null
2253 * and then we can call the method directly.
2255 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2256 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2259 if (!method->string_ctor) {
2260 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
2261 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
2262 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
2265 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2267 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2269 return (MonoInst*)call;
2272 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2274 * the method is virtual, but we can statically dispatch since either
2275 * it's class or the method itself are sealed.
2276 * But first we need to ensure it's not a null reference.
2278 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
2279 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
2280 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
2282 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2283 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2285 return (MonoInst*)call;
/* True virtual dispatch: indirect call through a vtable/IMT slot. */
2288 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2290 vtable_reg = alloc_preg (cfg);
2291 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2292 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2293 slot_reg = -1;
2294 #ifdef MONO_ARCH_HAVE_IMT
2295 if (mono_use_imt) {
2296 guint32 imt_slot = mono_method_get_imt_slot (method);
2297 emit_imt_argument (cfg, call, imt_arg);
2298 slot_reg = vtable_reg;
/* IMT entries live at negative offsets before the vtable. */
2299 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2301 #endif
2302 if (slot_reg == -1) {
2303 slot_reg = alloc_preg (cfg);
2304 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2305 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
2307 } else {
2308 slot_reg = vtable_reg;
2309 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2310 (mono_method_get_vtable_index (method) * SIZEOF_VOID_P);
2311 #ifdef MONO_ARCH_HAVE_IMT
2312 if (imt_arg) {
2313 g_assert (mono_method_signature (method)->generic_param_count);
2314 emit_imt_argument (cfg, call, imt_arg);
2316 #endif
2319 call->inst.sreg1 = slot_reg;
2320 call->virtual = TRUE;
2323 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2325 return (MonoInst*)call;
/*
 * mono_emit_rgctx_method_call_full:
 *
 *   Like mono_emit_method_call_full, but when VTABLE_ARG is given it is
 * additionally passed in the dedicated RGCTX register: copied into a fresh
 * vreg before the call is built, then attached as a fixed-register
 * out-argument. Requires MONO_ARCH_RGCTX_REG when VTABLE_ARG is non-NULL.
 */
2328 static MonoInst*
2329 mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2330 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
2332 int rgctx_reg;
2333 MonoInst *ins;
2334 MonoCallInst *call;
2336 if (vtable_arg) {
2337 #ifdef MONO_ARCH_RGCTX_REG
2338 rgctx_reg = mono_alloc_preg (cfg);
2339 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
2340 #else
2341 NOT_IMPLEMENTED;
2342 #endif
2344 ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);
2346 call = (MonoCallInst*)ins;
2347 if (vtable_arg) {
2348 #ifdef MONO_ARCH_RGCTX_REG
2349 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2350 cfg->uses_rgctx_reg = TRUE;
2351 #else
2352 NOT_IMPLEMENTED;
2353 #endif
2356 return ins;
/*
 * mono_emit_method_call:
 *
 *   Convenience wrapper: call METHOD with its own metadata signature and
 * no IMT argument. THIS may be NULL for static/direct calls.
 */
2359 static inline MonoInst*
2360 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2362 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
/*
 * mono_emit_native_call:
 *
 *   Emit a direct call to the native function FUNC with signature SIG.
 * FUNC is stored in call->fptr (it may also be a MonoJumpInfo* when the
 * caller marks fptr_is_patch, see mono_emit_abs_call). The call is added
 * to the current basic block and returned.
 */
2365 MonoInst*
2366 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2367 MonoInst **args)
2369 MonoCallInst *call;
2371 g_assert (sig);
2373 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE);
2374 call->fptr = func;
2376 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2378 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *
 *   Emit a call to the registered JIT icall identified by its address
 * FUNC. The icall must have been registered beforehand (the lookup is
 * asserted); the actual call targets the icall's wrapper, using the
 * signature recorded at registration time.
 */
2381 inline static MonoInst*
2382 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2384 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2386 g_assert (info);
2388 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2392 * mono_emit_abs_call:
2394 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
 * The call's fptr is the MonoJumpInfo itself (not a code address); it is
 * registered in cfg->abs_patches and resolved later by the PATCH_INFO_ABS
 * machinery, with fptr_is_patch marking the call accordingly.
2396 inline static MonoInst*
2397 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2398 MonoMethodSignature *sig, MonoInst **args)
2400 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2401 MonoInst *ins;
2404 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2405 * handle it.
/* Lazily create the patch table; keyed and valued by the ji pointer itself. */
2407 if (cfg->abs_patches == NULL)
2408 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2409 g_hash_table_insert (cfg->abs_patches, ji, ji);
2410 ins = mono_emit_native_call (cfg, ji, sig, args);
2411 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
2412 return ins;
/*
 * get_memcpy_method:
 *
 *   Return (and cache in a function-static) the corlib-internal
 * String.memcpy(dst, src, size) helper. Aborts with g_error if the
 * installed corlib is too old to have it.
 */
2415 static MonoMethod*
2416 get_memcpy_method (void)
2418 static MonoMethod *memcpy_method = NULL;
2419 if (!memcpy_method) {
2420 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2421 if (!memcpy_method)
2422 g_error ("Old corlib found. Install a new one");
2424 return memcpy_method;
2428 * Emit code to copy a valuetype of type @klass whose address is stored in
2429 * @src->dreg to memory whose address is stored at @dest->dreg.
 * @native selects the marshalled (native) layout size instead of the
 * managed one. On GC configurations with write barriers, reference-bearing
 * structs not stored to the stack go through the mono_value_copy icall so
 * the barrier is applied; otherwise the copy is either inlined (small
 * sizes, with MONO_OPT_INTRINS) or dispatched to corlib's String.memcpy.
2431 void
2432 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2434 MonoInst *iargs [3];
2435 int n;
2436 guint32 align = 0;
2437 MonoMethod *memcpy_method;
2439 g_assert (klass);
2441 * This check breaks with spilled vars... need to handle it during verification anyway.
2442 * g_assert (klass && klass == src->klass && klass == dest->klass);
2445 if (native)
2446 n = mono_class_native_size (klass, &align);
2447 else
2448 n = mono_class_value_size (klass, &align);
2450 #if HAVE_WRITE_BARRIERS
2451 /* if native is true there should be no references in the struct */
2452 if (klass->has_references && !native) {
2453 /* Avoid barriers when storing to the stack */
2454 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2455 (dest->opcode == OP_LDADDR))) {
2456 iargs [0] = dest;
2457 iargs [1] = src;
2458 EMIT_NEW_PCONST (cfg, iargs [2], klass);
2460 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
2463 #endif
/* Small structs: inline the copy; otherwise call the corlib helper. */
2465 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2466 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2467 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2468 } else {
2469 iargs [0] = dest;
2470 iargs [1] = src;
2471 EMIT_NEW_ICONST (cfg, iargs [2], n);
2473 memcpy_method = get_memcpy_method ();
2474 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 *   Return (and cache in a function-static) the corlib-internal
 * String.memset(ptr, value, size) helper. Aborts with g_error if the
 * installed corlib is too old to have it.
 */
2478 static MonoMethod*
2479 get_memset_method (void)
2481 static MonoMethod *memset_method = NULL;
2482 if (!memset_method) {
2483 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2484 if (!memset_method)
2485 g_error ("Old corlib found. Install a new one");
2487 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code zero-initializing a valuetype of type @klass at the address
 * in @dest->dreg (CIL initobj). Small types are zeroed inline with
 * mini_emit_memset; larger ones call corlib's String.memset with value 0.
 * NOTE(review): the @ip parameter is unused in this body — presumably kept
 * for interface symmetry with other emitters; confirm before removing.
 */
2490 void
2491 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2493 MonoInst *iargs [3];
2494 int n;
2495 guint32 align;
2496 MonoMethod *memset_method;
2498 /* FIXME: Optimize this for the case when dest is an LDADDR */
2500 mono_class_init (klass);
2501 n = mono_class_value_size (klass, &align);
2503 if (n <= sizeof (gpointer) * 5) {
2504 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2506 else {
2507 memset_method = get_memset_method ();
2508 iargs [0] = dest;
2509 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2510 EMIT_NEW_ICONST (cfg, iargs [2], n);
2511 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 *   Emit code loading the runtime generic context for METHOD in shared
 * generic code. The source depends on how the context is carried:
 *   - method-inflated context: load the MRGCTX from the hidden argument
 *     local (mono_get_vtable_var);
 *   - static methods / valuetype classes: load the class VTable from the
 *     hidden argument (dereferencing MRGCTX->class_vtable when the method
 *     itself is inflated);
 *   - otherwise: load the vtable from the 'this' argument's MonoObject.
 * Only valid when compiling with a generic sharing context (asserted).
 */
2515 static MonoInst*
2516 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2518 MonoInst *this = NULL;
2520 g_assert (cfg->generic_sharing_context);
2522 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2523 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2524 !method->klass->valuetype)
2525 EMIT_NEW_ARGLOAD (cfg, this, 0);
2527 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2528 MonoInst *mrgctx_loc, *mrgctx_var;
2530 g_assert (!this);
2531 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2533 mrgctx_loc = mono_get_vtable_var (cfg);
2534 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2536 return mrgctx_var;
2537 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2538 MonoInst *vtable_loc, *vtable_var;
2540 g_assert (!this);
2542 vtable_loc = mono_get_vtable_var (cfg);
2543 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2545 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
/* The hidden argument is an MRGCTX here; fetch its class vtable. */
2546 MonoInst *mrgctx_var = vtable_var;
2547 int vtable_reg;
2549 vtable_reg = alloc_preg (cfg);
2550 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2551 vtable_var->type = STACK_PTR;
2554 return vtable_var;
2555 } else {
2556 MonoInst *ins;
2557 int vtable_reg, res_reg;
/* NOTE(review): res_reg is allocated but never used below — confirm. */
2559 vtable_reg = alloc_preg (cfg);
2560 res_reg = alloc_preg (cfg);
2561 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
2562 return ins;
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate (zeroed, from mempool MP) an RGCTX-fetch patch descriptor:
 * which method's context to consult (and whether it is an MRGCTX), the
 * inner patch describing the item to look up, and the kind of information
 * wanted (info_type). Consumed by emit_rgctx_fetch.
 */
2566 static MonoJumpInfoRgctxEntry *
2567 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2569 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2570 res->method = method;
2571 res->in_mrgctx = in_mrgctx;
2572 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2573 res->data->type = patch_type;
2574 res->data->data.target = patch_data;
2575 res->info_type = info_type;
2577 return res;
/*
 * emit_rgctx_fetch:
 *
 *   Emit a call to the lazy RGCTX fetch trampoline that resolves ENTRY
 * against the context value in RGCTX (passed as the single argument).
 * The trampoline address is an abs-call patch resolved at compile end.
 */
2580 static inline MonoInst*
2581 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2583 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit code fetching class-related information (of kind RGCTX_TYPE,
 * e.g. the vtable) for KLASS from the current method's runtime generic
 * context.
 */
2586 static MonoInst*
2587 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2588 MonoClass *klass, int rgctx_type)
2590 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2591 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2593 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_method:
 *
 *   Emit code fetching method-related information (of kind RGCTX_TYPE,
 * e.g. compiled code address) for CMETHOD from the current method's
 * runtime generic context.
 */
2596 static MonoInst*
2597 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2598 MonoMethod *cmethod, int rgctx_type)
2600 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2601 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2603 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit code fetching field-related information (of kind RGCTX_TYPE)
 * for FIELD from the current method's runtime generic context.
 */
2606 static MonoInst*
2607 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2608 MonoClassField *field, int rgctx_type)
2610 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2611 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2613 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * mini_emit_check_array_type:
 *
 *   Emit a runtime check that OBJ is exactly an instance of ARRAY_CLASS,
 * throwing ArrayTypeMismatchException otherwise (used for stelem-style
 * covariance checks). The comparison strategy depends on compilation mode:
 * under MONO_OPT_SHARED compare MonoClass pointers; under generic sharing
 * compare against the vtable fetched from the RGCTX; otherwise compare the
 * object's vtable against the (possibly AOT-patched) vtable constant.
 */
2616 static void
2617 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
2619 int vtable_reg = alloc_preg (cfg);
2620 int context_used = 0;
2622 if (cfg->generic_sharing_context)
2623 context_used = mono_class_check_context_used (array_class);
2625 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
2627 if (cfg->opt & MONO_OPT_SHARED) {
2628 int class_reg = alloc_preg (cfg);
2629 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2630 if (cfg->compile_aot) {
2631 int klass_reg = alloc_preg (cfg);
2632 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
2633 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
2634 } else {
2635 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
2637 } else if (context_used) {
2638 MonoInst *vtable_ins;
2640 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
2641 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
2642 } else {
2643 if (cfg->compile_aot) {
2644 int vt_reg = alloc_preg (cfg);
2645 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, mono_class_vtable (cfg->domain, array_class));
2646 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
2647 } else {
2648 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, mono_class_vtable (cfg->domain, array_class));
2652 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
/*
 * save_cast_details:
 *
 *   Under --debug=casts, emit code recording the (from, to) classes of an
 * upcoming cast of the object in OBJ_REG into per-thread JIT TLS, so that
 * a failing cast can report both types. Requires the JIT TLS intrinsic;
 * exits with an error message on platforms without it. No-op otherwise.
 */
2655 static void
2656 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
2658 if (mini_get_debug_options ()->better_cast_details) {
2659 int to_klass_reg = alloc_preg (cfg);
2660 int vtable_reg = alloc_preg (cfg);
2661 int klass_reg = alloc_preg (cfg);
2662 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2664 if (!tls_get) {
2665 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
2666 exit (1);
2669 MONO_ADD_INS (cfg->cbb, tls_get);
/* from = obj->vtable->klass; to = the target class constant. */
2670 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2671 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2673 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
2674 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
2675 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/*
 * reset_cast_details:
 *
 *   Companion to save_cast_details: after a cast succeeds, clear the
 * recorded cast information in JIT TLS (zeroing class_cast_from is
 * sufficient to mark the record invalid). No-op unless --debug=casts.
 */
2679 static void
2680 reset_cast_details (MonoCompile *cfg)
2682 /* Reset the variables holding the cast details */
2683 if (mini_get_debug_options ()->better_cast_details) {
2684 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2686 MONO_ADD_INS (cfg->cbb, tls_get);
2687 /* It is enough to reset the from field */
2688 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
2693 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
2694 * generic code is generated.
 * The heavy lifting is delegated to the corlib Nullable "Unbox" helper:
 * either called directly, or — in shared code — called indirectly through
 * an address and RGCTX fetched from the runtime generic context.
2696 static MonoInst*
2697 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
2699 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
2701 if (context_used) {
2702 MonoInst *rgctx, *addr;
2704 /* FIXME: What if the class is shared? We might not
2705 have to get the address of the method from the
2706 RGCTX. */
2707 addr = emit_get_rgctx_method (cfg, context_used, method,
2708 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2710 rgctx = emit_get_rgctx (cfg, method, context_used);
2712 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
2713 } else {
2714 return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 *
 *   Emit the CIL unbox sequence for sp[0] against KLASS: verify the object
 * is a non-array whose class's element_class matches klass->element_class
 * (throwing InvalidCastException otherwise; in shared code the expected
 * element class comes from the RGCTX), then return the address of the
 * boxed payload, i.e. obj + sizeof (MonoObject), typed as a managed
 * pointer to KLASS.
 */
2718 static MonoInst*
2719 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
2721 MonoInst *add;
2722 int obj_reg;
2723 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
2724 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
2725 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
2726 int rank_reg = alloc_dreg (cfg ,STACK_I4);
2728 obj_reg = sp [0]->dreg;
2729 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2730 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2732 /* FIXME: generics */
2733 g_assert (klass->rank == 0);
2735 // Check rank == 0
2736 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
2737 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2739 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2740 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
2742 if (context_used) {
2743 MonoInst *element_class;
2745 /* This assertion is from the unboxcast insn */
2746 g_assert (klass->rank == 0);
2748 element_class = emit_get_rgctx_klass (cfg, context_used,
2749 klass->element_class, MONO_RGCTX_INFO_KLASS);
2751 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
2752 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2753 } else {
2754 save_cast_details (cfg, klass->element_class, obj_reg);
2755 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
2756 reset_cast_details (cfg);
/* Result: pointer past the MonoObject header, at the boxed value. */
2759 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
2760 MONO_ADD_INS (cfg->cbb, add);
2761 add->type = STACK_MP;
2762 add->klass = klass;
2764 return add;
/*
 * handle_alloc:
 *
 *   Emit the allocation of a new object of type KLASS (FOR_BOX
 * distinguishes box allocations for the GC's specialized allocators).
 * Allocation path, in order of preference: mono_object_new under
 * MONO_OPT_SHARED; a compact corlib-token helper for out-of-line AOT
 * code; a GC managed allocator method when available; otherwise the
 * allocator function chosen by the runtime (which may want the
 * pointer-size-rounded instance size passed explicitly — pass_lw).
 */
2767 static MonoInst*
2768 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box)
2770 MonoInst *iargs [2];
2771 void *alloc_ftn;
2773 if (cfg->opt & MONO_OPT_SHARED) {
2774 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2775 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
2777 alloc_ftn = mono_object_new;
2778 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
2779 /* This happens often in argument checking code, eg. throw new FooException... */
2780 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
2781 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
2782 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
2783 } else {
2784 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2785 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
2786 gboolean pass_lw;
2788 if (managed_alloc) {
2789 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2790 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
2792 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
2793 if (pass_lw) {
/* Instance size in pointer-sized words, rounded up. */
2794 guint32 lw = vtable->klass->instance_size;
2795 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
2796 EMIT_NEW_ICONST (cfg, iargs [0], lw);
2797 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
2799 else {
2800 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2804 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_alloc_from_inst:
 *
 *   Variant of handle_alloc for shared generic code, where the vtable is
 * not known at compile time but supplied at runtime by DATA_INST
 * (a domain under MONO_OPT_SHARED, otherwise a vtable for
 * mono_object_new_specific). managed_alloc stays NULL here — see the
 * FIXME: the class is open, so its vtable cannot be computed to pick a
 * GC-specialized allocator.
 */
2807 static MonoInst*
2808 handle_alloc_from_inst (MonoCompile *cfg, MonoClass *klass, MonoInst *data_inst,
2809 gboolean for_box)
2811 MonoInst *iargs [2];
2812 MonoMethod *managed_alloc = NULL;
2813 void *alloc_ftn;
2816 FIXME: we cannot get managed_alloc here because we can't get
2817 the class's vtable (because it's not a closed class)
2819 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2820 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
2823 if (cfg->opt & MONO_OPT_SHARED) {
2824 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2825 iargs [1] = data_inst;
2826 alloc_ftn = mono_object_new;
2827 } else {
2828 if (managed_alloc) {
2829 iargs [0] = data_inst;
2830 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
2833 iargs [0] = data_inst;
2834 alloc_ftn = mono_object_new_specific;
2837 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_box:
 *
 *   Emit the CIL box sequence for VAL of valuetype KLASS: allocate a boxed
 * object and store the value past the MonoObject header; returns the new
 * object. Nullable<T> is special-cased through the corlib "Box" helper,
 * which implements its null/has-value semantics.
 */
2840 static MonoInst*
2841 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass)
2843 MonoInst *alloc, *ins;
2845 if (mono_class_is_nullable (klass)) {
2846 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
2847 return mono_emit_method_call (cfg, method, &val, NULL);
2850 alloc = handle_alloc (cfg, klass, TRUE);
2852 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
2854 return alloc;
/*
 * handle_box_from_inst:
 *
 *   Shared-generic-code variant of handle_box: the runtime type data comes
 * from DATA_INST (see handle_alloc_from_inst). Nullable<T> boxing goes
 * through the corlib "Box" helper, called indirectly via an address and
 * RGCTX fetched from the runtime generic context.
 */
2857 static MonoInst *
2858 handle_box_from_inst (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoInst *data_inst)
2860 MonoInst *alloc, *ins;
2862 if (mono_class_is_nullable (klass)) {
2863 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
2864 /* FIXME: What if the class is shared? We might not
2865 have to get the method address from the RGCTX. */
2866 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
2867 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2868 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2870 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
2871 } else {
2872 alloc = handle_alloc_from_inst (cfg, klass, data_inst, TRUE);
2874 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
2876 return alloc;
/*
 * handle_castclass:
 *
 *   Emit the CIL castclass check for SRC against KLASS and return SRC
 * unchanged (a cast does not alter the reference). null passes through via
 * an early branch to is_null_bb. Interfaces use the interface-cast helper;
 * non-array sealed classes (JIT mode only) can compare the MonoClass
 * pointer directly; everything else goes through the generic
 * mini_emit_castclass walk. Failures raise InvalidCastException; cast
 * details are saved/reset for --debug=casts.
 */
2880 static MonoInst*
2881 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
2883 MonoBasicBlock *is_null_bb;
2884 int obj_reg = src->dreg;
2885 int vtable_reg = alloc_preg (cfg);
2887 NEW_BBLOCK (cfg, is_null_bb);
2889 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
2890 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
2892 save_cast_details (cfg, klass, obj_reg);
2894 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2895 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2896 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
2897 } else {
2898 int klass_reg = alloc_preg (cfg);
2900 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2902 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
2903 /* the remoting code is broken, access the class for now */
2904 if (0) {
2905 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
2906 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
2907 } else {
2908 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2909 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
2911 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2912 } else {
2913 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2914 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, is_null_bb);
2918 MONO_START_BB (cfg, is_null_bb);
2920 reset_cast_details (cfg);
2922 return src;
2925 static MonoInst*
2926 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
2928 MonoInst *ins;
2929 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
2930 int obj_reg = src->dreg;
2931 int vtable_reg = alloc_preg (cfg);
2932 int res_reg = alloc_preg (cfg);
2934 NEW_BBLOCK (cfg, is_null_bb);
2935 NEW_BBLOCK (cfg, false_bb);
2936 NEW_BBLOCK (cfg, end_bb);
2938 /* Do the assignment at the beginning, so the other assignment can be if converted */
2939 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
2940 ins->type = STACK_OBJ;
2941 ins->klass = klass;
2943 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
2944 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
2946 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2947 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2948 /* the is_null_bb target simply copies the input register to the output */
2949 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
2950 } else {
2951 int klass_reg = alloc_preg (cfg);
2953 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2955 if (klass->rank) {
2956 int rank_reg = alloc_preg (cfg);
2957 int eclass_reg = alloc_preg (cfg);
2959 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2960 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
2961 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
2962 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2963 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
2964 if (klass->cast_class == mono_defaults.object_class) {
2965 int parent_reg = alloc_preg (cfg);
2966 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
2967 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
2968 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
2969 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
2970 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
2971 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
2972 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
2973 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
2974 } else if (klass->cast_class == mono_defaults.enum_class) {
2975 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
2976 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
2977 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
2978 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
2979 } else {
2980 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
2981 /* Check that the object is a vector too */
2982 int bounds_reg = alloc_preg (cfg);
2983 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
2984 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
2985 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
2988 /* the is_null_bb target simply copies the input register to the output */
2989 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
2991 } else if (mono_class_is_nullable (klass)) {
2992 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2993 /* the is_null_bb target simply copies the input register to the output */
2994 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
2995 } else {
2996 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
2997 /* the remoting code is broken, access the class for now */
2998 if (0) {
2999 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3000 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3001 } else {
3002 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3003 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3005 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3006 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3007 } else {
3008 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3009 /* the is_null_bb target simply copies the input register to the output */
3010 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, is_null_bb);
3015 MONO_START_BB (cfg, false_bb);
3017 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3018 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3020 MONO_START_BB (cfg, is_null_bb);
3022 MONO_START_BB (cfg, end_bb);
3024 return ins;
/*
 * handle_cisinst:
 *
 *   Emit IR implementing the CEE_MONO_CISINST opcode for KLASS applied to SRC.
 * This opcode takes as input an object reference and a class, and returns:
 * 0) if the object is an instance of the class,
 * 1) if the object is not instance of the class,
 * 2) if the object is a proxy whose type cannot be determined
 * (a transparent proxy with custom type info — checked via remoting fields).
 *
 * The result is materialized in DREG; the returned OP_ICONST is only a
 * placeholder carrying dreg/type for the evaluation stack (see FIXME below).
 */
static MonoInst*
handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
{
	/* This opcode takes as input an object reference and a class, and returns:
	0) if the object is an instance of the class,
	1) if the object is not instance of the class,
	2) if the object is a proxy whose type cannot be determined */

	MonoInst *ins;
	MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
	int obj_reg = src->dreg;
	int dreg = alloc_ireg (cfg);
	int tmp_reg;
	int klass_reg = alloc_preg (cfg);

	NEW_BBLOCK (cfg, true_bb);
	NEW_BBLOCK (cfg, false_bb);
	NEW_BBLOCK (cfg, false2_bb);
	NEW_BBLOCK (cfg, end_bb);
	NEW_BBLOCK (cfg, no_proxy_bb);

	/* A null reference is "not an instance" (result 1) */
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);

	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
		NEW_BBLOCK (cfg, interface_fail_bb);

		/* First try the interface bitmap check on the vtable */
		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
		mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
		MONO_START_BB (cfg, interface_fail_bb);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));

		/* If the iface check failed and it is not a proxy, the answer is 1 */
		mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);

		/* Proxy with custom type info -> result 2 ("cannot be determined") */
		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
	} else {
		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));

		/* Non-proxy objects take the plain isinst path below */
		mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
		/* For proxies, check against the remote class' proxy_class instead */
		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));

		/* Proxy without custom type info behaves like a normal object */
		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);

		/* Proxy with custom type info: a failed cast means "undetermined" (2) */
		mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);

		MONO_START_BB (cfg, no_proxy_bb);

		mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
	}

	/* Result blocks: false_bb -> 1, false2_bb -> 2, true_bb -> 0 */
	MONO_START_BB (cfg, false_bb);

	MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

	MONO_START_BB (cfg, false2_bb);

	MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

	MONO_START_BB (cfg, true_bb);

	MONO_EMIT_NEW_ICONST (cfg, dreg, 0);

	MONO_START_BB (cfg, end_bb);

	/* FIXME: */
	MONO_INST_NEW (cfg, ins, OP_ICONST);
	ins->dreg = dreg;
	ins->type = STACK_I4;

	return ins;
}
/*
 * handle_ccastclass:
 *
 *   Emit IR implementing the CEE_MONO_CCASTCLASS opcode for KLASS applied to
 * SRC. This opcode takes as input an object reference and a class, and returns:
 * 0) if the object is an instance of the class,
 * 1) if the object is a proxy whose type cannot be determined;
 * an InvalidCastException is thrown otherwise.
 *
 * As in handle_cisinst, the returned OP_ICONST is a placeholder carrying
 * dreg/type for the evaluation stack (see FIXME below).
 */
static MonoInst*
handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
{
	/* This opcode takes as input an object reference and a class, and returns:
	0) if the object is an instance of the class,
	1) if the object is a proxy whose type cannot be determined
	an InvalidCastException exception is thrown otherwhise*/

	MonoInst *ins;
	MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
	int obj_reg = src->dreg;
	int dreg = alloc_ireg (cfg);
	int tmp_reg = alloc_preg (cfg);
	int klass_reg = alloc_preg (cfg);

	NEW_BBLOCK (cfg, end_bb);
	NEW_BBLOCK (cfg, ok_result_bb);

	/* Casting a null reference always succeeds (result 0) */
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);

	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
		NEW_BBLOCK (cfg, interface_fail_bb);

		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
		mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
		MONO_START_BB (cfg, interface_fail_bb);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));

		/* Not a proxy -> throws InvalidCastException right here */
		mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);

		/* Proxy without custom type info also throws */
		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
		MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");

		/* Proxy with custom type info: cannot be determined (result 1) */
		MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
	} else {
		NEW_BBLOCK (cfg, no_proxy_bb);

		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
		mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);

		/* For proxies, check against the remote class' proxy_class */
		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));

		/* Proxy without custom type info behaves like a normal object */
		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);

		NEW_BBLOCK (cfg, fail_1_bb);

		mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);

		/* Failed cast on a custom-typed proxy: result 1, no exception */
		MONO_START_BB (cfg, fail_1_bb);

		MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

		MONO_START_BB (cfg, no_proxy_bb);

		/* Regular castclass semantics: throws on mismatch */
		mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
	}

	MONO_START_BB (cfg, ok_result_bb);

	MONO_EMIT_NEW_ICONST (cfg, dreg, 0);

	MONO_START_BB (cfg, end_bb);

	/* FIXME: */
	MONO_INST_NEW (cfg, ins, OP_ICONST);
	ins->dreg = dreg;
	ins->type = STACK_I4;

	return ins;
}
/*
 * handle_delegate_ctor:
 *
 *   Emit IR which allocates a delegate of class KLASS and initializes its
 * target, method, method_code and invoke_impl fields, inlining the work
 * normally done by mono_delegate_ctor (). Returns the instruction holding
 * the new delegate object.
 */
static G_GNUC_UNUSED MonoInst*
handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method)
{
	gpointer *trampoline;
	MonoInst *obj, *method_ins, *tramp_ins;
	MonoDomain *domain;
	guint8 **code_slot;

	obj = handle_alloc (cfg, klass, FALSE);

	/* Inline the contents of mono_delegate_ctor */

	/* Set target field */
	/* Optimize away setting of NULL target */
	if (!(target->opcode == OP_PCONST && target->inst_p0 == 0))
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);

	/* Set method field */
	EMIT_NEW_METHODCONST (cfg, method_ins, method);
	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);

	/*
	 * To avoid looking up the compiled code belonging to the target method
	 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
	 * store it, and we fill it after the method has been compiled.
	 * Skipped for AOT (the slot address can't be embedded) and for dynamic
	 * methods (their code can be freed).
	 */
	if (!cfg->compile_aot && !method->dynamic) {
		MonoInst *code_slot_ins;

		/* The hash and its slots are lazily created under the domain lock */
		domain = mono_domain_get ();
		mono_domain_lock (domain);
		if (!domain_jit_info (domain)->method_code_hash)
			domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
		code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
		if (!code_slot) {
			code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
			g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
		}
		mono_domain_unlock (domain);

		EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
	}

	/* Set invoke_impl field */
	if (cfg->compile_aot) {
		EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
	} else {
		trampoline = mono_create_delegate_trampoline (klass);
		EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
	}
	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);

	/* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */

	return obj;
}
3254 static MonoInst*
3255 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3257 MonoJitICallInfo *info;
3259 /* Need to register the icall so it gets an icall wrapper */
3260 info = mono_get_array_new_va_icall (rank);
3262 cfg->flags |= MONO_CFG_HAS_VARARGS;
3264 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3265 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
3268 static void
3269 mono_emit_load_got_addr (MonoCompile *cfg)
3271 MonoInst *getaddr, *dummy_use;
3273 if (!cfg->got_var || cfg->got_var_allocated)
3274 return;
3276 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3277 getaddr->dreg = cfg->got_var->dreg;
3279 /* Add it to the start of the first bblock */
3280 if (cfg->bb_entry->code) {
3281 getaddr->next = cfg->bb_entry->code;
3282 cfg->bb_entry->code = getaddr;
3284 else
3285 MONO_ADD_INS (cfg->bb_entry, getaddr);
3287 cfg->got_var_allocated = TRUE;
3290 * Add a dummy use to keep the got_var alive, since real uses might
3291 * only be generated by the back ends.
3292 * Add it to end_bblock, so the variable's lifetime covers the whole
3293 * method.
3294 * It would be better to make the usage of the got var explicit in all
3295 * cases when the backend needs it (i.e. calls, throw etc.), so this
3296 * wouldn't be needed.
3298 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3299 MONO_ADD_INS (cfg->bb_exit, dummy_use);
3302 static int inline_limit;
3303 static gboolean inline_limit_inited;
3305 static gboolean
3306 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3308 MonoMethodHeader *header;
3309 MonoVTable *vtable;
3310 #ifdef MONO_ARCH_SOFT_FLOAT
3311 MonoMethodSignature *sig = mono_method_signature (method);
3312 int i;
3313 #endif
3315 if (cfg->generic_sharing_context)
3316 return FALSE;
3318 #ifdef MONO_ARCH_HAVE_LMF_OPS
3319 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3320 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3321 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3322 return TRUE;
3323 #endif
3325 if (method->is_inflated)
3326 /* Avoid inflating the header */
3327 header = mono_method_get_header (((MonoMethodInflated*)method)->declaring);
3328 else
3329 header = mono_method_get_header (method);
3331 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME) ||
3332 (method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3333 (method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3334 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3335 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) ||
3336 (method->klass->marshalbyref) ||
3337 !header || header->num_clauses)
3338 return FALSE;
3340 /* also consider num_locals? */
3341 /* Do the size check early to avoid creating vtables */
3342 if (!inline_limit_inited) {
3343 if (getenv ("MONO_INLINELIMIT"))
3344 inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
3345 else
3346 inline_limit = INLINE_LENGTH_LIMIT;
3347 inline_limit_inited = TRUE;
3349 if (header->code_size >= inline_limit)
3350 return FALSE;
3353 * if we can initialize the class of the method right away, we do,
3354 * otherwise we don't allow inlining if the class needs initialization,
3355 * since it would mean inserting a call to mono_runtime_class_init()
3356 * inside the inlined code
3358 if (!(cfg->opt & MONO_OPT_SHARED)) {
3359 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3360 if (cfg->run_cctors && method->klass->has_cctor) {
3361 if (!method->klass->runtime_info)
3362 /* No vtable created yet */
3363 return FALSE;
3364 vtable = mono_class_vtable (cfg->domain, method->klass);
3365 if (!vtable)
3366 return FALSE;
3367 /* This makes so that inline cannot trigger */
3368 /* .cctors: too many apps depend on them */
3369 /* running with a specific order... */
3370 if (! vtable->initialized)
3371 return FALSE;
3372 mono_runtime_class_init (vtable);
3374 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3375 if (!method->klass->runtime_info)
3376 /* No vtable created yet */
3377 return FALSE;
3378 vtable = mono_class_vtable (cfg->domain, method->klass);
3379 if (!vtable)
3380 return FALSE;
3381 if (!vtable->initialized)
3382 return FALSE;
3384 } else {
3386 * If we're compiling for shared code
3387 * the cctor will need to be run at aot method load time, for example,
3388 * or at the end of the compilation of the inlining method.
3390 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3391 return FALSE;
3395 * CAS - do not inline methods with declarative security
3396 * Note: this has to be before any possible return TRUE;
3398 if (mono_method_has_declsec (method))
3399 return FALSE;
3401 #ifdef MONO_ARCH_SOFT_FLOAT
3402 /* FIXME: */
3403 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3404 return FALSE;
3405 for (i = 0; i < sig->param_count; ++i)
3406 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
3407 return FALSE;
3408 #endif
3410 return TRUE;
3413 static gboolean
3414 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
3416 if (vtable->initialized && !cfg->compile_aot)
3417 return FALSE;
3419 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3420 return FALSE;
3422 if (!mono_class_needs_cctor_run (vtable->klass, method))
3423 return FALSE;
3425 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3426 /* The initialization is already done before the method is called */
3427 return FALSE;
3429 return TRUE;
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of the rank-1 array ARR
 * with element class KLASS, including the bounds check. Returns the
 * instruction producing the address (STACK_PTR).
 */
static MonoInst*
mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index)
{
	MonoInst *ins;
	guint32 size;
	int mult_reg, add_reg, array_reg, index_reg, index2_reg;

	mono_class_init (klass);
	size = mono_class_array_element_size (klass);

	mult_reg = alloc_preg (cfg);
	array_reg = arr->dreg;
	index_reg = index->dreg;

#if SIZEOF_REGISTER == 8
	/* The array reg is 64 bits but the index reg is only 32 */
	index2_reg = alloc_preg (cfg);
	MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
#else
	if (index->type == STACK_I8) {
		/* Truncate a 64 bit index to the 32 bit register width */
		index2_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
	} else {
		index2_reg = index_reg;
	}
#endif

	MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);

#if defined(__i386__) || defined(__x86_64__)
	if (size == 1 || size == 2 || size == 4 || size == 8) {
		/* Maps element size -> shift amount for the LEA scale (index 0 unused) */
		static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };

		/* x86 fast path: fold base + index*size + offset into a single LEA */
		EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
		ins->type = STACK_PTR;

		return ins;
	}
#endif

	/* Generic path: addr = arr + index * size + offsetof (MonoArray, vector) */
	add_reg = alloc_preg (cfg);

	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
	NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
	ins->type = STACK_PTR;
	MONO_ADD_INS (cfg->cbb, ins);

	return ins;
}
#ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [INDEX_INS1, INDEX_INS2] of the
 * rank-2 array ARR with element class KLASS, including both range checks
 * against the array's bounds table. Only built on architectures with native
 * multiply support, since the address computation uses OP_PMUL.
 */
static MonoInst*
mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
{
	int bounds_reg = alloc_preg (cfg);
	int add_reg = alloc_preg (cfg);
	int mult_reg = alloc_preg (cfg);
	int mult2_reg = alloc_preg (cfg);
	int low1_reg = alloc_preg (cfg);
	int low2_reg = alloc_preg (cfg);
	int high1_reg = alloc_preg (cfg);
	int high2_reg = alloc_preg (cfg);
	int realidx1_reg = alloc_preg (cfg);
	int realidx2_reg = alloc_preg (cfg);
	int sum_reg = alloc_preg (cfg);
	int index1, index2;
	MonoInst *ins;
	guint32 size;

	mono_class_init (klass);
	size = mono_class_array_element_size (klass);

	index1 = index_ins1->dreg;
	index2 = index_ins2->dreg;

	/* range checking */
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
				       arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));

	/* First dimension: realidx1 = index1 - lower_bound; check against length.
	 * The unsigned LE_UN compare catches both negative and too-large indexes. */
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
				       bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
	MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
				       bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
	MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
	MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");

	/* Second dimension: same check, reading bounds [1] */
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
				       bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
	MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
				       bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
	MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
	MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");

	/* addr = arr + (realidx1 * len2 + realidx2) * size + offsetof (vector) */
	MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
	NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));

	ins->type = STACK_MP;
	ins->klass = klass;
	MONO_ADD_INS (cfg->cbb, ins);

	return ins;
}
#endif
3542 static MonoInst*
3543 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
3545 int rank;
3546 MonoInst *addr;
3547 MonoMethod *addr_method;
3548 int element_size;
3550 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
3552 if (rank == 1)
3553 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1]);
3555 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3556 /* emit_ldelema_2 depends on OP_LMUL */
3557 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
3558 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
3560 #endif
3562 element_size = mono_class_array_element_size (cmethod->klass->element_class);
3563 addr_method = mono_marshal_get_array_address (rank, element_size);
3564 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
3566 return addr;
3569 static MonoInst*
3570 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
3572 MonoInst *ins = NULL;
3574 static MonoClass *runtime_helpers_class = NULL;
3575 if (! runtime_helpers_class)
3576 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
3577 "System.Runtime.CompilerServices", "RuntimeHelpers");
3579 if (cmethod->klass == mono_defaults.string_class) {
3580 if (strcmp (cmethod->name, "get_Chars") == 0) {
3581 int dreg = alloc_ireg (cfg);
3582 int index_reg = alloc_preg (cfg);
3583 int mult_reg = alloc_preg (cfg);
3584 int add_reg = alloc_preg (cfg);
3586 #if SIZEOF_REGISTER == 8
3587 /* The array reg is 64 bits but the index reg is only 32 */
3588 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
3589 #else
3590 index_reg = args [1]->dreg;
3591 #endif
3592 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
3594 #if defined(__i386__) || defined(__x86_64__)
3595 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
3596 add_reg = ins->dreg;
3597 /* Avoid a warning */
3598 mult_reg = 0;
3599 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3600 add_reg, 0);
3601 #else
3602 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
3603 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3604 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3605 add_reg, G_STRUCT_OFFSET (MonoString, chars));
3606 #endif
3607 type_from_op (ins, NULL, NULL);
3608 return ins;
3609 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3610 int dreg = alloc_ireg (cfg);
3611 /* Decompose later to allow more optimizations */
3612 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
3613 ins->type = STACK_I4;
3614 cfg->cbb->has_array_access = TRUE;
3615 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
3617 return ins;
3618 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
3619 int mult_reg = alloc_preg (cfg);
3620 int add_reg = alloc_preg (cfg);
3622 /* The corlib functions check for oob already. */
3623 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
3624 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3625 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
3626 } else
3627 return NULL;
3628 } else if (cmethod->klass == mono_defaults.object_class) {
3630 if (strcmp (cmethod->name, "GetType") == 0) {
3631 int dreg = alloc_preg (cfg);
3632 int vt_reg = alloc_preg (cfg);
3633 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3634 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
3635 type_from_op (ins, NULL, NULL);
3637 return ins;
3638 #if !defined(MONO_ARCH_EMULATE_MUL_DIV) && !defined(HAVE_MOVING_COLLECTOR)
3639 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0) {
3640 int dreg = alloc_ireg (cfg);
3641 int t1 = alloc_ireg (cfg);
3643 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
3644 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
3645 ins->type = STACK_I4;
3647 return ins;
3648 #endif
3649 } else if (strcmp (cmethod->name, ".ctor") == 0) {
3650 MONO_INST_NEW (cfg, ins, OP_NOP);
3651 MONO_ADD_INS (cfg->cbb, ins);
3652 return ins;
3653 } else
3654 return NULL;
3655 } else if (cmethod->klass == mono_defaults.array_class) {
3656 if (cmethod->name [0] != 'g')
3657 return NULL;
3659 if (strcmp (cmethod->name, "get_Rank") == 0) {
3660 int dreg = alloc_ireg (cfg);
3661 int vtable_reg = alloc_preg (cfg);
3662 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, vtable_reg,
3663 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3664 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
3665 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3666 type_from_op (ins, NULL, NULL);
3668 return ins;
3669 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3670 int dreg = alloc_ireg (cfg);
3672 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
3673 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
3674 type_from_op (ins, NULL, NULL);
3676 return ins;
3677 } else
3678 return NULL;
3679 } else if (cmethod->klass == runtime_helpers_class) {
3681 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
3682 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
3683 return ins;
3684 } else
3685 return NULL;
3686 } else if (cmethod->klass == mono_defaults.thread_class) {
3687 if (strcmp (cmethod->name, "get_CurrentThread") == 0 && (ins = mono_arch_get_thread_intrinsic (cfg))) {
3688 ins->dreg = alloc_preg (cfg);
3689 ins->type = STACK_OBJ;
3690 MONO_ADD_INS (cfg->cbb, ins);
3691 return ins;
3692 } else if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
3693 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
3694 MONO_ADD_INS (cfg->cbb, ins);
3695 return ins;
3696 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
3697 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
3698 MONO_ADD_INS (cfg->cbb, ins);
3699 return ins;
3701 } else if (cmethod->klass == mono_defaults.monitor_class) {
3702 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
3703 if (strcmp (cmethod->name, "Enter") == 0) {
3704 MonoCallInst *call;
3706 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
3707 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
3708 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
3709 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
3711 return (MonoInst*)call;
3712 } else if (strcmp (cmethod->name, "Exit") == 0) {
3713 MonoCallInst *call;
3715 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
3716 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
3717 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
3718 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
3720 return (MonoInst*)call;
3722 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
3723 MonoMethod *fast_method = NULL;
3725 /* Avoid infinite recursion */
3726 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
3727 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
3728 strcmp (cfg->method->name, "FastMonitorExit") == 0))
3729 return NULL;
3731 if (strcmp (cmethod->name, "Enter") == 0 ||
3732 strcmp (cmethod->name, "Exit") == 0)
3733 fast_method = mono_monitor_get_fast_path (cmethod);
3734 if (!fast_method)
3735 return NULL;
3737 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
3738 #endif
3739 } else if (mini_class_is_system_array (cmethod->klass) &&
3740 strcmp (cmethod->name, "GetGenericValueImpl") == 0) {
3741 MonoInst *addr, *store, *load;
3742 MonoClass *eklass = mono_class_from_mono_type (fsig->params [1]);
3744 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1]);
3745 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
3746 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
3747 return store;
3748 } else if (cmethod->klass->image == mono_defaults.corlib &&
3749 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
3750 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
3751 ins = NULL;
3753 #if SIZEOF_REGISTER == 8
3754 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
3755 /* 64 bit reads are already atomic */
3756 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
3757 ins->dreg = mono_alloc_preg (cfg);
3758 ins->inst_basereg = args [0]->dreg;
3759 ins->inst_offset = 0;
3760 MONO_ADD_INS (cfg->cbb, ins);
3762 #endif
3764 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
3765 if (strcmp (cmethod->name, "Increment") == 0) {
3766 MonoInst *ins_iconst;
3767 guint32 opcode = 0;
3769 if (fsig->params [0]->type == MONO_TYPE_I4)
3770 opcode = OP_ATOMIC_ADD_NEW_I4;
3771 #if SIZEOF_REGISTER == 8
3772 else if (fsig->params [0]->type == MONO_TYPE_I8)
3773 opcode = OP_ATOMIC_ADD_NEW_I8;
3774 #endif
3775 if (opcode) {
3776 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
3777 ins_iconst->inst_c0 = 1;
3778 ins_iconst->dreg = mono_alloc_ireg (cfg);
3779 MONO_ADD_INS (cfg->cbb, ins_iconst);
3781 MONO_INST_NEW (cfg, ins, opcode);
3782 ins->dreg = mono_alloc_ireg (cfg);
3783 ins->inst_basereg = args [0]->dreg;
3784 ins->inst_offset = 0;
3785 ins->sreg2 = ins_iconst->dreg;
3786 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3787 MONO_ADD_INS (cfg->cbb, ins);
3789 } else if (strcmp (cmethod->name, "Decrement") == 0) {
3790 MonoInst *ins_iconst;
3791 guint32 opcode = 0;
3793 if (fsig->params [0]->type == MONO_TYPE_I4)
3794 opcode = OP_ATOMIC_ADD_NEW_I4;
3795 #if SIZEOF_REGISTER == 8
3796 else if (fsig->params [0]->type == MONO_TYPE_I8)
3797 opcode = OP_ATOMIC_ADD_NEW_I8;
3798 #endif
3799 if (opcode) {
3800 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
3801 ins_iconst->inst_c0 = -1;
3802 ins_iconst->dreg = mono_alloc_ireg (cfg);
3803 MONO_ADD_INS (cfg->cbb, ins_iconst);
3805 MONO_INST_NEW (cfg, ins, opcode);
3806 ins->dreg = mono_alloc_ireg (cfg);
3807 ins->inst_basereg = args [0]->dreg;
3808 ins->inst_offset = 0;
3809 ins->sreg2 = ins_iconst->dreg;
3810 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3811 MONO_ADD_INS (cfg->cbb, ins);
3813 } else if (strcmp (cmethod->name, "Add") == 0) {
3814 guint32 opcode = 0;
3816 if (fsig->params [0]->type == MONO_TYPE_I4)
3817 opcode = OP_ATOMIC_ADD_NEW_I4;
3818 #if SIZEOF_REGISTER == 8
3819 else if (fsig->params [0]->type == MONO_TYPE_I8)
3820 opcode = OP_ATOMIC_ADD_NEW_I8;
3821 #endif
3823 if (opcode) {
3824 MONO_INST_NEW (cfg, ins, opcode);
3825 ins->dreg = mono_alloc_ireg (cfg);
3826 ins->inst_basereg = args [0]->dreg;
3827 ins->inst_offset = 0;
3828 ins->sreg2 = args [1]->dreg;
3829 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3830 MONO_ADD_INS (cfg->cbb, ins);
3833 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
3835 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
3836 if (strcmp (cmethod->name, "Exchange") == 0) {
3837 guint32 opcode;
3839 if (fsig->params [0]->type == MONO_TYPE_I4)
3840 opcode = OP_ATOMIC_EXCHANGE_I4;
3841 #if SIZEOF_REGISTER == 8
3842 else if ((fsig->params [0]->type == MONO_TYPE_I8) ||
3843 (fsig->params [0]->type == MONO_TYPE_I) ||
3844 (fsig->params [0]->type == MONO_TYPE_OBJECT))
3845 opcode = OP_ATOMIC_EXCHANGE_I8;
3846 #else
3847 else if ((fsig->params [0]->type == MONO_TYPE_I) ||
3848 (fsig->params [0]->type == MONO_TYPE_OBJECT))
3849 opcode = OP_ATOMIC_EXCHANGE_I4;
3850 #endif
3851 else
3852 return NULL;
3854 MONO_INST_NEW (cfg, ins, opcode);
3855 ins->dreg = mono_alloc_ireg (cfg);
3856 ins->inst_basereg = args [0]->dreg;
3857 ins->inst_offset = 0;
3858 ins->sreg2 = args [1]->dreg;
3859 MONO_ADD_INS (cfg->cbb, ins);
3861 switch (fsig->params [0]->type) {
3862 case MONO_TYPE_I4:
3863 ins->type = STACK_I4;
3864 break;
3865 case MONO_TYPE_I8:
3866 case MONO_TYPE_I:
3867 ins->type = STACK_I8;
3868 break;
3869 case MONO_TYPE_OBJECT:
3870 ins->type = STACK_OBJ;
3871 break;
3872 default:
3873 g_assert_not_reached ();
3876 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
3878 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS_IMM
3880 * Can't implement CompareExchange methods this way since they have
3881 * three arguments. We can implement one of the common cases, where the new
3882 * value is a constant.
3884 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
3885 if ((fsig->params [1]->type == MONO_TYPE_I4 ||
3886 (sizeof (gpointer) == 4 && fsig->params [1]->type == MONO_TYPE_I))
3887 && args [2]->opcode == OP_ICONST) {
3888 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_IMM_I4);
3889 ins->dreg = alloc_ireg (cfg);
3890 ins->sreg1 = args [0]->dreg;
3891 ins->sreg2 = args [1]->dreg;
3892 ins->backend.data = GINT_TO_POINTER (args [2]->inst_c0);
3893 ins->type = STACK_I4;
3894 MONO_ADD_INS (cfg->cbb, ins);
3896 /* The I8 case is hard to detect, since the arg might be a conv.i8 (iconst) tree */
3898 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS_IMM */
3900 if (ins)
3901 return ins;
3902 } else if (cmethod->klass->image == mono_defaults.corlib) {
3903 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
3904 && strcmp (cmethod->klass->name, "Debugger") == 0) {
3905 MONO_INST_NEW (cfg, ins, OP_BREAK);
3906 MONO_ADD_INS (cfg->cbb, ins);
3907 return ins;
3909 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
3910 && strcmp (cmethod->klass->name, "Environment") == 0) {
3911 #ifdef PLATFORM_WIN32
3912 EMIT_NEW_ICONST (cfg, ins, 1);
3913 #else
3914 EMIT_NEW_ICONST (cfg, ins, 0);
3915 #endif
3916 return ins;
3918 } else if (cmethod->klass == mono_defaults.math_class) {
3920 * There is general branches code for Min/Max, but it does not work for
3921 * all inputs:
3922 * http://everything2.com/?node_id=1051618
3926 #ifdef MONO_ARCH_SIMD_INTRINSICS
3927 if (cfg->opt & MONO_OPT_SIMD) {
3928 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
3929 if (ins)
3930 return ins;
3932 #endif
3934 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
3938 * This entry point could be used later for arbitrary method
3939 * redirection.
3941 inline static MonoInst*
3942 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
3943 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
3945 if (method->klass == mono_defaults.string_class) {
3946 /* managed string allocation support */
3947 if (strcmp (method->name, "InternalAllocateStr") == 0) {
3948 MonoInst *iargs [2];
3949 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3950 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
3951 if (!managed_alloc)
3952 return NULL;
3953 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3954 iargs [1] = args [0];
3955 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
3958 return NULL;
/*
 * mono_save_args:
 *
 *   Store the call arguments on the stack SP into fresh local variables so an
 * inlined method body can refer to them as its own arguments. Allocates one
 * OP_LOCAL var per argument (including the implicit 'this') into cfg->args
 * and emits a store of each stack entry into it.
 */
static void
mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
{
	MonoInst *store, *temp;
	int i;

	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
		/* For slot 0 of an instance method the type comes from the stack entry,
		 * not the signature (the signature has no entry for 'this'). */
		MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];

		/*
		 * FIXME: We should use *args++ = sp [0], but that would mean the arg
		 * would be different than the MonoInst's used to represent arguments, and
		 * the ldelema implementation can't deal with that.
		 * Solution: When ldelema is used on an inline argument, create a var for
		 * it, emit ldelema on that var, and emit the saving code below in
		 * inline_method () if needed.
		 */
		temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
		cfg->args [i] = temp;
		/* This uses cfg->args [i] which is set by the preceding line */
		EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
		store->cil_code = sp [0]->cil_code;
		sp++;
	}
}
/* When non-zero, inlining can be restricted by method name through the
 * MONO_INLINE_{CALLED,CALLER}_METHOD_NAME_LIMIT environment variables
 * (see check_inline_*_method_name_limit below) — a debugging aid. */
#define MONO_INLINE_CALLED_LIMITED_METHODS 1
#define MONO_INLINE_CALLER_LIMITED_METHODS 1
#if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debugging filter: only allow inlining of methods whose full name starts
 * with the prefix given in the MONO_INLINE_CALLED_METHOD_NAME_LIMIT env var.
 * With no (or an empty) limit every method is allowed. The env var is read
 * once and cached.
 */
static gboolean
check_inline_called_method_name_limit (MonoMethod *called_method)
{
	static char *limit = NULL;

	if (limit == NULL) {
		char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");

		limit = (limit_string != NULL) ? limit_string : (char *) "";
	}

	if (limit [0] == '\0')
		return TRUE;

	{
		char *called_method_name = mono_method_full_name (called_method, TRUE);
		/* historical alternative kept for reference: strncmp_result <= 0 */
		gboolean matches = (strncmp (called_method_name, limit, strlen (limit)) == 0);

		g_free (called_method_name);
		return matches;
	}
}
#endif
#if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debugging filter: only allow inlining into methods whose full name starts
 * with the prefix given in the MONO_INLINE_CALLER_METHOD_NAME_LIMIT env var.
 * With no (or an empty) limit every caller is allowed. The env var is read
 * once and cached.
 */
static gboolean
check_inline_caller_method_name_limit (MonoMethod *caller_method)
{
	static char *limit = NULL;

	if (limit == NULL) {
		char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");

		limit = (limit_string != NULL) ? limit_string : (char *) "";
	}

	if (limit [0] == '\0')
		return TRUE;

	{
		char *caller_method_name = mono_method_full_name (caller_method, TRUE);
		/* historical alternative kept for reference: strncmp_result <= 0 */
		gboolean matches = (strncmp (caller_method_name, limit, strlen (limit)) == 0);

		g_free (caller_method_name);
		return matches;
	}
}
#endif
/*
 * inline_method:
 *
 *   Try to inline CMETHOD at the current point of compilation. On success the
 * inlined IR is merged into the caller's bblock graph and the return value
 * (if any) is pushed on *SP; returns the inline cost + 1. On failure (cost
 * too high, or mono_method_to_ir aborted) all newly created bblocks are
 * discarded and 0 is returned so the caller emits a normal call instead.
 * Works by saving ~a dozen per-method fields of CFG, recursing into
 * mono_method_to_ir () for CMETHOD, and restoring them afterwards.
 */
static int
inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
		guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
{
	MonoInst *ins, *rvar = NULL;
	MonoMethodHeader *cheader;
	MonoBasicBlock *ebblock, *sbblock;
	int i, costs;
	MonoMethod *prev_inlined_method;
	MonoInst **prev_locals, **prev_args;
	MonoType **prev_arg_types;
	guint prev_real_offset;
	GHashTable *prev_cbb_hash;
	MonoBasicBlock **prev_cil_offset_to_bb;
	MonoBasicBlock *prev_cbb;
	unsigned char* prev_cil_start;
	guint32 prev_cil_offset_to_bb_len;
	MonoMethod *prev_current_method;
	MonoGenericContext *prev_generic_context;
	gboolean ret_var_set, prev_ret_var_set;

	g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);

#if (MONO_INLINE_CALLED_LIMITED_METHODS)
	if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
		return 0;
#endif
#if (MONO_INLINE_CALLER_LIMITED_METHODS)
	if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
		return 0;
#endif

	if (cfg->verbose_level > 2)
		printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));

	if (!cmethod->inline_info) {
		mono_jit_stats.inlineable_methods++;
		cmethod->inline_info = 1;
	}
	/* allocate space to store the return value */
	if (!MONO_TYPE_IS_VOID (fsig->ret)) {
		rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
	}

	/* allocate local variables */
	cheader = mono_method_get_header (cmethod);
	prev_locals = cfg->locals;
	cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
	for (i = 0; i < cheader->num_locals; ++i)
		cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);

	/* allocate start and end blocks */
	/* This is needed so if the inline is aborted, we can clean up */
	NEW_BBLOCK (cfg, sbblock);
	sbblock->real_offset = real_offset;

	NEW_BBLOCK (cfg, ebblock);
	ebblock->block_num = cfg->num_bblocks++;
	ebblock->real_offset = real_offset;

	/* Save the per-method state of CFG before recursing; restored below. */
	prev_args = cfg->args;
	prev_arg_types = cfg->arg_types;
	prev_inlined_method = cfg->inlined_method;
	cfg->inlined_method = cmethod;
	cfg->ret_var_set = FALSE;
	prev_real_offset = cfg->real_offset;
	prev_cbb_hash = cfg->cbb_hash;
	prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
	prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
	prev_cil_start = cfg->cil_start;
	prev_cbb = cfg->cbb;
	prev_current_method = cfg->current_method;
	prev_generic_context = cfg->generic_context;
	prev_ret_var_set = cfg->ret_var_set;

	costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);

	ret_var_set = cfg->ret_var_set;

	cfg->inlined_method = prev_inlined_method;
	cfg->real_offset = prev_real_offset;
	cfg->cbb_hash = prev_cbb_hash;
	cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
	cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
	cfg->cil_start = prev_cil_start;
	cfg->locals = prev_locals;
	cfg->args = prev_args;
	cfg->arg_types = prev_arg_types;
	cfg->current_method = prev_current_method;
	cfg->generic_context = prev_generic_context;
	cfg->ret_var_set = prev_ret_var_set;

	/* costs < 0 means mono_method_to_ir gave up; the 60 cost cap is the
	 * inlining heuristic, bypassed when inlining is mandatory. */
	if ((costs >= 0 && costs < 60) || inline_allways) {
		if (cfg->verbose_level > 2)
			printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));

		mono_jit_stats.inlined_methods++;

		/* always add some code to avoid block split failures */
		MONO_INST_NEW (cfg, ins, OP_NOP);
		MONO_ADD_INS (prev_cbb, ins);

		prev_cbb->next_bb = sbblock;
		link_bblock (cfg, prev_cbb, sbblock);

		/*
		 * Get rid of the begin and end bblocks if possible to aid local
		 * optimizations.
		 */
		mono_merge_basic_blocks (cfg, prev_cbb, sbblock);

		if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
			mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);

		if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
			MonoBasicBlock *prev = ebblock->in_bb [0];
			mono_merge_basic_blocks (cfg, prev, ebblock);
			cfg->cbb = prev;
			if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
				mono_merge_basic_blocks (cfg, prev_cbb, prev);
				cfg->cbb = prev_cbb;
			}
		} else {
			cfg->cbb = ebblock;
		}

		if (rvar) {
			/*
			 * If the inlined method contains only a throw, then the ret var is not
			 * set, so set it to a dummy value.
			 */
			if (!ret_var_set) {
				static double r8_0 = 0.0;

				switch (rvar->type) {
				case STACK_I4:
					MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
					break;
				case STACK_I8:
					MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
					break;
				case STACK_PTR:
				case STACK_MP:
				case STACK_OBJ:
					MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
					break;
				case STACK_R8:
					MONO_INST_NEW (cfg, ins, OP_R8CONST);
					ins->type = STACK_R8;
					ins->inst_p0 = (void*)&r8_0;
					ins->dreg = rvar->dreg;
					MONO_ADD_INS (cfg->cbb, ins);
					break;
				case STACK_VTYPE:
					MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
					break;
				default:
					g_assert_not_reached ();
				}
			}

			/* Push the (possibly dummy) return value on the evaluation stack. */
			EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
			*sp++ = ins;
		}
		return costs + 1;
	} else {
		if (cfg->verbose_level > 2)
			printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
		cfg->exception_type = MONO_EXCEPTION_NONE;
		mono_loader_clear_error ();

		/* This gets rid of the newly added bblocks */
		cfg->cbb = prev_cbb;
	}
	return 0;
}
4228 * Some of these comments may well be out-of-date.
4229 * Design decisions: we do a single pass over the IL code (and we do bblock
4230 * splitting/merging in the few cases when it's required: a back jump to an IL
4231 * address that was not already seen as bblock starting point).
4232 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4233 * Complex operations are decomposed in simpler ones right away. We need to let the
4234 * arch-specific code peek and poke inside this process somehow (except when the
4235 * optimizations can take advantage of the full semantic info of coarse opcodes).
4236 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4237 * MonoInst->opcode initially is the IL opcode or some simplification of that
4238 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4239 * opcode with value bigger than OP_LAST.
4240 * At this point the IR can be handed over to an interpreter, a dumb code generator
4241 * or to the optimizing code generator that will translate it to SSA form.
4243 * Profiling directed optimizations.
4244 * We may compile by default with few or no optimizations and instrument the code
4245 * or the user may indicate what methods to optimize the most either in a config file
4246 * or through repeated runs where the compiler applies offline the optimizations to
4247 * each method and then decides if it was worth it.
/* Verification/bounds checks used throughout mono_method_to_ir (); they rely
 * on locals (sp, stack_start, header, num_args, ip, end) being in scope at
 * the use site, and UNVERIFIED / load_error being reachable labels/macros. */
#define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
#define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
#define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
#define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
#define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
#define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
#define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
#define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}

/* offset from br.s -> br like opcodes */
#define BIG_BRANCH_OFFSET 13
4262 static gboolean
4263 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4265 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4267 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   Pre-pass over the IL stream [START, END) which registers a basic block
 * (via GET_BBLOCK) at every branch target and at every instruction following
 * a branch, and marks blocks containing a 'throw' as out-of-line. Returns 0
 * on success; returns 1 and stores the offending ip into *POS when an
 * invalid opcode is found (UNVERIFIED jumps to the 'unverified' label).
 */
static int
get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
{
	unsigned char *ip = start;
	unsigned char *target;
	int i;
	guint cli_addr;
	MonoBasicBlock *bblock;
	const MonoOpcode *opcode;

	while (ip < end) {
		cli_addr = ip - start;
		i = mono_opcode_value ((const guint8 **)&ip, end);
		if (i < 0)
			UNVERIFIED;
		opcode = &mono_opcodes [i];
		/* Advance ip past the operand according to the operand kind;
		 * branch operands additionally register bblocks at their targets. */
		switch (opcode->argument) {
		case MonoInlineNone:
			ip++;
			break;
		case MonoInlineString:
		case MonoInlineType:
		case MonoInlineField:
		case MonoInlineMethod:
		case MonoInlineTok:
		case MonoInlineSig:
		case MonoShortInlineR:
		case MonoInlineI:
			ip += 5;
			break;
		case MonoInlineVar:
			ip += 3;
			break;
		case MonoShortInlineVar:
		case MonoShortInlineI:
			ip += 2;
			break;
		case MonoShortInlineBrTarget:
			target = start + cli_addr + 2 + (signed char)ip [1];
			GET_BBLOCK (cfg, bblock, target);
			ip += 2;
			if (ip < end)
				GET_BBLOCK (cfg, bblock, ip);
			break;
		case MonoInlineBrTarget:
			target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
			GET_BBLOCK (cfg, bblock, target);
			ip += 5;
			if (ip < end)
				GET_BBLOCK (cfg, bblock, ip);
			break;
		case MonoInlineSwitch: {
			guint32 n = read32 (ip + 1);
			guint32 j;
			ip += 5;
			/* switch targets are relative to the end of the whole instruction */
			cli_addr += 5 + 4 * n;
			target = start + cli_addr;
			GET_BBLOCK (cfg, bblock, target);

			for (j = 0; j < n; ++j) {
				target = start + cli_addr + (gint32)read32 (ip);
				GET_BBLOCK (cfg, bblock, target);
				ip += 4;
			}
			break;
		}
		case MonoInlineR:
		case MonoInlineI8:
			ip += 9;
			break;
		default:
			g_assert_not_reached ();
		}

		if (i == CEE_THROW) {
			unsigned char *bb_start = ip - 1;

			/* Find the start of the bblock containing the throw */
			bblock = NULL;
			while ((bb_start >= start) && !bblock) {
				bblock = cfg->cil_offset_to_bb [(bb_start) - start];
				bb_start --;
			}
			if (bblock)
				bblock->out_of_line = 1;
		}
	}
	return 0;
unverified:
	*pos = ip;
	return 1;
}
4363 static inline MonoMethod *
4364 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4366 MonoMethod *method;
4368 if (m->wrapper_type != MONO_WRAPPER_NONE)
4369 return mono_method_get_wrapper_data (m, token);
4371 method = mono_get_method_full (m->klass->image, token, klass, context);
4373 return method;
4376 static inline MonoMethod *
4377 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4379 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
4381 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
4382 return NULL;
4384 return method;
4387 static inline MonoClass*
4388 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
4390 MonoClass *klass;
4392 if (method->wrapper_type != MONO_WRAPPER_NONE)
4393 klass = mono_method_get_wrapper_data (method, token);
4394 else
4395 klass = mono_class_get_full (method->klass->image, token, context);
4396 if (klass)
4397 mono_class_init (klass);
4398 return klass;
/*
 * Returns TRUE if the JIT should abort inlining because "callee"
 * is influenced by security attributes.
 */
static
gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
{
	guint32 result;

	/* When compiling an inlined body (cfg->method != caller), any declarative
	 * security on the callee forces the inline to be abandoned. */
	if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
		return TRUE;
	}

	result = mono_declsec_linkdemand (cfg->domain, caller, callee);
	if (result == MONO_JIT_SECURITY_OK)
		return FALSE;

	if (result == MONO_JIT_LINKDEMAND_ECMA) {
		/* Generate code to throw a SecurityException before the actual call/link */
		MonoSecurityManager *secman = mono_security_manager_get_methods ();
		MonoInst *args [2];

		NEW_ICONST (cfg, args [0], 4);
		NEW_METHODCONST (cfg, args [1], caller);
		mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
	} else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
		/* don't hide previous results */
		cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
		cfg->exception_data = result;
		return TRUE;
	}

	return FALSE;
}
4436 static MonoMethod*
4437 method_access_exception (void)
4439 static MonoMethod *method = NULL;
4441 if (!method) {
4442 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4443 method = mono_class_get_method_from_name (secman->securitymanager,
4444 "MethodAccessException", 2);
4446 g_assert (method);
4447 return method;
4450 static void
4451 emit_throw_method_access_exception (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4452 MonoBasicBlock *bblock, unsigned char *ip)
4454 MonoMethod *thrower = method_access_exception ();
4455 MonoInst *args [2];
4457 EMIT_NEW_METHODCONST (cfg, args [0], caller);
4458 EMIT_NEW_METHODCONST (cfg, args [1], callee);
4459 mono_emit_method_call (cfg, thrower, args, NULL);
4462 static MonoMethod*
4463 verification_exception (void)
4465 static MonoMethod *method = NULL;
4467 if (!method) {
4468 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4469 method = mono_class_get_method_from_name (secman->securitymanager,
4470 "VerificationException", 0);
4472 g_assert (method);
4473 return method;
4476 static void
4477 emit_throw_verification_exception (MonoCompile *cfg, MonoBasicBlock *bblock, unsigned char *ip)
4479 MonoMethod *thrower = verification_exception ();
4481 mono_emit_method_call (cfg, thrower, NULL, NULL);
4484 static void
4485 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4486 MonoBasicBlock *bblock, unsigned char *ip)
4488 MonoSecurityCoreCLRLevel caller_level = mono_security_core_clr_method_level (caller, TRUE);
4489 MonoSecurityCoreCLRLevel callee_level = mono_security_core_clr_method_level (callee, TRUE);
4490 gboolean is_safe = TRUE;
4492 if (!(caller_level >= callee_level ||
4493 caller_level == MONO_SECURITY_CORE_CLR_SAFE_CRITICAL ||
4494 callee_level == MONO_SECURITY_CORE_CLR_SAFE_CRITICAL)) {
4495 is_safe = FALSE;
4498 if (!is_safe)
4499 emit_throw_method_access_exception (cfg, caller, callee, bblock, ip);
4502 static gboolean
4503 method_is_safe (MonoMethod *method)
4506 if (strcmp (method->name, "unsafeMethod") == 0)
4507 return FALSE;
4509 return TRUE;
/*
 * Check that the IL instructions at ip are the array initialization
 * sequence and return the pointer to the data and the size.
 * Returns NULL when the pattern does not match or the element type cannot
 * be handled; on success *out_size and *out_field_token are filled in.
 */
static const char*
initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
{
	/*
	 * newarr[System.Int32]
	 * dup
	 * ldtoken field valuetype ...
	 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
	 */
	if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
		guint32 token = read32 (ip + 7);
		guint32 field_token = read32 (ip + 2);
		guint32 field_index = field_token & 0xffffff;
		guint32 rva;
		const char *data_ptr;
		int size = 0;
		MonoMethod *cmethod;
		MonoClass *dummy_class;
		MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
		int dummy_align;

		if (!field)
			return NULL;

		*out_field_token = field_token;

		/* The call must really be RuntimeHelpers.InitializeArray from corlib. */
		cmethod = mini_get_method (NULL, method, token, NULL, NULL);
		if (!cmethod)
			return NULL;
		if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
			return NULL;
		switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
		case MONO_TYPE_BOOLEAN:
		case MONO_TYPE_I1:
		case MONO_TYPE_U1:
			size = 1; break;
		/* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
#if G_BYTE_ORDER == G_LITTLE_ENDIAN
		case MONO_TYPE_CHAR:
		case MONO_TYPE_I2:
		case MONO_TYPE_U2:
			size = 2; break;
		case MONO_TYPE_I4:
		case MONO_TYPE_U4:
		case MONO_TYPE_R4:
			size = 4; break;
		case MONO_TYPE_R8:
#ifdef ARM_FPU_FPA
			return NULL; /* stupid ARM FP swapped format */
#endif
		/* fallthrough: R8 shares the 8 byte size with I8/U8 when not on FPA */
		case MONO_TYPE_I8:
		case MONO_TYPE_U8:
			size = 8; break;
#endif
		default:
			return NULL;
		}
		size *= len;
		/* The data field must be at least as large as the array contents. */
		if (size > mono_type_size (field->type, &dummy_align))
			return NULL;
		*out_size = size;
		/*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
		if (!method->klass->image->dynamic) {
			field_index = read32 (ip + 2) & 0xffffff;
			mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
			data_ptr = mono_image_rva_map (method->klass->image, rva);
			/*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
			/* for aot code we do the lookup on load */
			if (aot && data_ptr)
				return GUINT_TO_POINTER (rva);
		} else {
			/*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
			g_assert (!aot);
			data_ptr = mono_field_get_data (field);
		}
		return data_ptr;
	}
	return NULL;
}
4596 static void
4597 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
4599 char *method_fname = mono_method_full_name (method, TRUE);
4600 char *method_code;
4602 if (mono_method_get_header (method)->code_size == 0)
4603 method_code = g_strdup ("method body is empty.");
4604 else
4605 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
4606 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
4607 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
4608 g_free (method_fname);
4609 g_free (method_code);
/*
 * set_exception_object:
 *
 *   Abort compilation by recording a pre-built exception object on CFG.
 * NOTE(review): the root is registered before the store — presumably so the
 * GC tracks cfg->exception_ptr by the time it holds a reference; confirm
 * MONO_GC_REGISTER_ROOT semantics before reordering.
 */
static void
set_exception_object (MonoCompile *cfg, MonoException *exception)
{
	cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
	MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
	cfg->exception_ptr = exception;
}
4620 static gboolean
4621 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4623 MonoType *type;
4625 if (cfg->generic_sharing_context)
4626 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
4627 else
4628 type = &klass->byval_arg;
4629 return MONO_TYPE_IS_REFERENCE (type);
/**
 * mono_decompose_array_access_opts:
 *
 *  Decompose array access opcodes (OP_LDLEN, OP_BOUNDS_CHECK, OP_NEWARR,
 * OP_STRLEN) into loads, arch bounds checks and icalls.
 * This should be in decompose.c, but it emits calls so it has to stay here until
 * the old JIT is gone.
 */
void
mono_decompose_array_access_opts (MonoCompile *cfg)
{
	MonoBasicBlock *bb, *first_bb;

	/*
	 * Unlike decompose_long_opts, this pass does not alter the CFG of the method so it
	 * can be executed anytime. It should be run before decompose_long
	 */

	/*
	 * Create a dummy bblock and emit code into it so we can use the normal
	 * code generation macros.
	 */
	cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
	first_bb = cfg->cbb;

	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
		MonoInst *ins;
		MonoInst *prev = NULL;
		MonoInst *dest;
		MonoInst *iargs [3];
		gboolean restart;

		/* Only visit blocks flagged during IR construction. */
		if (!bb->has_array_access)
			continue;

		if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE DECOMPOSE-ARRAY-ACCESS-OPTS ");

		cfg->cbb->code = cfg->cbb->last_ins = NULL;
		restart = TRUE;

		while (restart) {
			restart = FALSE;

			for (ins = bb->code; ins; ins = ins->next) {
				switch (ins->opcode) {
				case OP_LDLEN:
					/* array length lives at a fixed offset in MonoArray */
					NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg, ins->sreg1,
									  G_STRUCT_OFFSET (MonoArray, max_length));
					MONO_ADD_INS (cfg->cbb, dest);
					break;
				case OP_BOUNDS_CHECK:
					MONO_ARCH_EMIT_BOUNDS_CHECK (cfg, ins->sreg1, ins->inst_imm, ins->sreg2);
					break;
				case OP_NEWARR:
					if (cfg->opt & MONO_OPT_SHARED) {
						/* domain-shared code cannot bake in a vtable pointer */
						EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
						EMIT_NEW_CLASSCONST (cfg, iargs [1], ins->inst_newa_class);
						MONO_INST_NEW (cfg, iargs [2], OP_MOVE);
						iargs [2]->dreg = ins->sreg1;

						dest = mono_emit_jit_icall (cfg, mono_array_new, iargs);
						dest->dreg = ins->dreg;
					} else {
						MonoVTable *vtable = mono_class_vtable (cfg->domain, mono_array_class_get (ins->inst_newa_class, 1));

						g_assert (vtable);
						NEW_VTABLECONST (cfg, iargs [0], vtable);
						MONO_ADD_INS (cfg->cbb, iargs [0]);
						MONO_INST_NEW (cfg, iargs [1], OP_MOVE);
						iargs [1]->dreg = ins->sreg1;

						dest = mono_emit_jit_icall (cfg, mono_array_new_specific, iargs);
						dest->dreg = ins->dreg;
					}
					break;
				case OP_STRLEN:
					NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg,
									  ins->sreg1, G_STRUCT_OFFSET (MonoString, length));
					MONO_ADD_INS (cfg->cbb, dest);
					break;
				default:
					break;
				}

				g_assert (cfg->cbb == first_bb);

				if (cfg->cbb->code || (cfg->cbb != first_bb)) {
					/* Replace the original instruction with the new code sequence */

					mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
					first_bb->code = first_bb->last_ins = NULL;
					first_bb->in_count = first_bb->out_count = 0;
					cfg->cbb = first_bb;
				}
				else
					prev = ins;
			}
		}

		if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER DECOMPOSE-ARRAY-ACCESS-OPTS ");
	}
}
/* Union used to reinterpret one 64 bit payload as a double, a 64 bit int,
 * or two 32 bit words (used by the soft-float decomposition below). */
typedef union {
	guint32 vali [2];
	gint64 vall;
	double vald;
} DVal;
4740 #ifdef MONO_ARCH_SOFT_FLOAT
4743 * mono_decompose_soft_float:
4745 * Soft float support on ARM. We store each double value in a pair of integer vregs,
4746 * similar to long support on 32 bit platforms. 32 bit float values require special
4747 * handling when used as locals, arguments, and in calls.
4748 * One big problem with soft-float is that there are few r4 test cases in our test suite.
4750 void
4751 mono_decompose_soft_float (MonoCompile *cfg)
4753 MonoBasicBlock *bb, *first_bb;
4756 * This pass creates long opcodes, so it should be run before decompose_long_opts ().
4760 * Create a dummy bblock and emit code into it so we can use the normal
4761 * code generation macros.
4763 cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
4764 first_bb = cfg->cbb;
4766 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4767 MonoInst *ins;
4768 MonoInst *prev = NULL;
4769 gboolean restart;
4771 if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE HANDLE-SOFT-FLOAT ");
4773 cfg->cbb->code = cfg->cbb->last_ins = NULL;
4774 restart = TRUE;
4776 while (restart) {
4777 restart = FALSE;
4779 for (ins = bb->code; ins; ins = ins->next) {
4780 const char *spec = INS_INFO (ins->opcode);
4782 /* Most fp operations are handled automatically by opcode emulation */
4784 switch (ins->opcode) {
4785 case OP_R8CONST: {
4786 DVal d;
4787 d.vald = *(double*)ins->inst_p0;
4788 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
4789 break;
4791 case OP_R4CONST: {
4792 DVal d;
4793 /* We load the r8 value */
4794 d.vald = *(float*)ins->inst_p0;
4795 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
4796 break;
4798 case OP_FMOVE:
4799 ins->opcode = OP_LMOVE;
4800 break;
4801 case OP_FGETLOW32:
4802 ins->opcode = OP_MOVE;
4803 ins->sreg1 = ins->sreg1 + 1;
4804 break;
4805 case OP_FGETHIGH32:
4806 ins->opcode = OP_MOVE;
4807 ins->sreg1 = ins->sreg1 + 2;
4808 break;
4809 case OP_SETFRET: {
4810 int reg = ins->sreg1;
4812 ins->opcode = OP_SETLRET;
4813 ins->dreg = -1;
4814 ins->sreg1 = reg + 1;
4815 ins->sreg2 = reg + 2;
4816 break;
4818 case OP_LOADR8_MEMBASE:
4819 ins->opcode = OP_LOADI8_MEMBASE;
4820 break;
4821 case OP_STORER8_MEMBASE_REG:
4822 ins->opcode = OP_STOREI8_MEMBASE_REG;
4823 break;
4824 case OP_STORER4_MEMBASE_REG: {
4825 MonoInst *iargs [2];
4826 int addr_reg;
4828 /* Arg 1 is the double value */
4829 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4830 iargs [0]->dreg = ins->sreg1;
4832 /* Arg 2 is the address to store to */
4833 addr_reg = mono_alloc_preg (cfg);
4834 EMIT_NEW_BIALU_IMM (cfg, iargs [1], OP_PADD_IMM, addr_reg, ins->inst_destbasereg, ins->inst_offset);
4835 mono_emit_jit_icall (cfg, mono_fstore_r4, iargs);
4836 restart = TRUE;
4837 break;
4839 case OP_LOADR4_MEMBASE: {
4840 MonoInst *iargs [1];
4841 MonoInst *conv;
4842 int addr_reg;
4844 addr_reg = mono_alloc_preg (cfg);
4845 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, addr_reg, ins->inst_basereg, ins->inst_offset);
4846 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
4847 conv->dreg = ins->dreg;
4848 break;
4850 case OP_FCALL:
4851 case OP_FCALL_REG:
4852 case OP_FCALL_MEMBASE: {
4853 MonoCallInst *call = (MonoCallInst*)ins;
4854 if (call->signature->ret->type == MONO_TYPE_R4) {
4855 MonoCallInst *call2;
4856 MonoInst *iargs [1];
4857 MonoInst *conv;
4859 /* Convert the call into a call returning an int */
4860 MONO_INST_NEW_CALL (cfg, call2, OP_CALL);
4861 memcpy (call2, call, sizeof (MonoCallInst));
4862 switch (ins->opcode) {
4863 case OP_FCALL:
4864 call2->inst.opcode = OP_CALL;
4865 break;
4866 case OP_FCALL_REG:
4867 call2->inst.opcode = OP_CALL_REG;
4868 break;
4869 case OP_FCALL_MEMBASE:
4870 call2->inst.opcode = OP_CALL_MEMBASE;
4871 break;
4872 default:
4873 g_assert_not_reached ();
4875 call2->inst.dreg = mono_alloc_ireg (cfg);
4876 MONO_ADD_INS (cfg->cbb, (MonoInst*)call2);
4878 /* FIXME: Optimize this */
4880 /* Emit an r4->r8 conversion */
4881 EMIT_NEW_VARLOADA_VREG (cfg, iargs [0], call2->inst.dreg, &mono_defaults.int32_class->byval_arg);
4882 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
4883 conv->dreg = ins->dreg;
4884 } else {
4885 switch (ins->opcode) {
4886 case OP_FCALL:
4887 ins->opcode = OP_LCALL;
4888 break;
4889 case OP_FCALL_REG:
4890 ins->opcode = OP_LCALL_REG;
4891 break;
4892 case OP_FCALL_MEMBASE:
4893 ins->opcode = OP_LCALL_MEMBASE;
4894 break;
4895 default:
4896 g_assert_not_reached ();
4899 break;
4901 case OP_FCOMPARE: {
4902 MonoJitICallInfo *info;
4903 MonoInst *iargs [2];
4904 MonoInst *call, *cmp, *br;
4906 /* Convert fcompare+fbcc to icall+icompare+beq */
4908 info = mono_find_jit_opcode_emulation (ins->next->opcode);
4909 g_assert (info);
4911 /* Create dummy MonoInst's for the arguments */
4912 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4913 iargs [0]->dreg = ins->sreg1;
4914 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
4915 iargs [1]->dreg = ins->sreg2;
4917 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
4919 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE_IMM);
4920 cmp->sreg1 = call->dreg;
4921 cmp->inst_imm = 0;
4922 MONO_ADD_INS (cfg->cbb, cmp);
4924 MONO_INST_NEW (cfg, br, OP_IBNE_UN);
4925 br->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * 2);
4926 br->inst_true_bb = ins->next->inst_true_bb;
4927 br->inst_false_bb = ins->next->inst_false_bb;
4928 MONO_ADD_INS (cfg->cbb, br);
4930 /* The call sequence might include fp ins */
4931 restart = TRUE;
4933 /* Skip fbcc or fccc */
4934 NULLIFY_INS (ins->next);
4935 break;
4937 case OP_FCEQ:
4938 case OP_FCGT:
4939 case OP_FCGT_UN:
4940 case OP_FCLT:
4941 case OP_FCLT_UN: {
4942 MonoJitICallInfo *info;
4943 MonoInst *iargs [2];
4944 MonoInst *call;
4946 /* Convert fccc to icall+icompare+iceq */
4948 info = mono_find_jit_opcode_emulation (ins->opcode);
4949 g_assert (info);
4951 /* Create dummy MonoInst's for the arguments */
4952 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4953 iargs [0]->dreg = ins->sreg1;
4954 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
4955 iargs [1]->dreg = ins->sreg2;
4957 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
4959 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, call->dreg, 1);
4960 MONO_EMIT_NEW_UNALU (cfg, OP_ICEQ, ins->dreg, -1);
4962 /* The call sequence might include fp ins */
4963 restart = TRUE;
4964 break;
4966 case OP_CKFINITE: {
4967 MonoInst *iargs [2];
4968 MonoInst *call, *cmp;
4970 /* Convert to icall+icompare+cond_exc+move */
4972 /* Create dummy MonoInst's for the arguments */
4973 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4974 iargs [0]->dreg = ins->sreg1;
4976 call = mono_emit_jit_icall (cfg, mono_isfinite, iargs);
4978 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE_IMM);
4979 cmp->sreg1 = call->dreg;
4980 cmp->inst_imm = 1;
4981 MONO_ADD_INS (cfg->cbb, cmp);
4983 MONO_EMIT_NEW_COND_EXC (cfg, INE_UN, "ArithmeticException");
4985 /* Do the assignment if the value is finite */
4986 MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, ins->dreg, ins->sreg1);
4988 restart = TRUE;
4989 break;
4991 default:
4992 if (spec [MONO_INST_SRC1] == 'f' || spec [MONO_INST_SRC2] == 'f' || spec [MONO_INST_DEST] == 'f') {
4993 mono_print_ins (ins);
4994 g_assert_not_reached ();
4996 break;
4999 g_assert (cfg->cbb == first_bb);
5001 if (cfg->cbb->code || (cfg->cbb != first_bb)) {
5002 /* Replace the original instruction with the new code sequence */
5004 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
5005 first_bb->code = first_bb->last_ins = NULL;
5006 first_bb->in_count = first_bb->out_count = 0;
5007 cfg->cbb = first_bb;
5009 else
5010 prev = ins;
5014 if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER HANDLE-SOFT-FLOAT ");
5017 mono_decompose_long_opts (cfg);
5020 #endif
5022 static void
5023 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5025 MonoInst *ins;
5026 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5027 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5028 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5029 /* Optimize reg-reg moves away */
5031 * Can't optimize other opcodes, since sp[0] might point to
5032 * the last ins of a decomposed opcode.
5034 sp [0]->dreg = (cfg)->locals [n]->dreg;
5035 } else {
5036 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
5041 * ldloca inhibits many optimizations so try to get rid of it in common
5042 * cases.
5044 static inline unsigned char *
5045 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
5047 int local, token;
5048 MonoClass *klass;
5050 if (size == 1) {
5051 local = ip [1];
5052 ip += 2;
5053 } else {
5054 local = read16 (ip + 2);
5055 ip += 4;
5058 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5059 gboolean skip = FALSE;
5061 /* From the INITOBJ case */
5062 token = read32 (ip + 2);
5063 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5064 CHECK_TYPELOAD (klass);
5065 if (generic_class_is_reference_type (cfg, klass)) {
5066 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5067 } else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5068 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5069 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5070 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
5071 } else {
5072 skip = TRUE;
5075 if (!skip)
5076 return ip + 6;
5078 load_error:
5079 return NULL;
5082 static gboolean
5083 is_exception_class (MonoClass *class)
5085 while (class) {
5086 if (class == mono_defaults.exception_class)
5087 return TRUE;
5088 class = class->parent;
5090 return FALSE;
5094 * mono_method_to_ir:
5096 * Translate the .net IL into linear IR.
5099 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5100 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5101 guint inline_offset, gboolean is_virtual_call)
5103 MonoInst *ins, **sp, **stack_start;
5104 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5105 MonoMethod *cmethod, *method_definition;
5106 MonoInst **arg_array;
5107 MonoMethodHeader *header;
5108 MonoImage *image;
5109 guint32 token, ins_flag;
5110 MonoClass *klass;
5111 MonoClass *constrained_call = NULL;
5112 unsigned char *ip, *end, *target, *err_pos;
5113 static double r8_0 = 0.0;
5114 MonoMethodSignature *sig;
5115 MonoGenericContext *generic_context = NULL;
5116 MonoGenericContainer *generic_container = NULL;
5117 MonoType **param_types;
5118 int i, n, start_new_bblock, dreg;
5119 int num_calls = 0, inline_costs = 0;
5120 int breakpoint_id = 0;
5121 guint num_args;
5122 MonoBoolean security, pinvoke;
5123 MonoSecurityManager* secman = NULL;
5124 MonoDeclSecurityActions actions;
5125 GSList *class_inits = NULL;
5126 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5127 int context_used;
5129 /* serialization and xdomain stuff may need access to private fields and methods */
5130 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5131 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5132 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5133 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5134 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5135 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5137 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5139 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5140 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5141 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5142 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5144 image = method->klass->image;
5145 header = mono_method_get_header (method);
5146 generic_container = mono_method_get_generic_container (method);
5147 sig = mono_method_signature (method);
5148 num_args = sig->hasthis + sig->param_count;
5149 ip = (unsigned char*)header->code;
5150 cfg->cil_start = ip;
5151 end = ip + header->code_size;
5152 mono_jit_stats.cil_code_size += header->code_size;
5154 method_definition = method;
5155 while (method_definition->is_inflated) {
5156 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5157 method_definition = imethod->declaring;
5160 /* SkipVerification is not allowed if core-clr is enabled */
5161 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5162 dont_verify = TRUE;
5163 dont_verify_stloc = TRUE;
5166 if (!dont_verify && mini_method_verify (cfg, method_definition))
5167 goto exception_exit;
5169 if (mono_debug_using_mono_debugger ())
5170 cfg->keep_cil_nops = TRUE;
5172 if (sig->is_inflated)
5173 generic_context = mono_method_get_context (method);
5174 else if (generic_container)
5175 generic_context = &generic_container->context;
5176 cfg->generic_context = generic_context;
5178 if (!cfg->generic_sharing_context)
5179 g_assert (!sig->has_type_parameters);
5181 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5182 g_assert (method->is_inflated);
5183 g_assert (mono_method_get_context (method)->method_inst);
5185 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5186 g_assert (sig->generic_param_count);
5188 if (cfg->method == method) {
5189 cfg->real_offset = 0;
5190 } else {
5191 cfg->real_offset = inline_offset;
5194 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5195 cfg->cil_offset_to_bb_len = header->code_size;
5197 cfg->current_method = method;
5199 if (cfg->verbose_level > 2)
5200 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5202 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5203 if (sig->hasthis)
5204 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5205 for (n = 0; n < sig->param_count; ++n)
5206 param_types [n + sig->hasthis] = sig->params [n];
5207 cfg->arg_types = param_types;
5209 dont_inline = g_list_prepend (dont_inline, method);
5210 if (cfg->method == method) {
5212 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5213 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5215 /* ENTRY BLOCK */
5216 NEW_BBLOCK (cfg, start_bblock);
5217 cfg->bb_entry = start_bblock;
5218 start_bblock->cil_code = NULL;
5219 start_bblock->cil_length = 0;
5221 /* EXIT BLOCK */
5222 NEW_BBLOCK (cfg, end_bblock);
5223 cfg->bb_exit = end_bblock;
5224 end_bblock->cil_code = NULL;
5225 end_bblock->cil_length = 0;
5226 g_assert (cfg->num_bblocks == 2);
5228 arg_array = cfg->args;
5230 if (header->num_clauses) {
5231 cfg->spvars = g_hash_table_new (NULL, NULL);
5232 cfg->exvars = g_hash_table_new (NULL, NULL);
5234 /* handle exception clauses */
5235 for (i = 0; i < header->num_clauses; ++i) {
5236 MonoBasicBlock *try_bb;
5237 MonoExceptionClause *clause = &header->clauses [i];
5238 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5239 try_bb->real_offset = clause->try_offset;
5240 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5241 tblock->real_offset = clause->handler_offset;
5242 tblock->flags |= BB_EXCEPTION_HANDLER;
5244 link_bblock (cfg, try_bb, tblock);
5246 if (*(ip + clause->handler_offset) == CEE_POP)
5247 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5249 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5250 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5251 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5252 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5253 MONO_ADD_INS (tblock, ins);
5255 /* todo: is a fault block unsafe to optimize? */
5256 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5257 tblock->flags |= BB_EXCEPTION_UNSAFE;
5261 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5262 while (p < end) {
5263 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5265 /* catch and filter blocks get the exception object on the stack */
5266 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5267 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5268 MonoInst *dummy_use;
5270 /* mostly like handle_stack_args (), but just sets the input args */
5271 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5272 tblock->in_scount = 1;
5273 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5274 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5277 * Add a dummy use for the exvar so its liveness info will be
5278 * correct.
5280 cfg->cbb = tblock;
5281 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5283 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5284 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5285 tblock->flags |= BB_EXCEPTION_HANDLER;
5286 tblock->real_offset = clause->data.filter_offset;
5287 tblock->in_scount = 1;
5288 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5289 /* The filter block shares the exvar with the handler block */
5290 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5291 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5292 MONO_ADD_INS (tblock, ins);
5296 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5297 clause->data.catch_class &&
5298 cfg->generic_sharing_context &&
5299 mono_class_check_context_used (clause->data.catch_class)) {
5301 * In shared generic code with catch
5302 * clauses containing type variables
5303 * the exception handling code has to
5304 * be able to get to the rgctx.
5305 * Therefore we have to make sure that
5306 * the vtable/mrgctx argument (for
5307 * static or generic methods) or the
5308 * "this" argument (for non-static
5309 * methods) are live.
5311 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5312 mini_method_get_context (method)->method_inst ||
5313 method->klass->valuetype) {
5314 mono_get_vtable_var (cfg);
5315 } else {
5316 MonoInst *dummy_use;
5318 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5322 } else {
5323 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5324 cfg->cbb = start_bblock;
5325 cfg->args = arg_array;
5326 mono_save_args (cfg, sig, inline_args);
5329 /* FIRST CODE BLOCK */
5330 NEW_BBLOCK (cfg, bblock);
5331 bblock->cil_code = ip;
5332 cfg->cbb = bblock;
5333 cfg->ip = ip;
5335 ADD_BBLOCK (cfg, bblock);
5337 if (cfg->method == method) {
5338 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5339 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5340 MONO_INST_NEW (cfg, ins, OP_BREAK);
5341 MONO_ADD_INS (bblock, ins);
5345 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5346 secman = mono_security_manager_get_methods ();
5348 security = (secman && mono_method_has_declsec (method));
5349 /* at this point having security doesn't mean we have any code to generate */
5350 if (security && (cfg->method == method)) {
5351 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5352 * And we do not want to enter the next section (with allocation) if we
5353 * have nothing to generate */
5354 security = mono_declsec_get_demands (method, &actions);
5357 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5358 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5359 if (pinvoke) {
5360 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5361 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5362 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5364 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
5365 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5366 pinvoke = FALSE;
5368 if (custom)
5369 mono_custom_attrs_free (custom);
5371 if (pinvoke) {
5372 custom = mono_custom_attrs_from_class (wrapped->klass);
5373 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5374 pinvoke = FALSE;
5376 if (custom)
5377 mono_custom_attrs_free (custom);
5379 } else {
5380 /* not a P/Invoke after all */
5381 pinvoke = FALSE;
5385 if ((header->init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5386 /* we use a separate basic block for the initialization code */
5387 NEW_BBLOCK (cfg, init_localsbb);
5388 cfg->bb_init = init_localsbb;
5389 init_localsbb->real_offset = cfg->real_offset;
5390 start_bblock->next_bb = init_localsbb;
5391 init_localsbb->next_bb = bblock;
5392 link_bblock (cfg, start_bblock, init_localsbb);
5393 link_bblock (cfg, init_localsbb, bblock);
5395 cfg->cbb = init_localsbb;
5396 } else {
5397 start_bblock->next_bb = bblock;
5398 link_bblock (cfg, start_bblock, bblock);
5401 /* at this point we know, if security is TRUE, that some code needs to be generated */
5402 if (security && (cfg->method == method)) {
5403 MonoInst *args [2];
5405 mono_jit_stats.cas_demand_generation++;
5407 if (actions.demand.blob) {
5408 /* Add code for SecurityAction.Demand */
5409 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5410 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5411 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5412 mono_emit_method_call (cfg, secman->demand, args, NULL);
5414 if (actions.noncasdemand.blob) {
5415 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5416 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5417 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5418 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5419 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5420 mono_emit_method_call (cfg, secman->demand, args, NULL);
5422 if (actions.demandchoice.blob) {
5423 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5424 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5425 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5426 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5427 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5431 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5432 if (pinvoke) {
5433 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5436 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5437 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5438 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5439 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5440 if (!(method->klass && method->klass->image &&
5441 mono_security_core_clr_is_platform_image (method->klass->image))) {
5442 emit_throw_method_access_exception (cfg, method, wrapped, bblock, ip);
5446 if (!method_is_safe (method))
5447 emit_throw_verification_exception (cfg, bblock, ip);
5450 if (header->code_size == 0)
5451 UNVERIFIED;
5453 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5454 ip = err_pos;
5455 UNVERIFIED;
5458 if (cfg->method == method)
5459 mono_debug_init_method (cfg, bblock, breakpoint_id);
5461 for (n = 0; n < header->num_locals; ++n) {
5462 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5463 UNVERIFIED;
5465 class_inits = NULL;
5467 /* We force the vtable variable here for all shared methods
5468 for the possibility that they might show up in a stack
5469 trace where their exact instantiation is needed. */
5470 if (cfg->generic_sharing_context && method == cfg->method) {
5471 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5472 mini_method_get_context (method)->method_inst ||
5473 method->klass->valuetype) {
5474 mono_get_vtable_var (cfg);
5475 } else {
5476 /* FIXME: Is there a better way to do this?
5477 We need the variable live for the duration
5478 of the whole method. */
5479 cfg->args [0]->flags |= MONO_INST_INDIRECT;
5483 /* add a check for this != NULL to inlined methods */
5484 if (is_virtual_call) {
5485 MonoInst *arg_ins;
5487 NEW_ARGLOAD (cfg, arg_ins, 0);
5488 MONO_ADD_INS (cfg->cbb, arg_ins);
5489 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
5490 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, arg_ins->dreg);
5491 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, arg_ins->dreg);
5494 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5495 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5497 ins_flag = 0;
5498 start_new_bblock = 0;
5499 cfg->cbb = bblock;
5500 while (ip < end) {
5502 if (cfg->method == method)
5503 cfg->real_offset = ip - header->code;
5504 else
5505 cfg->real_offset = inline_offset;
5506 cfg->ip = ip;
5508 context_used = 0;
5510 if (start_new_bblock) {
5511 bblock->cil_length = ip - bblock->cil_code;
5512 if (start_new_bblock == 2) {
5513 g_assert (ip == tblock->cil_code);
5514 } else {
5515 GET_BBLOCK (cfg, tblock, ip);
5517 bblock->next_bb = tblock;
5518 bblock = tblock;
5519 cfg->cbb = bblock;
5520 start_new_bblock = 0;
5521 for (i = 0; i < bblock->in_scount; ++i) {
5522 if (cfg->verbose_level > 3)
5523 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5524 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5525 *sp++ = ins;
5527 if (class_inits)
5528 g_slist_free (class_inits);
5529 class_inits = NULL;
5530 } else {
5531 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5532 link_bblock (cfg, bblock, tblock);
5533 if (sp != stack_start) {
5534 handle_stack_args (cfg, stack_start, sp - stack_start);
5535 sp = stack_start;
5536 CHECK_UNVERIFIABLE (cfg);
5538 bblock->next_bb = tblock;
5539 bblock = tblock;
5540 cfg->cbb = bblock;
5541 for (i = 0; i < bblock->in_scount; ++i) {
5542 if (cfg->verbose_level > 3)
5543 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5544 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5545 *sp++ = ins;
5547 g_slist_free (class_inits);
5548 class_inits = NULL;
5552 bblock->real_offset = cfg->real_offset;
5554 if ((cfg->method == method) && cfg->coverage_info) {
5555 guint32 cil_offset = ip - header->code;
5556 cfg->coverage_info->data [cil_offset].cil_code = ip;
5558 /* TODO: Use an increment here */
5559 #if defined(__i386__)
5560 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5561 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5562 ins->inst_imm = 1;
5563 MONO_ADD_INS (cfg->cbb, ins);
5564 #else
5565 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5566 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5567 #endif
5570 if (cfg->verbose_level > 3)
5571 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5573 switch (*ip) {
5574 case CEE_NOP:
5575 if (cfg->keep_cil_nops)
5576 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
5577 else
5578 MONO_INST_NEW (cfg, ins, OP_NOP);
5579 ip++;
5580 MONO_ADD_INS (bblock, ins);
5581 break;
5582 case CEE_BREAK:
5583 MONO_INST_NEW (cfg, ins, OP_BREAK);
5584 ip++;
5585 MONO_ADD_INS (bblock, ins);
5586 break;
5587 case CEE_LDARG_0:
5588 case CEE_LDARG_1:
5589 case CEE_LDARG_2:
5590 case CEE_LDARG_3:
5591 CHECK_STACK_OVF (1);
5592 n = (*ip)-CEE_LDARG_0;
5593 CHECK_ARG (n);
5594 EMIT_NEW_ARGLOAD (cfg, ins, n);
5595 ip++;
5596 *sp++ = ins;
5597 break;
5598 case CEE_LDLOC_0:
5599 case CEE_LDLOC_1:
5600 case CEE_LDLOC_2:
5601 case CEE_LDLOC_3:
5602 CHECK_STACK_OVF (1);
5603 n = (*ip)-CEE_LDLOC_0;
5604 CHECK_LOCAL (n);
5605 EMIT_NEW_LOCLOAD (cfg, ins, n);
5606 ip++;
5607 *sp++ = ins;
5608 break;
5609 case CEE_STLOC_0:
5610 case CEE_STLOC_1:
5611 case CEE_STLOC_2:
5612 case CEE_STLOC_3: {
5613 CHECK_STACK (1);
5614 n = (*ip)-CEE_STLOC_0;
5615 CHECK_LOCAL (n);
5616 --sp;
5617 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5618 UNVERIFIED;
5619 emit_stloc_ir (cfg, sp, header, n);
5620 ++ip;
5621 inline_costs += 1;
5622 break;
5624 case CEE_LDARG_S:
5625 CHECK_OPSIZE (2);
5626 CHECK_STACK_OVF (1);
5627 n = ip [1];
5628 CHECK_ARG (n);
5629 EMIT_NEW_ARGLOAD (cfg, ins, n);
5630 *sp++ = ins;
5631 ip += 2;
5632 break;
5633 case CEE_LDARGA_S:
5634 CHECK_OPSIZE (2);
5635 CHECK_STACK_OVF (1);
5636 n = ip [1];
5637 CHECK_ARG (n);
5638 NEW_ARGLOADA (cfg, ins, n);
5639 MONO_ADD_INS (cfg->cbb, ins);
5640 *sp++ = ins;
5641 ip += 2;
5642 break;
5643 case CEE_STARG_S:
5644 CHECK_OPSIZE (2);
5645 CHECK_STACK (1);
5646 --sp;
5647 n = ip [1];
5648 CHECK_ARG (n);
5649 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5650 UNVERIFIED;
5651 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5652 ip += 2;
5653 break;
5654 case CEE_LDLOC_S:
5655 CHECK_OPSIZE (2);
5656 CHECK_STACK_OVF (1);
5657 n = ip [1];
5658 CHECK_LOCAL (n);
5659 EMIT_NEW_LOCLOAD (cfg, ins, n);
5660 *sp++ = ins;
5661 ip += 2;
5662 break;
5663 case CEE_LDLOCA_S: {
5664 unsigned char *tmp_ip;
5665 CHECK_OPSIZE (2);
5666 CHECK_STACK_OVF (1);
5667 CHECK_LOCAL (ip [1]);
5669 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
5670 ip = tmp_ip;
5671 inline_costs += 1;
5672 break;
5675 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
5676 *sp++ = ins;
5677 ip += 2;
5678 break;
5680 case CEE_STLOC_S:
5681 CHECK_OPSIZE (2);
5682 CHECK_STACK (1);
5683 --sp;
5684 CHECK_LOCAL (ip [1]);
5685 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
5686 UNVERIFIED;
5687 emit_stloc_ir (cfg, sp, header, ip [1]);
5688 ip += 2;
5689 inline_costs += 1;
5690 break;
5691 case CEE_LDNULL:
5692 CHECK_STACK_OVF (1);
5693 EMIT_NEW_PCONST (cfg, ins, NULL);
5694 ins->type = STACK_OBJ;
5695 ++ip;
5696 *sp++ = ins;
5697 break;
5698 case CEE_LDC_I4_M1:
5699 CHECK_STACK_OVF (1);
5700 EMIT_NEW_ICONST (cfg, ins, -1);
5701 ++ip;
5702 *sp++ = ins;
5703 break;
5704 case CEE_LDC_I4_0:
5705 case CEE_LDC_I4_1:
5706 case CEE_LDC_I4_2:
5707 case CEE_LDC_I4_3:
5708 case CEE_LDC_I4_4:
5709 case CEE_LDC_I4_5:
5710 case CEE_LDC_I4_6:
5711 case CEE_LDC_I4_7:
5712 case CEE_LDC_I4_8:
5713 CHECK_STACK_OVF (1);
5714 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
5715 ++ip;
5716 *sp++ = ins;
5717 break;
5718 case CEE_LDC_I4_S:
5719 CHECK_OPSIZE (2);
5720 CHECK_STACK_OVF (1);
5721 ++ip;
5722 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
5723 ++ip;
5724 *sp++ = ins;
5725 break;
5726 case CEE_LDC_I4:
5727 CHECK_OPSIZE (5);
5728 CHECK_STACK_OVF (1);
5729 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
5730 ip += 5;
5731 *sp++ = ins;
5732 break;
5733 case CEE_LDC_I8:
5734 CHECK_OPSIZE (9);
5735 CHECK_STACK_OVF (1);
5736 MONO_INST_NEW (cfg, ins, OP_I8CONST);
5737 ins->type = STACK_I8;
5738 ins->dreg = alloc_dreg (cfg, STACK_I8);
5739 ++ip;
5740 ins->inst_l = (gint64)read64 (ip);
5741 MONO_ADD_INS (bblock, ins);
5742 ip += 8;
5743 *sp++ = ins;
5744 break;
5745 case CEE_LDC_R4: {
5746 float *f;
5747 /* FIXME: we should really allocate this only late in the compilation process */
5748 f = mono_domain_alloc (cfg->domain, sizeof (float));
5749 CHECK_OPSIZE (5);
5750 CHECK_STACK_OVF (1);
5751 MONO_INST_NEW (cfg, ins, OP_R4CONST);
5752 ins->type = STACK_R8;
5753 ins->dreg = alloc_dreg (cfg, STACK_R8);
5754 ++ip;
5755 readr4 (ip, f);
5756 ins->inst_p0 = f;
5757 MONO_ADD_INS (bblock, ins);
5759 ip += 4;
5760 *sp++ = ins;
5761 break;
5763 case CEE_LDC_R8: {
5764 double *d;
5765 /* FIXME: we should really allocate this only late in the compilation process */
5766 d = mono_domain_alloc (cfg->domain, sizeof (double));
5767 CHECK_OPSIZE (9);
5768 CHECK_STACK_OVF (1);
5769 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5770 ins->type = STACK_R8;
5771 ins->dreg = alloc_dreg (cfg, STACK_R8);
5772 ++ip;
5773 readr8 (ip, d);
5774 ins->inst_p0 = d;
5775 MONO_ADD_INS (bblock, ins);
5777 ip += 8;
5778 *sp++ = ins;
5779 break;
5781 case CEE_DUP: {
5782 MonoInst *temp, *store;
5783 CHECK_STACK (1);
5784 CHECK_STACK_OVF (1);
5785 sp--;
5786 ins = *sp;
5788 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
5789 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
5791 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5792 *sp++ = ins;
5794 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5795 *sp++ = ins;
5797 ++ip;
5798 inline_costs += 2;
5799 break;
5801 case CEE_POP:
5802 CHECK_STACK (1);
5803 ip++;
5804 --sp;
5806 #ifdef __i386__
5807 if (sp [0]->type == STACK_R8)
5808 /* we need to pop the value from the x86 FP stack */
5809 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
5810 #endif
5811 break;
5812 case CEE_JMP: {
5813 MonoCallInst *call;
5815 INLINE_FAILURE;
5817 CHECK_OPSIZE (5);
5818 if (stack_start != sp)
5819 UNVERIFIED;
5820 token = read32 (ip + 1);
5821 /* FIXME: check the signature matches */
5822 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5824 if (!cmethod)
5825 goto load_error;
5827 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
5828 GENERIC_SHARING_FAILURE (CEE_JMP);
5830 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5831 CHECK_CFG_EXCEPTION;
5833 #ifdef __x86_64__
5835 MonoMethodSignature *fsig = mono_method_signature (cmethod);
5836 int i, n;
5838 /* Handle tail calls similarly to calls */
5839 n = fsig->param_count + fsig->hasthis;
5841 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
5842 call->method = cmethod;
5843 call->tail_call = TRUE;
5844 call->signature = mono_method_signature (cmethod);
5845 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
5846 call->inst.inst_p0 = cmethod;
5847 for (i = 0; i < n; ++i)
5848 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
5850 mono_arch_emit_call (cfg, call);
5851 MONO_ADD_INS (bblock, (MonoInst*)call);
5853 #else
5854 for (i = 0; i < num_args; ++i)
5855 /* Prevent arguments from being optimized away */
5856 arg_array [i]->flags |= MONO_INST_VOLATILE;
5858 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
5859 ins = (MonoInst*)call;
5860 ins->inst_p0 = cmethod;
5861 MONO_ADD_INS (bblock, ins);
5862 #endif
5864 ip += 5;
5865 start_new_bblock = 1;
5866 break;
5868 case CEE_CALLI:
5869 case CEE_CALL:
5870 case CEE_CALLVIRT: {
5871 MonoInst *addr = NULL;
5872 MonoMethodSignature *fsig = NULL;
5873 int array_rank = 0;
5874 int virtual = *ip == CEE_CALLVIRT;
5875 int calli = *ip == CEE_CALLI;
5876 gboolean pass_imt_from_rgctx = FALSE;
5877 MonoInst *imt_arg = NULL;
5878 gboolean pass_vtable = FALSE;
5879 gboolean pass_mrgctx = FALSE;
5880 MonoInst *vtable_arg = NULL;
5881 gboolean check_this = FALSE;
5883 CHECK_OPSIZE (5);
5884 token = read32 (ip + 1);
5886 if (calli) {
5887 cmethod = NULL;
5888 CHECK_STACK (1);
5889 --sp;
5890 addr = *sp;
5891 if (method->wrapper_type != MONO_WRAPPER_NONE)
5892 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
5893 else
5894 fsig = mono_metadata_parse_signature (image, token);
5896 n = fsig->param_count + fsig->hasthis;
5897 } else {
5898 MonoMethod *cil_method;
5900 if (method->wrapper_type != MONO_WRAPPER_NONE) {
5901 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
5902 cil_method = cmethod;
5903 } else if (constrained_call) {
5904 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
5906 * This is needed since get_method_constrained can't find
5907 * the method in klass representing a type var.
5908 * The type var is guaranteed to be a reference type in this
5909 * case.
5911 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5912 cil_method = cmethod;
5913 g_assert (!cmethod->klass->valuetype);
5914 } else {
5915 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
5917 } else {
5918 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5919 cil_method = cmethod;
5922 if (!cmethod)
5923 goto load_error;
5924 if (!dont_verify && !cfg->skip_visibility) {
5925 MonoMethod *target_method = cil_method;
5926 if (method->is_inflated) {
5927 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
5929 if (!mono_method_can_access_method (method_definition, target_method) &&
5930 !mono_method_can_access_method (method, cil_method))
5931 METHOD_ACCESS_FAILURE;
5934 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
5935 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
5937 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
5938 /* MS.NET seems to silently convert this to a callvirt */
5939 virtual = 1;
5941 if (!cmethod->klass->inited)
5942 if (!mono_class_init (cmethod->klass))
5943 goto load_error;
5945 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
5946 mini_class_is_system_array (cmethod->klass)) {
5947 array_rank = cmethod->klass->rank;
5948 fsig = mono_method_signature (cmethod);
5949 } else {
5950 if (mono_method_signature (cmethod)->pinvoke) {
5951 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
5952 check_for_pending_exc, FALSE);
5953 fsig = mono_method_signature (wrapper);
5954 } else if (constrained_call) {
5955 fsig = mono_method_signature (cmethod);
5956 } else {
5957 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
5961 mono_save_token_info (cfg, image, token, cil_method);
5963 n = fsig->param_count + fsig->hasthis;
5965 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
5966 if (check_linkdemand (cfg, method, cmethod))
5967 INLINE_FAILURE;
5968 CHECK_CFG_EXCEPTION;
5971 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
5972 g_assert_not_reached ();
5975 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
5976 UNVERIFIED;
5978 if (!cfg->generic_sharing_context && cmethod)
5979 g_assert (!mono_method_check_context_used (cmethod));
5981 CHECK_STACK (n);
5983 //g_assert (!virtual || fsig->hasthis);
5985 sp -= n;
5987 if (constrained_call) {
5989 * We have the `constrained.' prefix opcode.
5991 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
5992 int dreg;
5995 * The type parameter is instantiated as a valuetype,
5996 * but that type doesn't override the method we're
5997 * calling, so we need to box `this'.
5999 dreg = alloc_dreg (cfg, STACK_VTYPE);
6000 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADV_MEMBASE, dreg, sp [0]->dreg, 0);
6001 ins->klass = constrained_call;
6002 sp [0] = handle_box (cfg, ins, constrained_call);
6003 } else if (!constrained_call->valuetype) {
6004 int dreg = alloc_preg (cfg);
6007 * The type parameter is instantiated as a reference
6008 * type. We have a managed pointer on the stack, so
6009 * we need to dereference it here.
6011 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6012 ins->type = STACK_OBJ;
6013 sp [0] = ins;
6014 } else if (cmethod->klass->valuetype)
6015 virtual = 0;
6016 constrained_call = NULL;
6019 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6020 UNVERIFIED;
6023 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6024 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6025 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6026 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6027 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6030 * Pass vtable iff target method might
6031 * be shared, which means that sharing
6032 * is enabled for its class and its
6033 * context is sharable (and it's not a
6034 * generic method).
6036 if (sharing_enabled && context_sharable &&
6037 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6038 pass_vtable = TRUE;
6041 if (cmethod && mini_method_get_context (cmethod) &&
6042 mini_method_get_context (cmethod)->method_inst) {
6043 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6044 MonoGenericContext *context = mini_method_get_context (cmethod);
6045 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6047 g_assert (!pass_vtable);
6049 if (sharing_enabled && context_sharable)
6050 pass_mrgctx = TRUE;
6053 if (cfg->generic_sharing_context && cmethod) {
6054 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6056 context_used = mono_method_check_context_used (cmethod);
6058 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6059 /* Generic method interface
6060 calls are resolved via a
6061 helper function and don't
6062 need an imt. */
6063 if (!cmethod_context || !cmethod_context->method_inst)
6064 pass_imt_from_rgctx = TRUE;
6068 * If a shared method calls another
6069 * shared method then the caller must
6070 * have a generic sharing context
6071 * because the magic trampoline
6072 * requires it. FIXME: We shouldn't
6073 * have to force the vtable/mrgctx
6074 * variable here. Instead there
6075 * should be a flag in the cfg to
6076 * request a generic sharing context.
6078 if (context_used &&
6079 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6080 mono_get_vtable_var (cfg);
6083 if (pass_vtable) {
6084 if (context_used) {
6085 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6086 } else {
6087 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6089 CHECK_TYPELOAD (cmethod->klass);
6090 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6094 if (pass_mrgctx) {
6095 g_assert (!vtable_arg);
6097 if (context_used) {
6098 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6099 } else {
6100 EMIT_NEW_METHOD_RGCTX_CONST (cfg, vtable_arg, cmethod);
6103 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6104 MONO_METHOD_IS_FINAL (cmethod)) {
6105 if (virtual)
6106 check_this = TRUE;
6107 virtual = 0;
6111 if (pass_imt_from_rgctx) {
6112 g_assert (!pass_vtable);
6113 g_assert (cmethod);
6115 imt_arg = emit_get_rgctx_method (cfg, context_used,
6116 cmethod, MONO_RGCTX_INFO_METHOD);
6119 if (check_this) {
6120 MonoInst *check;
6122 MONO_INST_NEW (cfg, check, OP_CHECK_THIS);
6123 check->sreg1 = sp [0]->dreg;
6124 MONO_ADD_INS (cfg->cbb, check);
6127 /* Calling virtual generic methods */
6128 if (cmethod && virtual &&
6129 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6130 !(MONO_METHOD_IS_FINAL (cmethod) &&
6131 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6132 mono_method_signature (cmethod)->generic_param_count) {
6133 MonoInst *this_temp, *this_arg_temp, *store;
6134 MonoInst *iargs [4];
6136 g_assert (mono_method_signature (cmethod)->is_inflated);
6138 /* Prevent inlining of methods that contain indirect calls */
6139 INLINE_FAILURE;
6141 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK
6142 if (cmethod->wrapper_type == MONO_WRAPPER_NONE) {
6143 g_assert (!imt_arg);
6144 if (context_used) {
6145 imt_arg = emit_get_rgctx_method (cfg, context_used,
6146 cmethod, MONO_RGCTX_INFO_METHOD);
6148 } else {
6149 g_assert (cmethod->is_inflated);
6150 EMIT_NEW_METHODCONST (cfg, imt_arg, cmethod);
6152 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg);
6153 } else
6154 #endif
6156 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6157 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6158 MONO_ADD_INS (bblock, store);
6160 /* FIXME: This should be a managed pointer */
6161 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6163 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6164 if (context_used) {
6165 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6166 cmethod, MONO_RGCTX_INFO_METHOD);
6167 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6168 addr = mono_emit_jit_icall (cfg,
6169 mono_helper_compile_generic_method, iargs);
6170 } else {
6171 EMIT_NEW_METHODCONST (cfg, iargs [1], cmethod);
6172 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6173 addr = mono_emit_jit_icall (cfg, mono_helper_compile_generic_method, iargs);
6176 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6178 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6181 if (!MONO_TYPE_IS_VOID (fsig->ret))
6182 *sp++ = ins;
6184 ip += 5;
6185 ins_flag = 0;
6186 break;
6189 /* Tail prefix */
6190 /* FIXME: runtime generic context pointer for jumps? */
6191 /* FIXME: handle this for generic sharing eventually */
6192 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) &&
6193 (mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)))) {
6194 MonoCallInst *call;
6196 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6197 INLINE_FAILURE;
6199 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6200 call->tail_call = TRUE;
6201 call->method = cmethod;
6202 call->signature = mono_method_signature (cmethod);
6204 #ifdef __x86_64__
6205 /* Handle tail calls similarly to calls */
6206 call->inst.opcode = OP_TAILCALL;
6207 call->args = sp;
6208 mono_arch_emit_call (cfg, call);
6209 #else
6211 * We implement tail calls by storing the actual arguments into the
6212 * argument variables, then emitting a CEE_JMP.
6214 for (i = 0; i < n; ++i) {
6215 /* Prevent argument from being register allocated */
6216 arg_array [i]->flags |= MONO_INST_VOLATILE;
6217 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6219 #endif
6221 ins = (MonoInst*)call;
6222 ins->inst_p0 = cmethod;
6223 ins->inst_p1 = arg_array [0];
6224 MONO_ADD_INS (bblock, ins);
6225 link_bblock (cfg, bblock, end_bblock);
6226 start_new_bblock = 1;
6227 /* skip CEE_RET as well */
6228 ip += 6;
6229 ins_flag = 0;
6230 break;
6233 /* Conversion to a JIT intrinsic */
6234 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6235 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6236 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6237 *sp = ins;
6238 sp++;
6241 ip += 5;
6242 ins_flag = 0;
6243 break;
6246 /* Inlining */
6247 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6248 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
6249 mono_method_check_inlining (cfg, cmethod) &&
6250 !g_list_find (dont_inline, cmethod)) {
6251 int costs;
6252 gboolean allways = FALSE;
6254 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6255 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6256 /* Prevent inlining of methods that call wrappers */
6257 INLINE_FAILURE;
6258 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6259 allways = TRUE;
6262 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6263 ip += 5;
6264 cfg->real_offset += 5;
6265 bblock = cfg->cbb;
6267 if (!MONO_TYPE_IS_VOID (fsig->ret))
6268 /* *sp is already set by inline_method */
6269 sp++;
6271 inline_costs += costs;
6272 ins_flag = 0;
6273 break;
6277 inline_costs += 10 * num_calls++;
6279 /* Tail recursion elimination */
6280 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6281 gboolean has_vtargs = FALSE;
6282 int i;
6284 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6285 INLINE_FAILURE;
6287 /* keep it simple */
6288 for (i = fsig->param_count - 1; i >= 0; i--) {
6289 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6290 has_vtargs = TRUE;
6293 if (!has_vtargs) {
6294 for (i = 0; i < n; ++i)
6295 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6296 MONO_INST_NEW (cfg, ins, OP_BR);
6297 MONO_ADD_INS (bblock, ins);
6298 tblock = start_bblock->out_bb [0];
6299 link_bblock (cfg, bblock, tblock);
6300 ins->inst_target_bb = tblock;
6301 start_new_bblock = 1;
6303 /* skip the CEE_RET, too */
6304 if (ip_in_bb (cfg, bblock, ip + 5))
6305 ip += 6;
6306 else
6307 ip += 5;
6309 ins_flag = 0;
6310 break;
6314 /* Generic sharing */
6315 /* FIXME: only do this for generic methods if
6316 they are not shared! */
6317 if (context_used && !imt_arg && !array_rank &&
6318 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6319 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6320 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
6321 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6322 INLINE_FAILURE;
6324 g_assert (cfg->generic_sharing_context && cmethod);
6325 g_assert (!addr);
6328 * We are compiling a call to a
6329 * generic method from shared code,
6330 * which means that we have to look up
6331 * the method in the rgctx and do an
6332 * indirect call.
6334 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6337 /* Indirect calls */
6338 if (addr) {
6339 g_assert (!imt_arg);
6341 if (*ip == CEE_CALL)
6342 g_assert (context_used);
6343 else if (*ip == CEE_CALLI)
6344 g_assert (!vtable_arg);
6345 else
6346 /* FIXME: what the hell is this??? */
6347 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6348 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6350 /* Prevent inlining of methods with indirect calls */
6351 INLINE_FAILURE;
6353 if (vtable_arg) {
6354 #ifdef MONO_ARCH_RGCTX_REG
6355 MonoCallInst *call;
6356 int rgctx_reg = mono_alloc_preg (cfg);
6358 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6359 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6360 call = (MonoCallInst*)ins;
6361 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
6362 cfg->uses_rgctx_reg = TRUE;
6363 #else
6364 NOT_IMPLEMENTED;
6365 #endif
6366 } else {
6367 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6369 * Instead of emitting an indirect call, emit a direct call
6370 * with the contents of the aotconst as the patch info.
6372 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
6373 NULLIFY_INS (addr);
6374 } else {
6375 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6378 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6379 if (fsig->pinvoke && !fsig->ret->byref) {
6380 int widen_op = -1;
6383 * Native code might return non register sized integers
6384 * without initializing the upper bits.
6386 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
6387 case OP_LOADI1_MEMBASE:
6388 widen_op = OP_ICONV_TO_I1;
6389 break;
6390 case OP_LOADU1_MEMBASE:
6391 widen_op = OP_ICONV_TO_U1;
6392 break;
6393 case OP_LOADI2_MEMBASE:
6394 widen_op = OP_ICONV_TO_I2;
6395 break;
6396 case OP_LOADU2_MEMBASE:
6397 widen_op = OP_ICONV_TO_U2;
6398 break;
6399 default:
6400 break;
6403 if (widen_op != -1) {
6404 int dreg = alloc_preg (cfg);
6405 MonoInst *widen;
6407 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
6408 widen->type = ins->type;
6409 ins = widen;
6413 *sp++ = ins;
6416 ip += 5;
6417 ins_flag = 0;
6418 break;
6421 /* Array methods */
6422 if (array_rank) {
6423 MonoInst *addr;
6425 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6426 if (sp [fsig->param_count]->type == STACK_OBJ) {
6427 MonoInst *iargs [2];
6429 iargs [0] = sp [0];
6430 iargs [1] = sp [fsig->param_count];
6432 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6435 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6436 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6437 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6438 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6440 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6442 *sp++ = ins;
6443 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6444 if (!cmethod->klass->element_class->valuetype && !readonly)
6445 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6447 readonly = FALSE;
6448 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6449 *sp++ = addr;
6450 } else {
6451 g_assert_not_reached ();
6454 ip += 5;
6455 ins_flag = 0;
6456 break;
6459 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6460 if (ins) {
6461 if (!MONO_TYPE_IS_VOID (fsig->ret))
6462 *sp++ = ins;
6464 ip += 5;
6465 ins_flag = 0;
6466 break;
6469 /* Common call */
6470 INLINE_FAILURE;
6471 if (vtable_arg) {
6472 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
6473 NULL, vtable_arg);
6474 } else if (imt_arg) {
6475 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6476 } else {
6477 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6480 if (!MONO_TYPE_IS_VOID (fsig->ret))
6481 *sp++ = ins;
6483 ip += 5;
6484 ins_flag = 0;
6485 break;
6487 case CEE_RET:
6488 if (cfg->method != method) {
6489 /* return from inlined method */
6491 * If in_count == 0, that means the ret is unreachable due to
6492 * being preceeded by a throw. In that case, inline_method () will
6493 * handle setting the return value
6494 * (test case: test_0_inline_throw ()).
6496 if (return_var && cfg->cbb->in_count) {
6497 MonoInst *store;
6498 CHECK_STACK (1);
6499 --sp;
6500 //g_assert (returnvar != -1);
6501 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6502 cfg->ret_var_set = TRUE;
6504 } else {
6505 if (cfg->ret) {
6506 MonoType *ret_type = mono_method_signature (method)->ret;
6508 g_assert (!return_var);
6509 CHECK_STACK (1);
6510 --sp;
6511 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6512 MonoInst *ret_addr;
6514 if (!cfg->vret_addr) {
6515 MonoInst *ins;
6517 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6518 } else {
6519 EMIT_NEW_RETLOADA (cfg, ret_addr);
6521 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6522 ins->klass = mono_class_from_mono_type (ret_type);
6524 } else {
6525 #ifdef MONO_ARCH_SOFT_FLOAT
6526 if (!ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6527 MonoInst *iargs [1];
6528 MonoInst *conv;
6530 iargs [0] = *sp;
6531 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6532 mono_arch_emit_setret (cfg, method, conv);
6533 } else {
6534 mono_arch_emit_setret (cfg, method, *sp);
6536 #else
6537 mono_arch_emit_setret (cfg, method, *sp);
6538 #endif
6542 if (sp != stack_start)
6543 UNVERIFIED;
6544 MONO_INST_NEW (cfg, ins, OP_BR);
6545 ip++;
6546 ins->inst_target_bb = end_bblock;
6547 MONO_ADD_INS (bblock, ins);
6548 link_bblock (cfg, bblock, end_bblock);
6549 start_new_bblock = 1;
6550 break;
6551 case CEE_BR_S:
6552 CHECK_OPSIZE (2);
6553 MONO_INST_NEW (cfg, ins, OP_BR);
6554 ip++;
6555 target = ip + 1 + (signed char)(*ip);
6556 ++ip;
6557 GET_BBLOCK (cfg, tblock, target);
6558 link_bblock (cfg, bblock, tblock);
6559 ins->inst_target_bb = tblock;
6560 if (sp != stack_start) {
6561 handle_stack_args (cfg, stack_start, sp - stack_start);
6562 sp = stack_start;
6563 CHECK_UNVERIFIABLE (cfg);
6565 MONO_ADD_INS (bblock, ins);
6566 start_new_bblock = 1;
6567 inline_costs += BRANCH_COST;
6568 break;
6569 case CEE_BEQ_S:
6570 case CEE_BGE_S:
6571 case CEE_BGT_S:
6572 case CEE_BLE_S:
6573 case CEE_BLT_S:
6574 case CEE_BNE_UN_S:
6575 case CEE_BGE_UN_S:
6576 case CEE_BGT_UN_S:
6577 case CEE_BLE_UN_S:
6578 case CEE_BLT_UN_S:
6579 CHECK_OPSIZE (2);
6580 CHECK_STACK (2);
6581 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6582 ip++;
6583 target = ip + 1 + *(signed char*)ip;
6584 ip++;
6586 ADD_BINCOND (NULL);
6588 sp = stack_start;
6589 inline_costs += BRANCH_COST;
6590 break;
6591 case CEE_BR:
6592 CHECK_OPSIZE (5);
6593 MONO_INST_NEW (cfg, ins, OP_BR);
6594 ip++;
6596 target = ip + 4 + (gint32)read32(ip);
6597 ip += 4;
6598 GET_BBLOCK (cfg, tblock, target);
6599 link_bblock (cfg, bblock, tblock);
6600 ins->inst_target_bb = tblock;
6601 if (sp != stack_start) {
6602 handle_stack_args (cfg, stack_start, sp - stack_start);
6603 sp = stack_start;
6604 CHECK_UNVERIFIABLE (cfg);
6607 MONO_ADD_INS (bblock, ins);
6609 start_new_bblock = 1;
6610 inline_costs += BRANCH_COST;
6611 break;
6612 case CEE_BRFALSE_S:
6613 case CEE_BRTRUE_S:
6614 case CEE_BRFALSE:
6615 case CEE_BRTRUE: {
6616 MonoInst *cmp;
6617 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
6618 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
6619 guint32 opsize = is_short ? 1 : 4;
6621 CHECK_OPSIZE (opsize);
6622 CHECK_STACK (1);
6623 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
6624 UNVERIFIED;
6625 ip ++;
6626 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
6627 ip += opsize;
6629 sp--;
6631 GET_BBLOCK (cfg, tblock, target);
6632 link_bblock (cfg, bblock, tblock);
6633 GET_BBLOCK (cfg, tblock, ip);
6634 link_bblock (cfg, bblock, tblock);
6636 if (sp != stack_start) {
6637 handle_stack_args (cfg, stack_start, sp - stack_start);
6638 CHECK_UNVERIFIABLE (cfg);
6641 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
6642 cmp->sreg1 = sp [0]->dreg;
6643 type_from_op (cmp, sp [0], NULL);
6644 CHECK_TYPE (cmp);
6646 #if SIZEOF_REGISTER == 4
6647 if (cmp->opcode == OP_LCOMPARE_IMM) {
6648 /* Convert it to OP_LCOMPARE */
6649 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6650 ins->type = STACK_I8;
6651 ins->dreg = alloc_dreg (cfg, STACK_I8);
6652 ins->inst_l = 0;
6653 MONO_ADD_INS (bblock, ins);
6654 cmp->opcode = OP_LCOMPARE;
6655 cmp->sreg2 = ins->dreg;
6657 #endif
6658 MONO_ADD_INS (bblock, cmp);
6660 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
6661 type_from_op (ins, sp [0], NULL);
6662 MONO_ADD_INS (bblock, ins);
6663 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
6664 GET_BBLOCK (cfg, tblock, target);
6665 ins->inst_true_bb = tblock;
6666 GET_BBLOCK (cfg, tblock, ip);
6667 ins->inst_false_bb = tblock;
6668 start_new_bblock = 2;
6670 sp = stack_start;
6671 inline_costs += BRANCH_COST;
6672 break;
6674 case CEE_BEQ:
6675 case CEE_BGE:
6676 case CEE_BGT:
6677 case CEE_BLE:
6678 case CEE_BLT:
6679 case CEE_BNE_UN:
6680 case CEE_BGE_UN:
6681 case CEE_BGT_UN:
6682 case CEE_BLE_UN:
6683 case CEE_BLT_UN:
6684 CHECK_OPSIZE (5);
6685 CHECK_STACK (2);
6686 MONO_INST_NEW (cfg, ins, *ip);
6687 ip++;
6688 target = ip + 4 + (gint32)read32(ip);
6689 ip += 4;
6691 ADD_BINCOND (NULL);
6693 sp = stack_start;
6694 inline_costs += BRANCH_COST;
6695 break;
6696 case CEE_SWITCH: {
6697 MonoInst *src1;
6698 MonoBasicBlock **targets;
6699 MonoBasicBlock *default_bblock;
6700 MonoJumpInfoBBTable *table;
6701 int offset_reg = alloc_preg (cfg);
6702 int target_reg = alloc_preg (cfg);
6703 int table_reg = alloc_preg (cfg);
6704 int sum_reg = alloc_preg (cfg);
6705 gboolean use_op_switch;
6707 CHECK_OPSIZE (5);
6708 CHECK_STACK (1);
6709 n = read32 (ip + 1);
6710 --sp;
6711 src1 = sp [0];
6712 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
6713 UNVERIFIED;
6715 ip += 5;
6716 CHECK_OPSIZE (n * sizeof (guint32));
6717 target = ip + n * sizeof (guint32);
6719 GET_BBLOCK (cfg, default_bblock, target);
6721 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
6722 for (i = 0; i < n; ++i) {
6723 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
6724 targets [i] = tblock;
6725 ip += 4;
6728 if (sp != stack_start) {
6730 * Link the current bb with the targets as well, so handle_stack_args
6731 * will set their in_stack correctly.
6733 link_bblock (cfg, bblock, default_bblock);
6734 for (i = 0; i < n; ++i)
6735 link_bblock (cfg, bblock, targets [i]);
6737 handle_stack_args (cfg, stack_start, sp - stack_start);
6738 sp = stack_start;
6739 CHECK_UNVERIFIABLE (cfg);
6742 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
6743 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
6744 bblock = cfg->cbb;
6746 for (i = 0; i < n; ++i)
6747 link_bblock (cfg, bblock, targets [i]);
6749 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
6750 table->table = targets;
6751 table->table_size = n;
6753 use_op_switch = FALSE;
6754 #ifdef __arm__
6755 /* ARM implements SWITCH statements differently */
6756 /* FIXME: Make it use the generic implementation */
6757 if (!cfg->compile_aot)
6758 use_op_switch = TRUE;
6759 #endif
6761 if (use_op_switch) {
6762 MONO_INST_NEW (cfg, ins, OP_SWITCH);
6763 ins->sreg1 = src1->dreg;
6764 ins->inst_p0 = table;
6765 ins->inst_many_bb = targets;
6766 ins->klass = GUINT_TO_POINTER (n);
6767 MONO_ADD_INS (cfg->cbb, ins);
6768 } else {
6769 if (sizeof (gpointer) == 8)
6770 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
6771 else
6772 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
6774 #if SIZEOF_REGISTER == 8
6775 /* The upper word might not be zero, and we add it to a 64 bit address later */
6776 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
6777 #endif
6779 if (cfg->compile_aot) {
6780 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
6781 } else {
6782 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
6783 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
6784 ins->inst_p0 = table;
6785 ins->dreg = table_reg;
6786 MONO_ADD_INS (cfg->cbb, ins);
6789 /* FIXME: Use load_memindex */
6790 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
6791 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
6792 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
6794 start_new_bblock = 1;
6795 inline_costs += (BRANCH_COST * 2);
6796 break;
6798 case CEE_LDIND_I1:
6799 case CEE_LDIND_U1:
6800 case CEE_LDIND_I2:
6801 case CEE_LDIND_U2:
6802 case CEE_LDIND_I4:
6803 case CEE_LDIND_U4:
6804 case CEE_LDIND_I8:
6805 case CEE_LDIND_I:
6806 case CEE_LDIND_R4:
6807 case CEE_LDIND_R8:
6808 case CEE_LDIND_REF:
6809 CHECK_STACK (1);
6810 --sp;
6812 switch (*ip) {
6813 case CEE_LDIND_R4:
6814 case CEE_LDIND_R8:
6815 dreg = alloc_freg (cfg);
6816 break;
6817 case CEE_LDIND_I8:
6818 dreg = alloc_lreg (cfg);
6819 break;
6820 default:
6821 dreg = alloc_preg (cfg);
6824 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
6825 ins->type = ldind_type [*ip - CEE_LDIND_I1];
6826 ins->flags |= ins_flag;
6827 ins_flag = 0;
6828 MONO_ADD_INS (bblock, ins);
6829 *sp++ = ins;
6830 ++ip;
6831 break;
6832 case CEE_STIND_REF:
6833 case CEE_STIND_I1:
6834 case CEE_STIND_I2:
6835 case CEE_STIND_I4:
6836 case CEE_STIND_I8:
6837 case CEE_STIND_R4:
6838 case CEE_STIND_R8:
6839 case CEE_STIND_I:
6840 CHECK_STACK (2);
6841 sp -= 2;
6843 #if HAVE_WRITE_BARRIERS
6844 if (*ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0))) {
6845 /* insert call to write barrier */
6846 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
6847 mono_emit_method_call (cfg, write_barrier, sp, NULL);
6848 ins_flag = 0;
6849 ip++;
6850 break;
6852 #endif
6854 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
6855 ins->flags |= ins_flag;
6856 ins_flag = 0;
6857 MONO_ADD_INS (bblock, ins);
6858 inline_costs += 1;
6859 ++ip;
6860 break;
6862 case CEE_MUL:
6863 CHECK_STACK (2);
6865 MONO_INST_NEW (cfg, ins, (*ip));
6866 sp -= 2;
6867 ins->sreg1 = sp [0]->dreg;
6868 ins->sreg2 = sp [1]->dreg;
6869 type_from_op (ins, sp [0], sp [1]);
6870 CHECK_TYPE (ins);
6871 ins->dreg = alloc_dreg ((cfg), (ins)->type);
6873 /* Use the immediate opcodes if possible */
6874 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
6875 int imm_opcode = mono_op_to_op_imm (ins->opcode);
6876 if (imm_opcode != -1) {
6877 ins->opcode = imm_opcode;
6878 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
6879 ins->sreg2 = -1;
6881 sp [1]->opcode = OP_NOP;
6885 MONO_ADD_INS ((cfg)->cbb, (ins));
6886 *sp++ = ins;
6888 mono_decompose_opcode (cfg, ins);
6889 ip++;
6890 break;
6891 case CEE_ADD:
6892 case CEE_SUB:
6893 case CEE_DIV:
6894 case CEE_DIV_UN:
6895 case CEE_REM:
6896 case CEE_REM_UN:
6897 case CEE_AND:
6898 case CEE_OR:
6899 case CEE_XOR:
6900 case CEE_SHL:
6901 case CEE_SHR:
6902 case CEE_SHR_UN:
6903 CHECK_STACK (2);
6905 MONO_INST_NEW (cfg, ins, (*ip));
6906 sp -= 2;
6907 ins->sreg1 = sp [0]->dreg;
6908 ins->sreg2 = sp [1]->dreg;
6909 type_from_op (ins, sp [0], sp [1]);
6910 CHECK_TYPE (ins);
6911 ADD_WIDEN_OP (ins, sp [0], sp [1]);
6912 ins->dreg = alloc_dreg ((cfg), (ins)->type);
6914 /* FIXME: Pass opcode to is_inst_imm */
6916 /* Use the immediate opcodes if possible */
6917 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
6918 int imm_opcode;
6920 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
6921 if (imm_opcode != -1) {
6922 ins->opcode = imm_opcode;
6923 if (sp [1]->opcode == OP_I8CONST) {
6924 #if SIZEOF_REGISTER == 8
6925 ins->inst_imm = sp [1]->inst_l;
6926 #else
6927 ins->inst_ls_word = sp [1]->inst_ls_word;
6928 ins->inst_ms_word = sp [1]->inst_ms_word;
6929 #endif
6931 else
6932 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
6933 ins->sreg2 = -1;
6935 /* Might be followed by an instruction added by ADD_WIDEN_OP */
6936 if (sp [1]->next == NULL)
6937 sp [1]->opcode = OP_NOP;
6940 MONO_ADD_INS ((cfg)->cbb, (ins));
6941 *sp++ = ins;
6943 mono_decompose_opcode (cfg, ins);
6944 ip++;
6945 break;
6946 case CEE_NEG:
6947 case CEE_NOT:
6948 case CEE_CONV_I1:
6949 case CEE_CONV_I2:
6950 case CEE_CONV_I4:
6951 case CEE_CONV_R4:
6952 case CEE_CONV_R8:
6953 case CEE_CONV_U4:
6954 case CEE_CONV_I8:
6955 case CEE_CONV_U8:
6956 case CEE_CONV_OVF_I8:
6957 case CEE_CONV_OVF_U8:
6958 case CEE_CONV_R_UN:
6959 CHECK_STACK (1);
6961 /* Special case this earlier so we have long constants in the IR */
6962 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
6963 int data = sp [-1]->inst_c0;
6964 sp [-1]->opcode = OP_I8CONST;
6965 sp [-1]->type = STACK_I8;
6966 #if SIZEOF_REGISTER == 8
6967 if ((*ip) == CEE_CONV_U8)
6968 sp [-1]->inst_c0 = (guint32)data;
6969 else
6970 sp [-1]->inst_c0 = data;
6971 #else
6972 sp [-1]->inst_ls_word = data;
6973 if ((*ip) == CEE_CONV_U8)
6974 sp [-1]->inst_ms_word = 0;
6975 else
6976 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
6977 #endif
6978 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
6980 else {
6981 ADD_UNOP (*ip);
6983 ip++;
6984 break;
6985 case CEE_CONV_OVF_I4:
6986 case CEE_CONV_OVF_I1:
6987 case CEE_CONV_OVF_I2:
6988 case CEE_CONV_OVF_I:
6989 case CEE_CONV_OVF_U:
6990 CHECK_STACK (1);
6992 if (sp [-1]->type == STACK_R8) {
6993 ADD_UNOP (CEE_CONV_OVF_I8);
6994 ADD_UNOP (*ip);
6995 } else {
6996 ADD_UNOP (*ip);
6998 ip++;
6999 break;
7000 case CEE_CONV_OVF_U1:
7001 case CEE_CONV_OVF_U2:
7002 case CEE_CONV_OVF_U4:
7003 CHECK_STACK (1);
7005 if (sp [-1]->type == STACK_R8) {
7006 ADD_UNOP (CEE_CONV_OVF_U8);
7007 ADD_UNOP (*ip);
7008 } else {
7009 ADD_UNOP (*ip);
7011 ip++;
7012 break;
7013 case CEE_CONV_OVF_I1_UN:
7014 case CEE_CONV_OVF_I2_UN:
7015 case CEE_CONV_OVF_I4_UN:
7016 case CEE_CONV_OVF_I8_UN:
7017 case CEE_CONV_OVF_U1_UN:
7018 case CEE_CONV_OVF_U2_UN:
7019 case CEE_CONV_OVF_U4_UN:
7020 case CEE_CONV_OVF_U8_UN:
7021 case CEE_CONV_OVF_I_UN:
7022 case CEE_CONV_OVF_U_UN:
7023 case CEE_CONV_U2:
7024 case CEE_CONV_U1:
7025 case CEE_CONV_I:
7026 case CEE_CONV_U:
7027 CHECK_STACK (1);
7028 ADD_UNOP (*ip);
7029 ip++;
7030 break;
7031 case CEE_ADD_OVF:
7032 case CEE_ADD_OVF_UN:
7033 case CEE_MUL_OVF:
7034 case CEE_MUL_OVF_UN:
7035 case CEE_SUB_OVF:
7036 case CEE_SUB_OVF_UN:
7037 CHECK_STACK (2);
7038 ADD_BINOP (*ip);
7039 ip++;
7040 break;
7041 case CEE_CPOBJ:
7042 CHECK_OPSIZE (5);
7043 CHECK_STACK (2);
7044 token = read32 (ip + 1);
7045 klass = mini_get_class (method, token, generic_context);
7046 CHECK_TYPELOAD (klass);
7047 sp -= 2;
7048 if (generic_class_is_reference_type (cfg, klass)) {
7049 MonoInst *store, *load;
7050 int dreg = alloc_preg (cfg);
7052 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7053 load->flags |= ins_flag;
7054 MONO_ADD_INS (cfg->cbb, load);
7056 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7057 store->flags |= ins_flag;
7058 MONO_ADD_INS (cfg->cbb, store);
7059 } else {
7060 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7062 ins_flag = 0;
7063 ip += 5;
7064 break;
7065 case CEE_LDOBJ: {
7066 int loc_index = -1;
7067 int stloc_len = 0;
7069 CHECK_OPSIZE (5);
7070 CHECK_STACK (1);
7071 --sp;
7072 token = read32 (ip + 1);
7073 klass = mini_get_class (method, token, generic_context);
7074 CHECK_TYPELOAD (klass);
7076 /* Optimize the common ldobj+stloc combination */
7077 switch (ip [5]) {
7078 case CEE_STLOC_S:
7079 loc_index = ip [6];
7080 stloc_len = 2;
7081 break;
7082 case CEE_STLOC_0:
7083 case CEE_STLOC_1:
7084 case CEE_STLOC_2:
7085 case CEE_STLOC_3:
7086 loc_index = ip [5] - CEE_STLOC_0;
7087 stloc_len = 1;
7088 break;
7089 default:
7090 break;
7093 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7094 CHECK_LOCAL (loc_index);
7096 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7097 ins->dreg = cfg->locals [loc_index]->dreg;
7098 ip += 5;
7099 ip += stloc_len;
7100 break;
7103 /* Optimize the ldobj+stobj combination */
7104 /* The reference case ends up being a load+store anyway */
7105 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 9) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7106 CHECK_STACK (1);
7108 sp --;
7110 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7112 ip += 5 + 5;
7113 ins_flag = 0;
7114 break;
7117 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7118 *sp++ = ins;
7120 ip += 5;
7121 ins_flag = 0;
7122 inline_costs += 1;
7123 break;
7125 case CEE_LDSTR:
7126 CHECK_STACK_OVF (1);
7127 CHECK_OPSIZE (5);
7128 n = read32 (ip + 1);
7130 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7131 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7132 ins->type = STACK_OBJ;
7133 *sp = ins;
7135 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7136 MonoInst *iargs [1];
7138 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7139 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7140 } else {
7141 if (cfg->opt & MONO_OPT_SHARED) {
7142 MonoInst *iargs [3];
7144 if (cfg->compile_aot) {
7145 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7147 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7148 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7149 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7150 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7151 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7152 } else {
7153 if (bblock->out_of_line) {
7154 MonoInst *iargs [2];
7156 if (image == mono_defaults.corlib) {
7158 * Avoid relocations in AOT and save some space by using a
7159 * version of helper_ldstr specialized to mscorlib.
7161 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7162 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7163 } else {
7164 /* Avoid creating the string object */
7165 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7166 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7167 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7170 else
7171 if (cfg->compile_aot) {
7172 NEW_LDSTRCONST (cfg, ins, image, n);
7173 *sp = ins;
7174 MONO_ADD_INS (bblock, ins);
7176 else {
7177 NEW_PCONST (cfg, ins, NULL);
7178 ins->type = STACK_OBJ;
7179 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7180 *sp = ins;
7181 MONO_ADD_INS (bblock, ins);
7186 sp++;
7187 ip += 5;
7188 break;
7189 case CEE_NEWOBJ: {
7190 MonoInst *iargs [2];
7191 MonoMethodSignature *fsig;
7192 MonoInst this_ins;
7193 MonoInst *alloc;
7194 MonoInst *vtable_arg = NULL;
7196 CHECK_OPSIZE (5);
7197 token = read32 (ip + 1);
7198 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7199 if (!cmethod)
7200 goto load_error;
7201 fsig = mono_method_get_signature (cmethod, image, token);
7203 mono_save_token_info (cfg, image, token, cmethod);
7205 if (!mono_class_init (cmethod->klass))
7206 goto load_error;
7208 if (cfg->generic_sharing_context)
7209 context_used = mono_method_check_context_used (cmethod);
7211 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7212 if (check_linkdemand (cfg, method, cmethod))
7213 INLINE_FAILURE;
7214 CHECK_CFG_EXCEPTION;
7215 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7216 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7219 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7220 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7221 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7222 if (context_used) {
7223 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7224 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7225 } else {
7226 EMIT_NEW_METHOD_RGCTX_CONST (cfg, vtable_arg, cmethod);
7228 } else {
7229 if (context_used) {
7230 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7231 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7232 } else {
7233 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7235 CHECK_TYPELOAD (cmethod->klass);
7236 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7241 n = fsig->param_count;
7242 CHECK_STACK (n);
7245 * Generate smaller code for the common newobj <exception> instruction in
7246 * argument checking code.
7248 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
7249 is_exception_class (cmethod->klass) && n <= 2 &&
7250 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7251 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7252 MonoInst *iargs [3];
7254 g_assert (!vtable_arg);
7256 sp -= n;
7258 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7259 switch (n) {
7260 case 0:
7261 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7262 break;
7263 case 1:
7264 iargs [1] = sp [0];
7265 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7266 break;
7267 case 2:
7268 iargs [1] = sp [0];
7269 iargs [2] = sp [1];
7270 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7271 break;
7272 default:
7273 g_assert_not_reached ();
7276 ip += 5;
7277 inline_costs += 5;
7278 break;
7281 /* move the args to allow room for 'this' in the first position */
7282 while (n--) {
7283 --sp;
7284 sp [1] = sp [0];
7287 /* check_call_signature () requires sp[0] to be set */
7288 this_ins.type = STACK_OBJ;
7289 sp [0] = &this_ins;
7290 if (check_call_signature (cfg, fsig, sp))
7291 UNVERIFIED;
7293 iargs [0] = NULL;
7295 if (mini_class_is_system_array (cmethod->klass)) {
7296 g_assert (!vtable_arg);
7298 if (context_used) {
7299 *sp = emit_get_rgctx_method (cfg, context_used,
7300 cmethod, MONO_RGCTX_INFO_METHOD);
7301 } else {
7302 EMIT_NEW_METHODCONST (cfg, *sp, cmethod);
7305 /* Avoid varargs in the common case */
7306 if (fsig->param_count == 1)
7307 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7308 else if (fsig->param_count == 2)
7309 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7310 else
7311 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7312 } else if (cmethod->string_ctor) {
7313 g_assert (!context_used);
7314 g_assert (!vtable_arg);
7315 /* we simply pass a null pointer */
7316 EMIT_NEW_PCONST (cfg, *sp, NULL);
7317 /* now call the string ctor */
7318 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7319 } else {
7320 MonoInst* callvirt_this_arg = NULL;
7322 if (cmethod->klass->valuetype) {
7323 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7324 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7325 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7327 alloc = NULL;
7330 * The code generated by mini_emit_virtual_call () expects
7331 * iargs [0] to be a boxed instance, but luckily the vcall
7332 * will be transformed into a normal call there.
7334 } else if (context_used) {
7335 MonoInst *data;
7336 int rgctx_info;
7338 if (cfg->opt & MONO_OPT_SHARED)
7339 rgctx_info = MONO_RGCTX_INFO_KLASS;
7340 else
7341 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7342 data = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, rgctx_info);
7344 alloc = handle_alloc_from_inst (cfg, cmethod->klass, data, FALSE);
7345 *sp = alloc;
7346 } else {
7347 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7349 CHECK_TYPELOAD (cmethod->klass);
7352 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7353 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7354 * As a workaround, we call class cctors before allocating objects.
7356 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7357 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7358 if (cfg->verbose_level > 2)
7359 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7360 class_inits = g_slist_prepend (class_inits, vtable);
7363 alloc = handle_alloc (cfg, cmethod->klass, FALSE);
7364 *sp = alloc;
7367 if (alloc)
7368 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7370 /* Now call the actual ctor */
7371 /* Avoid virtual calls to ctors if possible */
7372 if (cmethod->klass->marshalbyref)
7373 callvirt_this_arg = sp [0];
7375 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7376 mono_method_check_inlining (cfg, cmethod) &&
7377 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7378 !g_list_find (dont_inline, cmethod)) {
7379 int costs;
7381 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7382 cfg->real_offset += 5;
7383 bblock = cfg->cbb;
7385 inline_costs += costs - 5;
7386 } else {
7387 INLINE_FAILURE;
7388 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7390 } else if (context_used &&
7391 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7392 !mono_class_generic_sharing_enabled (cmethod->klass))) {
7393 MonoInst *cmethod_addr;
7395 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7396 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7398 mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
7399 } else {
7400 INLINE_FAILURE;
7401 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
7402 callvirt_this_arg, NULL, vtable_arg);
7403 if (mono_method_is_generic_sharable_impl (cmethod, TRUE) && ((MonoCallInst*)ins)->method->wrapper_type == MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)
7404 GENERIC_SHARING_FAILURE (*ip);
7408 if (alloc == NULL) {
7409 /* Valuetype */
7410 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7411 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7412 *sp++= ins;
7414 else
7415 *sp++ = alloc;
7417 ip += 5;
7418 inline_costs += 5;
7419 break;
7421 case CEE_CASTCLASS:
7422 CHECK_STACK (1);
7423 --sp;
7424 CHECK_OPSIZE (5);
7425 token = read32 (ip + 1);
7426 klass = mini_get_class (method, token, generic_context);
7427 CHECK_TYPELOAD (klass);
7428 if (sp [0]->type != STACK_OBJ)
7429 UNVERIFIED;
7431 if (cfg->generic_sharing_context)
7432 context_used = mono_class_check_context_used (klass);
7434 if (context_used) {
7435 MonoInst *args [2];
7437 /* obj */
7438 args [0] = *sp;
7440 /* klass */
7441 args [1] = emit_get_rgctx_klass (cfg, context_used,
7442 klass, MONO_RGCTX_INFO_KLASS);
7444 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7445 *sp ++ = ins;
7446 ip += 5;
7447 inline_costs += 2;
7448 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7449 MonoMethod *mono_castclass;
7450 MonoInst *iargs [1];
7451 int costs;
7453 mono_castclass = mono_marshal_get_castclass (klass);
7454 iargs [0] = sp [0];
7456 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7457 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7458 g_assert (costs > 0);
7460 ip += 5;
7461 cfg->real_offset += 5;
7462 bblock = cfg->cbb;
7464 *sp++ = iargs [0];
7466 inline_costs += costs;
7468 else {
7469 ins = handle_castclass (cfg, klass, *sp);
7470 bblock = cfg->cbb;
7471 *sp ++ = ins;
7472 ip += 5;
7474 break;
7475 case CEE_ISINST: {
7476 CHECK_STACK (1);
7477 --sp;
7478 CHECK_OPSIZE (5);
7479 token = read32 (ip + 1);
7480 klass = mini_get_class (method, token, generic_context);
7481 CHECK_TYPELOAD (klass);
7482 if (sp [0]->type != STACK_OBJ)
7483 UNVERIFIED;
7485 if (cfg->generic_sharing_context)
7486 context_used = mono_class_check_context_used (klass);
7488 if (context_used) {
7489 MonoInst *args [2];
7491 /* obj */
7492 args [0] = *sp;
7494 /* klass */
7495 args [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7497 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
7498 sp++;
7499 ip += 5;
7500 inline_costs += 2;
7501 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7502 MonoMethod *mono_isinst;
7503 MonoInst *iargs [1];
7504 int costs;
7506 mono_isinst = mono_marshal_get_isinst (klass);
7507 iargs [0] = sp [0];
7509 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7510 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7511 g_assert (costs > 0);
7513 ip += 5;
7514 cfg->real_offset += 5;
7515 bblock = cfg->cbb;
7517 *sp++= iargs [0];
7519 inline_costs += costs;
7521 else {
7522 ins = handle_isinst (cfg, klass, *sp);
7523 bblock = cfg->cbb;
7524 *sp ++ = ins;
7525 ip += 5;
7527 break;
7529 case CEE_UNBOX_ANY: {
7530 CHECK_STACK (1);
7531 --sp;
7532 CHECK_OPSIZE (5);
7533 token = read32 (ip + 1);
7534 klass = mini_get_class (method, token, generic_context);
7535 CHECK_TYPELOAD (klass);
7537 mono_save_token_info (cfg, image, token, klass);
7539 if (cfg->generic_sharing_context)
7540 context_used = mono_class_check_context_used (klass);
7542 if (generic_class_is_reference_type (cfg, klass)) {
7543 /* CASTCLASS */
7544 if (context_used) {
7545 MonoInst *iargs [2];
7547 /* obj */
7548 iargs [0] = *sp;
7549 /* klass */
7550 iargs [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7551 ins = mono_emit_jit_icall (cfg, mono_object_castclass, iargs);
7552 *sp ++ = ins;
7553 ip += 5;
7554 inline_costs += 2;
7555 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7556 MonoMethod *mono_castclass;
7557 MonoInst *iargs [1];
7558 int costs;
7560 mono_castclass = mono_marshal_get_castclass (klass);
7561 iargs [0] = sp [0];
7563 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7564 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7566 g_assert (costs > 0);
7568 ip += 5;
7569 cfg->real_offset += 5;
7570 bblock = cfg->cbb;
7572 *sp++ = iargs [0];
7573 inline_costs += costs;
7574 } else {
7575 ins = handle_castclass (cfg, klass, *sp);
7576 bblock = cfg->cbb;
7577 *sp ++ = ins;
7578 ip += 5;
7580 break;
7583 if (mono_class_is_nullable (klass)) {
7584 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
7585 *sp++= ins;
7586 ip += 5;
7587 break;
7590 /* UNBOX */
7591 ins = handle_unbox (cfg, klass, sp, context_used);
7592 *sp = ins;
7594 ip += 5;
7596 /* LDOBJ */
7597 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7598 *sp++ = ins;
7600 inline_costs += 2;
7601 break;
7603 case CEE_BOX: {
7604 MonoInst *val;
7606 CHECK_STACK (1);
7607 --sp;
7608 val = *sp;
7609 CHECK_OPSIZE (5);
7610 token = read32 (ip + 1);
7611 klass = mini_get_class (method, token, generic_context);
7612 CHECK_TYPELOAD (klass);
7614 mono_save_token_info (cfg, image, token, klass);
7616 if (cfg->generic_sharing_context)
7617 context_used = mono_class_check_context_used (klass);
7619 if (generic_class_is_reference_type (cfg, klass)) {
7620 *sp++ = val;
7621 ip += 5;
7622 break;
7625 if (klass == mono_defaults.void_class)
7626 UNVERIFIED;
7627 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
7628 UNVERIFIED;
7629 /* frequent check in generic code: box (struct), brtrue */
7630 if (!mono_class_is_nullable (klass) &&
7631 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) && (ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S)) {
7632 /*printf ("box-brtrue opt at 0x%04x in %s\n", real_offset, method->name);*/
7633 ip += 5;
7634 MONO_INST_NEW (cfg, ins, OP_BR);
7635 if (*ip == CEE_BRTRUE_S) {
7636 CHECK_OPSIZE (2);
7637 ip++;
7638 target = ip + 1 + (signed char)(*ip);
7639 ip++;
7640 } else {
7641 CHECK_OPSIZE (5);
7642 ip++;
7643 target = ip + 4 + (gint)(read32 (ip));
7644 ip += 4;
7646 GET_BBLOCK (cfg, tblock, target);
7647 link_bblock (cfg, bblock, tblock);
7648 ins->inst_target_bb = tblock;
7649 GET_BBLOCK (cfg, tblock, ip);
7651 * This leads to some inconsistency, since the two bblocks are
7652 * not really connected, but it is needed for handling stack
7653 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
7654 * FIXME: This should only be needed if sp != stack_start, but that
7655 * doesn't work for some reason (test failure in mcs/tests on x86).
7657 link_bblock (cfg, bblock, tblock);
7658 if (sp != stack_start) {
7659 handle_stack_args (cfg, stack_start, sp - stack_start);
7660 sp = stack_start;
7661 CHECK_UNVERIFIABLE (cfg);
7663 MONO_ADD_INS (bblock, ins);
7664 start_new_bblock = 1;
7665 break;
7668 if (context_used) {
7669 MonoInst *data;
7670 int rgctx_info;
7672 if (cfg->opt & MONO_OPT_SHARED)
7673 rgctx_info = MONO_RGCTX_INFO_KLASS;
7674 else
7675 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7676 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
7677 *sp++ = handle_box_from_inst (cfg, val, klass, context_used, data);
7678 } else {
7679 *sp++ = handle_box (cfg, val, klass);
7682 ip += 5;
7683 inline_costs += 1;
7684 break;
7686 case CEE_UNBOX: {
7687 CHECK_STACK (1);
7688 --sp;
7689 CHECK_OPSIZE (5);
7690 token = read32 (ip + 1);
7691 klass = mini_get_class (method, token, generic_context);
7692 CHECK_TYPELOAD (klass);
7694 mono_save_token_info (cfg, image, token, klass);
7696 if (cfg->generic_sharing_context)
7697 context_used = mono_class_check_context_used (klass);
7699 if (mono_class_is_nullable (klass)) {
7700 MonoInst *val;
7702 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
7703 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
7705 *sp++= ins;
7706 } else {
7707 ins = handle_unbox (cfg, klass, sp, context_used);
7708 *sp++ = ins;
7710 ip += 5;
7711 inline_costs += 2;
7712 break;
7714 case CEE_LDFLD:
7715 case CEE_LDFLDA:
7716 case CEE_STFLD: {
7717 MonoClassField *field;
7718 int costs;
7719 guint foffset;
7721 if (*ip == CEE_STFLD) {
7722 CHECK_STACK (2);
7723 sp -= 2;
7724 } else {
7725 CHECK_STACK (1);
7726 --sp;
7728 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
7729 UNVERIFIED;
7730 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
7731 UNVERIFIED;
7732 CHECK_OPSIZE (5);
7733 token = read32 (ip + 1);
7734 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7735 field = mono_method_get_wrapper_data (method, token);
7736 klass = field->parent;
7738 else {
7739 field = mono_field_from_token (image, token, &klass, generic_context);
7741 if (!field)
7742 goto load_error;
7743 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7744 FIELD_ACCESS_FAILURE;
7745 mono_class_init (klass);
7747 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
7748 if (*ip == CEE_STFLD) {
7749 if (target_type_is_incompatible (cfg, field->type, sp [1]))
7750 UNVERIFIED;
7751 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7752 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
7753 MonoInst *iargs [5];
7755 iargs [0] = sp [0];
7756 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7757 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7758 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
7759 field->offset);
7760 iargs [4] = sp [1];
7762 if (cfg->opt & MONO_OPT_INLINE) {
7763 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
7764 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7765 g_assert (costs > 0);
7767 cfg->real_offset += 5;
7768 bblock = cfg->cbb;
7770 inline_costs += costs;
7771 } else {
7772 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
7774 } else {
7775 MonoInst *store;
7777 #if HAVE_WRITE_BARRIERS
7778 if (mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
7779 /* insert call to write barrier */
7780 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
7781 MonoInst *iargs [2];
7782 int dreg;
7784 dreg = alloc_preg (cfg);
7785 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
7786 iargs [1] = sp [1];
7787 mono_emit_method_call (cfg, write_barrier, iargs, NULL);
7789 #endif
7791 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
7793 store->flags |= ins_flag;
7795 ins_flag = 0;
7796 ip += 5;
7797 break;
7800 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7801 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
7802 MonoInst *iargs [4];
7804 iargs [0] = sp [0];
7805 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7806 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7807 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
7808 if ((cfg->opt & MONO_OPT_INLINE) && !MONO_TYPE_ISSTRUCT (mono_method_signature (wrapper)->ret)) {
7809 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
7810 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7811 bblock = cfg->cbb;
7812 g_assert (costs > 0);
7814 cfg->real_offset += 5;
7816 *sp++ = iargs [0];
7818 inline_costs += costs;
7819 } else {
7820 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
7821 *sp++ = ins;
7823 } else {
7824 if (sp [0]->type == STACK_VTYPE) {
7825 MonoInst *var;
7827 /* Have to compute the address of the variable */
7829 var = get_vreg_to_inst (cfg, sp [0]->dreg);
7830 if (!var)
7831 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
7832 else
7833 g_assert (var->klass == klass);
7835 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
7836 sp [0] = ins;
7839 if (*ip == CEE_LDFLDA) {
7840 dreg = alloc_preg (cfg);
7842 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
7843 ins->klass = mono_class_from_mono_type (field->type);
7844 ins->type = STACK_MP;
7845 *sp++ = ins;
7846 } else {
7847 MonoInst *load;
7849 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
7850 load->flags |= ins_flag;
7851 *sp++ = load;
7854 ins_flag = 0;
7855 ip += 5;
7856 break;
7858 case CEE_LDSFLD:
7859 case CEE_LDSFLDA:
7860 case CEE_STSFLD: {
7861 MonoClassField *field;
7862 gpointer addr = NULL;
7863 gboolean is_special_static;
7865 CHECK_OPSIZE (5);
7866 token = read32 (ip + 1);
7868 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7869 field = mono_method_get_wrapper_data (method, token);
7870 klass = field->parent;
7872 else
7873 field = mono_field_from_token (image, token, &klass, generic_context);
7874 if (!field)
7875 goto load_error;
7876 mono_class_init (klass);
7877 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7878 FIELD_ACCESS_FAILURE;
7881 * We can only support shared generic static
7882 * field access on architectures where the
7883 * trampoline code has been extended to handle
7884 * the generic class init.
7886 #ifndef MONO_ARCH_VTABLE_REG
7887 GENERIC_SHARING_FAILURE (*ip);
7888 #endif
7890 if (cfg->generic_sharing_context)
7891 context_used = mono_class_check_context_used (klass);
7893 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
7895 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
7896 * to be called here.
7898 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
7899 mono_class_vtable (cfg->domain, klass);
7900 CHECK_TYPELOAD (klass);
7902 mono_domain_lock (cfg->domain);
7903 if (cfg->domain->special_static_fields)
7904 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
7905 mono_domain_unlock (cfg->domain);
7907 is_special_static = mono_class_field_is_special_static (field);
7909 /* Generate IR to compute the field address */
7911 if ((cfg->opt & MONO_OPT_SHARED) ||
7912 (cfg->compile_aot && is_special_static) ||
7913 (context_used && is_special_static)) {
7914 MonoInst *iargs [2];
7916 g_assert (field->parent);
7917 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7918 if (context_used) {
7919 iargs [1] = emit_get_rgctx_field (cfg, context_used,
7920 field, MONO_RGCTX_INFO_CLASS_FIELD);
7921 } else {
7922 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
7924 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
7925 } else if (context_used) {
7926 MonoInst *static_data;
7929 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
7930 method->klass->name_space, method->klass->name, method->name,
7931 depth, field->offset);
7934 if (mono_class_needs_cctor_run (klass, method)) {
7935 MonoCallInst *call;
7936 MonoInst *vtable;
7938 vtable = emit_get_rgctx_klass (cfg, context_used,
7939 klass, MONO_RGCTX_INFO_VTABLE);
7941 // FIXME: This doesn't work since it tries to pass the argument
7942 // in the normal way, instead of using MONO_ARCH_VTABLE_REG
7944 * The vtable pointer is always passed in a register regardless of
7945 * the calling convention, so assign it manually, and make a call
7946 * using a signature without parameters.
7948 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable);
7949 #ifdef MONO_ARCH_VTABLE_REG
7950 mono_call_inst_add_outarg_reg (cfg, call, vtable->dreg, MONO_ARCH_VTABLE_REG, FALSE);
7951 cfg->uses_vtable_reg = TRUE;
7952 #else
7953 NOT_IMPLEMENTED;
7954 #endif
7958 * The pointer we're computing here is
7960 * super_info.static_data + field->offset
7962 static_data = emit_get_rgctx_klass (cfg, context_used,
7963 klass, MONO_RGCTX_INFO_STATIC_DATA);
7965 if (field->offset == 0) {
7966 ins = static_data;
7967 } else {
7968 int addr_reg = mono_alloc_preg (cfg);
7969 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
7971 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
7972 MonoInst *iargs [2];
7974 g_assert (field->parent);
7975 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7976 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
7977 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
7978 } else {
7979 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
7981 CHECK_TYPELOAD (klass);
7982 if (!addr) {
7983 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7984 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7985 if (cfg->verbose_level > 2)
7986 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
7987 class_inits = g_slist_prepend (class_inits, vtable);
7988 } else {
7989 if (cfg->run_cctors) {
7990 MonoException *ex;
7991 /* This makes so that inline cannot trigger */
7992 /* .cctors: too many apps depend on them */
7993 /* running with a specific order... */
7994 if (! vtable->initialized)
7995 INLINE_FAILURE;
7996 ex = mono_runtime_class_init_full (vtable, FALSE);
7997 if (ex) {
7998 set_exception_object (cfg, ex);
7999 goto exception_exit;
8003 addr = (char*)vtable->data + field->offset;
8005 if (cfg->compile_aot)
8006 EMIT_NEW_SFLDACONST (cfg, ins, field);
8007 else
8008 EMIT_NEW_PCONST (cfg, ins, addr);
8009 } else {
8011 * insert call to mono_threads_get_static_data (GPOINTER_TO_UINT (addr))
8012 * This could be later optimized to do just a couple of
8013 * memory dereferences with constant offsets.
8015 MonoInst *iargs [1];
8016 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8017 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8021 /* Generate IR to do the actual load/store operation */
8023 if (*ip == CEE_LDSFLDA) {
8024 ins->klass = mono_class_from_mono_type (field->type);
8025 ins->type = STACK_PTR;
8026 *sp++ = ins;
8027 } else if (*ip == CEE_STSFLD) {
8028 MonoInst *store;
8029 CHECK_STACK (1);
8030 sp--;
8032 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
8033 store->flags |= ins_flag;
8034 } else {
8035 gboolean is_const = FALSE;
8036 MonoVTable *vtable = NULL;
8038 if (!context_used) {
8039 vtable = mono_class_vtable (cfg->domain, klass);
8040 CHECK_TYPELOAD (klass);
8042 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
8043 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
8044 gpointer addr = (char*)vtable->data + field->offset;
8045 int ro_type = field->type->type;
8046 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
8047 ro_type = mono_class_enum_basetype (field->type->data.klass)->type;
8049 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8050 is_const = TRUE;
8051 switch (ro_type) {
8052 case MONO_TYPE_BOOLEAN:
8053 case MONO_TYPE_U1:
8054 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8055 sp++;
8056 break;
8057 case MONO_TYPE_I1:
8058 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8059 sp++;
8060 break;
8061 case MONO_TYPE_CHAR:
8062 case MONO_TYPE_U2:
8063 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8064 sp++;
8065 break;
8066 case MONO_TYPE_I2:
8067 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
8068 sp++;
8069 break;
8070 break;
8071 case MONO_TYPE_I4:
8072 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
8073 sp++;
8074 break;
8075 case MONO_TYPE_U4:
8076 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
8077 sp++;
8078 break;
8079 #ifndef HAVE_MOVING_COLLECTOR
8080 case MONO_TYPE_I:
8081 case MONO_TYPE_U:
8082 case MONO_TYPE_STRING:
8083 case MONO_TYPE_OBJECT:
8084 case MONO_TYPE_CLASS:
8085 case MONO_TYPE_SZARRAY:
8086 case MONO_TYPE_PTR:
8087 case MONO_TYPE_FNPTR:
8088 case MONO_TYPE_ARRAY:
8089 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8090 type_to_eval_stack_type ((cfg), field->type, *sp);
8091 sp++;
8092 break;
8093 #endif
8094 case MONO_TYPE_I8:
8095 case MONO_TYPE_U8:
8096 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
8097 sp++;
8098 break;
8099 case MONO_TYPE_R4:
8100 case MONO_TYPE_R8:
8101 case MONO_TYPE_VALUETYPE:
8102 default:
8103 is_const = FALSE;
8104 break;
8108 if (!is_const) {
8109 MonoInst *load;
8111 CHECK_STACK_OVF (1);
8113 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
8114 load->flags |= ins_flag;
8115 ins_flag = 0;
8116 *sp++ = load;
8119 ins_flag = 0;
8120 ip += 5;
8121 break;
8123 case CEE_STOBJ:
8124 CHECK_STACK (2);
8125 sp -= 2;
8126 CHECK_OPSIZE (5);
8127 token = read32 (ip + 1);
8128 klass = mini_get_class (method, token, generic_context);
8129 CHECK_TYPELOAD (klass);
8130 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8131 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8132 ins_flag = 0;
8133 ip += 5;
8134 inline_costs += 1;
8135 break;
8138 * Array opcodes
8140 case CEE_NEWARR: {
8141 MonoInst *len_ins;
8142 const char *data_ptr;
8143 int data_size = 0;
8144 guint32 field_token;
8146 CHECK_STACK (1);
8147 --sp;
8149 CHECK_OPSIZE (5);
8150 token = read32 (ip + 1);
8152 klass = mini_get_class (method, token, generic_context);
8153 CHECK_TYPELOAD (klass);
8155 if (cfg->generic_sharing_context)
8156 context_used = mono_class_check_context_used (klass);
8158 if (context_used) {
8159 MonoInst *args [2];
8161 /* FIXME: Decompose later to help abcrem */
8163 /* vtable */
8164 args [0] = emit_get_rgctx_klass (cfg, context_used,
8165 mono_array_class_get (klass, 1), MONO_RGCTX_INFO_VTABLE);
8167 /* array len */
8168 args [1] = sp [0];
8170 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8171 } else {
8172 if (cfg->opt & MONO_OPT_SHARED) {
8173 /* Decompose now to avoid problems with references to the domainvar */
8174 MonoInst *iargs [3];
8176 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8177 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8178 iargs [2] = sp [0];
8180 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8181 } else {
8182 /* Decompose later since it is needed by abcrem */
8183 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8184 ins->dreg = alloc_preg (cfg);
8185 ins->sreg1 = sp [0]->dreg;
8186 ins->inst_newa_class = klass;
8187 ins->type = STACK_OBJ;
8188 ins->klass = klass;
8189 MONO_ADD_INS (cfg->cbb, ins);
8190 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8191 cfg->cbb->has_array_access = TRUE;
8193 /* Needed so mono_emit_load_get_addr () gets called */
8194 mono_get_got_var (cfg);
8198 len_ins = sp [0];
8199 ip += 5;
8200 *sp++ = ins;
8201 inline_costs += 1;
8204 * we inline/optimize the initialization sequence if possible.
8205 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8206 * for small sizes open code the memcpy
8207 * ensure the rva field is big enough
8209 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
8210 MonoMethod *memcpy_method = get_memcpy_method ();
8211 MonoInst *iargs [3];
8212 int add_reg = alloc_preg (cfg);
8214 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
8215 if (cfg->compile_aot) {
8216 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
8217 } else {
8218 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
8220 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
8221 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
8222 ip += 11;
8225 break;
8227 case CEE_LDLEN:
8228 CHECK_STACK (1);
8229 --sp;
8230 if (sp [0]->type != STACK_OBJ)
8231 UNVERIFIED;
8233 dreg = alloc_preg (cfg);
8234 MONO_INST_NEW (cfg, ins, OP_LDLEN);
8235 ins->dreg = alloc_preg (cfg);
8236 ins->sreg1 = sp [0]->dreg;
8237 ins->type = STACK_I4;
8238 MONO_ADD_INS (cfg->cbb, ins);
8239 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8240 cfg->cbb->has_array_access = TRUE;
8241 ip ++;
8242 *sp++ = ins;
8243 break;
8244 case CEE_LDELEMA:
8245 CHECK_STACK (2);
8246 sp -= 2;
8247 CHECK_OPSIZE (5);
8248 if (sp [0]->type != STACK_OBJ)
8249 UNVERIFIED;
8251 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8253 klass = mini_get_class (method, read32 (ip + 1), generic_context);
8254 CHECK_TYPELOAD (klass);
8255 /* we need to make sure that this array is exactly the type it needs
8256 * to be for correctness. the wrappers are lax with their usage
8257 * so we need to ignore them here
8259 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly)
8260 mini_emit_check_array_type (cfg, sp [0], mono_array_class_get (klass, 1));
8262 readonly = FALSE;
8263 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8264 *sp++ = ins;
8265 ip += 5;
8266 break;
8267 case CEE_LDELEM_ANY:
8268 case CEE_LDELEM_I1:
8269 case CEE_LDELEM_U1:
8270 case CEE_LDELEM_I2:
8271 case CEE_LDELEM_U2:
8272 case CEE_LDELEM_I4:
8273 case CEE_LDELEM_U4:
8274 case CEE_LDELEM_I8:
8275 case CEE_LDELEM_I:
8276 case CEE_LDELEM_R4:
8277 case CEE_LDELEM_R8:
8278 case CEE_LDELEM_REF: {
8279 MonoInst *addr;
8281 CHECK_STACK (2);
8282 sp -= 2;
8284 if (*ip == CEE_LDELEM_ANY) {
8285 CHECK_OPSIZE (5);
8286 token = read32 (ip + 1);
8287 klass = mini_get_class (method, token, generic_context);
8288 CHECK_TYPELOAD (klass);
8289 mono_class_init (klass);
8291 else
8292 klass = array_access_to_klass (*ip);
8294 if (sp [0]->type != STACK_OBJ)
8295 UNVERIFIED;
8297 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8299 if (sp [1]->opcode == OP_ICONST) {
8300 int array_reg = sp [0]->dreg;
8301 int index_reg = sp [1]->dreg;
8302 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8304 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8305 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8306 } else {
8307 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8308 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8310 *sp++ = ins;
8311 if (*ip == CEE_LDELEM_ANY)
8312 ip += 5;
8313 else
8314 ++ip;
8315 break;
8317 case CEE_STELEM_I:
8318 case CEE_STELEM_I1:
8319 case CEE_STELEM_I2:
8320 case CEE_STELEM_I4:
8321 case CEE_STELEM_I8:
8322 case CEE_STELEM_R4:
8323 case CEE_STELEM_R8:
8324 case CEE_STELEM_REF:
8325 case CEE_STELEM_ANY: {
8326 MonoInst *addr;
8328 CHECK_STACK (3);
8329 sp -= 3;
8331 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8333 if (*ip == CEE_STELEM_ANY) {
8334 CHECK_OPSIZE (5);
8335 token = read32 (ip + 1);
8336 klass = mini_get_class (method, token, generic_context);
8337 CHECK_TYPELOAD (klass);
8338 mono_class_init (klass);
8340 else
8341 klass = array_access_to_klass (*ip);
8343 if (sp [0]->type != STACK_OBJ)
8344 UNVERIFIED;
8346 /* storing a NULL doesn't need any of the complex checks in stelemref */
8347 if (generic_class_is_reference_type (cfg, klass) &&
8348 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8349 MonoMethod* helper = mono_marshal_get_stelemref ();
8350 MonoInst *iargs [3];
8352 if (sp [0]->type != STACK_OBJ)
8353 UNVERIFIED;
8354 if (sp [2]->type != STACK_OBJ)
8355 UNVERIFIED;
8357 iargs [2] = sp [2];
8358 iargs [1] = sp [1];
8359 iargs [0] = sp [0];
8361 mono_emit_method_call (cfg, helper, iargs, NULL);
8362 } else {
8363 if (sp [1]->opcode == OP_ICONST) {
8364 int array_reg = sp [0]->dreg;
8365 int index_reg = sp [1]->dreg;
8366 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8368 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8369 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8370 } else {
8371 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8372 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8376 if (*ip == CEE_STELEM_ANY)
8377 ip += 5;
8378 else
8379 ++ip;
8380 inline_costs += 1;
8381 break;
8383 case CEE_CKFINITE: {
8384 CHECK_STACK (1);
8385 --sp;
8387 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8388 ins->sreg1 = sp [0]->dreg;
8389 ins->dreg = alloc_freg (cfg);
8390 ins->type = STACK_R8;
8391 MONO_ADD_INS (bblock, ins);
8392 *sp++ = ins;
8394 mono_decompose_opcode (cfg, ins);
8396 ++ip;
8397 break;
8399 case CEE_REFANYVAL: {
8400 MonoInst *src_var, *src;
8402 int klass_reg = alloc_preg (cfg);
8403 int dreg = alloc_preg (cfg);
8405 CHECK_STACK (1);
8406 MONO_INST_NEW (cfg, ins, *ip);
8407 --sp;
8408 CHECK_OPSIZE (5);
8409 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8410 CHECK_TYPELOAD (klass);
8411 mono_class_init (klass);
8413 if (cfg->generic_sharing_context)
8414 context_used = mono_class_check_context_used (klass);
8416 // FIXME:
8417 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8418 if (!src_var)
8419 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8420 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8421 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8423 if (context_used) {
8424 MonoInst *klass_ins;
8426 klass_ins = emit_get_rgctx_klass (cfg, context_used,
8427 klass, MONO_RGCTX_INFO_KLASS);
8429 // FIXME:
8430 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
8431 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
8432 } else {
8433 mini_emit_class_check (cfg, klass_reg, klass);
8435 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8436 ins->type = STACK_MP;
8437 *sp++ = ins;
8438 ip += 5;
8439 break;
8441 case CEE_MKREFANY: {
8442 MonoInst *loc, *addr;
8444 CHECK_STACK (1);
8445 MONO_INST_NEW (cfg, ins, *ip);
8446 --sp;
8447 CHECK_OPSIZE (5);
8448 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8449 CHECK_TYPELOAD (klass);
8450 mono_class_init (klass);
8452 if (cfg->generic_sharing_context)
8453 context_used = mono_class_check_context_used (klass);
8455 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8456 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8458 if (context_used) {
8459 MonoInst *const_ins;
8460 int type_reg = alloc_preg (cfg);
8462 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
8463 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
8464 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8465 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8466 } else if (cfg->compile_aot) {
8467 int const_reg = alloc_preg (cfg);
8468 int type_reg = alloc_preg (cfg);
8470 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8471 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8472 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8473 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8474 } else {
8475 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8476 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8478 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8480 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8481 ins->type = STACK_VTYPE;
8482 ins->klass = mono_defaults.typed_reference_class;
8483 *sp++ = ins;
8484 ip += 5;
8485 break;
8487 case CEE_LDTOKEN: {
8488 gpointer handle;
8489 MonoClass *handle_class;
8491 CHECK_STACK_OVF (1);
8493 CHECK_OPSIZE (5);
8494 n = read32 (ip + 1);
8496 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
8497 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8498 handle = mono_method_get_wrapper_data (method, n);
8499 handle_class = mono_method_get_wrapper_data (method, n + 1);
8500 if (handle_class == mono_defaults.typehandle_class)
8501 handle = &((MonoClass*)handle)->byval_arg;
8503 else {
8504 handle = mono_ldtoken (image, n, &handle_class, generic_context);
8506 if (!handle)
8507 goto load_error;
8508 mono_class_init (handle_class);
8509 if (cfg->generic_sharing_context) {
8510 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
8511 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
8512 /* This case handles ldtoken
8513 of an open type, like for
8514 typeof(Gen<>). */
8515 context_used = 0;
8516 } else if (handle_class == mono_defaults.typehandle_class) {
8517 /* If we get a MONO_TYPE_CLASS
8518 then we need to provide the
8519 open type, not an
8520 instantiation of it. */
8521 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
8522 context_used = 0;
8523 else
8524 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
8525 } else if (handle_class == mono_defaults.fieldhandle_class)
8526 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
8527 else if (handle_class == mono_defaults.methodhandle_class)
8528 context_used = mono_method_check_context_used (handle);
8529 else
8530 g_assert_not_reached ();
8533 if ((cfg->opt & MONO_OPT_SHARED) &&
8534 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
8535 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
8536 MonoInst *addr, *vtvar, *iargs [3];
8537 int method_context_used;
8539 if (cfg->generic_sharing_context)
8540 method_context_used = mono_method_check_context_used (method);
8541 else
8542 method_context_used = 0;
8544 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8546 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8547 EMIT_NEW_ICONST (cfg, iargs [1], n);
8548 if (method_context_used) {
8549 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
8550 method, MONO_RGCTX_INFO_METHOD);
8551 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
8552 } else {
8553 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
8554 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
8556 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8558 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8560 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8561 } else {
8562 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
8563 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
8564 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
8565 (cmethod->klass == mono_defaults.monotype_class->parent) &&
8566 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
8567 MonoClass *tclass = mono_class_from_mono_type (handle);
8569 mono_class_init (tclass);
8570 if (context_used) {
8571 ins = emit_get_rgctx_klass (cfg, context_used,
8572 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
8573 } else if (cfg->compile_aot) {
8574 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
8575 } else {
8576 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
8578 ins->type = STACK_OBJ;
8579 ins->klass = cmethod->klass;
8580 ip += 5;
8581 } else {
8582 MonoInst *addr, *vtvar;
8584 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8586 if (context_used) {
8587 if (handle_class == mono_defaults.typehandle_class) {
8588 ins = emit_get_rgctx_klass (cfg, context_used,
8589 mono_class_from_mono_type (handle),
8590 MONO_RGCTX_INFO_TYPE);
8591 } else if (handle_class == mono_defaults.methodhandle_class) {
8592 ins = emit_get_rgctx_method (cfg, context_used,
8593 handle, MONO_RGCTX_INFO_METHOD);
8594 } else if (handle_class == mono_defaults.fieldhandle_class) {
8595 ins = emit_get_rgctx_field (cfg, context_used,
8596 handle, MONO_RGCTX_INFO_CLASS_FIELD);
8597 } else {
8598 g_assert_not_reached ();
8600 } else if (cfg->compile_aot) {
8601 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
8602 } else {
8603 EMIT_NEW_PCONST (cfg, ins, handle);
8605 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8606 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8607 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8611 *sp++ = ins;
8612 ip += 5;
8613 break;
8615 case CEE_THROW:
8616 CHECK_STACK (1);
8617 MONO_INST_NEW (cfg, ins, OP_THROW);
8618 --sp;
8619 ins->sreg1 = sp [0]->dreg;
8620 ip++;
8621 bblock->out_of_line = TRUE;
8622 MONO_ADD_INS (bblock, ins);
8623 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
8624 MONO_ADD_INS (bblock, ins);
8625 sp = stack_start;
8627 link_bblock (cfg, bblock, end_bblock);
8628 start_new_bblock = 1;
8629 break;
8630 case CEE_ENDFINALLY:
8631 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
8632 MONO_ADD_INS (bblock, ins);
8633 ip++;
8634 start_new_bblock = 1;
8637 * Control will leave the method so empty the stack, otherwise
8638 * the next basic block will start with a nonempty stack.
8640 while (sp != stack_start) {
8641 sp--;
8643 break;
8644 case CEE_LEAVE:
8645 case CEE_LEAVE_S: {
8646 GList *handlers;
8648 if (*ip == CEE_LEAVE) {
8649 CHECK_OPSIZE (5);
8650 target = ip + 5 + (gint32)read32(ip + 1);
8651 } else {
8652 CHECK_OPSIZE (2);
8653 target = ip + 2 + (signed char)(ip [1]);
8656 /* empty the stack */
8657 while (sp != stack_start) {
8658 sp--;
8662 * If this leave statement is in a catch block, check for a
8663 * pending exception, and rethrow it if necessary.
8665 for (i = 0; i < header->num_clauses; ++i) {
8666 MonoExceptionClause *clause = &header->clauses [i];
8669 * Use <= in the final comparison to handle clauses with multiple
8670 * leave statements, like in bug #78024.
8671 * The ordering of the exception clauses guarantees that we find the
8672 * innermost clause.
8674 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len)) {
8675 MonoInst *exc_ins;
8676 MonoBasicBlock *dont_throw;
8679 MonoInst *load;
8681 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
8684 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
8686 NEW_BBLOCK (cfg, dont_throw);
8689 * Currently, we always rethrow the abort exception, despite the
8690 * fact that this is not correct. See thread6.cs for an example.
8691 * But propagating the abort exception is more important than
8692 * getting the semantics right.
8694 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
8695 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
8696 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
8698 MONO_START_BB (cfg, dont_throw);
8699 bblock = cfg->cbb;
8703 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
8704 GList *tmp;
8705 for (tmp = handlers; tmp; tmp = tmp->next) {
8706 tblock = tmp->data;
8707 link_bblock (cfg, bblock, tblock);
8708 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
8709 ins->inst_target_bb = tblock;
8710 MONO_ADD_INS (bblock, ins);
8712 g_list_free (handlers);
8715 MONO_INST_NEW (cfg, ins, OP_BR);
8716 MONO_ADD_INS (bblock, ins);
8717 GET_BBLOCK (cfg, tblock, target);
8718 link_bblock (cfg, bblock, tblock);
8719 ins->inst_target_bb = tblock;
8720 start_new_bblock = 1;
8722 if (*ip == CEE_LEAVE)
8723 ip += 5;
8724 else
8725 ip += 2;
8727 break;
8731 * Mono specific opcodes
8733 case MONO_CUSTOM_PREFIX: {
8735 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
8737 CHECK_OPSIZE (2);
8738 switch (ip [1]) {
8739 case CEE_MONO_ICALL: {
8740 gpointer func;
8741 MonoJitICallInfo *info;
8743 token = read32 (ip + 2);
8744 func = mono_method_get_wrapper_data (method, token);
8745 info = mono_find_jit_icall_by_addr (func);
8746 g_assert (info);
8748 CHECK_STACK (info->sig->param_count);
8749 sp -= info->sig->param_count;
8751 ins = mono_emit_jit_icall (cfg, info->func, sp);
8752 if (!MONO_TYPE_IS_VOID (info->sig->ret))
8753 *sp++ = ins;
8755 ip += 6;
8756 inline_costs += 10 * num_calls++;
8758 break;
8760 case CEE_MONO_LDPTR: {
8761 gpointer ptr;
8763 CHECK_STACK_OVF (1);
8764 CHECK_OPSIZE (6);
8765 token = read32 (ip + 2);
8767 ptr = mono_method_get_wrapper_data (method, token);
8768 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
8769 MonoJitICallInfo *callinfo;
8770 const char *icall_name;
8772 icall_name = method->name + strlen ("__icall_wrapper_");
8773 g_assert (icall_name);
8774 callinfo = mono_find_jit_icall_by_name (icall_name);
8775 g_assert (callinfo);
8777 if (ptr == callinfo->func) {
8778 /* Will be transformed into an AOTCONST later */
8779 EMIT_NEW_PCONST (cfg, ins, ptr);
8780 *sp++ = ins;
8781 ip += 6;
8782 break;
8785 /* FIXME: Generalize this */
8786 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
8787 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
8788 *sp++ = ins;
8789 ip += 6;
8790 break;
8792 EMIT_NEW_PCONST (cfg, ins, ptr);
8793 *sp++ = ins;
8794 ip += 6;
8795 inline_costs += 10 * num_calls++;
8796 /* Can't embed random pointers into AOT code */
8797 cfg->disable_aot = 1;
8798 break;
8800 case CEE_MONO_ICALL_ADDR: {
8801 MonoMethod *cmethod;
8802 gpointer ptr;
8804 CHECK_STACK_OVF (1);
8805 CHECK_OPSIZE (6);
8806 token = read32 (ip + 2);
8808 cmethod = mono_method_get_wrapper_data (method, token);
8810 if (cfg->compile_aot) {
8811 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
8812 } else {
8813 ptr = mono_lookup_internal_call (cmethod);
8814 g_assert (ptr);
8815 EMIT_NEW_PCONST (cfg, ins, ptr);
8817 *sp++ = ins;
8818 ip += 6;
8819 break;
8821 case CEE_MONO_VTADDR: {
8822 MonoInst *src_var, *src;
8824 CHECK_STACK (1);
8825 --sp;
8827 // FIXME:
8828 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8829 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
8830 *sp++ = src;
8831 ip += 2;
8832 break;
8834 case CEE_MONO_NEWOBJ: {
8835 MonoInst *iargs [2];
8837 CHECK_STACK_OVF (1);
8838 CHECK_OPSIZE (6);
8839 token = read32 (ip + 2);
8840 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8841 mono_class_init (klass);
8842 NEW_DOMAINCONST (cfg, iargs [0]);
8843 MONO_ADD_INS (cfg->cbb, iargs [0]);
8844 NEW_CLASSCONST (cfg, iargs [1], klass);
8845 MONO_ADD_INS (cfg->cbb, iargs [1]);
8846 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
8847 ip += 6;
8848 inline_costs += 10 * num_calls++;
8849 break;
8851 case CEE_MONO_OBJADDR:
8852 CHECK_STACK (1);
8853 --sp;
8854 MONO_INST_NEW (cfg, ins, OP_MOVE);
8855 ins->dreg = alloc_preg (cfg);
8856 ins->sreg1 = sp [0]->dreg;
8857 ins->type = STACK_MP;
8858 MONO_ADD_INS (cfg->cbb, ins);
8859 *sp++ = ins;
8860 ip += 2;
8861 break;
8862 case CEE_MONO_LDNATIVEOBJ:
8864 * Similar to LDOBJ, but instead load the unmanaged
8865 * representation of the vtype to the stack.
8867 CHECK_STACK (1);
8868 CHECK_OPSIZE (6);
8869 --sp;
8870 token = read32 (ip + 2);
8871 klass = mono_method_get_wrapper_data (method, token);
8872 g_assert (klass->valuetype);
8873 mono_class_init (klass);
8876 MonoInst *src, *dest, *temp;
8878 src = sp [0];
8879 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
8880 temp->backend.is_pinvoke = 1;
8881 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
8882 mini_emit_stobj (cfg, dest, src, klass, TRUE);
8884 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
8885 dest->type = STACK_VTYPE;
8886 dest->klass = klass;
8888 *sp ++ = dest;
8889 ip += 6;
8891 break;
8892 case CEE_MONO_RETOBJ: {
8894 * Same as RET, but return the native representation of a vtype
8895 * to the caller.
8897 g_assert (cfg->ret);
8898 g_assert (mono_method_signature (method)->pinvoke);
8899 CHECK_STACK (1);
8900 --sp;
8902 CHECK_OPSIZE (6);
8903 token = read32 (ip + 2);
8904 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8906 if (!cfg->vret_addr) {
8907 g_assert (cfg->ret_var_is_local);
8909 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
8910 } else {
8911 EMIT_NEW_RETLOADA (cfg, ins);
8913 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
8915 if (sp != stack_start)
8916 UNVERIFIED;
8918 MONO_INST_NEW (cfg, ins, OP_BR);
8919 ins->inst_target_bb = end_bblock;
8920 MONO_ADD_INS (bblock, ins);
8921 link_bblock (cfg, bblock, end_bblock);
8922 start_new_bblock = 1;
8923 ip += 6;
8924 break;
8926 case CEE_MONO_CISINST:
8927 case CEE_MONO_CCASTCLASS: {
8928 int token;
8929 CHECK_STACK (1);
8930 --sp;
8931 CHECK_OPSIZE (6);
8932 token = read32 (ip + 2);
8933 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8934 if (ip [1] == CEE_MONO_CISINST)
8935 ins = handle_cisinst (cfg, klass, sp [0]);
8936 else
8937 ins = handle_ccastclass (cfg, klass, sp [0]);
8938 bblock = cfg->cbb;
8939 *sp++ = ins;
8940 ip += 6;
8941 break;
8943 case CEE_MONO_SAVE_LMF:
8944 case CEE_MONO_RESTORE_LMF:
8945 #ifdef MONO_ARCH_HAVE_LMF_OPS
8946 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
8947 MONO_ADD_INS (bblock, ins);
8948 cfg->need_lmf_area = TRUE;
8949 #endif
8950 ip += 2;
8951 break;
8952 case CEE_MONO_CLASSCONST:
8953 CHECK_STACK_OVF (1);
8954 CHECK_OPSIZE (6);
8955 token = read32 (ip + 2);
8956 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
8957 *sp++ = ins;
8958 ip += 6;
8959 inline_costs += 10 * num_calls++;
8960 break;
8961 case CEE_MONO_NOT_TAKEN:
8962 bblock->out_of_line = TRUE;
8963 ip += 2;
8964 break;
8965 case CEE_MONO_TLS:
8966 CHECK_STACK_OVF (1);
8967 CHECK_OPSIZE (6);
8968 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
8969 ins->dreg = alloc_preg (cfg);
8970 ins->inst_offset = (gint32)read32 (ip + 2);
8971 ins->type = STACK_PTR;
8972 MONO_ADD_INS (bblock, ins);
8973 *sp++ = ins;
8974 ip += 6;
8975 break;
8976 default:
8977 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
8978 break;
8980 break;
8983 case CEE_PREFIX1: {
8984 CHECK_OPSIZE (2);
8985 switch (ip [1]) {
8986 case CEE_ARGLIST: {
8987 /* somewhat similar to LDTOKEN */
8988 MonoInst *addr, *vtvar;
8989 CHECK_STACK_OVF (1);
8990 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
8992 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8993 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
8995 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8996 ins->type = STACK_VTYPE;
8997 ins->klass = mono_defaults.argumenthandle_class;
8998 *sp++ = ins;
8999 ip += 2;
9000 break;
9002 case CEE_CEQ:
9003 case CEE_CGT:
9004 case CEE_CGT_UN:
9005 case CEE_CLT:
9006 case CEE_CLT_UN: {
9007 MonoInst *cmp;
9008 CHECK_STACK (2);
9010 * The following transforms:
9011 * CEE_CEQ into OP_CEQ
9012 * CEE_CGT into OP_CGT
9013 * CEE_CGT_UN into OP_CGT_UN
9014 * CEE_CLT into OP_CLT
9015 * CEE_CLT_UN into OP_CLT_UN
9017 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
9019 MONO_INST_NEW (cfg, ins, cmp->opcode);
9020 sp -= 2;
9021 cmp->sreg1 = sp [0]->dreg;
9022 cmp->sreg2 = sp [1]->dreg;
9023 type_from_op (cmp, sp [0], sp [1]);
9024 CHECK_TYPE (cmp);
9025 if ((sp [0]->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
9026 cmp->opcode = OP_LCOMPARE;
9027 else if (sp [0]->type == STACK_R8)
9028 cmp->opcode = OP_FCOMPARE;
9029 else
9030 cmp->opcode = OP_ICOMPARE;
9031 MONO_ADD_INS (bblock, cmp);
9032 ins->type = STACK_I4;
9033 ins->dreg = alloc_dreg (cfg, ins->type);
9034 type_from_op (ins, sp [0], sp [1]);
9036 if (cmp->opcode == OP_FCOMPARE) {
9038 * The backends expect the fceq opcodes to do the
9039 * comparison too.
9041 cmp->opcode = OP_NOP;
9042 ins->sreg1 = cmp->sreg1;
9043 ins->sreg2 = cmp->sreg2;
9045 MONO_ADD_INS (bblock, ins);
9046 *sp++ = ins;
9047 ip += 2;
9048 break;
9050 case CEE_LDFTN: {
9051 MonoInst *argconst;
9052 MonoMethod *cil_method;
9053 gboolean needs_static_rgctx_invoke;
9055 CHECK_STACK_OVF (1);
9056 CHECK_OPSIZE (6);
9057 n = read32 (ip + 2);
9058 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9059 if (!cmethod)
9060 goto load_error;
9061 mono_class_init (cmethod->klass);
9063 mono_save_token_info (cfg, image, n, cmethod);
9065 if (cfg->generic_sharing_context)
9066 context_used = mono_method_check_context_used (cmethod);
9068 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
9070 cil_method = cmethod;
9071 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
9072 METHOD_ACCESS_FAILURE;
9074 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9075 if (check_linkdemand (cfg, method, cmethod))
9076 INLINE_FAILURE;
9077 CHECK_CFG_EXCEPTION;
9078 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9079 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9083 * Optimize the common case of ldftn+delegate creation
9085 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE) && !defined(HAVE_WRITE_BARRIERS)
9086 /* FIXME: SGEN support */
9087 /* FIXME: handle shared static generic methods */
9088 /* FIXME: handle this in shared code */
9089 if (!needs_static_rgctx_invoke && !context_used && (sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
9090 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
9091 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
9092 MonoInst *target_ins;
9093 MonoMethod *invoke;
9095 invoke = mono_get_delegate_invoke (ctor_method->klass);
9096 if (!invoke || !mono_method_signature (invoke))
9097 goto load_error;
9099 ip += 6;
9100 if (cfg->verbose_level > 3)
9101 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9102 target_ins = sp [-1];
9103 sp --;
9104 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod);
9105 ip += 5;
9106 sp ++;
9107 break;
9110 #endif
9112 if (context_used) {
9113 if (needs_static_rgctx_invoke)
9114 cmethod = mono_marshal_get_static_rgctx_invoke (cmethod);
9116 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
9117 } else if (needs_static_rgctx_invoke) {
9118 EMIT_NEW_METHODCONST (cfg, argconst, mono_marshal_get_static_rgctx_invoke (cmethod));
9119 } else {
9120 EMIT_NEW_METHODCONST (cfg, argconst, cmethod);
9122 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
9123 *sp++ = ins;
9125 ip += 6;
9126 inline_costs += 10 * num_calls++;
9127 break;
9129 case CEE_LDVIRTFTN: {
9130 MonoInst *args [2];
9132 CHECK_STACK (1);
9133 CHECK_OPSIZE (6);
9134 n = read32 (ip + 2);
9135 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9136 if (!cmethod)
9137 goto load_error;
9138 mono_class_init (cmethod->klass);
9140 if (cfg->generic_sharing_context)
9141 context_used = mono_method_check_context_used (cmethod);
9143 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9144 if (check_linkdemand (cfg, method, cmethod))
9145 INLINE_FAILURE;
9146 CHECK_CFG_EXCEPTION;
9147 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9148 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9151 --sp;
9152 args [0] = *sp;
9154 if (context_used) {
9155 args [1] = emit_get_rgctx_method (cfg, context_used,
9156 cmethod, MONO_RGCTX_INFO_METHOD);
9157 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
9158 } else {
9159 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
9160 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
9163 ip += 6;
9164 inline_costs += 10 * num_calls++;
9165 break;
9167 case CEE_LDARG:
9168 CHECK_STACK_OVF (1);
9169 CHECK_OPSIZE (4);
9170 n = read16 (ip + 2);
9171 CHECK_ARG (n);
9172 EMIT_NEW_ARGLOAD (cfg, ins, n);
9173 *sp++ = ins;
9174 ip += 4;
9175 break;
9176 case CEE_LDARGA:
9177 CHECK_STACK_OVF (1);
9178 CHECK_OPSIZE (4);
9179 n = read16 (ip + 2);
9180 CHECK_ARG (n);
9181 NEW_ARGLOADA (cfg, ins, n);
9182 MONO_ADD_INS (cfg->cbb, ins);
9183 *sp++ = ins;
9184 ip += 4;
9185 break;
9186 case CEE_STARG:
9187 CHECK_STACK (1);
9188 --sp;
9189 CHECK_OPSIZE (4);
9190 n = read16 (ip + 2);
9191 CHECK_ARG (n);
9192 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
9193 UNVERIFIED;
9194 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
9195 ip += 4;
9196 break;
9197 case CEE_LDLOC:
9198 CHECK_STACK_OVF (1);
9199 CHECK_OPSIZE (4);
9200 n = read16 (ip + 2);
9201 CHECK_LOCAL (n);
9202 EMIT_NEW_LOCLOAD (cfg, ins, n);
9203 *sp++ = ins;
9204 ip += 4;
9205 break;
9206 case CEE_LDLOCA: {
9207 unsigned char *tmp_ip;
9208 CHECK_STACK_OVF (1);
9209 CHECK_OPSIZE (4);
9210 n = read16 (ip + 2);
9211 CHECK_LOCAL (n);
9213 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
9214 ip = tmp_ip;
9215 inline_costs += 1;
9216 break;
9219 EMIT_NEW_LOCLOADA (cfg, ins, n);
9220 *sp++ = ins;
9221 ip += 4;
9222 break;
9224 case CEE_STLOC:
9225 CHECK_STACK (1);
9226 --sp;
9227 CHECK_OPSIZE (4);
9228 n = read16 (ip + 2);
9229 CHECK_LOCAL (n);
9230 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
9231 UNVERIFIED;
9232 emit_stloc_ir (cfg, sp, header, n);
9233 ip += 4;
9234 inline_costs += 1;
9235 break;
9236 case CEE_LOCALLOC:
9237 CHECK_STACK (1);
9238 --sp;
9239 if (sp != stack_start)
9240 UNVERIFIED;
9241 if (cfg->method != method)
9243 * Inlining this into a loop in a parent could lead to
9244 * stack overflows which is different behavior than the
9245 * non-inlined case, thus disable inlining in this case.
9247 goto inline_failure;
9249 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
9250 ins->dreg = alloc_preg (cfg);
9251 ins->sreg1 = sp [0]->dreg;
9252 ins->type = STACK_PTR;
9253 MONO_ADD_INS (cfg->cbb, ins);
9255 cfg->flags |= MONO_CFG_HAS_ALLOCA;
9256 if (header->init_locals)
9257 ins->flags |= MONO_INST_INIT;
9259 *sp++ = ins;
9260 ip += 2;
9261 break;
9262 case CEE_ENDFILTER: {
9263 MonoExceptionClause *clause, *nearest;
9264 int cc, nearest_num;
9266 CHECK_STACK (1);
9267 --sp;
9268 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
9269 UNVERIFIED;
9270 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
9271 ins->sreg1 = (*sp)->dreg;
9272 MONO_ADD_INS (bblock, ins);
9273 start_new_bblock = 1;
9274 ip += 2;
9276 nearest = NULL;
9277 nearest_num = 0;
9278 for (cc = 0; cc < header->num_clauses; ++cc) {
9279 clause = &header->clauses [cc];
9280 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
9281 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
9282 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
9283 nearest = clause;
9284 nearest_num = cc;
9287 g_assert (nearest);
9288 if ((ip - header->code) != nearest->handler_offset)
9289 UNVERIFIED;
9291 break;
9293 case CEE_UNALIGNED_:
9294 ins_flag |= MONO_INST_UNALIGNED;
9295 /* FIXME: record alignment? we can assume 1 for now */
9296 CHECK_OPSIZE (3);
9297 ip += 3;
9298 break;
9299 case CEE_VOLATILE_:
9300 ins_flag |= MONO_INST_VOLATILE;
9301 ip += 2;
9302 break;
9303 case CEE_TAIL_:
9304 ins_flag |= MONO_INST_TAILCALL;
9305 cfg->flags |= MONO_CFG_HAS_TAIL;
9306 /* Can't inline tail calls at this time */
9307 inline_costs += 100000;
9308 ip += 2;
9309 break;
9310 case CEE_INITOBJ:
9311 CHECK_STACK (1);
9312 --sp;
9313 CHECK_OPSIZE (6);
9314 token = read32 (ip + 2);
9315 klass = mini_get_class (method, token, generic_context);
9316 CHECK_TYPELOAD (klass);
9317 if (generic_class_is_reference_type (cfg, klass))
9318 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9319 else
9320 mini_emit_initobj (cfg, *sp, NULL, klass);
9321 ip += 6;
9322 inline_costs += 1;
9323 break;
9324 case CEE_CONSTRAINED_:
9325 CHECK_OPSIZE (6);
9326 token = read32 (ip + 2);
9327 constrained_call = mono_class_get_full (image, token, generic_context);
9328 CHECK_TYPELOAD (constrained_call);
9329 ip += 6;
9330 break;
9331 case CEE_CPBLK:
9332 case CEE_INITBLK: {
9333 MonoInst *iargs [3];
9334 CHECK_STACK (3);
9335 sp -= 3;
9337 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9338 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9339 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9340 /* emit_memset only works when val == 0 */
9341 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9342 } else {
9343 iargs [0] = sp [0];
9344 iargs [1] = sp [1];
9345 iargs [2] = sp [2];
9346 if (ip [1] == CEE_CPBLK) {
9347 MonoMethod *memcpy_method = get_memcpy_method ();
9348 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9349 } else {
9350 MonoMethod *memset_method = get_memset_method ();
9351 mono_emit_method_call (cfg, memset_method, iargs, NULL);
9354 ip += 2;
9355 inline_costs += 1;
9356 break;
9358 case CEE_NO_:
9359 CHECK_OPSIZE (3);
9360 if (ip [2] & 0x1)
9361 ins_flag |= MONO_INST_NOTYPECHECK;
9362 if (ip [2] & 0x2)
9363 ins_flag |= MONO_INST_NORANGECHECK;
9364 /* we ignore the no-nullcheck for now since we
9365 * really do it explicitly only when doing callvirt->call
9367 ip += 3;
9368 break;
9369 case CEE_RETHROW: {
9370 MonoInst *load;
9371 int handler_offset = -1;
9373 for (i = 0; i < header->num_clauses; ++i) {
9374 MonoExceptionClause *clause = &header->clauses [i];
9375 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
9376 handler_offset = clause->handler_offset;
9377 break;
9381 bblock->flags |= BB_EXCEPTION_UNSAFE;
9383 g_assert (handler_offset != -1);
9385 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9386 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9387 ins->sreg1 = load->dreg;
9388 MONO_ADD_INS (bblock, ins);
9389 sp = stack_start;
9390 link_bblock (cfg, bblock, end_bblock);
9391 start_new_bblock = 1;
9392 ip += 2;
9393 break;
9395 case CEE_SIZEOF: {
9396 guint32 align;
9397 int ialign;
9399 CHECK_STACK_OVF (1);
9400 CHECK_OPSIZE (6);
9401 token = read32 (ip + 2);
9402 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC) {
9403 MonoType *type = mono_type_create_from_typespec (image, token);
9404 token = mono_type_size (type, &ialign);
9405 } else {
9406 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9407 CHECK_TYPELOAD (klass);
9408 mono_class_init (klass);
9409 token = mono_class_value_size (klass, &align);
9411 EMIT_NEW_ICONST (cfg, ins, token);
9412 *sp++= ins;
9413 ip += 6;
9414 break;
9416 case CEE_REFANYTYPE: {
9417 MonoInst *src_var, *src;
9419 CHECK_STACK (1);
9420 --sp;
9422 // FIXME:
9423 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9424 if (!src_var)
9425 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9426 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9427 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
9428 *sp++ = ins;
9429 ip += 2;
9430 break;
9432 case CEE_READONLY_:
9433 readonly = TRUE;
9434 ip += 2;
9435 break;
9436 default:
9437 g_error ("opcode 0xfe 0x%02x not handled", ip [1]);
9439 break;
9441 default:
9442 g_error ("opcode 0x%02x not handled", *ip);
9445 if (start_new_bblock != 1)
9446 UNVERIFIED;
9448 bblock->cil_length = ip - bblock->cil_code;
9449 bblock->next_bb = end_bblock;
9451 if (cfg->method == method && cfg->domainvar) {
9452 MonoInst *store;
9453 MonoInst *get_domain;
9455 cfg->cbb = init_localsbb;
9457 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
9458 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
9460 else {
9461 get_domain->dreg = alloc_preg (cfg);
9462 MONO_ADD_INS (cfg->cbb, get_domain);
9464 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
9465 MONO_ADD_INS (cfg->cbb, store);
9468 if (cfg->method == method && cfg->got_var)
9469 mono_emit_load_got_addr (cfg);
9471 if (header->init_locals) {
9472 MonoInst *store;
9474 cfg->cbb = init_localsbb;
9475 cfg->ip = NULL;
9476 for (i = 0; i < header->num_locals; ++i) {
9477 MonoType *ptype = header->locals [i];
9478 int t = ptype->type;
9479 dreg = cfg->locals [i]->dreg;
9481 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
9482 t = mono_class_enum_basetype (ptype->data.klass)->type;
9483 if (ptype->byref) {
9484 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9485 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
9486 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
9487 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
9488 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
9489 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
9490 MONO_INST_NEW (cfg, ins, OP_R8CONST);
9491 ins->type = STACK_R8;
9492 ins->inst_p0 = (void*)&r8_0;
9493 ins->dreg = alloc_dreg (cfg, STACK_R8);
9494 MONO_ADD_INS (init_localsbb, ins);
9495 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
9496 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
9497 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
9498 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
9499 } else {
9500 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9505 cfg->ip = NULL;
9507 if (cfg->method == method) {
9508 MonoBasicBlock *bb;
9509 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9510 bb->region = mono_find_block_region (cfg, bb->real_offset);
9511 if (cfg->spvars)
9512 mono_create_spvar_for_region (cfg, bb->region);
9513 if (cfg->verbose_level > 2)
9514 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
9518 g_slist_free (class_inits);
9519 dont_inline = g_list_remove (dont_inline, method);
9521 if (inline_costs < 0) {
9522 char *mname;
9524 /* Method is too large */
9525 mname = mono_method_full_name (method, TRUE);
9526 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
9527 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
9528 g_free (mname);
9529 return -1;
9532 if ((cfg->verbose_level > 2) && (cfg->method == method))
9533 mono_print_code (cfg, "AFTER METHOD-TO-IR");
9535 return inline_costs;
9537 exception_exit:
9538 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
9539 g_slist_free (class_inits);
9540 dont_inline = g_list_remove (dont_inline, method);
9541 return -1;
9543 inline_failure:
9544 g_slist_free (class_inits);
9545 dont_inline = g_list_remove (dont_inline, method);
9546 return -1;
9548 load_error:
9549 g_slist_free (class_inits);
9550 dont_inline = g_list_remove (dont_inline, method);
9551 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
9552 return -1;
9554 unverified:
9555 g_slist_free (class_inits);
9556 dont_inline = g_list_remove (dont_inline, method);
9557 set_exception_type_from_invalid_il (cfg, method, ip);
9558 return -1;
9561 static int
9562 store_membase_reg_to_store_membase_imm (int opcode)
9564 switch (opcode) {
9565 case OP_STORE_MEMBASE_REG:
9566 return OP_STORE_MEMBASE_IMM;
9567 case OP_STOREI1_MEMBASE_REG:
9568 return OP_STOREI1_MEMBASE_IMM;
9569 case OP_STOREI2_MEMBASE_REG:
9570 return OP_STOREI2_MEMBASE_IMM;
9571 case OP_STOREI4_MEMBASE_REG:
9572 return OP_STOREI4_MEMBASE_IMM;
9573 case OP_STOREI8_MEMBASE_REG:
9574 return OP_STOREI8_MEMBASE_IMM;
9575 default:
9576 g_assert_not_reached ();
9579 return -1;
9582 #endif /* DISABLE_JIT */
9585 mono_op_to_op_imm (int opcode)
9587 switch (opcode) {
9588 case OP_IADD:
9589 return OP_IADD_IMM;
9590 case OP_ISUB:
9591 return OP_ISUB_IMM;
9592 case OP_IDIV:
9593 return OP_IDIV_IMM;
9594 case OP_IDIV_UN:
9595 return OP_IDIV_UN_IMM;
9596 case OP_IREM:
9597 return OP_IREM_IMM;
9598 case OP_IREM_UN:
9599 return OP_IREM_UN_IMM;
9600 case OP_IMUL:
9601 return OP_IMUL_IMM;
9602 case OP_IAND:
9603 return OP_IAND_IMM;
9604 case OP_IOR:
9605 return OP_IOR_IMM;
9606 case OP_IXOR:
9607 return OP_IXOR_IMM;
9608 case OP_ISHL:
9609 return OP_ISHL_IMM;
9610 case OP_ISHR:
9611 return OP_ISHR_IMM;
9612 case OP_ISHR_UN:
9613 return OP_ISHR_UN_IMM;
9615 case OP_LADD:
9616 return OP_LADD_IMM;
9617 case OP_LSUB:
9618 return OP_LSUB_IMM;
9619 case OP_LAND:
9620 return OP_LAND_IMM;
9621 case OP_LOR:
9622 return OP_LOR_IMM;
9623 case OP_LXOR:
9624 return OP_LXOR_IMM;
9625 case OP_LSHL:
9626 return OP_LSHL_IMM;
9627 case OP_LSHR:
9628 return OP_LSHR_IMM;
9629 case OP_LSHR_UN:
9630 return OP_LSHR_UN_IMM;
9632 case OP_COMPARE:
9633 return OP_COMPARE_IMM;
9634 case OP_ICOMPARE:
9635 return OP_ICOMPARE_IMM;
9636 case OP_LCOMPARE:
9637 return OP_LCOMPARE_IMM;
9639 case OP_STORE_MEMBASE_REG:
9640 return OP_STORE_MEMBASE_IMM;
9641 case OP_STOREI1_MEMBASE_REG:
9642 return OP_STOREI1_MEMBASE_IMM;
9643 case OP_STOREI2_MEMBASE_REG:
9644 return OP_STOREI2_MEMBASE_IMM;
9645 case OP_STOREI4_MEMBASE_REG:
9646 return OP_STOREI4_MEMBASE_IMM;
9648 #if defined(__i386__) || defined (__x86_64__)
9649 case OP_X86_PUSH:
9650 return OP_X86_PUSH_IMM;
9651 case OP_X86_COMPARE_MEMBASE_REG:
9652 return OP_X86_COMPARE_MEMBASE_IMM;
9653 #endif
9654 #if defined(__x86_64__)
9655 case OP_AMD64_ICOMPARE_MEMBASE_REG:
9656 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
9657 #endif
9658 case OP_VOIDCALL_REG:
9659 return OP_VOIDCALL;
9660 case OP_CALL_REG:
9661 return OP_CALL;
9662 case OP_LCALL_REG:
9663 return OP_LCALL;
9664 case OP_FCALL_REG:
9665 return OP_FCALL;
9666 case OP_LOCALLOC:
9667 return OP_LOCALLOC_IMM;
9670 return -1;
9673 static int
9674 ldind_to_load_membase (int opcode)
9676 switch (opcode) {
9677 case CEE_LDIND_I1:
9678 return OP_LOADI1_MEMBASE;
9679 case CEE_LDIND_U1:
9680 return OP_LOADU1_MEMBASE;
9681 case CEE_LDIND_I2:
9682 return OP_LOADI2_MEMBASE;
9683 case CEE_LDIND_U2:
9684 return OP_LOADU2_MEMBASE;
9685 case CEE_LDIND_I4:
9686 return OP_LOADI4_MEMBASE;
9687 case CEE_LDIND_U4:
9688 return OP_LOADU4_MEMBASE;
9689 case CEE_LDIND_I:
9690 return OP_LOAD_MEMBASE;
9691 case CEE_LDIND_REF:
9692 return OP_LOAD_MEMBASE;
9693 case CEE_LDIND_I8:
9694 return OP_LOADI8_MEMBASE;
9695 case CEE_LDIND_R4:
9696 return OP_LOADR4_MEMBASE;
9697 case CEE_LDIND_R8:
9698 return OP_LOADR8_MEMBASE;
9699 default:
9700 g_assert_not_reached ();
9703 return -1;
9706 static int
9707 stind_to_store_membase (int opcode)
9709 switch (opcode) {
9710 case CEE_STIND_I1:
9711 return OP_STOREI1_MEMBASE_REG;
9712 case CEE_STIND_I2:
9713 return OP_STOREI2_MEMBASE_REG;
9714 case CEE_STIND_I4:
9715 return OP_STOREI4_MEMBASE_REG;
9716 case CEE_STIND_I:
9717 case CEE_STIND_REF:
9718 return OP_STORE_MEMBASE_REG;
9719 case CEE_STIND_I8:
9720 return OP_STOREI8_MEMBASE_REG;
9721 case CEE_STIND_R4:
9722 return OP_STORER4_MEMBASE_REG;
9723 case CEE_STIND_R8:
9724 return OP_STORER8_MEMBASE_REG;
9725 default:
9726 g_assert_not_reached ();
9729 return -1;
9733 mono_load_membase_to_load_mem (int opcode)
9735 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
9736 #if defined(__i386__) || defined(__x86_64__)
9737 switch (opcode) {
9738 case OP_LOAD_MEMBASE:
9739 return OP_LOAD_MEM;
9740 case OP_LOADU1_MEMBASE:
9741 return OP_LOADU1_MEM;
9742 case OP_LOADU2_MEMBASE:
9743 return OP_LOADU2_MEM;
9744 case OP_LOADI4_MEMBASE:
9745 return OP_LOADI4_MEM;
9746 case OP_LOADU4_MEMBASE:
9747 return OP_LOADU4_MEM;
9748 #if SIZEOF_REGISTER == 8
9749 case OP_LOADI8_MEMBASE:
9750 return OP_LOADI8_MEM;
9751 #endif
9753 #endif
9755 return -1;
9758 static inline int
9759 op_to_op_dest_membase (int store_opcode, int opcode)
9761 #if defined(__i386__)
9762 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
9763 return -1;
9765 switch (opcode) {
9766 case OP_IADD:
9767 return OP_X86_ADD_MEMBASE_REG;
9768 case OP_ISUB:
9769 return OP_X86_SUB_MEMBASE_REG;
9770 case OP_IAND:
9771 return OP_X86_AND_MEMBASE_REG;
9772 case OP_IOR:
9773 return OP_X86_OR_MEMBASE_REG;
9774 case OP_IXOR:
9775 return OP_X86_XOR_MEMBASE_REG;
9776 case OP_ADD_IMM:
9777 case OP_IADD_IMM:
9778 return OP_X86_ADD_MEMBASE_IMM;
9779 case OP_SUB_IMM:
9780 case OP_ISUB_IMM:
9781 return OP_X86_SUB_MEMBASE_IMM;
9782 case OP_AND_IMM:
9783 case OP_IAND_IMM:
9784 return OP_X86_AND_MEMBASE_IMM;
9785 case OP_OR_IMM:
9786 case OP_IOR_IMM:
9787 return OP_X86_OR_MEMBASE_IMM;
9788 case OP_XOR_IMM:
9789 case OP_IXOR_IMM:
9790 return OP_X86_XOR_MEMBASE_IMM;
9791 case OP_MOVE:
9792 return OP_NOP;
9794 #endif
9796 #if defined(__x86_64__)
9797 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
9798 return -1;
9800 switch (opcode) {
9801 case OP_IADD:
9802 return OP_X86_ADD_MEMBASE_REG;
9803 case OP_ISUB:
9804 return OP_X86_SUB_MEMBASE_REG;
9805 case OP_IAND:
9806 return OP_X86_AND_MEMBASE_REG;
9807 case OP_IOR:
9808 return OP_X86_OR_MEMBASE_REG;
9809 case OP_IXOR:
9810 return OP_X86_XOR_MEMBASE_REG;
9811 case OP_IADD_IMM:
9812 return OP_X86_ADD_MEMBASE_IMM;
9813 case OP_ISUB_IMM:
9814 return OP_X86_SUB_MEMBASE_IMM;
9815 case OP_IAND_IMM:
9816 return OP_X86_AND_MEMBASE_IMM;
9817 case OP_IOR_IMM:
9818 return OP_X86_OR_MEMBASE_IMM;
9819 case OP_IXOR_IMM:
9820 return OP_X86_XOR_MEMBASE_IMM;
9821 case OP_LADD:
9822 return OP_AMD64_ADD_MEMBASE_REG;
9823 case OP_LSUB:
9824 return OP_AMD64_SUB_MEMBASE_REG;
9825 case OP_LAND:
9826 return OP_AMD64_AND_MEMBASE_REG;
9827 case OP_LOR:
9828 return OP_AMD64_OR_MEMBASE_REG;
9829 case OP_LXOR:
9830 return OP_AMD64_XOR_MEMBASE_REG;
9831 case OP_ADD_IMM:
9832 case OP_LADD_IMM:
9833 return OP_AMD64_ADD_MEMBASE_IMM;
9834 case OP_SUB_IMM:
9835 case OP_LSUB_IMM:
9836 return OP_AMD64_SUB_MEMBASE_IMM;
9837 case OP_AND_IMM:
9838 case OP_LAND_IMM:
9839 return OP_AMD64_AND_MEMBASE_IMM;
9840 case OP_OR_IMM:
9841 case OP_LOR_IMM:
9842 return OP_AMD64_OR_MEMBASE_IMM;
9843 case OP_XOR_IMM:
9844 case OP_LXOR_IMM:
9845 return OP_AMD64_XOR_MEMBASE_IMM;
9846 case OP_MOVE:
9847 return OP_NOP;
9849 #endif
9851 return -1;
9854 static inline int
9855 op_to_op_store_membase (int store_opcode, int opcode)
9857 #if defined(__i386__) || defined(__x86_64__)
9858 switch (opcode) {
9859 case OP_ICEQ:
9860 if (store_opcode == OP_STOREI1_MEMBASE_REG)
9861 return OP_X86_SETEQ_MEMBASE;
9862 case OP_CNE:
9863 if (store_opcode == OP_STOREI1_MEMBASE_REG)
9864 return OP_X86_SETNE_MEMBASE;
9866 #endif
9868 return -1;
9871 static inline int
9872 op_to_op_src1_membase (int load_opcode, int opcode)
9874 #ifdef __i386__
9875 /* FIXME: This has sign extension issues */
9877 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
9878 return OP_X86_COMPARE_MEMBASE8_IMM;
9881 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
9882 return -1;
9884 switch (opcode) {
9885 case OP_X86_PUSH:
9886 return OP_X86_PUSH_MEMBASE;
9887 case OP_COMPARE_IMM:
9888 case OP_ICOMPARE_IMM:
9889 return OP_X86_COMPARE_MEMBASE_IMM;
9890 case OP_COMPARE:
9891 case OP_ICOMPARE:
9892 return OP_X86_COMPARE_MEMBASE_REG;
9894 #endif
9896 #ifdef __x86_64__
9897 /* FIXME: This has sign extension issues */
9899 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
9900 return OP_X86_COMPARE_MEMBASE8_IMM;
9903 switch (opcode) {
9904 case OP_X86_PUSH:
9905 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9906 return OP_X86_PUSH_MEMBASE;
9907 break;
9908 /* FIXME: This only works for 32 bit immediates
9909 case OP_COMPARE_IMM:
9910 case OP_LCOMPARE_IMM:
9911 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9912 return OP_AMD64_COMPARE_MEMBASE_IMM;
9914 case OP_ICOMPARE_IMM:
9915 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9916 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
9917 break;
9918 case OP_COMPARE:
9919 case OP_LCOMPARE:
9920 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9921 return OP_AMD64_COMPARE_MEMBASE_REG;
9922 break;
9923 case OP_ICOMPARE:
9924 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9925 return OP_AMD64_ICOMPARE_MEMBASE_REG;
9926 break;
9928 #endif
9930 return -1;
9933 static inline int
9934 op_to_op_src2_membase (int load_opcode, int opcode)
9936 #ifdef __i386__
9937 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
9938 return -1;
9940 switch (opcode) {
9941 case OP_COMPARE:
9942 case OP_ICOMPARE:
9943 return OP_X86_COMPARE_REG_MEMBASE;
9944 case OP_IADD:
9945 return OP_X86_ADD_REG_MEMBASE;
9946 case OP_ISUB:
9947 return OP_X86_SUB_REG_MEMBASE;
9948 case OP_IAND:
9949 return OP_X86_AND_REG_MEMBASE;
9950 case OP_IOR:
9951 return OP_X86_OR_REG_MEMBASE;
9952 case OP_IXOR:
9953 return OP_X86_XOR_REG_MEMBASE;
9955 #endif
9957 #ifdef __x86_64__
9958 switch (opcode) {
9959 case OP_ICOMPARE:
9960 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9961 return OP_AMD64_ICOMPARE_REG_MEMBASE;
9962 break;
9963 case OP_COMPARE:
9964 case OP_LCOMPARE:
9965 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9966 return OP_AMD64_COMPARE_REG_MEMBASE;
9967 break;
9968 case OP_IADD:
9969 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9970 return OP_X86_ADD_REG_MEMBASE;
9971 case OP_ISUB:
9972 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9973 return OP_X86_SUB_REG_MEMBASE;
9974 case OP_IAND:
9975 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9976 return OP_X86_AND_REG_MEMBASE;
9977 case OP_IOR:
9978 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9979 return OP_X86_OR_REG_MEMBASE;
9980 case OP_IXOR:
9981 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9982 return OP_X86_XOR_REG_MEMBASE;
9983 case OP_LADD:
9984 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9985 return OP_AMD64_ADD_REG_MEMBASE;
9986 case OP_LSUB:
9987 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9988 return OP_AMD64_SUB_REG_MEMBASE;
9989 case OP_LAND:
9990 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9991 return OP_AMD64_AND_REG_MEMBASE;
9992 case OP_LOR:
9993 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9994 return OP_AMD64_OR_REG_MEMBASE;
9995 case OP_LXOR:
9996 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
9997 return OP_AMD64_XOR_REG_MEMBASE;
9999 #endif
10001 return -1;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but answer -1 for opcodes whose operation is
 * emulated in software on this target, so no immediate variant should be used.
 */
int
mono_op_to_op_imm_noemul (int opcode)
{
#if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
	/* Long shifts are emulated on 32 bit targets */
	if (opcode == OP_LSHR || opcode == OP_LSHL || opcode == OP_LSHR_UN)
		return -1;
#endif
#if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
	/* Integer division/remainder is emulated on some targets */
	if (opcode == OP_IDIV || opcode == OP_IDIV_UN || opcode == OP_IREM || opcode == OP_IREM_UN)
		return -1;
#endif
	return mono_op_to_op_imm (opcode);
}
10025 #ifndef DISABLE_JIT
10028 * mono_handle_global_vregs:
10030 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
10031 * for them.
10033 void
10034 mono_handle_global_vregs (MonoCompile *cfg)
10036 gint32 *vreg_to_bb;
10037 MonoBasicBlock *bb;
10038 int i, pos;
10040 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
10042 #ifdef MONO_ARCH_SIMD_INTRINSICS
10043 if (cfg->uses_simd_intrinsics)
10044 mono_simd_simplify_indirection (cfg);
10045 #endif
10047 /* Find local vregs used in more than one bb */
10048 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10049 MonoInst *ins = bb->code;
10050 int block_num = bb->block_num;
10052 if (cfg->verbose_level > 2)
10053 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
10055 cfg->cbb = bb;
10056 for (; ins; ins = ins->next) {
10057 const char *spec = INS_INFO (ins->opcode);
10058 int regtype, regindex;
10059 gint32 prev_bb;
10061 if (G_UNLIKELY (cfg->verbose_level > 2))
10062 mono_print_ins (ins);
10064 g_assert (ins->opcode >= MONO_CEE_LAST);
10066 for (regindex = 0; regindex < 3; regindex ++) {
10067 int vreg;
10069 if (regindex == 0) {
10070 regtype = spec [MONO_INST_DEST];
10071 if (regtype == ' ')
10072 continue;
10073 vreg = ins->dreg;
10074 } else if (regindex == 1) {
10075 regtype = spec [MONO_INST_SRC1];
10076 if (regtype == ' ')
10077 continue;
10078 vreg = ins->sreg1;
10079 } else {
10080 regtype = spec [MONO_INST_SRC2];
10081 if (regtype == ' ')
10082 continue;
10083 vreg = ins->sreg2;
10086 #if SIZEOF_REGISTER == 4
10087 if (regtype == 'l') {
10089 * Since some instructions reference the original long vreg,
10090 * and some reference the two component vregs, it is quite hard
10091 * to determine when it needs to be global. So be conservative.
10093 if (!get_vreg_to_inst (cfg, vreg)) {
10094 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10096 if (cfg->verbose_level > 2)
10097 printf ("LONG VREG R%d made global.\n", vreg);
10101 * Make the component vregs volatile since the optimizations can
10102 * get confused otherwise.
10104 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
10105 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
10107 #endif
10109 g_assert (vreg != -1);
10111 prev_bb = vreg_to_bb [vreg];
10112 if (prev_bb == 0) {
10113 /* 0 is a valid block num */
10114 vreg_to_bb [vreg] = block_num + 1;
10115 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
10116 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
10117 continue;
10119 if (!get_vreg_to_inst (cfg, vreg)) {
10120 if (G_UNLIKELY (cfg->verbose_level > 2))
10121 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
10123 switch (regtype) {
10124 case 'i':
10125 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
10126 break;
10127 case 'f':
10128 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
10129 break;
10130 case 'v':
10131 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
10132 break;
10133 default:
10134 g_assert_not_reached ();
10138 /* Flag as having been used in more than one bb */
10139 vreg_to_bb [vreg] = -1;
10145 /* If a variable is used in only one bblock, convert it into a local vreg */
10146 for (i = 0; i < cfg->num_varinfo; i++) {
10147 MonoInst *var = cfg->varinfo [i];
10148 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
10150 switch (var->type) {
10151 case STACK_I4:
10152 case STACK_OBJ:
10153 case STACK_PTR:
10154 case STACK_MP:
10155 case STACK_VTYPE:
10156 #if SIZEOF_REGISTER == 8
10157 case STACK_I8:
10158 #endif
10159 #if !defined(__i386__) && !defined(MONO_ARCH_SOFT_FLOAT)
10160 /* Enabling this screws up the fp stack on x86 */
10161 case STACK_R8:
10162 #endif
10163 /* Arguments are implicitly global */
10164 /* Putting R4 vars into registers doesn't work currently */
10165 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
10167 * Make that the variable's liveness interval doesn't contain a call, since
10168 * that would cause the lvreg to be spilled, making the whole optimization
10169 * useless.
10171 /* This is too slow for JIT compilation */
10172 #if 0
10173 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
10174 MonoInst *ins;
10175 int def_index, call_index, ins_index;
10176 gboolean spilled = FALSE;
10178 def_index = -1;
10179 call_index = -1;
10180 ins_index = 0;
10181 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
10182 const char *spec = INS_INFO (ins->opcode);
10184 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
10185 def_index = ins_index;
10187 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
10188 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
10189 if (call_index > def_index) {
10190 spilled = TRUE;
10191 break;
10195 if (MONO_IS_CALL (ins))
10196 call_index = ins_index;
10198 ins_index ++;
10201 if (spilled)
10202 break;
10204 #endif
10206 if (G_UNLIKELY (cfg->verbose_level > 2))
10207 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
10208 var->flags |= MONO_INST_IS_DEAD;
10209 cfg->vreg_to_inst [var->dreg] = NULL;
10211 break;
10216 * Compress the varinfo and vars tables so the liveness computation is faster and
10217 * takes up less space.
10219 pos = 0;
10220 for (i = 0; i < cfg->num_varinfo; ++i) {
10221 MonoInst *var = cfg->varinfo [i];
10222 if (pos < i && cfg->locals_start == i)
10223 cfg->locals_start = pos;
10224 if (!(var->flags & MONO_INST_IS_DEAD)) {
10225 if (pos < i) {
10226 cfg->varinfo [pos] = cfg->varinfo [i];
10227 cfg->varinfo [pos]->inst_c0 = pos;
10228 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
10229 cfg->vars [pos].idx = pos;
10230 #if SIZEOF_REGISTER == 4
10231 if (cfg->varinfo [pos]->type == STACK_I8) {
10232 /* Modify the two component vars too */
10233 MonoInst *var1;
10235 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
10236 var1->inst_c0 = pos;
10237 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
10238 var1->inst_c0 = pos;
10240 #endif
10242 pos ++;
10245 cfg->num_varinfo = pos;
10246 if (cfg->locals_start > cfg->num_varinfo)
10247 cfg->locals_start = cfg->num_varinfo;
10251 * mono_spill_global_vars:
10253 * Generate spill code for variables which are not allocated to registers,
10254 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
10255 * code is generated which could be optimized by the local optimization passes.
10257 void
10258 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
10260 MonoBasicBlock *bb;
10261 char spec2 [16];
10262 int orig_next_vreg;
10263 guint32 *vreg_to_lvreg;
10264 guint32 *lvregs;
10265 guint32 i, lvregs_len;
10266 gboolean dest_has_lvreg = FALSE;
10267 guint32 stacktypes [128];
10268 MonoInst **live_range_start, **live_range_end;
10269 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
10271 *need_local_opts = FALSE;
10273 memset (spec2, 0, sizeof (spec2));
10275 /* FIXME: Move this function to mini.c */
10276 stacktypes ['i'] = STACK_PTR;
10277 stacktypes ['l'] = STACK_I8;
10278 stacktypes ['f'] = STACK_R8;
10279 #ifdef MONO_ARCH_SIMD_INTRINSICS
10280 stacktypes ['x'] = STACK_VTYPE;
10281 #endif
10283 #if SIZEOF_REGISTER == 4
10284 /* Create MonoInsts for longs */
10285 for (i = 0; i < cfg->num_varinfo; i++) {
10286 MonoInst *ins = cfg->varinfo [i];
10288 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
10289 switch (ins->type) {
10290 #ifdef MONO_ARCH_SOFT_FLOAT
10291 case STACK_R8:
10292 #endif
10293 case STACK_I8: {
10294 MonoInst *tree;
10296 g_assert (ins->opcode == OP_REGOFFSET);
10298 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
10299 g_assert (tree);
10300 tree->opcode = OP_REGOFFSET;
10301 tree->inst_basereg = ins->inst_basereg;
10302 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
10304 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
10305 g_assert (tree);
10306 tree->opcode = OP_REGOFFSET;
10307 tree->inst_basereg = ins->inst_basereg;
10308 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
10309 break;
10311 default:
10312 break;
10316 #endif
10318 /* FIXME: widening and truncation */
10321 * As an optimization, when a variable allocated to the stack is first loaded into
10322 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10323 * the variable again.
10325 orig_next_vreg = cfg->next_vreg;
10326 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
10327 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
10328 lvregs_len = 0;
10331 * These arrays contain the first and last instructions accessing a given
10332 * variable.
10333 * Since we emit bblocks in the same order we process them here, and we
10334 * don't split live ranges, these will precisely describe the live range of
10335 * the variable, i.e. the instruction range where a valid value can be found
10336 * in the variables location.
10338 /* FIXME: Only do this if debugging info is requested */
10339 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
10340 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
10341 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10342 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10344 /* Add spill loads/stores */
10345 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10346 MonoInst *ins;
10348 if (cfg->verbose_level > 2)
10349 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
10351 /* Clear vreg_to_lvreg array */
10352 for (i = 0; i < lvregs_len; i++)
10353 vreg_to_lvreg [lvregs [i]] = 0;
10354 lvregs_len = 0;
10356 cfg->cbb = bb;
10357 MONO_BB_FOR_EACH_INS (bb, ins) {
10358 const char *spec = INS_INFO (ins->opcode);
10359 int regtype, srcindex, sreg, tmp_reg, prev_dreg;
10360 gboolean store, no_lvreg;
10362 if (G_UNLIKELY (cfg->verbose_level > 2))
10363 mono_print_ins (ins);
10365 if (ins->opcode == OP_NOP)
10366 continue;
10369 * We handle LDADDR here as well, since it can only be decomposed
10370 * when variable addresses are known.
10372 if (ins->opcode == OP_LDADDR) {
10373 MonoInst *var = ins->inst_p0;
10375 if (var->opcode == OP_VTARG_ADDR) {
10376 /* Happens on SPARC/S390 where vtypes are passed by reference */
10377 MonoInst *vtaddr = var->inst_left;
10378 if (vtaddr->opcode == OP_REGVAR) {
10379 ins->opcode = OP_MOVE;
10380 ins->sreg1 = vtaddr->dreg;
10382 else if (var->inst_left->opcode == OP_REGOFFSET) {
10383 ins->opcode = OP_LOAD_MEMBASE;
10384 ins->inst_basereg = vtaddr->inst_basereg;
10385 ins->inst_offset = vtaddr->inst_offset;
10386 } else
10387 NOT_IMPLEMENTED;
10388 } else {
10389 g_assert (var->opcode == OP_REGOFFSET);
10391 ins->opcode = OP_ADD_IMM;
10392 ins->sreg1 = var->inst_basereg;
10393 ins->inst_imm = var->inst_offset;
10396 *need_local_opts = TRUE;
10397 spec = INS_INFO (ins->opcode);
10400 if (ins->opcode < MONO_CEE_LAST) {
10401 mono_print_ins (ins);
10402 g_assert_not_reached ();
10406 * Store opcodes have destbasereg in the dreg, but in reality, it is an
10407 * src register.
10408 * FIXME:
10410 if (MONO_IS_STORE_MEMBASE (ins)) {
10411 tmp_reg = ins->dreg;
10412 ins->dreg = ins->sreg2;
10413 ins->sreg2 = tmp_reg;
10414 store = TRUE;
10416 spec2 [MONO_INST_DEST] = ' ';
10417 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10418 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10419 spec = spec2;
10420 } else if (MONO_IS_STORE_MEMINDEX (ins))
10421 g_assert_not_reached ();
10422 else
10423 store = FALSE;
10424 no_lvreg = FALSE;
10426 if (G_UNLIKELY (cfg->verbose_level > 2))
10427 printf ("\t %.3s %d %d %d\n", spec, ins->dreg, ins->sreg1, ins->sreg2);
10429 /***************/
10430 /* DREG */
10431 /***************/
10432 regtype = spec [MONO_INST_DEST];
10433 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
10434 prev_dreg = -1;
10436 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
10437 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
10438 MonoInst *store_ins;
10439 int store_opcode;
10440 MonoInst *def_ins = ins;
10441 int dreg = ins->dreg; /* The original vreg */
10443 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
10445 if (var->opcode == OP_REGVAR) {
10446 ins->dreg = var->dreg;
10447 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
10449 * Instead of emitting a load+store, use a _membase opcode.
10451 g_assert (var->opcode == OP_REGOFFSET);
10452 if (ins->opcode == OP_MOVE) {
10453 NULLIFY_INS (ins);
10454 def_ins = NULL;
10455 } else {
10456 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
10457 ins->inst_basereg = var->inst_basereg;
10458 ins->inst_offset = var->inst_offset;
10459 ins->dreg = -1;
10461 spec = INS_INFO (ins->opcode);
10462 } else {
10463 guint32 lvreg;
10465 g_assert (var->opcode == OP_REGOFFSET);
10467 prev_dreg = ins->dreg;
10469 /* Invalidate any previous lvreg for this vreg */
10470 vreg_to_lvreg [ins->dreg] = 0;
10472 lvreg = 0;
10474 #ifdef MONO_ARCH_SOFT_FLOAT
10475 if (store_opcode == OP_STORER8_MEMBASE_REG) {
10476 regtype = 'l';
10477 store_opcode = OP_STOREI8_MEMBASE_REG;
10479 #endif
10481 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
10483 if (regtype == 'l') {
10484 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
10485 mono_bblock_insert_after_ins (bb, ins, store_ins);
10486 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
10487 mono_bblock_insert_after_ins (bb, ins, store_ins);
10488 def_ins = store_ins;
10490 else {
10491 g_assert (store_opcode != OP_STOREV_MEMBASE);
10493 /* Try to fuse the store into the instruction itself */
10494 /* FIXME: Add more instructions */
10495 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
10496 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
10497 ins->inst_imm = ins->inst_c0;
10498 ins->inst_destbasereg = var->inst_basereg;
10499 ins->inst_offset = var->inst_offset;
10500 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
10501 ins->opcode = store_opcode;
10502 ins->inst_destbasereg = var->inst_basereg;
10503 ins->inst_offset = var->inst_offset;
10505 no_lvreg = TRUE;
10507 tmp_reg = ins->dreg;
10508 ins->dreg = ins->sreg2;
10509 ins->sreg2 = tmp_reg;
10510 store = TRUE;
10512 spec2 [MONO_INST_DEST] = ' ';
10513 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10514 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10515 spec = spec2;
10516 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
10517 // FIXME: The backends expect the base reg to be in inst_basereg
10518 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
10519 ins->dreg = -1;
10520 ins->inst_basereg = var->inst_basereg;
10521 ins->inst_offset = var->inst_offset;
10522 spec = INS_INFO (ins->opcode);
10523 } else {
10524 /* printf ("INS: "); mono_print_ins (ins); */
10525 /* Create a store instruction */
10526 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
10528 /* Insert it after the instruction */
10529 mono_bblock_insert_after_ins (bb, ins, store_ins);
10531 def_ins = store_ins;
10534 * We can't assign ins->dreg to var->dreg here, since the
10535 * sregs could use it. So set a flag, and do it after
10536 * the sregs.
10538 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
10539 dest_has_lvreg = TRUE;
10544 if (def_ins && !live_range_start [dreg]) {
10545 live_range_start [dreg] = def_ins;
10546 live_range_start_bb [dreg] = bb;
10550 /************/
10551 /* SREGS */
10552 /************/
10553 for (srcindex = 0; srcindex < 2; ++srcindex) {
10554 regtype = spec [(srcindex == 0) ? MONO_INST_SRC1 : MONO_INST_SRC2];
10555 sreg = srcindex == 0 ? ins->sreg1 : ins->sreg2;
10557 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
10558 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
10559 MonoInst *var = get_vreg_to_inst (cfg, sreg);
10560 MonoInst *use_ins = ins;
10561 MonoInst *load_ins;
10562 guint32 load_opcode;
10564 if (var->opcode == OP_REGVAR) {
10565 if (srcindex == 0)
10566 ins->sreg1 = var->dreg;
10567 else
10568 ins->sreg2 = var->dreg;
10569 live_range_end [var->dreg] = use_ins;
10570 live_range_end_bb [var->dreg] = bb;
10571 continue;
10574 g_assert (var->opcode == OP_REGOFFSET);
10576 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
10578 g_assert (load_opcode != OP_LOADV_MEMBASE);
10580 if (vreg_to_lvreg [sreg]) {
10581 /* The variable is already loaded to an lvreg */
10582 if (G_UNLIKELY (cfg->verbose_level > 2))
10583 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
10584 if (srcindex == 0)
10585 ins->sreg1 = vreg_to_lvreg [sreg];
10586 else
10587 ins->sreg2 = vreg_to_lvreg [sreg];
10588 continue;
10591 /* Try to fuse the load into the instruction */
10592 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
10593 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
10594 ins->inst_basereg = var->inst_basereg;
10595 ins->inst_offset = var->inst_offset;
10596 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
10597 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
10598 ins->sreg2 = var->inst_basereg;
10599 ins->inst_offset = var->inst_offset;
10600 } else {
10601 if (MONO_IS_REAL_MOVE (ins)) {
10602 ins->opcode = OP_NOP;
10603 sreg = ins->dreg;
10604 } else {
10605 //printf ("%d ", srcindex); mono_print_ins (ins);
10607 sreg = alloc_dreg (cfg, stacktypes [regtype]);
10609 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
10610 if (var->dreg == prev_dreg) {
10612 * sreg refers to the value loaded by the load
10613 * emitted below, but we need to use ins->dreg
10614 * since it refers to the store emitted earlier.
10616 sreg = ins->dreg;
10618 vreg_to_lvreg [var->dreg] = sreg;
10619 g_assert (lvregs_len < 1024);
10620 lvregs [lvregs_len ++] = var->dreg;
10624 if (srcindex == 0)
10625 ins->sreg1 = sreg;
10626 else
10627 ins->sreg2 = sreg;
10629 if (regtype == 'l') {
10630 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
10631 mono_bblock_insert_before_ins (bb, ins, load_ins);
10632 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
10633 mono_bblock_insert_before_ins (bb, ins, load_ins);
10634 use_ins = load_ins;
10636 else {
10637 #if SIZEOF_REGISTER == 4
10638 g_assert (load_opcode != OP_LOADI8_MEMBASE);
10639 #endif
10640 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
10641 mono_bblock_insert_before_ins (bb, ins, load_ins);
10642 use_ins = load_ins;
10646 if (var->dreg < orig_next_vreg) {
10647 live_range_end [var->dreg] = use_ins;
10648 live_range_end_bb [var->dreg] = bb;
10653 if (dest_has_lvreg) {
10654 vreg_to_lvreg [prev_dreg] = ins->dreg;
10655 g_assert (lvregs_len < 1024);
10656 lvregs [lvregs_len ++] = prev_dreg;
10657 dest_has_lvreg = FALSE;
10660 if (store) {
10661 tmp_reg = ins->dreg;
10662 ins->dreg = ins->sreg2;
10663 ins->sreg2 = tmp_reg;
10666 if (MONO_IS_CALL (ins)) {
10667 /* Clear vreg_to_lvreg array */
10668 for (i = 0; i < lvregs_len; i++)
10669 vreg_to_lvreg [lvregs [i]] = 0;
10670 lvregs_len = 0;
10673 if (cfg->verbose_level > 2)
10674 mono_print_ins_index (1, ins);
10678 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
10680 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
10681 * by storing the current native offset into MonoMethodVar->live_range_start/end.
10683 for (i = 0; i < cfg->num_varinfo; ++i) {
10684 int vreg = MONO_VARINFO (cfg, i)->vreg;
10685 MonoInst *ins;
10687 if (live_range_start [vreg]) {
10688 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
10689 ins->inst_c0 = i;
10690 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
10692 if (live_range_end [vreg]) {
10693 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
10694 ins->inst_c0 = i;
10695 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
10698 #endif
10700 g_free (live_range_start);
10701 g_free (live_range_end);
10702 g_free (live_range_start_bb);
10703 g_free (live_range_end_bb);
10707 * FIXME:
10708 * - use 'iadd' instead of 'int_add'
10709 * - handling ovf opcodes: decompose in method_to_ir.
10710 * - unify iregs/fregs
10711 * -> partly done, the missing parts are:
10712 * - a more complete unification would involve unifying the hregs as well, so
10713 * code wouldn't need if (fp) all over the place. but that would mean the hregs
10714 * would no longer map to the machine hregs, so the code generators would need to
10715 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
10716 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
10717 * fp/non-fp branches speeds it up by about 15%.
10718 * - use sext/zext opcodes instead of shifts
10719 * - add OP_ICALL
10720 * - get rid of TEMPLOADs if possible and use vregs instead
10721 * - clean up usage of OP_P/OP_ opcodes
10722 * - cleanup usage of DUMMY_USE
10723 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
10724 * stack
10725 * - set the stack type and allocate a dreg in the EMIT_NEW macros
10726 * - get rid of all the <foo>2 stuff when the new JIT is ready.
10727 * - make sure handle_stack_args () is called before the branch is emitted
10728 * - when the new IR is done, get rid of all unused stuff
10729 * - COMPARE/BEQ as separate instructions or unify them ?
10730 * - keeping them separate allows specialized compare instructions like
10731 * compare_imm, compare_membase
10732 * - most back ends unify fp compare+branch, fp compare+ceq
10733 * - integrate mono_save_args into inline_method
10734 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
10735 * - handle long shift opts on 32 bit platforms somehow: they require
10736 * 3 sregs (2 for arg1 and 1 for arg2)
10737 * - make byref a 'normal' type.
10738 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
10739 * variable if needed.
10740 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
10741 * like inline_method.
10742 * - remove inlining restrictions
10743 * - fix LNEG and enable cfold of INEG
10744 * - generalize x86 optimizations like ldelema as a peephole optimization
10745 * - add store_mem_imm for amd64
10746 * - optimize the loading of the interruption flag in the managed->native wrappers
10747 * - avoid special handling of OP_NOP in passes
10748 * - move code inserting instructions into one function/macro.
10749 * - try a coalescing phase after liveness analysis
10750 * - add float -> vreg conversion + local optimizations on !x86
10751 * - figure out how to handle decomposed branches during optimizations, ie.
10752 * compare+branch, op_jump_table+op_br etc.
10753 * - promote RuntimeXHandles to vregs
10754 * - vtype cleanups:
10755 * - add a NEW_VARLOADA_VREG macro
10756 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
10757 * accessing vtype fields.
10758 * - get rid of I8CONST on 64 bit platforms
10759 * - dealing with the increase in code size due to branches created during opcode
10760 * decomposition:
10761 * - use extended basic blocks
10762 * - all parts of the JIT
10763 * - handle_global_vregs () && local regalloc
10764 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
10765 * - sources of increase in code size:
10766 * - vtypes
10767 * - long compares
10768 * - isinst and castclass
10769 * - lvregs not allocated to global registers even if used multiple times
10770 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
10771 * meaningful.
10772 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
10773 * - add all micro optimizations from the old JIT
10774 * - put tree optimizations into the deadce pass
10775 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
10776 * specific function.
10777 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
10778 * fcompare + branchCC.
10779 * - create a helper function for allocating a stack slot, taking into account
10780 * MONO_CFG_HAS_SPILLUP.
10781 * - merge r68207.
10782 * - merge the ia64 switch changes.
10783 * - optimize mono_regstate2_alloc_int/float.
10784 * - fix the pessimistic handling of variables accessed in exception handler blocks.
10785 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
10786 * parts of the tree could be separated by other instructions, killing the tree
10787 * arguments, or stores killing loads etc. Also, should we fold loads into other
10788 * instructions if the result of the load is used multiple times ?
10789 * - make the REM_IMM optimization in mini-x86.c arch-independent.
10790 * - LAST MERGE: 108395.
10791 * - when returning vtypes in registers, generate IR and append it to the end of the
10792 * last bb instead of doing it in the epilog.
10793 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
10798 NOTES
10799 -----
10801 - When to decompose opcodes:
10802 - earlier: this makes some optimizations hard to implement, since the low level IR
10803 no longer contains the necessary information. But it is easier to do.
10804 - later: harder to implement, enables more optimizations.
10805 - Branches inside bblocks:
10806 - created when decomposing complex opcodes.
10807 - branches to another bblock: harmless, but not tracked by the branch
10808 optimizations, so need to branch to a label at the start of the bblock.
10809 - branches to inside the same bblock: very problematic, trips up the local
10810 reg allocator. Can be fixed by splitting the current bblock, but that is a
10811 complex operation, since some local vregs can become global vregs etc.
10812 - Local/global vregs:
10813 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
10814 local register allocator.
10815 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
10816 structure, created by mono_create_var (). Assigned to hregs or the stack by
10817 the global register allocator.
10818 - When to do optimizations like alu->alu_imm:
10819 - earlier -> saves work later on since the IR will be smaller/simpler
10820 - later -> can work on more instructions
10821 - Handling of valuetypes:
10822 - When a vtype is pushed on the stack, a new temporary is created, an
10823 instruction computing its address (LDADDR) is emitted and pushed on
10824 the stack. Need to optimize cases when the vtype is used immediately as in
10825 argument passing, stloc etc.
10826 - Instead of the to_end stuff in the old JIT, simply call the function handling
10827 the values on the stack before emitting the last instruction of the bb.
10830 #endif /* DISABLE_JIT */