/*
 * tramp-amd64.c: JIT trampoline code for amd64
 *
 * Authors:
 *   Dietmar Maurer (dietmar@ximian.com)
 *   Zoltan Varga (vargaz@gmail.com)
 *
 * (C) 2001 Ximian, Inc.
 */
#include <mono/metadata/appdomain.h>
#include <mono/metadata/marshal.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/mono-debug-debugger.h>
#include <mono/metadata/monitor.h>
#include <mono/arch/amd64/amd64-codegen.h>

#ifdef HAVE_VALGRIND_MEMCHECK_H
#include <valgrind/memcheck.h>
#endif

#include "mini.h"
#include "mini-amd64.h"
#define IS_REX(inst) (((inst) >= 0x40) && ((inst) <= 0x4f))
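/*
 * Trampoline which does nothing, used to replace class init call sites once the
 * class has been initialized (see mono_arch_get_nullified_class_init_trampoline ()
 * below).
 */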
static guint8 *nullified_class_init_trampoline;
/*
 * mono_arch_get_unbox_trampoline:
 * @gsctx: the generic sharing context
 * @m: method pointer
 * @addr: pointer to native code for @m
 *
 * When value type methods are called through the vtable we need to unbox the
 * 'this' argument. This method returns a pointer to a trampoline which does
 * unboxing before calling the method.
 */
gpointer
mono_arch_get_unbox_trampoline (MonoGenericSharingContext *gsctx, MonoMethod *m, gpointer addr)
{
	guint8 *code, *start;
	int this_reg;
	MonoDomain *domain = mono_domain_get ();

	this_reg = mono_arch_get_this_arg_reg (mono_method_signature (m), gsctx, NULL);

	start = code = mono_domain_code_reserve (domain, 20);

	amd64_alu_reg_imm (code, X86_ADD, this_reg, sizeof (MonoObject));
	/* FIXME: Optimize this */
	amd64_mov_reg_imm (code, AMD64_RAX, addr);
	amd64_jump_reg (code, AMD64_RAX);
	g_assert ((code - start) < 20);

	mono_arch_flush_icache (start, code - start);

	return start;
}
/*
 * mono_arch_patch_callsite:
 *
 * Patch the callsite whose address is given by ORIG_CODE so it calls ADDR. ORIG_CODE
 * points to the pc right after the call.
 */
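/*
 * Three call sequences can appear at a patched call site:
 *   mov r11, <imm64>; call *%r11   (code [-13] == 0x49 && code [-12] == 0xbb)
 *   call <rel32>                   (code [-5] == 0xe8)
 *   call *<offset>(%rip)           (code [-7..-5] == 0x41 0xff 0x15, calls through a GOT slot)
 * The byte checks below distinguish between them.
 */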
void
mono_arch_patch_callsite (guint8 *method_start, guint8 *orig_code, guint8 *addr)
{
	guint8 *code;
	guint8 buf [16];
	gboolean can_write = mono_breakpoint_clean_code (method_start, orig_code, 14, buf, sizeof (buf));

	code = buf + 14;

	if (((code [-13] == 0x49) && (code [-12] == 0xbb)) || (code [-5] == 0xe8)) {
		if (code [-5] != 0xe8) {
			if (can_write) {
				InterlockedExchangePointer ((gpointer*)(orig_code - 11), addr);
#ifdef HAVE_VALGRIND_MEMCHECK_H
				VALGRIND_DISCARD_TRANSLATIONS (orig_code - 11, sizeof (gpointer));
#endif
			}
		} else {
			if ((((guint64)(addr)) >> 32) != 0) {
				/* Print some diagnostics */
				MonoJitInfo *ji = mono_jit_info_table_find (mono_domain_get (), (char*)orig_code);
				fprintf (stderr, "At %s, offset 0x%zx\n", mono_method_full_name (ji->method, TRUE), (guint8*)orig_code - (guint8*)ji->code_start);
				fprintf (stderr, "Addr: %p\n", addr);
				ji = mono_jit_info_table_find (mono_domain_get (), (char*)addr);
				fprintf (stderr, "Callee: %s\n", mono_method_full_name (ji->method, TRUE));
				g_assert_not_reached ();
			}
			g_assert ((((guint64)(orig_code)) >> 32) == 0);
			if (can_write) {
				InterlockedExchange ((gint32*)(orig_code - 4), ((gint64)addr - (gint64)orig_code));
#ifdef HAVE_VALGRIND_MEMCHECK_H
				VALGRIND_DISCARD_TRANSLATIONS (orig_code - 5, 4);
#endif
			}
		}
	}
	else if ((code [-7] == 0x41) && (code [-6] == 0xff) && (code [-5] == 0x15)) {
		/* call *<OFFSET>(%rip) */
		gpointer *got_entry = (gpointer*)((guint8*)orig_code + (*(guint32*)(orig_code - 4)));
		if (can_write) {
			InterlockedExchangePointer (got_entry, addr);
#ifdef HAVE_VALGRIND_MEMCHECK_H
			VALGRIND_DISCARD_TRANSLATIONS (orig_code - 5, sizeof (gpointer));
#endif
		}
	}
}
void
mono_arch_patch_plt_entry (guint8 *code, guint8 *addr)
{
	gint32 disp;
	gpointer *plt_jump_table_entry;

	/* A PLT entry: jmp *<DISP>(%rip) */
	g_assert (code [0] == 0xff);
	g_assert (code [1] == 0x25);

	disp = *(gint32*)(code + 2);

	plt_jump_table_entry = (gpointer*)(code + 6 + disp);

	InterlockedExchangePointer (plt_jump_table_entry, addr);
}
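/*
 * mono_arch_nullify_class_init_trampoline:
 *
 * Rewrite the class init call site at CODE so that later executions go to the
 * nullified (no-op) trampoline instead of the class init trampoline.
 */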
void
mono_arch_nullify_class_init_trampoline (guint8 *code, gssize *regs)
{
	guint8 buf [16];
	gboolean can_write = mono_breakpoint_clean_code (NULL, code, 7, buf, sizeof (buf));

	if (!can_write)
		return;

	code = buf + 7;

	/*
	 * A given byte sequence can match more than one case here, so we have to be
	 * really careful about the ordering of the cases. Longer sequences are
	 * checked first.
	 */
	if ((code [-4] == 0x41) && (code [-3] == 0xff) && (code [-2] == 0x15)) {
		gpointer *vtable_slot;

		/* call *<OFFSET>(%rip) */
		vtable_slot = mono_arch_get_vcall_slot_addr (code + 3, (gpointer*)regs);
		g_assert (vtable_slot);

		*vtable_slot = nullified_class_init_trampoline;
	} else if (code [-2] == 0xe8) {
		/* call <TARGET> */
		//guint8 *buf = code - 2;

		/*
		 * It would be better to replace the call with nops, but that doesn't seem
		 * to work on SMP machines even when the whole call is inside a cache line.
		 * Patching the call address seems to work.
		 */
		mono_arch_patch_callsite (code - 2, code - 2 + 5, nullified_class_init_trampoline);
	} else if ((code [0] == 0x41) && (code [1] == 0xff)) {
		/* call <REG> */
		/* happens on machines without MAP_32BIT like freebsd */
		/* amd64_set_reg_template is 10 bytes long */
		guint8 *buf = code - 10;

		/* FIXME: Make this thread safe */
		/* Padding code suggested by the AMD64 Opt Manual */
	} else if (code [0] == 0x90 || code [0] == 0xeb || code [0] == 0x66) {
		/* Already changed by another thread */
		;
	} else {
		printf ("Invalid trampoline sequence: %x %x %x %x %x %x %x\n", code [0], code [1], code [2], code [3],
			code [4], code [5], code [6]);
		g_assert_not_reached ();
	}
}
void
mono_arch_nullify_plt_entry (guint8 *code)
{
	if (mono_aot_only && !nullified_class_init_trampoline)
		nullified_class_init_trampoline = mono_aot_get_named_code ("nullified_class_init_trampoline");

	mono_arch_patch_plt_entry (code, nullified_class_init_trampoline);
}
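/*
 * mono_arch_create_trampoline_code:
 *
 * Create the generic trampoline for TRAMP_TYPE and register xdebug/unwind info
 * for it, freeing the unwind op list afterwards.
 */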
guchar*
mono_arch_create_trampoline_code (MonoTrampolineType tramp_type)
{
	guint8 *code;
	guint32 code_size;
	MonoJumpInfo *ji;
	GSList *unwind_ops, *l;

	code = mono_arch_create_trampoline_code_full (tramp_type, &code_size, &ji, &unwind_ops, FALSE);

	mono_save_trampoline_xdebug_info ("<generic_trampoline>", code, code_size, unwind_ops);

	for (l = unwind_ops; l; l = l->next)
		g_free (l->data);
	g_slist_free (unwind_ops);

	return code;
}
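/*
 * The generic trampoline below saves all registers, the FP registers and an LMF
 * on its own frame, calls the C trampoline function for TRAMP_TYPE with the
 * saved register state, then restores the registers and either returns the
 * result or jumps to the code returned by the trampoline function.
 */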
guchar*
mono_arch_create_trampoline_code_full (MonoTrampolineType tramp_type, guint32 *code_size, MonoJumpInfo **ji, GSList **out_unwind_ops, gboolean aot)
{
	guint8 *buf, *code, *tramp, *br [2], *r11_save_code, *after_r11_save_code;
	int i, lmf_offset, offset, res_offset, arg_offset, rax_offset, tramp_offset, saved_regs_offset;
	int saved_fpregs_offset, rbp_offset, framesize, orig_rsp_to_rbp_offset, cfa_offset;
	gboolean has_caller;
	GSList *unwind_ops = NULL;

	if (tramp_type == MONO_TRAMPOLINE_JUMP)
		has_caller = FALSE;
	else
		has_caller = TRUE;

	*ji = NULL;

	code = buf = mono_global_codeman_reserve (538);
	framesize = 538 + sizeof (MonoLMF);
	framesize = (framesize + (MONO_ARCH_FRAME_ALIGNMENT - 1)) & ~ (MONO_ARCH_FRAME_ALIGNMENT - 1);

	orig_rsp_to_rbp_offset = 0;
	r11_save_code = code;
	/* Reserve 5 bytes for the mov_membase_reg to save R11 */
	code += 5;
	after_r11_save_code = code;

	// CFA = sp + 16 (the trampoline address is on the stack)
	cfa_offset = 16;
	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, AMD64_RSP, 16);
	// IP saved at CFA - 8
	mono_add_unwind_op_offset (unwind_ops, code, buf, AMD64_RIP, -8);

	/* Pop the return address off the stack */
	amd64_pop_reg (code, AMD64_R11);
	orig_rsp_to_rbp_offset += 8;

	cfa_offset -= 8;
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);

	/*
	 * Allocate a new stack frame
	 */
	amd64_push_reg (code, AMD64_RBP);
	cfa_offset += 8;
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
	mono_add_unwind_op_offset (unwind_ops, code, buf, AMD64_RBP, - cfa_offset);

	orig_rsp_to_rbp_offset -= 8;
	amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);
	mono_add_unwind_op_def_cfa_reg (unwind_ops, code, buf, AMD64_RBP);
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, framesize);
	/* Compute the frame offsets of the local slots, relative to RBP */
	offset = 0;
	rbp_offset = - offset;

	offset += 8;
	rax_offset = - offset;

	offset += 8;
	tramp_offset = - offset;

	offset += 8;
	arg_offset = - offset;

	/* Compute the trampoline address from the return address */
	if (aot) {
		/* 7 = length of call *<offset>(rip) */
		amd64_alu_reg_imm (code, X86_SUB, AMD64_R11, 7);
	} else {
		/* 5 = length of amd64_call_membase () */
		amd64_alu_reg_imm (code, X86_SUB, AMD64_R11, 5);
	}
	amd64_mov_membase_reg (code, AMD64_RBP, tramp_offset, AMD64_R11, 8);

	offset += 8;
	res_offset = - offset;
	/* Save all registers */

	offset += AMD64_NREG * 8;
	saved_regs_offset = - offset;
	for (i = 0; i < AMD64_NREG; ++i) {
		if (i == AMD64_RBP) {
			/* RAX is already saved */
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RBP, rbp_offset, 8);
			amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * 8), AMD64_RAX, 8);
		} else if (i != AMD64_R11) {
			amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * 8), i, 8);
		} else {
			/* We have to save R11 right at the start of
			   the trampoline code because it's used as a
			   scratch register */
			amd64_mov_membase_reg (r11_save_code, AMD64_RSP, saved_regs_offset + orig_rsp_to_rbp_offset + (i * 8), i, 8);
			g_assert (r11_save_code == after_r11_save_code);
		}
	}

	offset += 8 * 8;
	saved_fpregs_offset = - offset;
	for (i = 0; i < 8; ++i)
		amd64_movsd_membase_reg (code, AMD64_RBP, saved_fpregs_offset + (i * 8), i);
	if (tramp_type != MONO_TRAMPOLINE_GENERIC_CLASS_INIT &&
		tramp_type != MONO_TRAMPOLINE_MONITOR_ENTER &&
		tramp_type != MONO_TRAMPOLINE_MONITOR_EXIT) {
		/* Obtain the trampoline argument which is encoded in the instruction stream */
		if (aot) {
			/* Load the GOT offset */
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, tramp_offset, 8);
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, 7, 4);
			/* Compute the address of the GOT slot */
			amd64_alu_reg_reg_size (code, X86_ADD, AMD64_R11, AMD64_RAX, 8);
			/* Load the value */
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, 8);
		} else {
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, tramp_offset, 8);
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, 5, 1);
			amd64_widen_reg (code, AMD64_RAX, AMD64_RAX, TRUE, FALSE);
			amd64_alu_reg_imm_size (code, X86_CMP, AMD64_RAX, 4, 1);
			br [0] = code;
			x86_branch8 (code, X86_CC_NE, 6, FALSE);
			/* 32 bit immediate */
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 6, 4);
			br [1] = code;
			x86_jump8 (code, 10);
			/* 64 bit immediate */
			mono_amd64_patch (br [0], code);
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 6, 8);
			mono_amd64_patch (br [1], code);
		}
		amd64_mov_membase_reg (code, AMD64_RBP, arg_offset, AMD64_R11, 8);
	} else {
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, saved_regs_offset + (MONO_AMD64_ARG_REG1 * 8), 8);
		amd64_mov_membase_reg (code, AMD64_RBP, arg_offset, AMD64_R11, 8);
	}
	/*
	 * Save the LMF
	 */
	offset += sizeof (MonoLMF);
	lmf_offset = - offset;

	/* Save ip */
	if (has_caller)
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, 8, 8);
	else
		amd64_mov_reg_imm (code, AMD64_R11, 0);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rip), AMD64_R11, 8);
	/* Save fp */
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, framesize, 8);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rbp), AMD64_R11, 8);
	/* Save sp */
	amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, 8);
	amd64_alu_reg_imm (code, X86_ADD, AMD64_R11, framesize + 16);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rsp), AMD64_R11, 8);
	/* Save method */
	if (tramp_type == MONO_TRAMPOLINE_JIT || tramp_type == MONO_TRAMPOLINE_JUMP) {
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, arg_offset, 8);
		amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method), AMD64_R11, 8);
	} else {
		amd64_mov_membase_imm (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method), 0, 8);
	}
	/* Save callee saved regs */
#ifdef PLATFORM_WIN32
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rdi), AMD64_RDI, 8);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rsi), AMD64_RSI, 8);
#endif
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rbx), AMD64_RBX, 8);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r12), AMD64_R12, 8);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r13), AMD64_R13, 8);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r14), AMD64_R14, 8);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r15), AMD64_R15, 8);

	if (aot) {
		*ji = mono_patch_info_list_prepend (*ji, code - buf, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_get_lmf_addr");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, mono_get_lmf_addr);
	}
	amd64_call_reg (code, AMD64_R11);

	/* Save lmf_addr */
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), AMD64_RAX, 8);
	/* Save previous_lmf */
	/* Set the lowest bit to 1 to signal that this LMF has the ip field set */
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RAX, 0, 8);
	amd64_alu_reg_imm_size (code, X86_ADD, AMD64_R11, 1, 8);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), AMD64_R11, 8);
	/* Set new lmf */
	amd64_lea_membase (code, AMD64_R11, AMD64_RBP, lmf_offset);
	amd64_mov_membase_reg (code, AMD64_RAX, 0, AMD64_R11, 8);
	/*
	 * Call the trampoline function.
	 */
	/* Arg1 is the pointer to the saved registers */
	amd64_lea_membase (code, AMD64_ARG_REG1, AMD64_RBP, saved_regs_offset);

	/* Arg2 is the address of the calling code */
	if (has_caller)
		amd64_mov_reg_membase (code, AMD64_ARG_REG2, AMD64_RBP, 8, 8);
	else
		amd64_mov_reg_imm (code, AMD64_ARG_REG2, 0);

	/* Arg3 is the method/vtable ptr */
	amd64_mov_reg_membase (code, AMD64_ARG_REG3, AMD64_RBP, arg_offset, 8);

	/* Arg4 is the trampoline address */
	amd64_mov_reg_membase (code, AMD64_ARG_REG4, AMD64_RBP, tramp_offset, 8);

	if (aot) {
		char *icall_name = g_strdup_printf ("trampoline_func_%d", tramp_type);
		*ji = mono_patch_info_list_prepend (*ji, code - buf, MONO_PATCH_INFO_JIT_ICALL_ADDR, icall_name);
		amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RIP, 0, 8);
	} else {
		tramp = (guint8*)mono_get_trampoline_func (tramp_type);
		amd64_mov_reg_imm (code, AMD64_RAX, tramp);
	}
	amd64_call_reg (code, AMD64_RAX);

	/* Check for thread interruption */
	/* This is not perf critical code so no need to check the interrupt flag */
	/*
	 * Have to call the _force_ variant, since there could be a protected wrapper on the top of the stack.
	 */
	amd64_mov_membase_reg (code, AMD64_RBP, res_offset, AMD64_RAX, 8);
	if (aot) {
		*ji = mono_patch_info_list_prepend (*ji, code - buf, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_thread_force_interruption_checkpoint");
		amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_RAX, (guint8*)mono_thread_force_interruption_checkpoint);
	}
	amd64_call_reg (code, AMD64_RAX);
	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RBP, res_offset, 8);
	/* Restore the LMF */
	amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), 8);
	amd64_alu_reg_imm_size (code, X86_SUB, AMD64_RCX, 1, 8);
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), 8);
	amd64_mov_membase_reg (code, AMD64_R11, 0, AMD64_RCX, 8);

	/*
	 * Save rax to the stack, after the leave instruction, this will become part of
	 * the red zone.
	 */
	amd64_mov_membase_reg (code, AMD64_RBP, rax_offset, AMD64_RAX, 8);

	/* Restore argument registers, r10 (needed to pass rgctx to
	   static shared generic methods), r11 (imt register for
	   interface calls), and rax (needed for direct calls to C vararg functions). */
	for (i = 0; i < AMD64_NREG; ++i)
		if (AMD64_IS_ARGUMENT_REG (i) || i == AMD64_R10 || i == AMD64_R11 || i == AMD64_RAX)
			amd64_mov_reg_membase (code, i, AMD64_RBP, saved_regs_offset + (i * 8), 8);

	for (i = 0; i < 8; ++i)
		amd64_movsd_reg_membase (code, i, AMD64_RBP, saved_fpregs_offset + (i * 8));

	/* Restore stack */
	amd64_leave (code);

	if (MONO_TRAMPOLINE_TYPE_MUST_RETURN (tramp_type)) {
		/* Load result */
		amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, rax_offset - 0x8, 8);
		amd64_ret (code);
	} else {
		/* call the compiled method using the saved rax */
		amd64_jump_membase (code, AMD64_RSP, rax_offset - 0x8);
	}

	g_assert ((code - buf) <= 538);

	mono_arch_flush_icache (buf, code - buf);

	*code_size = code - buf;

	if (tramp_type == MONO_TRAMPOLINE_CLASS_INIT) {
		guint32 code_len;

		/* Initialize the nullified class init trampoline used in the AOT case */
		nullified_class_init_trampoline = mono_arch_get_nullified_class_init_trampoline (&code_len);
	}

	*out_unwind_ops = unwind_ops;

	return buf;
}
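/*
 * mono_arch_get_nullified_class_init_trampoline:
 *
 * Return a trampoline which does nothing, only returns.
 */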
gpointer
mono_arch_get_nullified_class_init_trampoline (guint32 *code_len)
{
	guint8 *code, *buf;

	code = buf = mono_global_codeman_reserve (16);
	amd64_ret (code);

	mono_arch_flush_icache (buf, code - buf);

	*code_len = code - buf;

	return buf;
}
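/*
 * A specific trampoline consists of a call to the generic trampoline followed by
 * the trampoline argument embedded directly in the instruction stream; the
 * generic trampoline recovers the argument from its return address.
 */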
gpointer
mono_arch_create_specific_trampoline (gpointer arg1, MonoTrampolineType tramp_type, MonoDomain *domain, guint32 *code_len)
{
	guint8 *code, *buf, *tramp;
	int size;

	tramp = mono_get_trampoline_code (tramp_type);

	if ((((guint64)arg1) >> 32) == 0)
		size = 5 + 1 + 4;
	else
		size = 5 + 1 + 8;

	code = buf = mono_domain_code_reserve_align (domain, size, 1);

	amd64_call_code (code, tramp);
	/* The trampoline code will obtain the argument from the instruction stream */
	if ((((guint64)arg1) >> 32) == 0) {
		*code = 0x4;
		*(guint32*)(code + 1) = (gint64)arg1;
		code += 5;
	} else {
		*code = 0x8;
		*(guint64*)(code + 1) = (gint64)arg1;
		code += 9;
	}

	g_assert ((code - buf) <= size);

	if (code_len)
		*code_len = size;

	mono_arch_flush_icache (buf, size);

	return buf;
}
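/*
 * The rgctx lazy fetch trampoline walks the (M)RGCTX array chain looking for
 * SLOT; if any pointer on the way (or the slot itself) is still NULL it jumps
 * to the RGCTX_LAZY_FETCH specific trampoline, which performs the slow-path
 * fetch.
 */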
gpointer
mono_arch_create_rgctx_lazy_fetch_trampoline (guint32 slot)
{
	guint32 code_size;
	MonoJumpInfo *ji;

	return mono_arch_create_rgctx_lazy_fetch_trampoline_full (slot, &code_size, &ji, FALSE);
}
gpointer
mono_arch_create_rgctx_lazy_fetch_trampoline_full (guint32 slot, guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
{
	guint8 *tramp;
	guint8 *code, *buf;
	guint8 **rgctx_null_jumps;
	int tramp_size;
	int depth, index;
	int i;
	gboolean mrgctx;

	*ji = NULL;

	mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
	index = MONO_RGCTX_SLOT_INDEX (slot);
	if (mrgctx)
		index += sizeof (MonoMethodRuntimeGenericContext) / sizeof (gpointer);
	for (depth = 0; ; ++depth) {
		int size = mono_class_rgctx_get_array_size (depth, mrgctx);

		if (index < size - 1)
			break;
		index -= size - 1;
	}

	tramp_size = 64 + 8 * depth;

	code = buf = mono_global_codeman_reserve (tramp_size);

	rgctx_null_jumps = g_malloc (sizeof (guint8*) * (depth + 2));

	if (mrgctx) {
		/* get mrgctx ptr */
		amd64_mov_reg_reg (code, AMD64_RAX, AMD64_ARG_REG1, 8);
	} else {
		/* load rgctx ptr from vtable */
		amd64_mov_reg_membase (code, AMD64_RAX, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoVTable, runtime_generic_context), 8);
		/* is the rgctx ptr null? */
		amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
		/* if yes, jump to actual trampoline */
		rgctx_null_jumps [0] = code;
		amd64_branch8 (code, X86_CC_Z, -1, 1);
	}

	for (i = 0; i < depth; ++i) {
		/* load ptr to next array */
		if (mrgctx && i == 0)
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RAX, sizeof (MonoMethodRuntimeGenericContext), 8);
		else
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RAX, 0, 8);
		/* is the ptr null? */
		amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
		/* if yes, jump to actual trampoline */
		rgctx_null_jumps [i + 1] = code;
		amd64_branch8 (code, X86_CC_Z, -1, 1);
	}

	/* fetch slot */
	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RAX, sizeof (gpointer) * (index + 1), 8);
	/* is the slot null? */
	amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
	/* if yes, jump to actual trampoline */
	rgctx_null_jumps [depth + 1] = code;
	amd64_branch8 (code, X86_CC_Z, -1, 1);
	/* otherwise return */
	amd64_ret (code);

	for (i = mrgctx ? 1 : 0; i <= depth + 1; ++i)
		x86_patch (rgctx_null_jumps [i], code);

	g_free (rgctx_null_jumps);

	/* move the rgctx pointer to the VTABLE register */
	amd64_mov_reg_reg (code, MONO_ARCH_VTABLE_REG, AMD64_ARG_REG1, 8);

	if (aot) {
		*ji = mono_patch_info_list_prepend (*ji, code - buf, MONO_PATCH_INFO_JIT_ICALL_ADDR, g_strdup_printf ("specific_trampoline_lazy_fetch_%u", slot));
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
		amd64_jump_reg (code, AMD64_R11);
	} else {
		tramp = mono_arch_create_specific_trampoline (GUINT_TO_POINTER (slot), MONO_TRAMPOLINE_RGCTX_LAZY_FETCH, mono_get_root_domain (), NULL);

		/* jump to the actual trampoline */
		amd64_jump_code (code, tramp);
	}

	mono_arch_flush_icache (buf, code - buf);

	g_assert (code - buf <= tramp_size);

	*code_size = code - buf;

	return buf;
}
gpointer
mono_arch_create_generic_class_init_trampoline (void)
{
	guint32 code_size;
	MonoJumpInfo *ji;

	return mono_arch_create_generic_class_init_trampoline_full (&code_size, &ji, FALSE);
}
gpointer
mono_arch_create_generic_class_init_trampoline_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
{
	guint8 *tramp;
	guint8 *code, *buf;
	guint8 *jump;
	int tramp_size;
	static int byte_offset = -1;
	static guint8 bitmask;

	*ji = NULL;

	tramp_size = 64;

	code = buf = mono_global_codeman_reserve (tramp_size);

	if (byte_offset < 0)
		mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);

	/* Check the initialized bit of the vtable */
	amd64_test_membase_imm_size (code, MONO_AMD64_ARG_REG1, byte_offset, bitmask, 1);
	/* if it is not set, jump to the actual trampoline */
	jump = code;
	amd64_branch8 (code, X86_CC_Z, -1, 1);
	/* otherwise the class is already initialized, just return */
	amd64_ret (code);

	x86_patch (jump, code);

	if (aot) {
		*ji = mono_patch_info_list_prepend (*ji, code - buf, MONO_PATCH_INFO_JIT_ICALL_ADDR, "specific_trampoline_generic_class_init");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
		amd64_jump_reg (code, AMD64_R11);
	} else {
		tramp = mono_arch_create_specific_trampoline (NULL, MONO_TRAMPOLINE_GENERIC_CLASS_INIT, mono_get_root_domain (), NULL);

		/* jump to the actual trampoline */
		amd64_jump_code (code, tramp);
	}

	mono_arch_flush_icache (buf, code - buf);

	g_assert (code - buf <= tramp_size);

	*code_size = code - buf;

	return buf;
}
#ifdef MONO_ARCH_MONITOR_OBJECT_REG
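/*
 * The monitor enter/exit trampolines below implement the uncontended
 * Monitor.Enter/Exit fast paths in generated code; any case they cannot handle
 * falls through to the corresponding specific trampoline.
 */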
gpointer
mono_arch_create_monitor_enter_trampoline (void)
{
	guint32 code_size;
	MonoJumpInfo *ji;

	return mono_arch_create_monitor_enter_trampoline_full (&code_size, &ji, FALSE);
}
gpointer
mono_arch_create_monitor_enter_trampoline_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
{
	guint8 *tramp;
	guint8 *code, *buf;
	guint8 *jump_obj_null, *jump_sync_null, *jump_cmpxchg_failed, *jump_other_owner, *jump_tid;
	int tramp_size;
	int owner_offset, nest_offset, dummy;

	*ji = NULL;

	g_assert (MONO_ARCH_MONITOR_OBJECT_REG == AMD64_RDI);

	mono_monitor_threads_sync_members_offset (&owner_offset, &nest_offset, &dummy);
	g_assert (MONO_THREADS_SYNC_MEMBER_SIZE (owner_offset) == sizeof (gpointer));
	g_assert (MONO_THREADS_SYNC_MEMBER_SIZE (nest_offset) == sizeof (guint32));
	owner_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (owner_offset);
	nest_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (nest_offset);

	tramp_size = 96;

	code = buf = mono_global_codeman_reserve (tramp_size);

	if (mono_thread_get_tls_offset () != -1) {
		/* MonoObject* obj is in RDI */
		/* is obj null? */
		amd64_test_reg_reg (code, AMD64_RDI, AMD64_RDI);
		/* if yes, jump to actual trampoline */
		jump_obj_null = code;
		amd64_branch8 (code, X86_CC_Z, -1, 1);

		/* load obj->synchronization to RCX */
		amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RDI, G_STRUCT_OFFSET (MonoObject, synchronisation), 8);
		/* is synchronization null? */
		amd64_test_reg_reg (code, AMD64_RCX, AMD64_RCX);
		/* if yes, jump to actual trampoline */
		jump_sync_null = code;
		amd64_branch8 (code, X86_CC_Z, -1, 1);

		/* load MonoThread* into RDX */
		code = mono_amd64_emit_tls_get (code, AMD64_RDX, mono_thread_get_tls_offset ());
		/* load TID into RDX */
		amd64_mov_reg_membase (code, AMD64_RDX, AMD64_RDX, G_STRUCT_OFFSET (MonoThread, tid), 8);

		/* is synchronization->owner null? */
		amd64_alu_membase_imm_size (code, X86_CMP, AMD64_RCX, owner_offset, 0, 8);
		/* if not, jump to next case */
		jump_tid = code;
		amd64_branch8 (code, X86_CC_NZ, -1, 1);

		/* if yes, try a compare-exchange with the TID */
		/* zero RAX */
		amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX);
		/* compare and exchange */
		amd64_prefix (code, X86_LOCK_PREFIX);
		amd64_cmpxchg_membase_reg_size (code, AMD64_RCX, owner_offset, AMD64_RDX, 8);
		/* if not successful, jump to actual trampoline */
		jump_cmpxchg_failed = code;
		amd64_branch8 (code, X86_CC_NZ, -1, 1);
		/* if successful, return */
		amd64_ret (code);

		/* next case: synchronization->owner is not null */
		x86_patch (jump_tid, code);
		/* is synchronization->owner == TID? */
		amd64_alu_membase_reg_size (code, X86_CMP, AMD64_RCX, owner_offset, AMD64_RDX, 8);
		/* if not, jump to actual trampoline */
		jump_other_owner = code;
		amd64_branch8 (code, X86_CC_NZ, -1, 1);
		/* if yes, increment nest */
		amd64_inc_membase_size (code, AMD64_RCX, nest_offset, 4);
		/* return */
		amd64_ret (code);

		x86_patch (jump_obj_null, code);
		x86_patch (jump_sync_null, code);
		x86_patch (jump_cmpxchg_failed, code);
		x86_patch (jump_other_owner, code);
	}

	/* jump to the actual trampoline */
#if MONO_AMD64_ARG_REG1 != AMD64_RDI
	amd64_mov_reg_reg (code, MONO_AMD64_ARG_REG1, AMD64_RDI, 8);
#endif

	if (aot) {
		*ji = mono_patch_info_list_prepend (*ji, code - buf, MONO_PATCH_INFO_JIT_ICALL_ADDR, "specific_trampoline_monitor_enter");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
		amd64_jump_reg (code, AMD64_R11);
	} else {
		tramp = mono_arch_create_specific_trampoline (NULL, MONO_TRAMPOLINE_MONITOR_ENTER, mono_get_root_domain (), NULL);

		/* jump to the actual trampoline */
		amd64_jump_code (code, tramp);
	}

	mono_arch_flush_icache (buf, code - buf);
	g_assert (code - buf <= tramp_size);

	*code_size = code - buf;

	return buf;
}
gpointer
mono_arch_create_monitor_exit_trampoline (void)
{
	guint32 code_size;
	MonoJumpInfo *ji;

	return mono_arch_create_monitor_exit_trampoline_full (&code_size, &ji, FALSE);
}
gpointer
mono_arch_create_monitor_exit_trampoline_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
{
	guint8 *tramp;
	guint8 *code, *buf;
	guint8 *jump_obj_null, *jump_have_waiters;
	guint8 *jump_next;
	int tramp_size;
	int owner_offset, nest_offset, entry_count_offset;

	*ji = NULL;

	g_assert (MONO_ARCH_MONITOR_OBJECT_REG == AMD64_RDI);

	mono_monitor_threads_sync_members_offset (&owner_offset, &nest_offset, &entry_count_offset);
	g_assert (MONO_THREADS_SYNC_MEMBER_SIZE (owner_offset) == sizeof (gpointer));
	g_assert (MONO_THREADS_SYNC_MEMBER_SIZE (nest_offset) == sizeof (guint32));
	g_assert (MONO_THREADS_SYNC_MEMBER_SIZE (entry_count_offset) == sizeof (gint32));
	owner_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (owner_offset);
	nest_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (nest_offset);
	entry_count_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (entry_count_offset);

	tramp_size = 94;

	code = buf = mono_global_codeman_reserve (tramp_size);

	if (mono_thread_get_tls_offset () != -1) {
		/* MonoObject* obj is in RDI */
		/* is obj null? */
		amd64_test_reg_reg (code, AMD64_RDI, AMD64_RDI);
		/* if yes, jump to actual trampoline */
		jump_obj_null = code;
		amd64_branch8 (code, X86_CC_Z, -1, 1);

		/* load obj->synchronization to RCX */
		amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RDI, G_STRUCT_OFFSET (MonoObject, synchronisation), 8);
		/* is synchronization null? */
		amd64_test_reg_reg (code, AMD64_RCX, AMD64_RCX);
		/* if not, jump to next case */
		jump_next = code;
		amd64_branch8 (code, X86_CC_NZ, -1, 1);
		/* if yes, just return */
		amd64_ret (code);

		/* next case: synchronization is not null */
		x86_patch (jump_next, code);
		/* load MonoThread* into RDX */
		code = mono_amd64_emit_tls_get (code, AMD64_RDX, mono_thread_get_tls_offset ());
		/* load TID into RDX */
		amd64_mov_reg_membase (code, AMD64_RDX, AMD64_RDX, G_STRUCT_OFFSET (MonoThread, tid), 8);
		/* is synchronization->owner == TID */
		amd64_alu_membase_reg_size (code, X86_CMP, AMD64_RCX, owner_offset, AMD64_RDX, 8);
		/* if yes, jump to next case */
		jump_next = code;
		amd64_branch8 (code, X86_CC_Z, -1, 1);
		/* if not, just return */
		amd64_ret (code);

		/* next case: synchronization->owner == TID */
		x86_patch (jump_next, code);
		/* is synchronization->nest == 1 */
		amd64_alu_membase_imm_size (code, X86_CMP, AMD64_RCX, nest_offset, 1, 4);
		/* if not, jump to next case */
		jump_next = code;
		amd64_branch8 (code, X86_CC_NZ, -1, 1);
		/* if yes, is synchronization->entry_count zero? */
		amd64_alu_membase_imm_size (code, X86_CMP, AMD64_RCX, entry_count_offset, 0, 4);
		/* if not, jump to actual trampoline */
		jump_have_waiters = code;
		amd64_branch8 (code, X86_CC_NZ, -1, 1);
		/* if yes, set synchronization->owner to null and return */
		amd64_mov_membase_imm (code, AMD64_RCX, owner_offset, 0, 8);
		amd64_ret (code);

		/* next case: synchronization->nest is not 1 */
		x86_patch (jump_next, code);
		/* decrease synchronization->nest and return */
		amd64_dec_membase_size (code, AMD64_RCX, nest_offset, 4);
		amd64_ret (code);

		x86_patch (jump_obj_null, code);
		x86_patch (jump_have_waiters, code);
	}

	/* jump to the actual trampoline */
#if MONO_AMD64_ARG_REG1 != AMD64_RDI
	amd64_mov_reg_reg (code, MONO_AMD64_ARG_REG1, AMD64_RDI, 8);
#endif

	if (aot) {
		*ji = mono_patch_info_list_prepend (*ji, code - buf, MONO_PATCH_INFO_JIT_ICALL_ADDR, "specific_trampoline_monitor_exit");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
		amd64_jump_reg (code, AMD64_R11);
	} else {
		tramp = mono_arch_create_specific_trampoline (NULL, MONO_TRAMPOLINE_MONITOR_EXIT, mono_get_root_domain (), NULL);
		amd64_jump_code (code, tramp);
	}

	mono_arch_flush_icache (buf, code - buf);
	g_assert (code - buf <= tramp_size);

	*code_size = code - buf;

	return buf;
}

#endif /* MONO_ARCH_MONITOR_OBJECT_REG */
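/*
 * mono_arch_invalidate_method:
 *
 * Overwrite the start of JI's code with a call to FUNC (FUNC_ARG), so running
 * the method triggers FUNC instead of the original code.
 */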
void
mono_arch_invalidate_method (MonoJitInfo *ji, void *func, gpointer func_arg)
{
	/* FIXME: This is not thread safe */
	guint8 *code = ji->code_start;

	amd64_mov_reg_imm (code, AMD64_ARG_REG1, func_arg);
	amd64_mov_reg_imm (code, AMD64_R11, func);

	x86_push_imm (code, (guint64)func_arg);
	amd64_call_reg (code, AMD64_R11);
}