Make sure x86 ATOMIC_CAS doesn't overwrite its own operands.
[mono-debugger.git] / mono / mini / tramp-amd64.c
blob 58c333761dd9e34456b0b5b4b6202d0f5614bde1
/*
 * tramp-amd64.c: JIT trampoline code for amd64
 *
 * Authors:
 *   Dietmar Maurer (dietmar@ximian.com)
 *   Zoltan Varga (vargaz@gmail.com)
 *
 * (C) 2001 Ximian, Inc.
 */
#include <config.h>
#include <glib.h>

#include <mono/metadata/appdomain.h>
#include <mono/metadata/marshal.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/mono-debug-debugger.h>
#include <mono/metadata/monitor.h>
#include <mono/arch/amd64/amd64-codegen.h>

#ifdef HAVE_VALGRIND_MEMCHECK_H
#include <valgrind/memcheck.h>
#endif

#include "mini.h"
#include "mini-amd64.h"

#define IS_REX(inst) (((inst) >= 0x40) && ((inst) <= 0x4f))

static guint8* nullified_class_init_trampoline;
/**
 * mono_arch_get_unbox_trampoline:
 * @gsctx: the generic sharing context
 * @m: method pointer
 * @addr: pointer to native code for @m
 *
 * When value type methods are called through the vtable we need to unbox the
 * 'this' argument. This method returns a pointer to a trampoline which does
 * the unboxing before calling the method.
 */
gpointer
mono_arch_get_unbox_trampoline (MonoGenericSharingContext *gsctx, MonoMethod *m, gpointer addr)
{
	guint8 *code, *start;
	int this_reg;

	MonoDomain *domain = mono_domain_get ();

	this_reg = mono_arch_get_this_arg_reg (mono_method_signature (m), gsctx, NULL);

	start = code = mono_domain_code_reserve (domain, 20);

	amd64_alu_reg_imm (code, X86_ADD, this_reg, sizeof (MonoObject));
	/* FIXME: Optimize this */
	amd64_mov_reg_imm (code, AMD64_RAX, addr);
	amd64_jump_reg (code, AMD64_RAX);
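	/*
	 * The stub emitted above is simply: add sizeof (MonoObject) to the this
	 * register, load ADDR into %rax, jmp *%rax. Adjusting 'this' past the
	 * object header is what turns the boxed receiver into a pointer to the
	 * raw value type data expected by the method body.
	 */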
	g_assert ((code - start) < 20);

	mono_arch_flush_icache (start, code - start);

	return start;
}
/**
 * mono_arch_patch_callsite:
 *
 * Patch the callsite whose address is given by ORIG_CODE so it calls ADDR. ORIG_CODE
 * points to the pc right after the call.
 */
void
mono_arch_patch_callsite (guint8 *method_start, guint8 *orig_code, guint8 *addr)
{
	guint8 *code;
	guint8 buf [16];
	gboolean can_write = mono_breakpoint_clean_code (method_start, orig_code, 14, buf, sizeof (buf));

	code = buf + 14;
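	/*
	 * code indexes into a cleaned-up copy of the 14 bytes before orig_code.
	 * The checks below distinguish the call sequences the JIT can emit here
	 * (byte values are the assumed x86-64 encodings):
	 *   49 bb <imm64>      movabs $imm64, %r11 (the imm64 starts at orig_code - 11,
	 *                      which is the word swapped below), followed by call *%r11
	 *   e8 <rel32>         call rel32
	 *   41 ff 15 <disp32>  call *disp32(%rip), a 7 byte GOT call
	 */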
	if (((code [-13] == 0x49) && (code [-12] == 0xbb)) || (code [-5] == 0xe8)) {
		if (code [-5] != 0xe8) {
			if (can_write) {
				InterlockedExchangePointer ((gpointer*)(orig_code - 11), addr);
#ifdef HAVE_VALGRIND_MEMCHECK_H
				VALGRIND_DISCARD_TRANSLATIONS (orig_code - 11, sizeof (gpointer));
#endif
			}
		} else {
			if ((((guint64)(addr)) >> 32) != 0) {
				/* Print some diagnostics */
				MonoJitInfo *ji = mono_jit_info_table_find (mono_domain_get (), (char*)orig_code);
				if (ji)
					fprintf (stderr, "At %s, offset 0x%zx\n", mono_method_full_name (ji->method, TRUE), (guint8*)orig_code - (guint8*)ji->code_start);
				fprintf (stderr, "Addr: %p\n", addr);
				ji = mono_jit_info_table_find (mono_domain_get (), (char*)addr);
				if (ji)
					fprintf (stderr, "Callee: %s\n", mono_method_full_name (ji->method, TRUE));
				g_assert_not_reached ();
			}
			g_assert ((((guint64)(orig_code)) >> 32) == 0);
			if (can_write) {
				InterlockedExchange ((gint32*)(orig_code - 4), ((gint64)addr - (gint64)orig_code));
#ifdef HAVE_VALGRIND_MEMCHECK_H
				VALGRIND_DISCARD_TRANSLATIONS (orig_code - 5, 4);
#endif
			}
		}
	}
	else if ((code [-7] == 0x41) && (code [-6] == 0xff) && (code [-5] == 0x15)) {
		/* call *<OFFSET>(%rip) */
		gpointer *got_entry = (gpointer*)((guint8*)orig_code + (*(guint32*)(orig_code - 4)));
		if (can_write) {
			InterlockedExchangePointer (got_entry, addr);
#ifdef HAVE_VALGRIND_MEMCHECK_H
			VALGRIND_DISCARD_TRANSLATIONS (orig_code - 5, sizeof (gpointer));
#endif
		}
	}
}
void
mono_arch_patch_plt_entry (guint8 *code, guint8 *addr)
{
	gint32 disp;
	gpointer *plt_jump_table_entry;

	/* A PLT entry: jmp *<DISP>(%rip) */
	g_assert (code [0] == 0xff);
	g_assert (code [1] == 0x25);

	disp = *(gint32*)(code + 2);

	plt_jump_table_entry = (gpointer*)(code + 6 + disp);
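	/*
	 * The displacement in jmp *<DISP>(%rip) is relative to the end of the
	 * 6 byte instruction (assumed ff 25 encoding, matching the asserts
	 * above), hence code + 6 + disp is the jump table slot that gets
	 * swapped atomically below.
	 */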
	InterlockedExchangePointer (plt_jump_table_entry, addr);
}
void
mono_arch_nullify_class_init_trampoline (guint8 *code, gssize *regs)
{
	guint8 buf [16];
	gboolean can_write = mono_breakpoint_clean_code (NULL, code, 7, buf, sizeof (buf));

	if (!can_write)
		return;

	code -= 3;

	/*
	 * A given byte sequence can match more than one case here, so we have to be
	 * really careful about the ordering of the cases. Longer sequences
	 * come first.
	 */
	if ((code [-4] == 0x41) && (code [-3] == 0xff) && (code [-2] == 0x15)) {
		gpointer *vtable_slot;

		/* call *<OFFSET>(%rip) */
		vtable_slot = mono_arch_get_vcall_slot_addr (code + 3, (gpointer*)regs);
		g_assert (vtable_slot);

		*vtable_slot = nullified_class_init_trampoline;
	} else if (code [-2] == 0xe8) {
		/* call <TARGET> */
		//guint8 *buf = code - 2;

		/*
		 * It would be better to replace the call with nops, but that doesn't seem
		 * to work on SMP machines even when the whole call is inside a cache line.
		 * Patching the call address seems to work.
		 */
		/*
		buf [0] = 0x66;
		buf [1] = 0x66;
		buf [2] = 0x90;
		buf [3] = 0x66;
		buf [4] = 0x90;
		*/

		mono_arch_patch_callsite (code - 2, code - 2 + 5, nullified_class_init_trampoline);
	} else if ((code [0] == 0x41) && (code [1] == 0xff)) {
		/* call <REG> */
		/* happens on machines without MAP_32BIT like freebsd */
		/* amd64_set_reg_template is 10 bytes long */
		guint8* buf = code - 10;

		/* FIXME: Make this thread safe */
		/* Padding code suggested by the AMD64 Opt Manual */
		buf [0] = 0x66;
		buf [1] = 0x66;
		buf [2] = 0x66;
		buf [3] = 0x90;
		buf [4] = 0x66;
		buf [5] = 0x66;
		buf [6] = 0x66;
		buf [7] = 0x90;
		buf [8] = 0x66;
		buf [9] = 0x66;
		buf [10] = 0x90;
		buf [11] = 0x66;
		buf [12] = 0x90;
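		/*
		 * The 13 bytes overwritten above are assumed to be the 10 byte
		 * mov $imm64, <reg> produced from amd64_set_reg_template plus the
		 * 3 byte call *<reg>; the 0x66 prefixed fillers are the multi-byte
		 * nops (4+4+3+2 bytes) from the AMD optimization manual, so the
		 * whole call site becomes a no-op.
		 */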
	} else if (code [0] == 0x90 || code [0] == 0xeb || code [0] == 0x66) {
		/* Already changed by another thread */
	} else {
		printf ("Invalid trampoline sequence: %x %x %x %x %x %x %x\n", code [0], code [1], code [2], code [3],
			code [4], code [5], code [6]);
		g_assert_not_reached ();
	}
}
void
mono_arch_nullify_plt_entry (guint8 *code)
{
	if (mono_aot_only && !nullified_class_init_trampoline)
		nullified_class_init_trampoline = mono_aot_get_named_code ("nullified_class_init_trampoline");

	mono_arch_patch_plt_entry (code, nullified_class_init_trampoline);
}
guchar*
mono_arch_create_trampoline_code (MonoTrampolineType tramp_type)
{
	MonoJumpInfo *ji;
	guint32 code_size;
	guchar *code;
	GSList *unwind_ops, *l;

	code = mono_arch_create_trampoline_code_full (tramp_type, &code_size, &ji, &unwind_ops, FALSE);

	mono_save_trampoline_xdebug_info ("<generic_trampoline>", code, code_size, unwind_ops);

	for (l = unwind_ops; l; l = l->next)
		g_free (l->data);
	g_slist_free (unwind_ops);

	return code;
}
guchar*
mono_arch_create_trampoline_code_full (MonoTrampolineType tramp_type, guint32 *code_size, MonoJumpInfo **ji, GSList **out_unwind_ops, gboolean aot)
{
	guint8 *buf, *code, *tramp, *br [2], *r11_save_code, *after_r11_save_code;
	int i, lmf_offset, offset, res_offset, arg_offset, rax_offset, tramp_offset, saved_regs_offset;
	int saved_fpregs_offset, rbp_offset, framesize, orig_rsp_to_rbp_offset, cfa_offset;
	gboolean has_caller;
	GSList *unwind_ops = NULL;

	if (tramp_type == MONO_TRAMPOLINE_JUMP)
		has_caller = FALSE;
	else
		has_caller = TRUE;

	code = buf = mono_global_codeman_reserve (538);

	*ji = NULL;

	framesize = 538 + sizeof (MonoLMF);
	framesize = (framesize + (MONO_ARCH_FRAME_ALIGNMENT - 1)) & ~ (MONO_ARCH_FRAME_ALIGNMENT - 1);

	orig_rsp_to_rbp_offset = 0;
	r11_save_code = code;
	/* Reserve 5 bytes for the mov_membase_reg to save R11 */
	code += 5;
	after_r11_save_code = code;
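	/*
	 * saved_regs_offset is not known yet, so the 5 bytes reserved above are
	 * back-patched further down (see the AMD64_R11 case in the register
	 * save loop) with a mov that spills R11 before it gets clobbered.
	 */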
	// CFA = sp + 16 (the trampoline address is on the stack)
	cfa_offset = 16;
	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, AMD64_RSP, 16);
	// IP saved at CFA - 8
	mono_add_unwind_op_offset (unwind_ops, code, buf, AMD64_RIP, -8);

	/* Pop the return address off the stack */
	amd64_pop_reg (code, AMD64_R11);
	orig_rsp_to_rbp_offset += 8;

	cfa_offset -= 8;
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);

	/*
	 * Allocate a new stack frame
	 */
	amd64_push_reg (code, AMD64_RBP);
	cfa_offset += 8;
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
	mono_add_unwind_op_offset (unwind_ops, code, buf, AMD64_RBP, - cfa_offset);

	orig_rsp_to_rbp_offset -= 8;
	amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);
	mono_add_unwind_op_def_cfa_reg (unwind_ops, code, buf, AMD64_RBP);
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, framesize);

	offset = 0;
	rbp_offset = - offset;

	offset += 8;
	rax_offset = - offset;

	offset += 8;
	tramp_offset = - offset;

	offset += 8;
	arg_offset = - offset;

	/* Compute the trampoline address from the return address */
	if (aot) {
		/* 7 = length of call *<offset>(rip) */
		amd64_alu_reg_imm (code, X86_SUB, AMD64_R11, 7);
	} else {
		/* 5 = length of amd64_call_membase () */
		amd64_alu_reg_imm (code, X86_SUB, AMD64_R11, 5);
	}
	amd64_mov_membase_reg (code, AMD64_RBP, tramp_offset, AMD64_R11, 8);
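	/*
	 * At this point R11 holds the return address pushed by the call in the
	 * specific trampoline; subtracting the call length above yields the
	 * address of that specific trampoline itself. It is saved here so the
	 * argument embedded right after that call (see
	 * mono_arch_create_specific_trampoline ()) can be read back later.
	 */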
	offset += 8;
	res_offset = - offset;

	/* Save all registers */

	offset += AMD64_NREG * 8;
	saved_regs_offset = - offset;
	for (i = 0; i < AMD64_NREG; ++i) {
		if (i == AMD64_RBP) {
			/* RAX is already saved */
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RBP, rbp_offset, 8);
			amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * 8), AMD64_RAX, 8);
		} else if (i != AMD64_R11) {
			amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * 8), i, 8);
		} else {
			/* We have to save R11 right at the start of
			   the trampoline code because it's used as a
			   scratch register */
			amd64_mov_membase_reg (r11_save_code, AMD64_RSP, saved_regs_offset + orig_rsp_to_rbp_offset + (i * 8), i, 8);
			g_assert (r11_save_code == after_r11_save_code);
		}
	}
	offset += 8 * 8;
	saved_fpregs_offset = - offset;
	for (i = 0; i < 8; ++i)
		amd64_movsd_membase_reg (code, AMD64_RBP, saved_fpregs_offset + (i * 8), i);

	if (tramp_type != MONO_TRAMPOLINE_GENERIC_CLASS_INIT &&
		tramp_type != MONO_TRAMPOLINE_MONITOR_ENTER &&
		tramp_type != MONO_TRAMPOLINE_MONITOR_EXIT) {
		/* Obtain the trampoline argument which is encoded in the instruction stream */
		if (aot) {
			/* Load the GOT offset */
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, tramp_offset, 8);
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, 7, 4);
			/* Compute the address of the GOT slot */
			amd64_alu_reg_reg_size (code, X86_ADD, AMD64_R11, AMD64_RAX, 8);
			/* Load the value */
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, 8);
		} else {
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, tramp_offset, 8);
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, 5, 1);
			amd64_widen_reg (code, AMD64_RAX, AMD64_RAX, TRUE, FALSE);
			amd64_alu_reg_imm_size (code, X86_CMP, AMD64_RAX, 4, 1);
			br [0] = code;
			x86_branch8 (code, X86_CC_NE, 6, FALSE);
			/* 32 bit immediate */
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 6, 4);
			br [1] = code;
			x86_jump8 (code, 10);
			/* 64 bit immediate */
			mono_amd64_patch (br [0], code);
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 6, 8);
			mono_amd64_patch (br [1], code);
		}
		amd64_mov_membase_reg (code, AMD64_RBP, arg_offset, AMD64_R11, 8);
	} else {
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, saved_regs_offset + (MONO_AMD64_ARG_REG1 * 8), 8);
		amd64_mov_membase_reg (code, AMD64_RBP, arg_offset, AMD64_R11, 8);
	}

	/* Save LMF begin */

	offset += sizeof (MonoLMF);
	lmf_offset = - offset;

	/* Save ip */
	if (has_caller)
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, 8, 8);
	else
		amd64_mov_reg_imm (code, AMD64_R11, 0);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rip), AMD64_R11, 8);
	/* Save fp */
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, framesize, 8);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rbp), AMD64_R11, 8);
	/* Save sp */
	amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, 8);
	amd64_alu_reg_imm (code, X86_ADD, AMD64_R11, framesize + 16);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rsp), AMD64_R11, 8);
	/* Save method */
	if (tramp_type == MONO_TRAMPOLINE_JIT || tramp_type == MONO_TRAMPOLINE_JUMP) {
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, arg_offset, 8);
		amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method), AMD64_R11, 8);
	} else {
		amd64_mov_membase_imm (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method), 0, 8);
	}
	/* Save callee saved regs */
#ifdef PLATFORM_WIN32
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rdi), AMD64_RDI, 8);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rsi), AMD64_RSI, 8);
#endif
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rbx), AMD64_RBX, 8);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r12), AMD64_R12, 8);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r13), AMD64_R13, 8);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r14), AMD64_R14, 8);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r15), AMD64_R15, 8);

	if (aot) {
		*ji = mono_patch_info_list_prepend (*ji, code - buf, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_get_lmf_addr");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, mono_get_lmf_addr);
	}
	amd64_call_reg (code, AMD64_R11);

	/* Save lmf_addr */
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), AMD64_RAX, 8);
	/* Save previous_lmf */
	/* Set the lowest bit to 1 to signal that this LMF has the ip field set */
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RAX, 0, 8);
	amd64_alu_reg_imm_size (code, X86_ADD, AMD64_R11, 1, 8);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), AMD64_R11, 8);
	/* Set new lmf */
	amd64_lea_membase (code, AMD64_R11, AMD64_RBP, lmf_offset);
	amd64_mov_membase_reg (code, AMD64_RAX, 0, AMD64_R11, 8);
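	/*
	 * The stores above link a MonoLMF for this trampoline frame into the
	 * thread's LMF list: previous_lmf gets the old list head (tagged with
	 * bit 0 to mark that the rip field is valid, per the comment above)
	 * and *lmf_addr is pointed at the new entry, so the runtime can unwind
	 * across this native frame.
	 */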
	/* Save LMF end */

	/* Arg1 is the pointer to the saved registers */
	amd64_lea_membase (code, AMD64_ARG_REG1, AMD64_RBP, saved_regs_offset);

	/* Arg2 is the address of the calling code */
	if (has_caller)
		amd64_mov_reg_membase (code, AMD64_ARG_REG2, AMD64_RBP, 8, 8);
	else
		amd64_mov_reg_imm (code, AMD64_ARG_REG2, 0);

	/* Arg3 is the method/vtable ptr */
	amd64_mov_reg_membase (code, AMD64_ARG_REG3, AMD64_RBP, arg_offset, 8);

	/* Arg4 is the trampoline address */
	amd64_mov_reg_membase (code, AMD64_ARG_REG4, AMD64_RBP, tramp_offset, 8);

	if (aot) {
		char *icall_name = g_strdup_printf ("trampoline_func_%d", tramp_type);
		*ji = mono_patch_info_list_prepend (*ji, code - buf, MONO_PATCH_INFO_JIT_ICALL_ADDR, icall_name);
		amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RIP, 0, 8);
	} else {
		tramp = (guint8*)mono_get_trampoline_func (tramp_type);
		amd64_mov_reg_imm (code, AMD64_RAX, tramp);
	}
	amd64_call_reg (code, AMD64_RAX);

	/* Check for thread interruption */
	/* This is not perf critical code so no need to check the interrupt flag */
	/*
	 * Have to call the _force_ variant, since there could be a protected wrapper on the top of the stack.
	 */
	amd64_mov_membase_reg (code, AMD64_RBP, res_offset, AMD64_RAX, 8);
	if (aot) {
		*ji = mono_patch_info_list_prepend (*ji, code - buf, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_thread_force_interruption_checkpoint");
		amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_RAX, (guint8*)mono_thread_force_interruption_checkpoint);
	}
	amd64_call_reg (code, AMD64_RAX);
	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RBP, res_offset, 8);

	/* Restore LMF */

	amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), 8);
	amd64_alu_reg_imm_size (code, X86_SUB, AMD64_RCX, 1, 8);
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), 8);
	amd64_mov_membase_reg (code, AMD64_R11, 0, AMD64_RCX, 8);

	/*
	 * Save rax to the stack, after the leave instruction, this will become part of
	 * the red zone.
	 */
	amd64_mov_membase_reg (code, AMD64_RBP, rax_offset, AMD64_RAX, 8);
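	/*
	 * rax_offset is the slot right below the saved RBP, so after the leave
	 * below it sits in the red zone under the new RSP and is still intact
	 * when the ret/jmp reads it back via RSP + rax_offset - 0x8.
	 */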
	/* Restore argument registers, r10 (needed to pass rgctx to
	   static shared generic methods), r11 (imt register for
	   interface calls), and rax (needed for direct calls to C vararg functions). */
	for (i = 0; i < AMD64_NREG; ++i)
		if (AMD64_IS_ARGUMENT_REG (i) || i == AMD64_R10 || i == AMD64_R11 || i == AMD64_RAX)
			amd64_mov_reg_membase (code, i, AMD64_RBP, saved_regs_offset + (i * 8), 8);

	for (i = 0; i < 8; ++i)
		amd64_movsd_reg_membase (code, i, AMD64_RBP, saved_fpregs_offset + (i * 8));

	/* Restore stack */
	amd64_leave (code);

	if (MONO_TRAMPOLINE_TYPE_MUST_RETURN (tramp_type)) {
		/* Load result */
		amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, rax_offset - 0x8, 8);
		amd64_ret (code);
	} else {
		/* call the compiled method using the saved rax */
		amd64_jump_membase (code, AMD64_RSP, rax_offset - 0x8);
	}

	g_assert ((code - buf) <= 538);

	mono_arch_flush_icache (buf, code - buf);

	*code_size = code - buf;

	if (tramp_type == MONO_TRAMPOLINE_CLASS_INIT) {
		guint32 code_len;

		/* Initialize the nullified class init trampoline used in the AOT case */
		nullified_class_init_trampoline = mono_arch_get_nullified_class_init_trampoline (&code_len);
	}

	*out_unwind_ops = unwind_ops;

	return buf;
}
gpointer
mono_arch_get_nullified_class_init_trampoline (guint32 *code_len)
{
	guint8 *code, *buf;

	code = buf = mono_global_codeman_reserve (16);
	amd64_ret (code);

	mono_arch_flush_icache (buf, code - buf);

	*code_len = code - buf;

	return buf;
}
gpointer
mono_arch_create_specific_trampoline (gpointer arg1, MonoTrampolineType tramp_type, MonoDomain *domain, guint32 *code_len)
{
	guint8 *code, *buf, *tramp;
	int size;

	tramp = mono_get_trampoline_code (tramp_type);
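	/*
	 * The specific trampoline layout produced below is: a 5 byte call into
	 * the generic trampoline, one size byte (0x4 or 0x8), then arg1 as a 32
	 * or 64 bit immediate. The generic trampoline recovers arg1 through the
	 * return address of that call (see the argument decoding in
	 * mono_arch_create_trampoline_code_full ()).
	 */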
	if ((((guint64)arg1) >> 32) == 0)
		size = 5 + 1 + 4;
	else
		size = 5 + 1 + 8;

	code = buf = mono_domain_code_reserve_align (domain, size, 1);

	amd64_call_code (code, tramp);
	/* The trampoline code will obtain the argument from the instruction stream */
	if ((((guint64)arg1) >> 32) == 0) {
		*code = 0x4;
		*(guint32*)(code + 1) = (gint64)arg1;
		code += 5;
	} else {
		*code = 0x8;
		*(guint64*)(code + 1) = (gint64)arg1;
		code += 9;
	}

	g_assert ((code - buf) <= size);

	if (code_len)
		*code_len = size;

	mono_arch_flush_icache (buf, size);

	return buf;
}
gpointer
mono_arch_create_rgctx_lazy_fetch_trampoline (guint32 slot)
{
	guint32 code_size;
	MonoJumpInfo *ji;

	return mono_arch_create_rgctx_lazy_fetch_trampoline_full (slot, &code_size, &ji, FALSE);
}
gpointer
mono_arch_create_rgctx_lazy_fetch_trampoline_full (guint32 slot, guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
{
	guint8 *tramp;
	guint8 *code, *buf;
	guint8 **rgctx_null_jumps;
	int tramp_size;
	int depth, index;
	int i;
	gboolean mrgctx;

	*ji = NULL;

	mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
	index = MONO_RGCTX_SLOT_INDEX (slot);
	if (mrgctx)
		index += sizeof (MonoMethodRuntimeGenericContext) / sizeof (gpointer);
	for (depth = 0; ; ++depth) {
		int size = mono_class_rgctx_get_array_size (depth, mrgctx);

		if (index < size - 1)
			break;
		index -= size - 1;
	}

	tramp_size = 64 + 8 * depth;
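	/*
	 * rgctx slots live in a chain of arrays whose first entry points to the
	 * next array, so the loop above converts the slot number into 'depth'
	 * (how many hops to follow) plus 'index' within the final array; each
	 * extra hop costs a few more instructions, hence the 8 * depth size bump.
	 */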
	code = buf = mono_global_codeman_reserve (tramp_size);

	rgctx_null_jumps = g_malloc (sizeof (guint8*) * (depth + 2));

	if (mrgctx) {
		/* get mrgctx ptr */
		amd64_mov_reg_reg (code, AMD64_RAX, AMD64_ARG_REG1, 8);
	} else {
		/* load rgctx ptr from vtable */
		amd64_mov_reg_membase (code, AMD64_RAX, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoVTable, runtime_generic_context), 8);
		/* is the rgctx ptr null? */
		amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
		/* if yes, jump to actual trampoline */
		rgctx_null_jumps [0] = code;
		amd64_branch8 (code, X86_CC_Z, -1, 1);
	}

	for (i = 0; i < depth; ++i) {
		/* load ptr to next array */
		if (mrgctx && i == 0)
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RAX, sizeof (MonoMethodRuntimeGenericContext), 8);
		else
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RAX, 0, 8);
		/* is the ptr null? */
		amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
		/* if yes, jump to actual trampoline */
		rgctx_null_jumps [i + 1] = code;
		amd64_branch8 (code, X86_CC_Z, -1, 1);
	}

	/* fetch slot */
	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RAX, sizeof (gpointer) * (index + 1), 8);
	/* is the slot null? */
	amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
	/* if yes, jump to actual trampoline */
	rgctx_null_jumps [depth + 1] = code;
	amd64_branch8 (code, X86_CC_Z, -1, 1);
	/* otherwise return */
	amd64_ret (code);

	for (i = mrgctx ? 1 : 0; i <= depth + 1; ++i)
		x86_patch (rgctx_null_jumps [i], code);

	g_free (rgctx_null_jumps);

	/* move the rgctx pointer to the VTABLE register */
	amd64_mov_reg_reg (code, MONO_ARCH_VTABLE_REG, AMD64_ARG_REG1, 8);

	if (aot) {
		*ji = mono_patch_info_list_prepend (*ji, code - buf, MONO_PATCH_INFO_JIT_ICALL_ADDR, g_strdup_printf ("specific_trampoline_lazy_fetch_%u", slot));
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
		amd64_jump_reg (code, AMD64_R11);
	} else {
		tramp = mono_arch_create_specific_trampoline (GUINT_TO_POINTER (slot), MONO_TRAMPOLINE_RGCTX_LAZY_FETCH, mono_get_root_domain (), NULL);

		/* jump to the actual trampoline */
		amd64_jump_code (code, tramp);
	}

	mono_arch_flush_icache (buf, code - buf);

	g_assert (code - buf <= tramp_size);

	*code_size = code - buf;

	return buf;
}
gpointer
mono_arch_create_generic_class_init_trampoline (void)
{
	guint32 code_size;
	MonoJumpInfo *ji;

	return mono_arch_create_generic_class_init_trampoline_full (&code_size, &ji, FALSE);
}
gpointer
mono_arch_create_generic_class_init_trampoline_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
{
	guint8 *tramp;
	guint8 *code, *buf;
	static int byte_offset = -1;
	static guint8 bitmask;
	guint8 *jump;
	int tramp_size;

	*ji = NULL;

	tramp_size = 64;

	code = buf = mono_global_codeman_reserve (tramp_size);

	if (byte_offset < 0)
		mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);

	amd64_test_membase_imm_size (code, MONO_AMD64_ARG_REG1, byte_offset, bitmask, 1);
	jump = code;
	amd64_branch8 (code, X86_CC_Z, -1, 1);

	amd64_ret (code);

	x86_patch (jump, code);
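	/*
	 * Fast path: the test above checks the 'initialized' bit of the vtable
	 * passed in ARG_REG1 and returns immediately when the class is already
	 * initialized; only the uninitialized case falls through to the real
	 * class init trampoline below.
	 */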
	if (aot) {
		*ji = mono_patch_info_list_prepend (*ji, code - buf, MONO_PATCH_INFO_JIT_ICALL_ADDR, "specific_trampoline_generic_class_init");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
		amd64_jump_reg (code, AMD64_R11);
	} else {
		tramp = mono_arch_create_specific_trampoline (NULL, MONO_TRAMPOLINE_GENERIC_CLASS_INIT, mono_get_root_domain (), NULL);

		/* jump to the actual trampoline */
		amd64_jump_code (code, tramp);
	}

	mono_arch_flush_icache (buf, code - buf);

	g_assert (code - buf <= tramp_size);

	*code_size = code - buf;

	return buf;
}
#ifdef MONO_ARCH_MONITOR_OBJECT_REG

gpointer
mono_arch_create_monitor_enter_trampoline (void)
{
	guint32 code_size;
	MonoJumpInfo *ji;

	return mono_arch_create_monitor_enter_trampoline_full (&code_size, &ji, FALSE);
}

gpointer
mono_arch_create_monitor_enter_trampoline_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
{
	guint8 *tramp;
	guint8 *code, *buf;
	guint8 *jump_obj_null, *jump_sync_null, *jump_cmpxchg_failed, *jump_other_owner, *jump_tid;
	int tramp_size;
	int owner_offset, nest_offset, dummy;

	*ji = NULL;

	g_assert (MONO_ARCH_MONITOR_OBJECT_REG == AMD64_RDI);

	mono_monitor_threads_sync_members_offset (&owner_offset, &nest_offset, &dummy);
	g_assert (MONO_THREADS_SYNC_MEMBER_SIZE (owner_offset) == sizeof (gpointer));
	g_assert (MONO_THREADS_SYNC_MEMBER_SIZE (nest_offset) == sizeof (guint32));
	owner_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (owner_offset);
	nest_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (nest_offset);

	tramp_size = 96;

	code = buf = mono_global_codeman_reserve (tramp_size);
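	/*
	 * Inline MonitorEnter fast path (only emitted when the thread TLS
	 * offset is known): try to cmpxchg the owner field from 0 to the
	 * current TID, or bump the nest count when the lock is already owned
	 * by this thread. Null objects, missing sync blocks, a lost cmpxchg
	 * and a foreign owner all fall through to the generic trampoline at
	 * the end.
	 */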
	if (mono_thread_get_tls_offset () != -1) {
		/* MonoObject* obj is in RDI */
		/* is obj null? */
		amd64_test_reg_reg (code, AMD64_RDI, AMD64_RDI);
		/* if yes, jump to actual trampoline */
		jump_obj_null = code;
		amd64_branch8 (code, X86_CC_Z, -1, 1);

		/* load obj->synchronization to RCX */
		amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RDI, G_STRUCT_OFFSET (MonoObject, synchronisation), 8);
		/* is synchronization null? */
		amd64_test_reg_reg (code, AMD64_RCX, AMD64_RCX);
		/* if yes, jump to actual trampoline */
		jump_sync_null = code;
		amd64_branch8 (code, X86_CC_Z, -1, 1);

		/* load MonoThread* into RDX */
		code = mono_amd64_emit_tls_get (code, AMD64_RDX, mono_thread_get_tls_offset ());
		/* load TID into RDX */
		amd64_mov_reg_membase (code, AMD64_RDX, AMD64_RDX, G_STRUCT_OFFSET (MonoThread, tid), 8);

		/* is synchronization->owner null? */
		amd64_alu_membase_imm_size (code, X86_CMP, AMD64_RCX, owner_offset, 0, 8);
		/* if not, jump to next case */
		jump_tid = code;
		amd64_branch8 (code, X86_CC_NZ, -1, 1);

		/* if yes, try a compare-exchange with the TID */
		/* zero RAX */
		amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX);
		/* compare and exchange */
		amd64_prefix (code, X86_LOCK_PREFIX);
		amd64_cmpxchg_membase_reg_size (code, AMD64_RCX, owner_offset, AMD64_RDX, 8);
		/* if not successful, jump to actual trampoline */
		jump_cmpxchg_failed = code;
		amd64_branch8 (code, X86_CC_NZ, -1, 1);
		/* if successful, return */
		amd64_ret (code);

		/* next case: synchronization->owner is not null */
		x86_patch (jump_tid, code);
		/* is synchronization->owner == TID? */
		amd64_alu_membase_reg_size (code, X86_CMP, AMD64_RCX, owner_offset, AMD64_RDX, 8);
		/* if not, jump to actual trampoline */
		jump_other_owner = code;
		amd64_branch8 (code, X86_CC_NZ, -1, 1);
		/* if yes, increment nest */
		amd64_inc_membase_size (code, AMD64_RCX, nest_offset, 4);
		/* return */
		amd64_ret (code);

		x86_patch (jump_obj_null, code);
		x86_patch (jump_sync_null, code);
		x86_patch (jump_cmpxchg_failed, code);
		x86_patch (jump_other_owner, code);
	}

	/* jump to the actual trampoline */
#if MONO_AMD64_ARG_REG1 != AMD64_RDI
	amd64_mov_reg_reg (code, MONO_AMD64_ARG_REG1, AMD64_RDI, 8);
#endif

	if (aot) {
		*ji = mono_patch_info_list_prepend (*ji, code - buf, MONO_PATCH_INFO_JIT_ICALL_ADDR, "specific_trampoline_monitor_enter");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
		amd64_jump_reg (code, AMD64_R11);
	} else {
		tramp = mono_arch_create_specific_trampoline (NULL, MONO_TRAMPOLINE_MONITOR_ENTER, mono_get_root_domain (), NULL);

		/* jump to the actual trampoline */
		amd64_jump_code (code, tramp);
	}

	mono_arch_flush_icache (buf, code - buf);
	g_assert (code - buf <= tramp_size);

	*code_size = code - buf;

	return buf;
}
gpointer
mono_arch_create_monitor_exit_trampoline (void)
{
	guint32 code_size;
	MonoJumpInfo *ji;

	return mono_arch_create_monitor_exit_trampoline_full (&code_size, &ji, FALSE);
}
gpointer
mono_arch_create_monitor_exit_trampoline_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
{
	guint8 *tramp;
	guint8 *code, *buf;
	guint8 *jump_obj_null, *jump_have_waiters;
	guint8 *jump_next;
	int tramp_size;
	int owner_offset, nest_offset, entry_count_offset;

	*ji = NULL;

	g_assert (MONO_ARCH_MONITOR_OBJECT_REG == AMD64_RDI);

	mono_monitor_threads_sync_members_offset (&owner_offset, &nest_offset, &entry_count_offset);
	g_assert (MONO_THREADS_SYNC_MEMBER_SIZE (owner_offset) == sizeof (gpointer));
	g_assert (MONO_THREADS_SYNC_MEMBER_SIZE (nest_offset) == sizeof (guint32));
	g_assert (MONO_THREADS_SYNC_MEMBER_SIZE (entry_count_offset) == sizeof (gint32));
	owner_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (owner_offset);
	nest_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (nest_offset);
	entry_count_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (entry_count_offset);

	tramp_size = 94;

	code = buf = mono_global_codeman_reserve (tramp_size);
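	/*
	 * Inline MonitorExit fast path: the lock is only released inline when
	 * the current thread owns it; with nest == 1 and entry_count == 0 the
	 * owner field is simply cleared, recursive exits just decrement nest,
	 * and any case with waiters (entry_count != 0) is left to the generic
	 * trampoline below.
	 */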
	if (mono_thread_get_tls_offset () != -1) {
		/* MonoObject* obj is in RDI */
		/* is obj null? */
		amd64_test_reg_reg (code, AMD64_RDI, AMD64_RDI);
		/* if yes, jump to actual trampoline */
		jump_obj_null = code;
		amd64_branch8 (code, X86_CC_Z, -1, 1);

		/* load obj->synchronization to RCX */
		amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RDI, G_STRUCT_OFFSET (MonoObject, synchronisation), 8);
		/* is synchronization null? */
		amd64_test_reg_reg (code, AMD64_RCX, AMD64_RCX);
		/* if not, jump to next case */
		jump_next = code;
		amd64_branch8 (code, X86_CC_NZ, -1, 1);
		/* if yes, just return */
		amd64_ret (code);

		/* next case: synchronization is not null */
		x86_patch (jump_next, code);
		/* load MonoThread* into RDX */
		code = mono_amd64_emit_tls_get (code, AMD64_RDX, mono_thread_get_tls_offset ());
		/* load TID into RDX */
		amd64_mov_reg_membase (code, AMD64_RDX, AMD64_RDX, G_STRUCT_OFFSET (MonoThread, tid), 8);
		/* is synchronization->owner == TID */
		amd64_alu_membase_reg_size (code, X86_CMP, AMD64_RCX, owner_offset, AMD64_RDX, 8);
		/* if yes, jump to next case */
		jump_next = code;
		amd64_branch8 (code, X86_CC_Z, -1, 1);
		/* if not, just return */
		amd64_ret (code);

		/* next case: synchronization->owner == TID */
		x86_patch (jump_next, code);
		/* is synchronization->nest == 1 */
		amd64_alu_membase_imm_size (code, X86_CMP, AMD64_RCX, nest_offset, 1, 4);
		/* if not, jump to next case */
		jump_next = code;
		amd64_branch8 (code, X86_CC_NZ, -1, 1);
		/* if yes, is synchronization->entry_count zero? */
		amd64_alu_membase_imm_size (code, X86_CMP, AMD64_RCX, entry_count_offset, 0, 4);
		/* if not, jump to actual trampoline */
		jump_have_waiters = code;
		amd64_branch8 (code, X86_CC_NZ, -1 , 1);
		/* if yes, set synchronization->owner to null and return */
		amd64_mov_membase_imm (code, AMD64_RCX, owner_offset, 0, 8);
		amd64_ret (code);

		/* next case: synchronization->nest is not 1 */
		x86_patch (jump_next, code);
		/* decrease synchronization->nest and return */
		amd64_dec_membase_size (code, AMD64_RCX, nest_offset, 4);
		amd64_ret (code);

		x86_patch (jump_obj_null, code);
		x86_patch (jump_have_waiters, code);
	}

	/* jump to the actual trampoline */
#if MONO_AMD64_ARG_REG1 != AMD64_RDI
	amd64_mov_reg_reg (code, MONO_AMD64_ARG_REG1, AMD64_RDI, 8);
#endif

	if (aot) {
		*ji = mono_patch_info_list_prepend (*ji, code - buf, MONO_PATCH_INFO_JIT_ICALL_ADDR, "specific_trampoline_monitor_exit");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
		amd64_jump_reg (code, AMD64_R11);
	} else {
		tramp = mono_arch_create_specific_trampoline (NULL, MONO_TRAMPOLINE_MONITOR_EXIT, mono_get_root_domain (), NULL);
		amd64_jump_code (code, tramp);
	}

	mono_arch_flush_icache (buf, code - buf);
	g_assert (code - buf <= tramp_size);

	*code_size = code - buf;

	return buf;
}
#endif

void
mono_arch_invalidate_method (MonoJitInfo *ji, void *func, gpointer func_arg)
{
	/* FIXME: This is not thread safe */
	guint8 *code = ji->code_start;

	amd64_mov_reg_imm (code, AMD64_ARG_REG1, func_arg);
	amd64_mov_reg_imm (code, AMD64_R11, func);

	x86_push_imm (code, (guint64)func_arg);
	amd64_call_reg (code, AMD64_R11);
}