/*
 * exceptions-amd64.c: exception support for AMD64
 *
 * Authors:
 *   Dietmar Maurer (dietmar@ximian.com)
 *
 * (C) 2001 Ximian, Inc.
 */

#include <config.h>
#include <glib.h>
#include <signal.h>
#include <string.h>
#ifdef HAVE_UCONTEXT_H
#include <ucontext.h>
#endif

#include <mono/arch/amd64/amd64-codegen.h>
#include <mono/metadata/appdomain.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/threads.h>
#include <mono/metadata/threads-types.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/exception.h>
#include <mono/metadata/gc-internal.h>
#include <mono/metadata/mono-debug.h>
#include <mono/utils/mono-mmap.h>

#include "mini.h"
#include "mini-amd64.h"
#include "debug-mini.h"

#define ALIGN_TO(val,align) (((val) + ((align) - 1)) & ~((align) - 1))
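/* e.g. ALIGN_TO (13, 8) == 16; the mask trick assumes align is a power of two. */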

#ifdef PLATFORM_WIN32
static MonoW32ExceptionHandler fpe_handler;
static MonoW32ExceptionHandler ill_handler;
static MonoW32ExceptionHandler segv_handler;

static LPTOP_LEVEL_EXCEPTION_FILTER old_handler;

#define W32_SEH_HANDLE_EX(_ex) \
	if (_ex##_handler) _ex##_handler((int)sctx)
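
/*
 * For instance, W32_SEH_HANDLE_EX(segv) expands to:
 *   if (segv_handler) segv_handler((int)sctx)
 */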

/*
 * Unhandled Exception Filter
 * Top-level per-process exception handler.
 */
LONG CALLBACK seh_handler(EXCEPTION_POINTERS* ep)
{
	EXCEPTION_RECORD* er;
	CONTEXT* ctx;
	MonoContext* sctx;
	LONG res;

	res = EXCEPTION_CONTINUE_EXECUTION;

	er = ep->ExceptionRecord;
	ctx = ep->ContextRecord;
	sctx = g_malloc(sizeof(MonoContext));

	/* Copy Win32 context to UNIX style context */
	sctx->rax = ctx->Rax;
	sctx->rbx = ctx->Rbx;
	sctx->rcx = ctx->Rcx;
	sctx->rdx = ctx->Rdx;
	sctx->rbp = ctx->Rbp;
	sctx->rsp = ctx->Rsp;
	sctx->rsi = ctx->Rsi;
	sctx->rdi = ctx->Rdi;
	sctx->rip = ctx->Rip;
	sctx->r12 = ctx->R12;
	sctx->r13 = ctx->R13;
	sctx->r14 = ctx->R14;
	sctx->r15 = ctx->R15;

	switch (er->ExceptionCode) {
	case EXCEPTION_ACCESS_VIOLATION:
		W32_SEH_HANDLE_EX(segv);
		break;
	case EXCEPTION_ILLEGAL_INSTRUCTION:
		W32_SEH_HANDLE_EX(ill);
		break;
	case EXCEPTION_INT_DIVIDE_BY_ZERO:
	case EXCEPTION_INT_OVERFLOW:
	case EXCEPTION_FLT_DIVIDE_BY_ZERO:
	case EXCEPTION_FLT_OVERFLOW:
	case EXCEPTION_FLT_UNDERFLOW:
	case EXCEPTION_FLT_INEXACT_RESULT:
		W32_SEH_HANDLE_EX(fpe);
		break;
	default:
		break;
	}

	/* Copy context back */
	/* Nonvolatile */
	ctx->Rsp = sctx->rsp;
	ctx->Rdi = sctx->rdi;
	ctx->Rsi = sctx->rsi;
	ctx->Rbx = sctx->rbx;
	ctx->Rbp = sctx->rbp;
	ctx->R12 = sctx->r12;
	ctx->R13 = sctx->r13;
	ctx->R14 = sctx->r14;
	ctx->R15 = sctx->r15;
	ctx->Rip = sctx->rip;

	/* Volatile, but should not matter? */
	ctx->Rax = sctx->rax;
	ctx->Rcx = sctx->rcx;
	ctx->Rdx = sctx->rdx;

	g_free (sctx);

	return res;
}

void win32_seh_init()
{
	old_handler = SetUnhandledExceptionFilter(seh_handler);
}

void win32_seh_cleanup()
{
	if (old_handler) SetUnhandledExceptionFilter(old_handler);
}

void win32_seh_set_handler(int type, MonoW32ExceptionHandler handler)
{
	switch (type) {
	case SIGFPE:
		fpe_handler = handler;
		break;
	case SIGILL:
		ill_handler = handler;
		break;
	case SIGSEGV:
		segv_handler = handler;
		break;
	default:
		break;
	}
}

#endif /* PLATFORM_WIN32 */

/*
 * mono_arch_get_restore_context:
 *
 * Returns a pointer to a method which restores a previously saved sigcontext.
 */
gpointer
mono_arch_get_restore_context_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
{
	guint8 *start = NULL;
	guint8 *code;

	/* restore_context (MonoContext *ctx) */

	*ji = NULL;

	start = code = mono_global_codeman_reserve (256);

	amd64_mov_reg_reg (code, AMD64_R11, AMD64_ARG_REG1, 8);

	/* Restore all registers except %rip and %r11 */
	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rax), 8);
	amd64_mov_reg_membase (code, AMD64_RCX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rcx), 8);
	amd64_mov_reg_membase (code, AMD64_RDX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rdx), 8);
	amd64_mov_reg_membase (code, AMD64_RBX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rbx), 8);
	amd64_mov_reg_membase (code, AMD64_RBP, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rbp), 8);
	amd64_mov_reg_membase (code, AMD64_RSI, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsi), 8);
	amd64_mov_reg_membase (code, AMD64_RDI, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rdi), 8);
	//amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r8), 8);
	//amd64_mov_reg_membase (code, AMD64_R9, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r9), 8);
	//amd64_mov_reg_membase (code, AMD64_R10, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r10), 8);
	amd64_mov_reg_membase (code, AMD64_R12, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r12), 8);
	amd64_mov_reg_membase (code, AMD64_R13, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r13), 8);
	amd64_mov_reg_membase (code, AMD64_R14, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r14), 8);
	amd64_mov_reg_membase (code, AMD64_R15, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r15), 8);

	if (mono_running_on_valgrind ()) {
		/* Prevent 'Address 0x... is just below the stack ptr.' errors */
		amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsp), 8);
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rip), 8);
		amd64_mov_reg_reg (code, AMD64_RSP, AMD64_R8, 8);
	} else {
		amd64_mov_reg_membase (code, AMD64_RSP, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsp), 8);
		/* get return address */
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rip), 8);
	}

	/* jump to the saved IP */
	amd64_jump_reg (code, AMD64_R11);

	mono_arch_flush_icache (start, code - start);

	*code_size = code - start;

	return start;
}
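
/*
 * Illustration (not compiled in): callers reach the generated stub through
 * mono_get_restore_context (), as done later in this file:
 *
 *   static void (*restore_context) (MonoContext *);
 *   if (!restore_context)
 *       restore_context = mono_get_restore_context ();
 *   restore_context (&ctx);   // control transfers to ctx.rip; never returns
 */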

/*
 * mono_arch_get_call_filter:
 *
 * Returns a pointer to a method which calls an exception filter. We
 * also use this function to call finally handlers (we pass NULL as
 * @exc object in this case).
 */
gpointer
mono_arch_get_call_filter_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
{
	guint8 *start;
	int i;
	guint8 *code;
	guint32 pos;

	*ji = NULL;

	start = code = mono_global_codeman_reserve (128);

	/* call_filter (MonoContext *ctx, unsigned long eip) */
	code = start;

	/* Alloc new frame */
	amd64_push_reg (code, AMD64_RBP);
	amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);

	/* Save callee saved regs */
	pos = 0;
	for (i = 0; i < AMD64_NREG; ++i)
		if (AMD64_IS_CALLEE_SAVED_REG (i)) {
			amd64_push_reg (code, i);
			pos += 8;
		}

	/* Save RBP */
	pos += 8;
	amd64_push_reg (code, AMD64_RBP);

	/* Make stack misaligned, the call will make it aligned again */
	if (! (pos & 8))
		amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);

	/* set new RBP */
	amd64_mov_reg_membase (code, AMD64_RBP, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rbp), 8);
	/* load callee saved regs */
	amd64_mov_reg_membase (code, AMD64_RBX, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rbx), 8);
	amd64_mov_reg_membase (code, AMD64_R12, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r12), 8);
	amd64_mov_reg_membase (code, AMD64_R13, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r13), 8);
	amd64_mov_reg_membase (code, AMD64_R14, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r14), 8);
	amd64_mov_reg_membase (code, AMD64_R15, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r15), 8);
#ifdef PLATFORM_WIN32
	amd64_mov_reg_membase (code, AMD64_RDI, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rdi), 8);
	amd64_mov_reg_membase (code, AMD64_RSI, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rsi), 8);
#endif

	/* call the handler */
	amd64_call_reg (code, AMD64_ARG_REG2);

	if (! (pos & 8))
		amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);

	/* restore RBP */
	amd64_pop_reg (code, AMD64_RBP);

	/* Restore callee saved regs */
	for (i = AMD64_NREG; i >= 0; --i)
		if (AMD64_IS_CALLEE_SAVED_REG (i))
			amd64_pop_reg (code, i);

	amd64_leave (code);
	amd64_ret (code);

	g_assert ((code - start) < 128);

	mono_arch_flush_icache (start, code - start);

	*code_size = code - start;

	return start;
}
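
/*
 * Illustration (hypothetical caller, names assumed): the stub is obtained
 * via mono_get_call_filter () and invoked as
 *
 *   int (*call_filter) (MonoContext *, gpointer) = mono_get_call_filter ();
 *   filtered = call_filter (&ctx, handler_ip);
 */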

/*
 * The first few arguments are dummy, to force the other arguments to be passed on
 * the stack; this avoids overwriting the argument registers in the throw trampoline.
 */
void
mono_amd64_throw_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
			    guint64 dummy5, guint64 dummy6,
			    MonoObject *exc, guint64 rip, guint64 rsp,
			    guint64 rbx, guint64 rbp, guint64 r12, guint64 r13,
			    guint64 r14, guint64 r15, guint64 rdi, guint64 rsi,
			    guint64 rax, guint64 rcx, guint64 rdx,
			    guint64 rethrow)
{
	static void (*restore_context) (MonoContext *);
	MonoContext ctx;

	if (!restore_context)
		restore_context = mono_get_restore_context ();

	ctx.rsp = rsp;
	ctx.rip = rip;
	ctx.rbx = rbx;
	ctx.rbp = rbp;
	ctx.r12 = r12;
	ctx.r13 = r13;
	ctx.r14 = r14;
	ctx.r15 = r15;
	ctx.rdi = rdi;
	ctx.rsi = rsi;
	ctx.rax = rax;
	ctx.rcx = rcx;
	ctx.rdx = rdx;

	if (mono_object_isinst (exc, mono_defaults.exception_class)) {
		MonoException *mono_ex = (MonoException*)exc;
		if (!rethrow)
			mono_ex->stack_trace = NULL;
	}

	if (mono_debug_using_mono_debugger ()) {
		guint8 buf [16], *code;

		mono_breakpoint_clean_code (NULL, (gpointer)rip, 8, buf, sizeof (buf));
		code = buf + 8;

		if (buf [3] == 0xe8) {
			MonoContext ctx_cp = ctx;
			ctx_cp.rip = rip - 5;

			if (mono_debugger_handle_exception (&ctx_cp, exc)) {
				restore_context (&ctx_cp);
				g_assert_not_reached ();
			}
		}
	}

	/* adjust eip so that it points into the call instruction */
	ctx.rip -= 1;

	mono_handle_exception (&ctx, exc, (gpointer)rip, FALSE);
	restore_context (&ctx);

	g_assert_not_reached ();
}

static gpointer
get_throw_trampoline (gboolean rethrow, guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
{
	guint8* start;
	guint8 *code;

	start = code = mono_global_codeman_reserve (64);

	code = start;

	*ji = NULL;

	amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, 8);

	/* reverse order */
	amd64_push_imm (code, rethrow);
	amd64_push_reg (code, AMD64_RDX);
	amd64_push_reg (code, AMD64_RCX);
	amd64_push_reg (code, AMD64_RAX);
	amd64_push_reg (code, AMD64_RSI);
	amd64_push_reg (code, AMD64_RDI);
	amd64_push_reg (code, AMD64_R15);
	amd64_push_reg (code, AMD64_R14);
	amd64_push_reg (code, AMD64_R13);
	amd64_push_reg (code, AMD64_R12);
	amd64_push_reg (code, AMD64_RBP);
	amd64_push_reg (code, AMD64_RBX);

	/* SP */
	amd64_lea_membase (code, AMD64_RAX, AMD64_R11, 8);
	amd64_push_reg (code, AMD64_RAX);

	/* IP */
	amd64_push_membase (code, AMD64_R11, 0);

	/* Exception */
	amd64_push_reg (code, AMD64_ARG_REG1);

#ifdef PLATFORM_WIN32
	/* align stack */
	amd64_push_imm (code, 0);
	amd64_push_imm (code, 0);
	amd64_push_imm (code, 0);
	amd64_push_imm (code, 0);
	amd64_push_imm (code, 0);
	amd64_push_imm (code, 0);
#endif
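
	/*
	 * At this point the stack holds, starting at the lowest address:
	 * exc, rip, rsp, rbx, rbp, r12, r13, r14, r15, rdi, rsi, rax, rcx,
	 * rdx, rethrow, i.e. the stack-passed arguments of
	 * mono_amd64_throw_exception () in declaration order (the dummy
	 * arguments are the ones passed in registers).
	 */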

	if (aot) {
		*ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_throw_exception");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, mono_amd64_throw_exception);
	}
	amd64_call_reg (code, AMD64_R11);
	amd64_breakpoint (code);

	mono_arch_flush_icache (start, code - start);

	g_assert ((code - start) < 64);

	*code_size = code - start;

	return start;
}

/**
 * mono_arch_get_throw_exception:
 *
 * Returns a function pointer which can be used to raise
 * exceptions. The returned function has the following
 * signature: void (*func) (MonoException *exc);
 */
gpointer
mono_arch_get_throw_exception_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
{
	return get_throw_trampoline (FALSE, code_size, ji, aot);
}

gpointer
mono_arch_get_rethrow_exception_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
{
	return get_throw_trampoline (TRUE, code_size, ji, aot);
}

gpointer
mono_arch_get_throw_exception_by_name_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
{
	guint8* start;
	guint8 *code;

	start = code = mono_global_codeman_reserve (64);

	*ji = NULL;

	/* Not used on amd64 */
	amd64_breakpoint (code);

	mono_arch_flush_icache (start, code - start);

	*code_size = code - start;

	return start;
}

/**
 * mono_arch_get_throw_corlib_exception:
 *
 * Returns a function pointer which can be used to raise
 * corlib exceptions. The returned function has the following
 * signature: void (*func) (guint32 ex_token, guint32 offset);
 * Here, offset is the offset which needs to be subtracted from the caller IP
 * to get the IP of the throw. Passing the offset has the advantage that it
 * needs no relocations in the caller.
 */
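/*
 * For example, with a caller return address of caller_ip, the code below
 * recovers the throw site as throw_ip = caller_ip - offset.
 */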
gpointer
mono_arch_get_throw_corlib_exception_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
{
	static guint8* start;
	guint8 *code;
	guint64 throw_ex;

	start = code = mono_global_codeman_reserve (64);

	*ji = NULL;

	/* Push throw_ip */
	amd64_push_reg (code, AMD64_ARG_REG2);

	/* Call exception_from_token */
	amd64_mov_reg_reg (code, AMD64_ARG_REG2, AMD64_ARG_REG1, 8);
	if (aot) {
		*ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_IMAGE, mono_defaults.exception_class->image);
		amd64_mov_reg_membase (code, AMD64_ARG_REG1, AMD64_RIP, 0, 8);
		*ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_exception_from_token");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_ARG_REG1, mono_defaults.exception_class->image);
		amd64_mov_reg_imm (code, AMD64_R11, mono_exception_from_token);
	}
#ifdef PLATFORM_WIN32
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 32);
#endif
	amd64_call_reg (code, AMD64_R11);
#ifdef PLATFORM_WIN32
	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 32);
#endif

	/* Compute throw_ip */
	amd64_pop_reg (code, AMD64_ARG_REG2);
	/* return addr */
	amd64_pop_reg (code, AMD64_ARG_REG3);
	amd64_alu_reg_reg (code, X86_SUB, AMD64_ARG_REG3, AMD64_ARG_REG2);

	/* Put the throw_ip at the top of the misaligned stack */
	amd64_push_reg (code, AMD64_ARG_REG3);

	throw_ex = (guint64)mono_get_throw_exception ();

	/* Call throw_exception */
	amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_RAX, 8);
	if (aot) {
		*ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_throw_exception");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, throw_ex);
	}
	/* The original IP is on the stack */
	amd64_jump_reg (code, AMD64_R11);

	g_assert ((code - start) < 64);

	mono_arch_flush_icache (start, code - start);

	*code_size = code - start;

	return start;
}

/* mono_arch_find_jit_info:
 *
 * This function is used to gather information from @ctx. It returns the
 * MonoJitInfo of the corresponding function, unwinds one stack frame and
 * stores the resulting context into @new_ctx. It also stores a string
 * describing the stack location into @trace (if not NULL), and modifies
 * the @lmf if necessary. @native_offset returns the IP offset from the
 * start of the function or -1 if that info is not available.
 */
MonoJitInfo *
mono_arch_find_jit_info (MonoDomain *domain, MonoJitTlsData *jit_tls, MonoJitInfo *res, MonoJitInfo *prev_ji, MonoContext *ctx,
			 MonoContext *new_ctx, MonoLMF **lmf, gboolean *managed)
{
	MonoJitInfo *ji;
	gpointer ip = MONO_CONTEXT_GET_IP (ctx);

	/* Avoid costly table lookup during stack overflow */
	if (prev_ji && (ip > prev_ji->code_start && ((guint8*)ip < ((guint8*)prev_ji->code_start) + prev_ji->code_size)))
		ji = prev_ji;
	else
		ji = mono_jit_info_table_find (domain, ip);

	if (managed)
		*managed = FALSE;

	*new_ctx = *ctx;

	if (ji != NULL) {
		gssize regs [MONO_MAX_IREGS + 1];
		guint8 *cfa;
		guint32 unwind_info_len;
		guint8 *unwind_info;

		if (managed)
			if (!ji->method->wrapper_type)
				*managed = TRUE;

		if (ji->from_aot)
			unwind_info = mono_aot_get_unwind_info (ji, &unwind_info_len);
		else
			unwind_info = mono_get_cached_unwind_info (ji->used_regs, &unwind_info_len);

		regs [AMD64_RAX] = new_ctx->rax;
		regs [AMD64_RBX] = new_ctx->rbx;
		regs [AMD64_RCX] = new_ctx->rcx;
		regs [AMD64_RDX] = new_ctx->rdx;
		regs [AMD64_RBP] = new_ctx->rbp;
		regs [AMD64_RSP] = new_ctx->rsp;
		regs [AMD64_RSI] = new_ctx->rsi;
		regs [AMD64_RDI] = new_ctx->rdi;
		regs [AMD64_RIP] = new_ctx->rip;
		regs [AMD64_R12] = new_ctx->r12;
		regs [AMD64_R13] = new_ctx->r13;
		regs [AMD64_R14] = new_ctx->r14;
		regs [AMD64_R15] = new_ctx->r15;

		mono_unwind_frame (unwind_info, unwind_info_len, ji->code_start,
				   (guint8*)ji->code_start + ji->code_size,
				   ip, regs, MONO_MAX_IREGS + 1, &cfa);

		new_ctx->rax = regs [AMD64_RAX];
		new_ctx->rbx = regs [AMD64_RBX];
		new_ctx->rcx = regs [AMD64_RCX];
		new_ctx->rdx = regs [AMD64_RDX];
		new_ctx->rbp = regs [AMD64_RBP];
		new_ctx->rsp = regs [AMD64_RSP];
		new_ctx->rsi = regs [AMD64_RSI];
		new_ctx->rdi = regs [AMD64_RDI];
		new_ctx->rip = regs [AMD64_RIP];
		new_ctx->r12 = regs [AMD64_R12];
		new_ctx->r13 = regs [AMD64_R13];
		new_ctx->r14 = regs [AMD64_R14];
		new_ctx->r15 = regs [AMD64_R15];

		/* The CFA becomes the new SP value */
		new_ctx->rsp = (gssize)cfa;

		/* Adjust IP */
		new_ctx->rip --;

		if (*lmf && ((*lmf) != jit_tls->first_lmf) && (MONO_CONTEXT_GET_SP (ctx) >= (gpointer)(*lmf)->rsp)) {
			/* remove any unused lmf */
			*lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~1);
		}

		/* Pop arguments off the stack */
		{
			MonoJitArgumentInfo *arg_info = g_newa (MonoJitArgumentInfo, mono_method_signature (ji->method)->param_count + 1);

			guint32 stack_to_pop = mono_arch_get_argument_info (mono_method_signature (ji->method), mono_method_signature (ji->method)->param_count, arg_info);
			new_ctx->rsp += stack_to_pop;
		}

		return ji;
	} else if (*lmf) {
		guint64 rip;

		if (((guint64)(*lmf)->previous_lmf) & 1) {
			/* This LMF has the rip field set */
			rip = (*lmf)->rip;
		} else if ((*lmf)->rsp == 0) {
			/* Top LMF entry */
			return (gpointer)-1;
		} else {
			/*
			 * The rsp field is set just before the call which transitioned to native
			 * code. Obtain the rip from the stack.
			 */
			rip = *(guint64*)((*lmf)->rsp - sizeof (gpointer));
		}

		ji = mono_jit_info_table_find (domain, (gpointer)rip);
		if (!ji) {
			// FIXME: This can happen with multiple appdomains (bug #444383)
			return (gpointer)-1;
		}

		new_ctx->rip = rip;
		new_ctx->rbp = (*lmf)->rbp;
		new_ctx->rsp = (*lmf)->rsp;

		new_ctx->rbx = (*lmf)->rbx;
		new_ctx->r12 = (*lmf)->r12;
		new_ctx->r13 = (*lmf)->r13;
		new_ctx->r14 = (*lmf)->r14;
		new_ctx->r15 = (*lmf)->r15;
#ifdef PLATFORM_WIN32
		new_ctx->rdi = (*lmf)->rdi;
		new_ctx->rsi = (*lmf)->rsi;
#endif

		*lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~1);

		return ji ? ji : res;
	}

	return NULL;
}

/**
 * mono_arch_handle_exception:
 *
 * @ctx: saved processor state
 * @obj: the exception object
 */
gboolean
mono_arch_handle_exception (void *sigctx, gpointer obj, gboolean test_only)
{
	MonoContext mctx;

	mono_arch_sigctx_to_monoctx (sigctx, &mctx);

	if (mono_debugger_handle_exception (&mctx, (MonoObject *)obj))
		return TRUE;

	mono_handle_exception (&mctx, obj, MONO_CONTEXT_GET_IP (&mctx), test_only);

	mono_arch_monoctx_to_sigctx (&mctx, sigctx);

	return TRUE;
}

#ifdef MONO_ARCH_USE_SIGACTION
static inline guint64*
gregs_from_ucontext (ucontext_t *ctx)
{
#ifdef __FreeBSD__
	guint64 *gregs = (guint64 *) &ctx->uc_mcontext;
#else
	guint64 *gregs = (guint64 *) &ctx->uc_mcontext.gregs;
#endif

	return gregs;
}
#endif

void
mono_arch_sigctx_to_monoctx (void *sigctx, MonoContext *mctx)
{
#ifdef MONO_ARCH_USE_SIGACTION
	ucontext_t *ctx = (ucontext_t*)sigctx;

	guint64 *gregs = gregs_from_ucontext (ctx);

	mctx->rax = gregs [REG_RAX];
	mctx->rbx = gregs [REG_RBX];
	mctx->rcx = gregs [REG_RCX];
	mctx->rdx = gregs [REG_RDX];
	mctx->rbp = gregs [REG_RBP];
	mctx->rsp = gregs [REG_RSP];
	mctx->rsi = gregs [REG_RSI];
	mctx->rdi = gregs [REG_RDI];
	mctx->rip = gregs [REG_RIP];
	mctx->r12 = gregs [REG_R12];
	mctx->r13 = gregs [REG_R13];
	mctx->r14 = gregs [REG_R14];
	mctx->r15 = gregs [REG_R15];
#else
	MonoContext *ctx = (MonoContext *)sigctx;

	mctx->rax = ctx->rax;
	mctx->rbx = ctx->rbx;
	mctx->rcx = ctx->rcx;
	mctx->rdx = ctx->rdx;
	mctx->rbp = ctx->rbp;
	mctx->rsp = ctx->rsp;
	mctx->rsi = ctx->rsi;
	mctx->rdi = ctx->rdi;
	mctx->rip = ctx->rip;
	mctx->r12 = ctx->r12;
	mctx->r13 = ctx->r13;
	mctx->r14 = ctx->r14;
	mctx->r15 = ctx->r15;
#endif
}

void
mono_arch_monoctx_to_sigctx (MonoContext *mctx, void *sigctx)
{
#ifdef MONO_ARCH_USE_SIGACTION
	ucontext_t *ctx = (ucontext_t*)sigctx;

	guint64 *gregs = gregs_from_ucontext (ctx);

	gregs [REG_RAX] = mctx->rax;
	gregs [REG_RBX] = mctx->rbx;
	gregs [REG_RCX] = mctx->rcx;
	gregs [REG_RDX] = mctx->rdx;
	gregs [REG_RBP] = mctx->rbp;
	gregs [REG_RSP] = mctx->rsp;
	gregs [REG_RSI] = mctx->rsi;
	gregs [REG_RDI] = mctx->rdi;
	gregs [REG_RIP] = mctx->rip;
	gregs [REG_R12] = mctx->r12;
	gregs [REG_R13] = mctx->r13;
	gregs [REG_R14] = mctx->r14;
	gregs [REG_R15] = mctx->r15;
#else
	MonoContext *ctx = (MonoContext *)sigctx;

	ctx->rax = mctx->rax;
	ctx->rbx = mctx->rbx;
	ctx->rcx = mctx->rcx;
	ctx->rdx = mctx->rdx;
	ctx->rbp = mctx->rbp;
	ctx->rsp = mctx->rsp;
	ctx->rsi = mctx->rsi;
	ctx->rdi = mctx->rdi;
	ctx->rip = mctx->rip;
	ctx->r12 = mctx->r12;
	ctx->r13 = mctx->r13;
	ctx->r14 = mctx->r14;
	ctx->r15 = mctx->r15;
#endif
}

gpointer
mono_arch_ip_from_context (void *sigctx)
{
#ifdef MONO_ARCH_USE_SIGACTION
	ucontext_t *ctx = (ucontext_t*)sigctx;

	guint64 *gregs = gregs_from_ucontext (ctx);

	return (gpointer)gregs [REG_RIP];
#else
	MonoContext *ctx = sigctx;
	return (gpointer)ctx->rip;
#endif
}

static void
restore_soft_guard_pages (void)
{
	MonoJitTlsData *jit_tls = TlsGetValue (mono_jit_tls_id);
	if (jit_tls->stack_ovf_guard_base)
		mono_mprotect (jit_tls->stack_ovf_guard_base, jit_tls->stack_ovf_guard_size, MONO_MMAP_NONE);
}

/*
 * This function modifies mctx so that when it is restored, it
 * won't execute starting at mctx.eip, but in a function that
 * will restore the protection on the soft-guard pages and return back to
 * continue at mctx.eip.
 */
static void
prepare_for_guard_pages (MonoContext *mctx)
{
	gpointer *sp;
	sp = (gpointer)(mctx->rsp);
	sp -= 1;
	/* the return addr */
	sp [0] = (gpointer)(mctx->rip);
	mctx->rip = (guint64)restore_soft_guard_pages;
	mctx->rsp = (guint64)sp;
}
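
/*
 * In effect, once the doctored context is restored, restore_soft_guard_pages ()
 * runs first; its ret pops the original mctx->rip pushed above, so execution
 * resumes where the fault occurred, now with the guard pages re-protected.
 */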

static void
altstack_handle_and_restore (void *sigctx, gpointer obj, gboolean stack_ovf)
{
	void (*restore_context) (MonoContext *);
	MonoContext mctx;

	restore_context = mono_get_restore_context ();
	mono_arch_sigctx_to_monoctx (sigctx, &mctx);

	if (mono_debugger_handle_exception (&mctx, (MonoObject *)obj)) {
		if (stack_ovf)
			prepare_for_guard_pages (&mctx);
		restore_context (&mctx);
	}

	mono_handle_exception (&mctx, obj, MONO_CONTEXT_GET_IP (&mctx), FALSE);
	if (stack_ovf)
		prepare_for_guard_pages (&mctx);
	restore_context (&mctx);
}

void
mono_arch_handle_altstack_exception (void *sigctx, gpointer fault_addr, gboolean stack_ovf)
{
#ifdef MONO_ARCH_USE_SIGACTION
	MonoException *exc = NULL;
	ucontext_t *ctx = (ucontext_t*)sigctx;
	guint64 *gregs = gregs_from_ucontext (ctx);
	MonoJitInfo *ji = mono_jit_info_table_find (mono_domain_get (), (gpointer)gregs [REG_RIP]);
	gpointer *sp;
	int frame_size;

	if (stack_ovf)
		exc = mono_domain_get ()->stack_overflow_ex;
	if (!ji)
		mono_handle_native_sigsegv (SIGSEGV, sigctx);

	/* setup a call frame on the real stack so that control is returned there
	 * and exception handling can continue.
	 * The frame looks like:
	 *   ucontext struct
	 *   ...
	 *   return ip
	 * 128 is the size of the red zone
	 */
	frame_size = sizeof (ucontext_t) + sizeof (gpointer) * 4 + 128;
	frame_size += 15;
	frame_size &= ~15;
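	/* i.e. round frame_size up to a multiple of 16 to keep the new frame ABI-aligned */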
	sp = (gpointer)(gregs [REG_RSP] & ~15);
	sp = (gpointer)((char*)sp - frame_size);
	/* the arguments must be aligned */
	sp [-1] = (gpointer)gregs [REG_RIP];
	/* may need to adjust pointers in the new struct copy, depending on the OS */
	memcpy (sp + 4, ctx, sizeof (ucontext_t));
	/* on return from the signal handler, execution starts in altstack_handle_and_restore() */
	gregs [REG_RIP] = (unsigned long)altstack_handle_and_restore;
	gregs [REG_RSP] = (unsigned long)(sp - 1);
	gregs [REG_RDI] = (unsigned long)(sp + 4);
	gregs [REG_RSI] = (guint64)exc;
	gregs [REG_RDX] = stack_ovf;
#endif
}

guint64
mono_amd64_get_original_ip (void)
{
	MonoLMF *lmf = mono_get_lmf ();

	g_assert (lmf);

	/* Reset the change to previous_lmf */
	lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf & ~1);

	return lmf->rip;
}

gpointer
mono_arch_get_throw_pending_exception_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
{
	guint8 *code, *start;
	guint8 *br[1];
	gpointer throw_trampoline;

	*ji = NULL;

	start = code = mono_global_codeman_reserve (128);

	/* We are in the frame of a managed method after a call */
	/*
	 * We would like to throw the pending exception in such a way that it looks to
	 * be thrown from the managed method.
	 */

	/* Save registers which might contain the return value of the call */
	amd64_push_reg (code, AMD64_RAX);
	amd64_push_reg (code, AMD64_RDX);

	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
	amd64_movsd_membase_reg (code, AMD64_RSP, 0, AMD64_XMM0);

	/* Align stack */
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);

	/* Obtain the pending exception */
	if (aot) {
		*ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_thread_get_and_clear_pending_exception");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, mono_thread_get_and_clear_pending_exception);
	}
	amd64_call_reg (code, AMD64_R11);

	/* Check if it is NULL, and branch */
	amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0);
	br[0] = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE);

	/* exc != NULL branch */

	/* Save the exc on the stack */
	amd64_push_reg (code, AMD64_RAX);
	/* Align stack */
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);

	/* Obtain the original ip and clear the flag in previous_lmf */
	if (aot) {
		*ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_get_original_ip");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, mono_amd64_get_original_ip);
	}
	amd64_call_reg (code, AMD64_R11);

	/* Load exc */
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, 8, 8);

	/* Pop saved stuff from the stack */
	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 6 * 8);

	/* Setup arguments for the throw trampoline */
	/* Exception */
	amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_R11, 8);
	/* The trampoline expects the caller ip to be pushed on the stack */
	amd64_push_reg (code, AMD64_RAX);

	/* Call the throw trampoline */
	if (aot) {
		*ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_throw_exception");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		throw_trampoline = mono_get_throw_exception ();
		amd64_mov_reg_imm (code, AMD64_R11, throw_trampoline);
	}
	/* We use a jump instead of a call so we can push the original ip on the stack */
	amd64_jump_reg (code, AMD64_R11);

	/* ex == NULL branch */
	mono_amd64_patch (br [0], code);

	/* Obtain the original ip and clear the flag in previous_lmf */
	if (aot) {
		*ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_get_original_ip");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, mono_amd64_get_original_ip);
	}
	amd64_call_reg (code, AMD64_R11);
	amd64_mov_reg_reg (code, AMD64_R11, AMD64_RAX, 8);

	/* Restore registers */
	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
	amd64_movsd_reg_membase (code, AMD64_XMM0, AMD64_RSP, 0);
	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
	amd64_pop_reg (code, AMD64_RDX);
	amd64_pop_reg (code, AMD64_RAX);

	/* Return to original code */
	amd64_jump_reg (code, AMD64_R11);

	g_assert ((code - start) < 128);

	*code_size = code - start;

	return start;
}

static gpointer throw_pending_exception;

/*
 * Called when a thread receives an async exception while executing unmanaged code.
 * Instead of checking for this exception in the managed-to-native wrapper, we hijack
 * the return address on the stack to point to a helper routine which throws the
 * exception.
 */
void
mono_arch_notify_pending_exc (void)
{
	MonoLMF *lmf = mono_get_lmf ();

	if (lmf->rsp == 0)
		/* Initial LMF */
		return;

	if ((guint64)lmf->previous_lmf & 1)
		/* Already hijacked or trampoline LMF entry */
		return;

	/* lmf->rsp is set just before making the call which transitions to unmanaged code */
	lmf->rip = *(guint64*)(lmf->rsp - 8);
	/* Signal that lmf->rip is set */
	lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf | 1);

	*(gpointer*)(lmf->rsp - 8) = throw_pending_exception;
}
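
/*
 * After the hijack above, the native call returns into the
 * throw_pending_exception trampoline instead of its managed caller; the
 * trampoline then recovers the real return address through
 * mono_amd64_get_original_ip ().
 */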

void
mono_arch_exceptions_init (void)
{
	guint32 code_size;
	MonoJumpInfo *ji;

	if (mono_aot_only) {
		throw_pending_exception = mono_aot_get_named_code ("throw_pending_exception");
	} else {
		/* Call this to avoid initialization races */
		throw_pending_exception = mono_arch_get_throw_pending_exception_full (&code_size, &ji, FALSE);
	}
}

#ifdef PLATFORM_WIN32

/*
 * The mono_arch_unwindinfo* methods are used to build and add
 * function table info for each emitted method from mono. On Winx64
 * the seh handler will not be called if the mono methods are not
 * added to the function table.
 *
 * We should not need to add non-volatile register info to the
 * table since mono stores that info elsewhere. (Except for the register
 * used for the fp.)
 */

#define MONO_MAX_UNWIND_CODES 22

typedef union _UNWIND_CODE {
	struct {
		guchar CodeOffset;
		guchar UnwindOp : 4;
		guchar OpInfo   : 4;
	};
	gushort FrameOffset;
} UNWIND_CODE, *PUNWIND_CODE;

typedef struct _UNWIND_INFO {
	guchar Version : 3;
	guchar Flags : 5;
	guchar SizeOfProlog;
	guchar CountOfCodes;
	guchar FrameRegister : 4;
	guchar FrameOffset : 4;
	/* custom size for mono, allowing for: */
	/*   UWOP_PUSH_NONVOL ebp               offset = 21 */
	/*   UWOP_ALLOC_LARGE : requires 2 or 3 offset = 20 */
	/*   UWOP_SET_FPREG : requires 2        offset = 17 */
	/*   UWOP_PUSH_NONVOL                   offset = 15-0 */
	UNWIND_CODE UnwindCode[MONO_MAX_UNWIND_CODES];

/*	UNWIND_CODE MoreUnwindCode[((CountOfCodes + 1) & ~1) - 1];
 *	union {
 *		OPTIONAL ULONG ExceptionHandler;
 *		OPTIONAL ULONG FunctionEntry;
 *	};
 *	OPTIONAL ULONG ExceptionData[]; */
} UNWIND_INFO, *PUNWIND_INFO;
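
/*
 * Note: the UnwindCode array is filled from its end towards its start (see
 * the codeindex computations below), which keeps the codes ordered from the
 * latest prolog instruction to the earliest, as the Win64 unwinder expects.
 */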

typedef struct
{
	RUNTIME_FUNCTION runtimeFunction;
	UNWIND_INFO unwindInfo;
} MonoUnwindInfo, *PMonoUnwindInfo;

static void
mono_arch_unwindinfo_create (gpointer* monoui)
{
	PMonoUnwindInfo newunwindinfo;
	*monoui = newunwindinfo = g_new0 (MonoUnwindInfo, 1);
	newunwindinfo->unwindInfo.Version = 1;
}

void
mono_arch_unwindinfo_add_push_nonvol (gpointer* monoui, gpointer codebegin, gpointer nextip, guchar reg )
{
	PMonoUnwindInfo unwindinfo;
	PUNWIND_CODE unwindcode;
	guchar codeindex;
	if (!*monoui)
		mono_arch_unwindinfo_create (monoui);

	unwindinfo = (MonoUnwindInfo*)*monoui;

	if (unwindinfo->unwindInfo.CountOfCodes >= MONO_MAX_UNWIND_CODES)
		g_error ("Larger allocation needed for the unwind information.");

	codeindex = MONO_MAX_UNWIND_CODES - (++unwindinfo->unwindInfo.CountOfCodes);
	unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];
	unwindcode->UnwindOp = 0; /*UWOP_PUSH_NONVOL*/
	unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));
	unwindcode->OpInfo = reg;

	if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
		g_error ("Adding unwind info in wrong order.");

	unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
}

void
mono_arch_unwindinfo_add_set_fpreg (gpointer* monoui, gpointer codebegin, gpointer nextip, guchar reg )
{
	PMonoUnwindInfo unwindinfo;
	PUNWIND_CODE unwindcode;
	guchar codeindex;
	if (!*monoui)
		mono_arch_unwindinfo_create (monoui);

	unwindinfo = (MonoUnwindInfo*)*monoui;

	if (unwindinfo->unwindInfo.CountOfCodes + 1 >= MONO_MAX_UNWIND_CODES)
		g_error ("Larger allocation needed for the unwind information.");

	codeindex = MONO_MAX_UNWIND_CODES - (unwindinfo->unwindInfo.CountOfCodes += 2);
	unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];
	unwindcode->FrameOffset = 0; /*Assuming no frame pointer offset for mono*/
	unwindcode++;
	unwindcode->UnwindOp = 3; /*UWOP_SET_FPREG*/
	unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));
	unwindcode->OpInfo = reg;

	unwindinfo->unwindInfo.FrameRegister = reg;

	if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
		g_error ("Adding unwind info in wrong order.");

	unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
}

void
mono_arch_unwindinfo_add_alloc_stack (gpointer* monoui, gpointer codebegin, gpointer nextip, guint size )
{
	PMonoUnwindInfo unwindinfo;
	PUNWIND_CODE unwindcode;
	guchar codeindex;
	guchar codesneeded;
	if (!*monoui)
		mono_arch_unwindinfo_create (monoui);

	unwindinfo = (MonoUnwindInfo*)*monoui;

	if (size < 0x8)
		g_error ("Stack allocation must be equal to or greater than 0x8.");

	if (size <= 0x80)
		codesneeded = 1;
	else if (size <= 0x7FFF8)
		codesneeded = 2;
	else
		codesneeded = 3;

	if (unwindinfo->unwindInfo.CountOfCodes + codesneeded > MONO_MAX_UNWIND_CODES)
		g_error ("Larger allocation needed for the unwind information.");

	codeindex = MONO_MAX_UNWIND_CODES - (unwindinfo->unwindInfo.CountOfCodes += codesneeded);
	unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];

	if (codesneeded == 1) {
		/*The size of the allocation is
		  (the number in the OpInfo member) times 8 plus 8*/
		unwindcode->OpInfo = (size - 8)/8;
		unwindcode->UnwindOp = 2; /*UWOP_ALLOC_SMALL*/
	}
	else {
		if (codesneeded == 3) {
			/*the unscaled size of the allocation is recorded
			  in the next two slots in little-endian format*/
			*((unsigned int*)(&unwindcode->FrameOffset)) = size;
			unwindcode += 2;
			unwindcode->OpInfo = 1;
		}
		else {
			/*the size of the allocation divided by 8
			  is recorded in the next slot*/
			unwindcode->FrameOffset = size/8;
			unwindcode++;
			unwindcode->OpInfo = 0;
		}
		unwindcode->UnwindOp = 1; /*UWOP_ALLOC_LARGE*/
	}

	unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));

	if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
		g_error ("Adding unwind info in wrong order.");

	unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
}
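
/*
 * Worked example: size == 0x20 fits UWOP_ALLOC_SMALL in one slot with
 * OpInfo = (0x20 - 8) / 8 = 3; size == 0x100 takes the two-slot
 * UWOP_ALLOC_LARGE form with FrameOffset = 0x100 / 8 = 0x20.
 */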

guint
mono_arch_unwindinfo_get_size (gpointer monoui)
{
	PMonoUnwindInfo unwindinfo;
	if (!monoui)
		return 0;

	unwindinfo = (MonoUnwindInfo*)monoui;
	return (8 + sizeof (MonoUnwindInfo)) -
		(sizeof (UNWIND_CODE) * (MONO_MAX_UNWIND_CODES - unwindinfo->unwindInfo.CountOfCodes));
}

PRUNTIME_FUNCTION
MONO_GET_RUNTIME_FUNCTION_CALLBACK ( DWORD64 ControlPc, IN PVOID Context )
{
	MonoJitInfo *ji;
	guint64 pos;
	PMonoUnwindInfo targetinfo;
	MonoDomain *domain = mono_domain_get ();

	ji = mono_jit_info_table_find (domain, (char*)ControlPc);
	if (!ji)
		return 0;

	pos = (guint64)(((char*)ji->code_start) + ji->code_size);

	targetinfo = (PMonoUnwindInfo)ALIGN_TO (pos, 8);

	targetinfo->runtimeFunction.UnwindData = ((DWORD64)&targetinfo->unwindInfo) - ((DWORD64)Context);

	return &targetinfo->runtimeFunction;
}

void
mono_arch_unwindinfo_install_unwind_info (gpointer* monoui, gpointer code, guint code_size)
{
	PMonoUnwindInfo unwindinfo, targetinfo;
	guchar codecount;
	guint64 targetlocation;
	if (!*monoui)
		return;

	unwindinfo = (MonoUnwindInfo*)*monoui;
	targetlocation = (guint64)&(((guchar*)code)[code_size]);
	targetinfo = (PMonoUnwindInfo) ALIGN_TO(targetlocation, 8);

	unwindinfo->runtimeFunction.EndAddress = code_size;
	unwindinfo->runtimeFunction.UnwindData = ((guchar*)&targetinfo->unwindInfo) - ((guchar*)code);

	memcpy (targetinfo, unwindinfo, sizeof (MonoUnwindInfo) - (sizeof (UNWIND_CODE) * MONO_MAX_UNWIND_CODES));

	codecount = unwindinfo->unwindInfo.CountOfCodes;
	if (codecount) {
		memcpy (&targetinfo->unwindInfo.UnwindCode[0], &unwindinfo->unwindInfo.UnwindCode[MONO_MAX_UNWIND_CODES-codecount],
			sizeof (UNWIND_CODE) * unwindinfo->unwindInfo.CountOfCodes);
	}

	g_free (unwindinfo);
	*monoui = 0;

	RtlInstallFunctionTableCallback (((DWORD64)code) | 0x3, (DWORD64)code, code_size, MONO_GET_RUNTIME_FUNCTION_CALLBACK, code, NULL);
}

#endif