/*
 * tramp-ia64.c: JIT trampoline code for ia64
 *
 * Authors:
 *   Zoltan Varga (vargaz@gmail.com)
 *
 * (C) 2001 Ximian, Inc.
 */
#include <config.h>
#include <glib.h>

#include <mono/metadata/appdomain.h>
#include <mono/metadata/marshal.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/mono-debug-debugger.h>
#include <mono/arch/ia64/ia64-codegen.h>

#include "mini.h"
#include "mini-ia64.h"

#define GP_SCRATCH_REG 31
#define GP_SCRATCH_REG2 30

/**
 * mono_arch_get_unbox_trampoline:
 * @gsctx: the generic sharing context
 * @m: method pointer
 * @addr: pointer to native code for @m
 *
 * When value type methods are called through the vtable, we need to unbox the
 * this argument. This method returns a pointer to a trampoline which does
 * unboxing before calling the method.
 */
gpointer
mono_arch_get_unbox_trampoline (MonoGenericSharingContext *gsctx, MonoMethod *m, gpointer addr)
{
	guint8 *buf;
	gpointer func_addr, func_gp;
	Ia64CodegenState code;
	int this_reg = 0;
	gpointer *desc;
	MonoDomain *domain = mono_domain_get ();

	/* FIXME: Optimize this */
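
	/* If the method returns a value type, the first input register is
	 * presumably taken by the valuetype return address, so the this
	 * argument moves to the second slot */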
	if (MONO_TYPE_ISSTRUCT (mono_method_signature (m)->ret))
		this_reg = 1;
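
	/* An ia64 function pointer is a function descriptor: word 0 holds the
	 * entry point, word 1 the gp (global pointer) the callee expects */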
	func_addr = ((gpointer*)addr) [0];
	func_gp = ((gpointer*)addr) [1];

	buf = mono_domain_code_reserve (domain, 256);

	/* Since the this reg is a stacked register, it is a bit hard to access */
	ia64_codegen_init (code, buf);
	ia64_alloc (code, 40, 8, 1, 0, 0);
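	/* Unbox: advance the this pointer past the MonoObject header so it
	 * points at the raw value type data */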
	ia64_adds_imm (code, 32 + this_reg, sizeof (MonoObject), 32 + this_reg);
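	/* Restore the ar.pfs saved into r40 by the alloc above and tail-branch
	 * to the method */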
	ia64_mov_to_ar_i (code, IA64_PFS, 40);
	ia64_movl (code, GP_SCRATCH_REG, func_addr);
	ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG);
	ia64_br_cond_reg (code, IA64_B6);
	ia64_codegen_close (code);

	g_assert (code.buf - buf < 256);

	mono_arch_flush_icache (buf, code.buf - buf);

	/* FIXME: */
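	/* Return a function descriptor for the trampoline itself: the entry
	 * point is the generated code, the gp is the target method's gp */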
	desc = g_malloc0 (sizeof (gpointer) * 2);
	desc [0] = buf;
	desc [1] = func_gp;

	return desc;
}

void
mono_arch_patch_callsite (guint8 *method_start, guint8 *code, guint8 *addr)
{
	guint8 *callsite_begin;
	guint64 *callsite = (guint64*)(gpointer)(code - 16);
	guint64 *next_bundle;
	guint64 ins, instructions [3];
	guint64 buf [16];
	Ia64CodegenState gen;
	gpointer func = ((gpointer*)(gpointer)addr)[0];
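
	/* Bundles are 16 bytes (two 64-bit words), so stepping back two words
	 * walks one bundle at a time, searching for the MLX bundle which holds
	 * the movl of the call target */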
	while ((ia64_bundle_template (callsite) != IA64_TEMPLATE_MLX) &&
		   (ia64_bundle_template (callsite) != IA64_TEMPLATE_MLXS))
		callsite -= 2;
	callsite_begin = (guint8*)callsite;

	next_bundle = callsite + 2;
	ins = ia64_bundle_ins1 (next_bundle);
	if (ia64_ins_opcode (ins) == 5) {
		/* ld8_inc_imm -> indirect call through a function pointer */
		g_assert (ia64_ins_r1 (ins) == GP_SCRATCH_REG2);
		g_assert (ia64_ins_r3 (ins) == GP_SCRATCH_REG);

		return;
	}

	/* Patch the code generated by emit_call */

	instructions [0] = ia64_bundle_ins1 (callsite);
	instructions [1] = ia64_bundle_ins2 (callsite);
	instructions [2] = ia64_bundle_ins3 (callsite);
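
	/* A movl occupies the L and X slots of an MLX bundle, i.e. instruction
	 * slots 1 and 2, so slot 0 of the bundle is left untouched */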
	ia64_codegen_init (gen, (guint8*)buf);
	ia64_movl (gen, GP_SCRATCH_REG, func);
	instructions [1] = gen.instructions [0];
	instructions [2] = gen.instructions [1];

	ia64_codegen_init (gen, (guint8*)buf);
	ia64_emit_bundle_template (&gen, ia64_bundle_template (callsite), instructions [0], instructions [1], instructions [2]);
	ia64_codegen_close (gen);

	/* This might not be safe, but not all Itanium processors support st16 */
	callsite [0] = buf [0];
	callsite [1] = buf [1];

	mono_arch_flush_icache (callsite_begin, code - callsite_begin);
}

void
mono_arch_patch_plt_entry (guint8 *code, guint8 *addr)
{
	g_assert_not_reached ();
}

void
mono_arch_nullify_class_init_trampoline (guint8 *code, gssize *regs)
{
	guint8 *callsite_begin;
	guint64 *callsite = (guint64*)(gpointer)(code - 16);
	guint64 instructions [3];
	guint64 buf [16];
	Ia64CodegenState gen;

	while ((ia64_bundle_template (callsite) != IA64_TEMPLATE_MLX) &&
		   (ia64_bundle_template (callsite) != IA64_TEMPLATE_MLXS))
		callsite -= 2;
	callsite_begin = (guint8*)callsite;

	/* Replace the code generated by emit_call with a set of nops */

	/* The first bundle might have other instructions in it */
	instructions [0] = ia64_bundle_ins1 (callsite);
	instructions [1] = IA64_NOP_X;
	instructions [2] = IA64_NOP_X;

	ia64_codegen_init (gen, (guint8*)buf);
	ia64_emit_bundle_template (&gen, ia64_bundle_template (callsite), instructions [0], instructions [1], instructions [2]);
	ia64_codegen_close (gen);

	/* This might not be safe, but not all Itanium processors support st16 */
	callsite [0] = buf [0];
	callsite [1] = buf [1];

	callsite += 2;

	/* The other bundles can be fully replaced with nops */

	ia64_codegen_init (gen, (guint8*)buf);
	ia64_emit_bundle_template (&gen, IA64_TEMPLATE_MII, IA64_NOP_M, IA64_NOP_I, IA64_NOP_I);
	ia64_codegen_close (gen);

	while ((guint8*)callsite < code) {
		callsite [0] = buf [0];
		callsite [1] = buf [1];
		callsite += 2;
	}

	mono_arch_flush_icache (callsite_begin, code - callsite_begin);
}

void
mono_arch_nullify_plt_entry (guint8 *code)
{
	g_assert_not_reached ();
}

guchar*
mono_arch_create_trampoline_code (MonoTrampolineType tramp_type)
{
	guint8 *buf, *tramp;
	int i, offset, saved_regs_offset, saved_fpregs_offset, last_offset, framesize;
	int in0, local0, out0, l0, l1, l2, l3, l4, l5, l6, l7, l8, o0, o1, o2, o3;
	gboolean has_caller;
	Ia64CodegenState code;
	unw_dyn_info_t *di;
	unw_dyn_region_info_t *r_pro;

	/*
	 * Since jump trampolines are not patched, this trampoline is executed every
	 * time a call is made to a jump trampoline. So we try to keep things faster
	 * in that case.
	 */
	if (tramp_type == MONO_TRAMPOLINE_JUMP)
		has_caller = FALSE;
	else
		has_caller = TRUE;

	buf = mono_global_codeman_reserve (2048);

	ia64_codegen_init (code, buf);

	/* Stacked Registers */
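	/* On ia64, r32 and up are stacked registers which alloc partitions into
	 * input, local and output areas for the current frame */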
	in0 = 32;
	local0 = in0 + 8;
	out0 = local0 + 16;
	l0 = 40;
	l1 = 41;
	l2 = 42;
	l3 = 43;
	l4 = 44;
	l5 = 45; /* saved ar.pfs */
	l6 = 46; /* arg */
	l7 = 47; /* code */
	l8 = 48; /* saved sp */
	o0 = out0 + 0; /* regs */
	o1 = out0 + 1; /* code */
	o2 = out0 + 2; /* arg */
	o3 = out0 + 3; /* tramp */
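
	/* Room to spill all 128 general registers plus 1K of scratch space,
	 * rounded up to the ABI frame alignment */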
	framesize = (128 * 8) + 1024;
	framesize = (framesize + (MONO_ARCH_FRAME_ALIGNMENT - 1)) & ~ (MONO_ARCH_FRAME_ALIGNMENT - 1);

	/*
	 * Allocate a new register+memory stack frame.
	 * 8 input registers (the max used by the ABI)
	 * 16 locals
	 * 4 output (number of parameters passed to trampoline)
	 */
	ia64_unw_save_reg (code, UNW_IA64_AR_PFS, UNW_IA64_GR + l5);
	ia64_alloc (code, l5, local0 - in0, out0 - local0, 4, 0);
	ia64_unw_save_reg (code, UNW_IA64_SP, UNW_IA64_GR + l8);
	ia64_mov (code, l8, IA64_SP);
	ia64_adds_imm (code, IA64_SP, (-framesize), IA64_SP);

	offset = 16; /* scratch area */

	/* Save the argument received from the specific trampoline */
	ia64_mov (code, l6, GP_SCRATCH_REG);

	/* Save the calling address */
	ia64_unw_save_reg (code, UNW_IA64_RP, UNW_IA64_GR + local0 + 7);
	ia64_mov_from_br (code, l7, IA64_B0);

	/* Create unwind info for the prolog */
	ia64_begin_bundle (code);
	r_pro = mono_ia64_create_unwind_region (&code);

	/* Save registers */
	/* Not needed for jump trampolines */
	if (tramp_type != MONO_TRAMPOLINE_JUMP) {
		saved_regs_offset = offset;
		offset += 128 * 8;
		/*
		 * Only the registers which are needed for computing vtable slots need
		 * to be saved.
		 */
		last_offset = -1;
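		/* st8.spill with auto-increment advances l1 by 8, so the base only
		 * has to be reloaded when non-callee registers are skipped */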
		for (i = 0; i < 64; ++i)
			if ((1 << i) & MONO_ARCH_CALLEE_REGS) {
				if (last_offset != i * 8)
					ia64_adds_imm (code, l1, saved_regs_offset + (i * 8), IA64_SP);
				ia64_st8_spill_inc_imm_hint (code, l1, i, 8, 0);
				last_offset = (i + 1) * 8;
			}
	}

	/* Save fp registers */
	saved_fpregs_offset = offset;
	offset += 8 * 8;
	ia64_adds_imm (code, l1, saved_fpregs_offset, IA64_SP);
	for (i = 0; i < 8; ++i)
		ia64_stfd_inc_imm_hint (code, l1, i + 8, 8, 0);

	g_assert (offset < framesize);

	/* Arg1 is the pointer to the saved registers */
	ia64_adds_imm (code, o0, saved_regs_offset, IA64_SP);

	/* Arg2 is the address of the calling code */
	if (has_caller)
		ia64_mov (code, o1, l7);
	else
		ia64_mov (code, o1, 0);

	/* Arg3 is the method/vtable ptr */
	ia64_mov (code, o2, l6);

	/* Arg4 is the trampoline address */
	/* FIXME: */
	ia64_mov (code, o3, 0);

	tramp = (guint8*)mono_get_trampoline_func (tramp_type);

	/* Call the trampoline using an indirect call */
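	/* tramp points to a function descriptor: load the entry point into b6
	 * and the callee's gp into r1 before the call */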
	ia64_movl (code, l0, tramp);
	ia64_ld8_inc_imm (code, l1, l0, 8);
	ia64_mov_to_br (code, IA64_B6, l1);
	ia64_ld8 (code, IA64_GP, l0);
	ia64_br_call_reg (code, 0, IA64_B6);

	/* Check for thread interruption */
	/* This is not perf critical code so no need to check the interrupt flag */
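	/* r8 holds the trampoline's return value (the address of the compiled
	 * code); preserve it in l2 across the checkpoint call */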
	ia64_mov (code, l2, IA64_R8);

	tramp = (guint8*)mono_thread_force_interruption_checkpoint;
	ia64_movl (code, l0, tramp);
	ia64_ld8_inc_imm (code, l1, l0, 8);
	ia64_mov_to_br (code, IA64_B6, l1);
	ia64_ld8 (code, IA64_GP, l0);
	ia64_br_call_reg (code, 0, IA64_B6);

	ia64_mov (code, IA64_R8, l2);

	/* Restore fp regs */
	ia64_adds_imm (code, l1, saved_fpregs_offset, IA64_SP);
	for (i = 0; i < 8; ++i)
		ia64_ldfd_inc_imm (code, i + 8, l1, 8);

	/* FIXME: Handle NATs in fp regs / scratch regs */

	if (tramp_type != MONO_TRAMPOLINE_CLASS_INIT) {
		/* Load method address from function descriptor */
		ia64_ld8 (code, l0, IA64_R8);
		ia64_mov_to_br (code, IA64_B6, l0);
	}

	/* Clean up register/memory stack frame */
	ia64_adds_imm (code, IA64_SP, framesize, IA64_SP);
	ia64_mov_to_ar_i (code, IA64_PFS, l5);

	if (tramp_type == MONO_TRAMPOLINE_CLASS_INIT) {
		ia64_mov_ret_to_br (code, IA64_B0, l7);
		ia64_br_ret_reg (code, IA64_B0);
	}
	else {
		/* Call the compiled method */
		ia64_mov_to_br (code, IA64_B0, l7);
		ia64_br_cond_reg (code, IA64_B6);
	}

	ia64_codegen_close (code);

	g_assert ((code.buf - buf) <= 2048);
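
	/* Register the generated code with libunwind so the stack can be
	 * unwound through the trampoline */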
	/* FIXME: emit unwind info for epilog */
	di = g_malloc0 (sizeof (unw_dyn_info_t));
	di->start_ip = (unw_word_t) buf;
	di->end_ip = (unw_word_t) code.buf;
	di->gp = 0;
	di->format = UNW_INFO_FORMAT_DYNAMIC;
	di->u.pi.name_ptr = (unw_word_t)"ia64_generic_trampoline";
	di->u.pi.regions = r_pro;

	_U_dyn_register (di);

	mono_arch_flush_icache (buf, code.buf - buf);

	return buf;
}

#define TRAMPOLINE_SIZE 128

gpointer
mono_arch_create_specific_trampoline (gpointer arg1, MonoTrampolineType tramp_type, MonoDomain *domain, guint32 *code_len)
{
	guint8 *buf, *tramp;
	gint64 disp;
	Ia64CodegenState code;

	tramp = mono_get_trampoline_code (tramp_type);

	buf = mono_domain_code_reserve (domain, TRAMPOLINE_SIZE);

	/* FIXME: Optimize this */

	ia64_codegen_init (code, buf);

	ia64_movl (code, GP_SCRATCH_REG, arg1);

	ia64_begin_bundle (code);
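	/* Branch displacements are counted in 16 byte bundles, hence the shift
	 * by 4; fall back to an indirect branch when the target is outside the
	 * 21 bit immediate range */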
	disp = (tramp - code.buf) >> 4;
	if (ia64_is_imm21 (disp)) {
		ia64_br_cond (code, disp);
	}
	else {
		ia64_movl (code, GP_SCRATCH_REG2, tramp);
		ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG2);
		ia64_br_cond_reg (code, IA64_B6);
	}

	ia64_codegen_close (code);

	g_assert (code.buf - buf <= TRAMPOLINE_SIZE);

	mono_arch_flush_icache (buf, code.buf - buf);

	if (code_len)
		*code_len = code.buf - buf;

	return buf;
}

void
mono_arch_invalidate_method (MonoJitInfo *ji, void *func, gpointer func_arg)
{
	NOT_IMPLEMENTED;
}

gpointer
mono_arch_create_rgctx_lazy_fetch_trampoline (guint32 encoded_offset)
{
	/* FIXME: implement! */
	g_assert_not_reached ();

	return NULL;
}