1 /*
2 * mini-hppa.c: HPPA backend for the Mono code generator
4 * Copyright (c) 2007 Randolph Chung
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
25 #include "mini.h"
26 #include <string.h>
27 #include <pthread.h>
28 #include <unistd.h>
31 #include <sys/mman.h>
33 #include <mono/metadata/appdomain.h>
34 #include <mono/metadata/debug-helpers.h>
35 #include <mono/metadata/tokentype.h>
36 #include <mono/utils/mono-math.h>
38 #include "mini-hppa.h"
39 #include "trace.h"
40 #include "cpu-hppa.h"
42 #define ALIGN_TO(val,align) (((val) + ((align) - 1)) & ~((align) - 1))
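/* ALIGN_TO rounds val up to the next multiple of align (align must be a
 * power of two), e.g. ALIGN_TO (13, 8) == 16 and ALIGN_TO (16, 8) == 16. */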
43 #define SIGNAL_STACK_SIZE (64 * 1024)
45 #define DEBUG(a) // a
46 #define DEBUG_FUNC_ENTER() // printf("Entering %s\n", __FUNCTION__)
47 #define DEBUG_FUNC_EXIT() // printf("Exiting %s\n", __FUNCTION__)
49 static const guchar
50 branch_b0_table [] = {
51 TRUE, /* OP_HPPA_BEQ */
52 FALSE, /* OP_HPPA_BGE */
53 FALSE, /* OP_HPPA_BGT */
54 TRUE, /* OP_HPPA_BLE */
55 TRUE, /* OP_HPPA_BLT */
56 FALSE, /* OP_HPPA_BNE */
57 FALSE, /* OP_HPPA_BGE_UN */
58 FALSE, /* OP_HPPA_BGT_UN */
59 TRUE, /* OP_HPPA_BLE_UN */
60 TRUE, /* OP_HPPA_BLT_UN */
63 static const guchar
64 branch_b1_table [] = {
65 HPPA_CMP_COND_EQ, /* OP_HPPA_BEQ */
66 HPPA_CMP_COND_SLT, /* OP_HPPA_BGE */
67 HPPA_CMP_COND_SLE, /* OP_HPPA_BGT */
68 HPPA_CMP_COND_SLE, /* OP_HPPA_BLE */
69 HPPA_CMP_COND_SLT, /* OP_HPPA_BLT */
70 HPPA_CMP_COND_EQ, /* OP_HPPA_BNE_UN */
71 HPPA_CMP_COND_ULT, /* OP_HPPA_BGE_UN */
72 HPPA_CMP_COND_ULE, /* OP_HPPA_BGT_UN */
73 HPPA_CMP_COND_ULE, /* OP_HPPA_BLE_UN */
74 HPPA_CMP_COND_ULT, /* OP_HPPA_BLT_UN */
77 /* Note that these are inverted from the OP_xxx, because we nullify
78 * the branch if the condition is met
80 static const guchar
81 float_branch_table [] = {
82 26, /* OP_FBEQ */
83 11, /* OP_FBGE */
84 15, /* OP_FBGT */
85 19, /* OP_FBLE */
86 23, /* OP_FBLT */
87 4, /* OP_FBNE_UN */
88 8, /* OP_FBGE_UN */
89 13, /* OP_FBGT_UN */
90 17, /* OP_FBLE_UN */
91 20, /* OP_FBLT_UN */
94 static const guchar
95 float_ceq_table [] = {
96 26, /* OP_FCEQ */
97 15, /* OP_FCGT */
98 13, /* OP_FCGT_UN */
99 23, /* OP_FCLT */
100 21, /* OP_FCLT_UN */
104 * Branches have short (14 or 17 bit) targets on HPPA. To make longer jumps,
105 * we will need to rely on stubs - basically we create stub structures in
106 * the epilogue that use a long branch to the destination, and any short
107 * jumps inside a method that cannot reach the destination directly will
108 * branch first to the stub.
110 typedef struct MonoOvfJump {
111 union {
112 MonoBasicBlock *bb;
113 const char *exception;
114 } data;
115 guint32 ip_offset;
116 } MonoOvfJump;
118 /* Create a literal 0.0 double for FNEG */
119 double hppa_zero = 0;
121 const char*
122 mono_arch_regname (int reg)
124 static const char * rnames[] = {
125 "hppa_r0", "hppa_r1", "hppa_rp", "hppa_r3", "hppa_r4",
126 "hppa_r5", "hppa_r6", "hppa_r7", "hppa_r8", "hppa_r9",
127 "hppa_r10", "hppa_r11", "hppa_r12", "hppa_r13", "hppa_r14",
128 "hppa_r15", "hppa_r16", "hppa_r17", "hppa_r18", "hppa_r19",
129 "hppa_r20", "hppa_r21", "hppa_r22", "hppa_r23", "hppa_r24",
130 "hppa_r25", "hppa_r26", "hppa_r27", "hppa_r28", "hppa_r29",
131 "hppa_sp", "hppa_r31"
133 if (reg >= 0 && reg < MONO_MAX_IREGS)
134 return rnames [reg];
135 return "unknown";
138 const char*
139 mono_arch_fregname (int reg)
141 static const char *rnames [] = {
142 "hppa_fr0", "hppa_fr1", "hppa_fr2", "hppa_fr3", "hppa_fr4",
143 "hppa_fr5", "hppa_fr6", "hppa_fr7", "hppa_fr8", "hppa_fr9",
144 "hppa_fr10", "hppa_fr11", "hppa_fr12", "hppa_fr13", "hppa_fr14",
145 "hppa_fr15", "hppa_fr16", "hppa_fr17", "hppa_fr18", "hppa_fr19",
146 "hppa_fr20", "hppa_fr21", "hppa_fr22", "hppa_fr23", "hppa_fr24",
147 "hppa_fr25", "hppa_fr26", "hppa_fr27", "hppa_fr28", "hppa_fr29",
148 "hppa_fr30", "hppa_fr31",
151 if (reg >= 0 && reg < MONO_MAX_FREGS)
152 return rnames [reg];
153 else
154 return "unknown";
158 * Initialize the cpu to execute managed code.
160 void
161 mono_arch_cpu_init (void)
163 guint32 dummy;
164 mono_arch_cpu_optimizazions(&dummy);
168 * Initialize architecture specific code.
170 void
171 mono_arch_init (void)
176 * Cleanup architecture specific code.
178 void
179 mono_arch_cleanup (void)
184 * This function returns the optimizations supported on this cpu.
186 guint32
187 mono_arch_cpu_optimizazions (guint32 *exclude_mask)
189 guint32 opts = 0;
190 *exclude_mask = 0;
191 return opts;
194 void
195 mono_arch_flush_icache (guint8 *code, gint size)
197 guint8* p = (guint8*)((guint32)code & ~(0x3f));
198 guint8* end = (guint8*)((guint32)code + size);
199 while (p < end) {
200 __asm__ __volatile__ ("fdc %%r0(%%sr3, %0)\n"
201 "sync\n"
202 "fic %%r0(%%sr3, %0)\n"
203 "sync\n"
204 : : "r"(p));
205 p += 32; /* can be 64 on pa20 cpus */
209 void
210 mono_arch_flush_register_windows (void)
212 /* No register windows on hppa */
215 typedef enum {
216 ArgInIReg,
217 ArgInIRegPair,
218 ArgInFReg,
219 ArgInDReg,
220 ArgOnStack,
221 } ArgStorage;
223 typedef struct {
224 gint16 offset;
225 gint16 size;
226 guint8 type;
227 gint8 reg;
228 ArgStorage storage;
229 } ArgInfo;
231 typedef struct {
232 int nargs;
233 guint32 stack_usage;
234 int struct_return;
235 ArgInfo ret;
236 ArgInfo sig_cookie;
237 ArgInfo args [1];
238 } CallInfo;
240 #define PARAM_REGS 4
241 #define ARGS_OFFSET 36
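/* In the 32-bit HPPA runtime convention the first PARAM_REGS argument words
 * travel in r26..r23 (fr4..fr7 for floating point arguments); ARGS_OFFSET (36)
 * matches the first argument word slot at sp-36, so the offsets computed in
 * add_parameter () are relative to that linkage area. */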
243 static void
244 add_parameter (CallInfo *cinfo, ArgInfo *ainfo, MonoType *type)
246 int is_fp = (type->type == MONO_TYPE_R4 || type->type == MONO_TYPE_R8);
247 int ofs, align;
249 DEBUG_FUNC_ENTER ();
250 ainfo->reg = -1;
251 ainfo->size = mono_type_size (type, &align);
252 ainfo->type = type->type;
254 if (ainfo->size <= 4) {
255 cinfo->stack_usage += 4;
256 ainfo->offset = cinfo->stack_usage - (4 - ainfo->size);
258 else if (ainfo->size <= 8)
260 cinfo->stack_usage += 8;
261 cinfo->stack_usage = ALIGN_TO (cinfo->stack_usage, 8);
262 ainfo->offset = cinfo->stack_usage - (8 - ainfo->size);
264 else
266 cinfo->stack_usage += ainfo->size;
267 cinfo->stack_usage = ALIGN_TO (cinfo->stack_usage, align);
268 ainfo->offset = cinfo->stack_usage;
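/* ofs below is the word index of this argument within the outgoing parameter
 * area; only the first PARAM_REGS words are passed in registers, anything past
 * that stays on the stack relative to the frame pointer. */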
271 ofs = (ALIGN_TO (ainfo->offset, 4) - ARGS_OFFSET) / 4;
272 if (ofs < PARAM_REGS) {
273 if (!is_fp) {
274 if (ainfo->size <= 4)
275 ainfo->storage = ArgInIReg;
276 else
277 ainfo->storage = ArgInIRegPair;
278 ainfo->reg = hppa_r26 - ofs;
279 } else if (type->type == MONO_TYPE_R4) {
280 ainfo->storage = ArgInFReg;
281 ainfo->reg = hppa_fr4 + ofs;
282 } else { /* type->type == MONO_TYPE_R8 */
283 ainfo->storage = ArgInDReg;
284 ainfo->reg = hppa_fr4 + ofs;
287 else {
288 /* frame pointer based offset */
289 ainfo->reg = hppa_r3;
290 ainfo->storage = ArgOnStack;
293 /* All offsets are negative relative to the frame pointer */
294 ainfo->offset = -ainfo->offset;
296 DEBUG_FUNC_EXIT ();
299 static void
300 analyze_return (CallInfo *cinfo, MonoMethodSignature *sig)
302 MonoType *type;
303 int align;
304 int size;
306 type = sig->ret;
307 size = mono_type_size (type, &align);
309 /* ref: mono_type_to_stind */
310 cinfo->ret.type = type->type;
311 if (type->byref) {
312 cinfo->ret.storage = ArgInIReg;
313 cinfo->ret.reg = hppa_r28;
314 } else {
315 handle_enum:
316 switch (type->type) {
317 case MONO_TYPE_VOID:
318 break;
319 case MONO_TYPE_BOOLEAN:
320 case MONO_TYPE_I1:
321 case MONO_TYPE_U1:
322 case MONO_TYPE_I2:
323 case MONO_TYPE_U2:
324 case MONO_TYPE_CHAR:
325 case MONO_TYPE_I4:
326 case MONO_TYPE_U4:
327 case MONO_TYPE_I:
328 case MONO_TYPE_U:
329 case MONO_TYPE_PTR:
330 case MONO_TYPE_FNPTR:
331 case MONO_TYPE_CLASS:
332 case MONO_TYPE_STRING:
333 case MONO_TYPE_OBJECT:
334 case MONO_TYPE_SZARRAY:
335 case MONO_TYPE_ARRAY:
336 cinfo->ret.storage = ArgInIReg;
337 cinfo->ret.reg = hppa_r28;
338 break;
339 case MONO_TYPE_U8:
340 case MONO_TYPE_I8:
341 cinfo->ret.storage = ArgInIRegPair;
342 cinfo->ret.reg = hppa_r28;
343 break;
344 case MONO_TYPE_R4:
345 cinfo->ret.storage = ArgInFReg;
346 cinfo->ret.reg = hppa_fr4;
347 break;
348 case MONO_TYPE_R8:
349 cinfo->ret.storage = ArgInDReg;
350 cinfo->ret.reg = hppa_fr4;
351 break;
352 case MONO_TYPE_GENERICINST:
353 type = &type->data.generic_class->container_class->byval_arg;
354 goto handle_enum;
356 case MONO_TYPE_VALUETYPE:
357 if (type->data.klass->enumtype) {
358 type = mono_class_enum_basetype (type->data.klass);
359 goto handle_enum;
361 /* Fall through */
362 case MONO_TYPE_TYPEDBYREF:
363 cinfo->struct_return = 1;
364 /* cinfo->ret.storage tells us how the ABI expects
365 * the parameter to be returned
367 if (size <= 4) {
368 cinfo->ret.storage = ArgInIReg;
369 cinfo->ret.reg = hppa_r28;
370 } else if (size <= 8) {
371 cinfo->ret.storage = ArgInIRegPair;
372 cinfo->ret.reg = hppa_r28;
373 } else {
374 cinfo->ret.storage = ArgOnStack;
375 cinfo->ret.reg = hppa_sp;
378 /* We always allocate stack space for this because the
379 * arch-indep code expects us to
381 cinfo->stack_usage += size;
382 cinfo->stack_usage = ALIGN_TO (cinfo->stack_usage, align);
383 cinfo->ret.offset = -cinfo->stack_usage;
384 break;
386 default:
387 g_error ("Can't handle as return value 0x%x", sig->ret->type);
393 * get_call_info:
395 * Obtain information about a call according to the calling convention.
397 static CallInfo*
398 get_call_info (MonoMethodSignature *sig, gboolean is_pinvoke)
400 guint32 i;
401 int n = sig->hasthis + sig->param_count;
402 CallInfo *cinfo;
403 MonoType *type;
404 MonoType ptrtype;
405 int dummy;
407 ptrtype.type = MONO_TYPE_PTR;
409 DEBUG_FUNC_ENTER();
410 cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
412 /* The area below ARGS_OFFSET is the linkage area... */
413 cinfo->stack_usage = ARGS_OFFSET - 4;
414 /* -4, because the first argument will allocate the area it needs */
416 /* this */
417 if (sig->hasthis) {
418 add_parameter (cinfo, cinfo->args + 0, &ptrtype);
419 DEBUG (printf ("param <this>: assigned to reg %s offset %d\n", mono_arch_regname (cinfo->args[0].reg), cinfo->args[0].offset));
422 /* TODO: What to do with varargs? */
424 for (i = 0; i < sig->param_count; ++i) {
425 ArgInfo *ainfo = &cinfo->args [sig->hasthis + i];
426 if (sig->params [i]->byref)
427 type = &ptrtype;
428 else
429 type = mono_type_get_underlying_type (sig->params [i]);
430 add_parameter (cinfo, ainfo, type);
432 DEBUG (printf ("param %d: type %d size %d assigned to reg %s offset %d\n", i, type->type, mono_type_size (type, &dummy), mono_arch_regname (ainfo->reg), ainfo->offset));
435 analyze_return (cinfo, sig);
437 DEBUG_FUNC_EXIT();
438 return cinfo;
441 GList *
442 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
444 GList *vars = NULL;
445 int i;
447 DEBUG_FUNC_ENTER();
448 for (i = 0; i < cfg->num_varinfo; i++) {
449 MonoInst *ins = cfg->varinfo [i];
450 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
452 /* unused vars */
453 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
454 continue;
456 if ((ins->flags & (MONO_INST_IS_DEAD|MONO_INST_VOLATILE|MONO_INST_INDIRECT)) ||
457 (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
458 continue;
460 if (mono_is_regsize_var (ins->inst_vtype)) {
461 g_assert (MONO_VARINFO (cfg, i)->reg == -1);
462 g_assert (i == vmv->idx);
463 vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
466 DEBUG_FUNC_EXIT();
468 return vars;
471 GList *
472 mono_arch_get_global_int_regs (MonoCompile *cfg)
474 GList *regs = NULL;
475 int i;
477 /* r3 is sometimes used as our frame pointer, so don't allocate it
478 * r19 is the GOT pointer, don't allocate it either
481 DEBUG_FUNC_ENTER();
482 for (i = 4; i <= 18; i++)
483 regs = g_list_prepend (regs, GUINT_TO_POINTER (i));
484 DEBUG_FUNC_EXIT();
486 return regs;
490 * mono_arch_regalloc_cost:
492 * Return the cost, in number of memory references, of the action of
493 * allocating the variable VMV into a register during global register
494 * allocation.
496 guint32
497 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
499 /* FIXME */
500 return 0;
504 * Set var information according to the calling convention.
505 * The locals var stuff should most likely be split in another method.
507 * updates m->stack_offset based on the amount of stack space needed for
508 * local vars
510 void
511 mono_arch_allocate_vars (MonoCompile *m)
513 MonoMethodSignature *sig;
514 MonoMethodHeader *header;
515 MonoInst *inst;
516 int i, offset, size, align, curinst;
517 guint32 stack_ptr;
518 guint rettype;
519 CallInfo *cinfo;
521 DEBUG_FUNC_ENTER();
522 m->flags |= MONO_CFG_HAS_SPILLUP;
524 header = mono_method_get_header (m->method);
526 sig = mono_method_signature (m->method);
527 DEBUG (printf ("Allocating locals - incoming params:\n"));
528 cinfo = get_call_info (sig, FALSE);
531 * We use the ABI calling conventions for managed code as well.
533 if (m->flags & MONO_CFG_HAS_ALLOCA) {
534 stack_ptr = hppa_r4;
535 m->used_int_regs |= 1 << hppa_r4;
536 } else {
537 stack_ptr = hppa_sp;
540 /* Before this function is called, we would have looked at all
541 * calls from this method and figured out how much space is needed
542 * for the param area.
544 * Locals are allocated backwards, right before the param area
546 /* TODO: in some cases we don't need the frame pointer... */
547 m->frame_reg = hppa_r3;
548 offset = m->param_area;
550 /* Return values can be passed back in one of four ways:
551 * r28 is used for data <= 4 bytes (32-bit ABI)
552 * r28/r29 are used for data >4 && <= 8 bytes
553 * fr4 is used for floating point data
554 * data larger than 8 bytes is returned on the stack pointed to
555 * by r28
557 * This code needs to be in sync with how CEE_RET is handled
558 * in mono_method_to_ir (). In some cases when we return small
559 * structs, the ABI specifies that they should be returned in
560 * registers, but the code in mono_method_to_ir () always emits
561 * a memcpy for valuetype returns, so we need to make sure we
562 * allocate space on the stack for this copy.
564 if (cinfo->struct_return) {
565 /* this is used to stash the incoming r28 pointer */
566 offset += sizeof (gpointer);
567 m->ret->opcode = OP_REGOFFSET;
568 m->ret->inst_basereg = stack_ptr;
569 m->ret->inst_offset = -offset;
570 } else if (sig->ret->type != MONO_TYPE_VOID) {
571 m->ret->opcode = OP_REGVAR;
572 m->ret->inst_c0 = cinfo->ret.reg;
575 curinst = m->locals_start;
576 for (i = curinst; i < m->num_varinfo; ++i) {
577 inst = m->varinfo [i];
579 if (inst->opcode == OP_REGVAR) {
580 DEBUG (printf ("allocating local %d to %s\n", i, mono_arch_regname (inst->dreg)));
581 continue;
584 if (inst->flags & MONO_INST_IS_DEAD)
585 continue;
587 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
588 * pinvoke wrappers when they call functions returning structure */
589 if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (inst->inst_vtype) && inst->inst_vtype->type != MONO_TYPE_TYPEDBYREF)
590 size = mono_class_native_size (inst->inst_vtype->data.klass, &align);
591 else
592 size = mini_type_stack_size (m->generic_sharing_context, inst->inst_vtype, &align);
595 * This is needed since structures containing doubles must be doubleword
596 * aligned.
597 * FIXME: Do this only if needed.
599 if (MONO_TYPE_ISSTRUCT (inst->inst_vtype))
600 align = 8;
603 * variables are accessed as negative offsets from hppa_sp
605 inst->opcode = OP_REGOFFSET;
606 inst->inst_basereg = stack_ptr;
607 offset += size;
608 offset = ALIGN_TO (offset, align);
609 inst->inst_offset = -offset;
611 DEBUG (printf ("allocating local %d (size = %d) to [%s - %d]\n", i, size, mono_arch_regname (inst->inst_basereg), -inst->inst_offset));
614 if (sig->call_convention == MONO_CALL_VARARG) {
615 /* TODO */
618 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
619 ArgInfo *ainfo = &cinfo->args [i];
620 inst = m->args [i];
621 if (inst->opcode != OP_REGVAR) {
622 switch (ainfo->storage) {
623 case ArgInIReg:
624 case ArgInIRegPair:
625 case ArgInFReg:
626 case ArgInDReg:
627 /* Currently mono requests all incoming registers
628 * be assigned to a stack location :-(
630 #if 0
631 if (!(inst->flags & (MONO_INST_VOLATILE | MONO_INST_INDIRECT))) {
632 inst->opcode = OP_REGVAR;
633 inst->dreg = ainfo->reg;
634 DEBUG (printf ("param %d in register %s\n", i, mono_arch_regname (inst->dreg)));
635 break;
637 #endif
638 /* fallthrough */
639 case ArgOnStack:
640 inst->opcode = OP_REGOFFSET;
641 inst->inst_basereg = hppa_r3;
642 inst->inst_offset = ainfo->offset;
643 DEBUG (printf ("param %d stored on stack [%s - %d]\n", i, mono_arch_regname (hppa_r3), -inst->inst_offset));
644 break;
649 m->stack_offset = offset; /* Includes cfg->param_area */
651 g_free (cinfo);
652 DEBUG_FUNC_EXIT();
656 * take the arguments and generate the arch-specific
657 * instructions to properly call the function in call.
658 * This includes pushing, moving arguments to the right register
659 * etc.
661 * sets call->stack_usage and cfg->param_area
663 MonoCallInst*
664 mono_arch_call_opcode (MonoCompile *cfg, MonoBasicBlock* bb, MonoCallInst *call, int is_virtual)
666 MonoInst *arg, *in;
667 MonoMethodSignature *sig;
668 int i, n;
669 CallInfo *cinfo;
670 ArgInfo *ainfo;
672 DEBUG_FUNC_ENTER();
673 DEBUG (printf ("is_virtual = %d\n", is_virtual));
675 sig = call->signature;
676 n = sig->param_count + sig->hasthis;
678 DEBUG (printf ("Calling method with %d parameters\n", n));
680 cinfo = get_call_info (sig, sig->pinvoke);
682 // DEBUG
683 g_assert (sig->call_convention != MONO_CALL_VARARG);
685 for (i = 0; i < n; ++i) {
686 ainfo = &cinfo->args [i];
688 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
689 /* TODO */
692 if (is_virtual && i == 0) {
693 /* the argument will be attached to the call instruction */
694 in = call->args [i];
695 call->used_iregs |= 1 << ainfo->reg;
696 } else {
697 MONO_INST_NEW (cfg, arg, OP_OUTARG);
698 in = call->args [i];
699 arg->cil_code = in->cil_code;
700 arg->inst_left = in;
701 arg->inst_call = call;
702 arg->type = in->type;
704 /* prepend, we'll need to reverse them later */
705 arg->next = call->out_args;
706 call->out_args = arg;
708 switch (ainfo->storage) {
709 case ArgInIReg:
710 case ArgInIRegPair: {
711 MonoHPPAArgInfo *ai = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoHPPAArgInfo));
712 ai->reg = ainfo->reg;
713 ai->size = ainfo->size;
714 ai->offset = ainfo->offset;
715 ai->pass_in_reg = 1;
716 arg->backend.data = ai;
718 call->used_iregs |= 1 << ainfo->reg;
719 if (ainfo->storage == ArgInIRegPair)
720 call->used_iregs |= 1 << (ainfo->reg + 1);
721 if (ainfo->type == MONO_TYPE_VALUETYPE)
722 arg->opcode = OP_OUTARG_VT;
723 break;
725 case ArgOnStack: {
726 MonoHPPAArgInfo *ai = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoHPPAArgInfo));
727 ai->reg = hppa_sp;
728 ai->size = ainfo->size;
729 ai->offset = ainfo->offset;
730 ai->pass_in_reg = 0;
731 arg->backend.data = ai;
732 if (ainfo->type == MONO_TYPE_VALUETYPE)
733 arg->opcode = OP_OUTARG_VT;
734 else
735 arg->opcode = OP_OUTARG_MEMBASE;
736 call->used_iregs |= 1 << ainfo->reg;
737 break;
739 case ArgInFReg:
740 arg->backend.reg3 = ainfo->reg;
741 arg->opcode = OP_OUTARG_R4;
742 call->used_fregs |= 1 << ainfo->reg;
743 break;
744 case ArgInDReg:
745 arg->backend.reg3 = ainfo->reg;
746 arg->opcode = OP_OUTARG_R8;
747 call->used_fregs |= 1 << ainfo->reg;
748 break;
749 default:
750 NOT_IMPLEMENTED;
756 * Reverse the call->out_args list.
759 MonoInst *prev = NULL, *list = call->out_args, *next;
760 while (list) {
761 next = list->next;
762 list->next = prev;
763 prev = list;
764 list = next;
766 call->out_args = prev;
768 call->stack_usage = cinfo->stack_usage;
769 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
770 cfg->param_area = ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT);
772 cfg->flags |= MONO_CFG_HAS_CALLS;
774 g_free (cinfo);
776 DEBUG_FUNC_EXIT();
777 return call;
780 void
781 mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
785 void
786 mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
788 DEBUG_FUNC_ENTER();
789 DEBUG_FUNC_EXIT();
792 static void
793 insert_after_ins (MonoBasicBlock *bb, MonoInst *ins, MonoInst *to_insert)
795 if (ins == NULL) {
796 ins = bb->code;
797 bb->code = to_insert;
798 to_insert->next = ins;
799 } else {
800 to_insert->next = ins->next;
801 ins->next = to_insert;
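/* NEW_INS below links the freshly allocated instruction in after last_ins, so
 * the helper instructions emitted during lowering execute before the
 * instruction currently being rewritten. */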
805 #define NEW_INS(cfg,dest,op) do { \
806 (dest) = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoInst)); \
807 (dest)->opcode = (op); \
808 insert_after_ins (bb, last_ins, (dest)); \
809 } while (0)
811 static int
812 map_to_reg_reg_op (int op)
814 switch (op) {
815 case OP_ADD_IMM:
816 return CEE_ADD;
817 case OP_SUB_IMM:
818 return CEE_SUB;
819 case OP_AND_IMM:
820 return CEE_AND;
821 case OP_COMPARE_IMM:
822 return OP_COMPARE;
823 case OP_ADDCC_IMM:
824 return OP_ADDCC;
825 case OP_ADC_IMM:
826 return OP_ADC;
827 case OP_SUBCC_IMM:
828 return OP_SUBCC;
829 case OP_SBB_IMM:
830 return OP_SBB;
831 case OP_OR_IMM:
832 return CEE_OR;
833 case OP_XOR_IMM:
834 return CEE_XOR;
835 case OP_MUL_IMM:
836 return CEE_MUL;
837 case OP_LOAD_MEMBASE:
838 return OP_LOAD_MEMINDEX;
839 case OP_LOADI4_MEMBASE:
840 return OP_LOADI4_MEMINDEX;
841 case OP_LOADU4_MEMBASE:
842 return OP_LOADU4_MEMINDEX;
843 case OP_LOADU1_MEMBASE:
844 return OP_LOADU1_MEMINDEX;
845 case OP_LOADI2_MEMBASE:
846 return OP_LOADI2_MEMINDEX;
847 case OP_LOADU2_MEMBASE:
848 return OP_LOADU2_MEMINDEX;
849 case OP_LOADI1_MEMBASE:
850 return OP_LOADI1_MEMINDEX;
851 case OP_LOADR4_MEMBASE:
852 return OP_LOADR4_MEMINDEX;
853 case OP_LOADR8_MEMBASE:
854 return OP_LOADR8_MEMINDEX;
855 case OP_STOREI1_MEMBASE_REG:
856 return OP_STOREI1_MEMINDEX;
857 case OP_STOREI2_MEMBASE_REG:
858 return OP_STOREI2_MEMINDEX;
859 case OP_STOREI4_MEMBASE_REG:
860 return OP_STOREI4_MEMINDEX;
861 case OP_STORE_MEMBASE_REG:
862 return OP_STORE_MEMINDEX;
863 case OP_STORER4_MEMBASE_REG:
864 return OP_STORER4_MEMINDEX;
865 case OP_STORER8_MEMBASE_REG:
866 return OP_STORER8_MEMINDEX;
867 case OP_STORE_MEMBASE_IMM:
868 return OP_STORE_MEMBASE_REG;
869 case OP_STOREI1_MEMBASE_IMM:
870 return OP_STOREI1_MEMBASE_REG;
871 case OP_STOREI2_MEMBASE_IMM:
872 return OP_STOREI2_MEMBASE_REG;
873 case OP_STOREI4_MEMBASE_IMM:
874 return OP_STOREI4_MEMBASE_REG;
876 g_assert_not_reached ();
880 * Remove from the instruction list the instructions that can't be
881 * represented with very simple instructions with no register
882 * requirements.
884 void
885 mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
887 MonoInst *ins, *next, *temp, *last_ins = NULL;
888 int imm;
890 MONO_BB_FOR_EACH_INS (bb, ins) {
891 loop_start:
892 switch (ins->opcode) {
893 case OP_ADD_IMM:
894 case OP_ADDCC_IMM:
895 if (!hppa_check_bits (ins->inst_imm, 11)) {
896 NEW_INS (cfg, temp, OP_ICONST);
897 temp->inst_c0 = ins->inst_imm;
898 temp->dreg = mono_alloc_ireg (cfg);
899 ins->sreg2 = temp->dreg;
900 ins->opcode = map_to_reg_reg_op (ins->opcode);
902 break;
903 case OP_SUB_IMM:
904 case OP_SUBCC_IMM:
905 if (!hppa_check_bits (ins->inst_imm, 11)) {
906 NEW_INS (cfg, temp, OP_ICONST);
907 temp->inst_c0 = ins->inst_imm;
908 temp->dreg = mono_alloc_ireg (cfg);
909 ins->sreg2 = temp->dreg;
910 ins->opcode = map_to_reg_reg_op (ins->opcode);
912 break;
914 case OP_MUL_IMM:
915 if (ins->inst_imm == 1) {
916 ins->opcode = OP_MOVE;
917 break;
919 if (ins->inst_imm == 0) {
920 ins->opcode = OP_ICONST;
921 ins->inst_c0 = 0;
922 break;
924 imm = mono_is_power_of_two (ins->inst_imm);
925 if (imm > 0) {
926 ins->opcode = OP_SHL_IMM;
927 ins->inst_imm = imm;
928 break;
930 else {
931 int tmp = mono_alloc_ireg (cfg);
932 NEW_INS (cfg, temp, OP_ICONST);
933 temp->inst_c0 = ins->inst_imm;
934 temp->dreg = tmp;
936 ins->opcode = CEE_MUL;
937 ins->sreg2 = tmp;
938 /* Need to rewrite the CEE_MUL too... */
939 goto loop_start;
941 break;
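/* PA-RISC has no integer multiply in the ALU, so CEE_MUL is lowered into a
 * trip through the FPU: both operands are spilled to a scratch slot, loaded
 * into floating point registers, multiplied with xmpyu (unsigned 32x32->64),
 * and the low word of the product is stored back and reloaded into dreg. */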
943 case CEE_MUL: {
944 int freg1 = mono_alloc_freg (cfg);
945 int freg2 = mono_alloc_freg (cfg);
947 NEW_INS(cfg, temp, OP_STORE_MEMBASE_REG);
948 temp->sreg1 = ins->sreg1;
949 temp->inst_destbasereg = hppa_sp;
950 temp->inst_offset = -16;
952 NEW_INS(cfg, temp, OP_LOADR4_MEMBASE);
953 temp->dreg = freg1;
954 temp->inst_basereg = hppa_sp;
955 temp->inst_offset = -16;
957 NEW_INS(cfg, temp, OP_STORE_MEMBASE_REG);
958 temp->sreg1 = ins->sreg2;
959 temp->inst_destbasereg = hppa_sp;
960 temp->inst_offset = -16;
962 NEW_INS(cfg, temp, OP_LOADR4_MEMBASE);
963 temp->dreg = freg2;
964 temp->inst_basereg = hppa_sp;
965 temp->inst_offset = -16;
967 NEW_INS (cfg, temp, OP_HPPA_XMPYU);
968 temp->dreg = freg2;
969 temp->sreg1 = freg1;
970 temp->sreg2 = freg2;
972 NEW_INS(cfg, temp, OP_HPPA_STORER4_RIGHT);
973 temp->sreg1 = freg2;
974 temp->inst_destbasereg = hppa_sp;
975 temp->inst_offset = -16;
977 ins->opcode = OP_LOAD_MEMBASE;
978 ins->inst_basereg = hppa_sp;
979 ins->inst_offset = -16;
981 break;
983 default:
984 break;
986 last_ins = ins;
988 bb->last_ins = last_ins;
989 bb->max_vreg = cfg->next_vreg;
993 void
994 hppa_patch (guint32 *code, const gpointer target)
996 guint32 ins = *code;
997 gint32 val = (gint32)target;
998 gint32 disp = (val - (gint32)code - 8) >> 2;
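/* Branch displacements are counted in words and measured from the address of
 * the branch plus 8 (the delay slot follows the branch), hence the -8 and the
 * >>2 above. */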
999 int reg1, reg2;
1001 DEBUG (printf ("patching 0x%08x (0x%08x) to point to 0x%08x (disp = %d)\n", code, ins, val, disp));
1003 switch (*code >> 26) {
1004 case 0x08: /* ldil, next insn can be a ldo, ldw, or ble */
1005 *code = *code & ~0x1fffff;
1006 *code = *code | hppa_op_imm21 (hppa_lsel (val));
1007 code++;
1009 if ((*code >> 26) == 0x0D) { /* ldo */
1010 *code = *code & ~0x3fff;
1011 *code = *code | hppa_op_imm14 (hppa_rsel (val));
1012 } else if ((*code >> 26) == 0x12) { /* ldw */
1013 *code = *code & ~0x3fff;
1014 *code = *code | hppa_op_imm14 (hppa_rsel (val));
1015 } else if ((*code >> 26) == 0x39) { /* ble */
1016 *code = *code & ~0x1f1ffd;
1017 *code = *code | hppa_op_imm17 (hppa_rsel (val));
1020 break;
1022 case 0x3A: /* bl */
1023 if (disp == 0) {
1024 hppa_nop (code);
1025 break;
1027 if (!hppa_check_bits (disp, 17))
1028 goto jump_overflow;
1029 reg1 = (*code >> 21) & 0x1f;
1030 *code = (*code & ~0x1f1ffd) | hppa_op_imm17(disp);
1031 break;
1033 case 0x20: /* combt */
1034 case 0x22: /* combf */
1035 if (!hppa_check_bits (disp >> 2, 12))
1036 goto jump_overflow;
1037 *code = (*code & ~0x1ffd) | hppa_op_imm12(disp);
1038 break;
1040 default:
1041 g_warning ("Unpatched opcode %x\n", *code >> 26);
1044 return;
1046 jump_overflow:
1047 g_warning ("cannot branch to target, insn is %08x, displacement is %d\n", (int)*code, (int)disp);
1048 g_assert_not_reached ();
1051 static guint32 *
1052 emit_float_to_int (MonoCompile *cfg, guint32 *code, int dreg, int sreg, int size, gboolean is_signed)
1054 /* sreg is a float, dreg is an integer reg. */
1055 hppa_fcnvfxt (code, HPPA_FP_FMT_DBL, HPPA_FP_FMT_SGL, sreg, sreg);
1056 hppa_fstws (code, sreg, 0, -16, hppa_sp);
1057 hppa_ldw (code, -16, hppa_sp, dreg);
1058 if (!is_signed) {
1059 if (size == 1)
1060 hppa_extru (code, dreg, 31, 8, dreg);
1061 else if (size == 2)
1062 hppa_extru (code, dreg, 31, 16, dreg);
1063 } else {
1064 if (size == 1)
1065 hppa_extrs (code, dreg, 31, 8, dreg);
1066 else if (size == 2)
1067 hppa_extrs (code, dreg, 31, 16, dreg);
1069 return code;
1072 /* Clobbers r1, r20, r21 */
1073 static guint32 *
1074 emit_memcpy (guint32 *code, int doff, int dreg, int soff, int sreg, int size)
1076 /* r20 is the destination */
1077 hppa_set (code, doff, hppa_r20);
1078 hppa_add (code, hppa_r20, dreg, hppa_r20);
1080 /* r21 is the source */
1081 hppa_set (code, soff, hppa_r21);
1082 hppa_add (code, hppa_r21, sreg, hppa_r21);
1084 while (size >= 4) {
1085 hppa_ldw (code, 0, hppa_r21, hppa_r1);
1086 hppa_stw (code, hppa_r1, 0, hppa_r20);
1087 hppa_ldo (code, 4, hppa_r21, hppa_r21);
1088 hppa_ldo (code, 4, hppa_r20, hppa_r20);
1089 size -= 4;
1091 while (size >= 2) {
1092 hppa_ldh (code, 0, hppa_r21, hppa_r1);
1093 hppa_sth (code, hppa_r1, 0, hppa_r20);
1094 hppa_ldo (code, 2, hppa_r21, hppa_r21);
1095 hppa_ldo (code, 2, hppa_r20, hppa_r20);
1096 size -= 2;
1098 while (size > 0) {
1099 hppa_ldb (code, 0, hppa_r21, hppa_r1);
1100 hppa_stb (code, hppa_r1, 0, hppa_r20);
1101 hppa_ldo (code, 1, hppa_r21, hppa_r21);
1102 hppa_ldo (code, 1, hppa_r20, hppa_r20);
1103 size -= 1;
1106 return code;
1110 * mono_arch_get_vcall_slot_addr:
1112 * Determine the vtable slot used by a virtual call.
1114 gpointer*
1115 mono_arch_get_vcall_slot_addr (guint8 *code8, gpointer *regs)
1117 guint32 *code = (guint32*)((unsigned long)code8 & ~3);
1119 DEBUG_FUNC_ENTER();
1121 code -= 2;
1122 /* This is the special virtual call token */
1123 if (code [-1] != 0x34000eee) /* ldo 0x777(r0),r0 */
1124 return NULL;
1126 if ((code [0] >> 26) == 0x39 && /* ble */
1127 (code [-2] >> 26) == 0x12) { /* ldw */
1128 guint32 ldw = code [-2];
1129 guint32 reg = (ldw >> 21) & 0x1f;
1130 gint32 disp = ((ldw & 1) ? (-1 << 13) : 0) | ((ldw & 0x3fff) >> 1);
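/* ldw encodes its 14-bit displacement low-sign-extended: the lowest bit of the
 * immediate field carries the sign and the remaining bits the magnitude, which
 * is what the decoding above reconstructs. */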
1131 /* FIXME: we are not guaranteed that reg is saved in the LMF.
1132 * In fact, it probably isn't, since it is allocated as a
1133 * callee register. Right now just return an address; this
1134 * is sufficient for non-AOT operation
1136 // return (gpointer)((guint8*)regs [reg] + disp);
1137 return code;
1139 else
1140 g_assert_not_reached ();
1142 DEBUG_FUNC_EXIT();
1145 /* ins->dreg = *(ins->inst_basereg + ins->inst_offset) */
1146 #define EMIT_LOAD_MEMBASE(ins, op) do { \
1147 if (!hppa_check_bits (ins->inst_offset, 14)) { \
1148 hppa_set (code, ins->inst_offset, hppa_r1); \
1149 hppa_ ## op ## x (code, hppa_r1, ins->inst_basereg, ins->dreg); \
1151 else { \
1152 hppa_ ## op (code, ins->inst_offset, ins->inst_basereg, ins->dreg); \
1154 } while (0)
1156 #define EMIT_COND_BRANCH_FLAGS(ins,r1,r2,b0,b1) do {\
1157 if (ins->flags & MONO_INST_BRLABEL) { \
1158 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_LABEL, ins->inst_i0); \
1159 if (b0) \
1160 hppa_combt (code, r1, r2, b1, 0); \
1161 else \
1162 hppa_combf (code, r1, r2, b1, 0); \
1163 } else { \
1164 if (b0) \
1165 hppa_combf (code, r1, r2, b1, 2); \
1166 else \
1167 hppa_combt (code, r1, r2, b1, 2); \
1168 hppa_nop (code); \
1169 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
1170 hppa_bl (code, 0, hppa_r0); \
1172 hppa_nop (code); \
1173 } while (0)
1175 #define EMIT_COND_BRANCH(ins,r1,r2,cond) EMIT_COND_BRANCH_FLAGS(ins, r1, r2, branch_b0_table [(cond)], branch_b1_table [(cond)])
1177 #define EMIT_FLOAT_COND_BRANCH_FLAGS(ins,r1,r2,b0) do {\
1178 hppa_fcmp (code, HPPA_FP_FMT_DBL, b0, r1, r2); \
1179 hppa_ftest (code, 0); \
1180 if (ins->flags & MONO_INST_BRLABEL) \
1181 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_LABEL, ins->inst_i0); \
1182 else \
1183 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
1184 hppa_bl (code, 8, hppa_r0); \
1185 hppa_nop (code); \
1186 } while (0)
1188 #define EMIT_FLOAT_COND_BRANCH(ins,r1,r2,cond) EMIT_FLOAT_COND_BRANCH_FLAGS(ins, r1, r2, float_branch_table [cond])
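/* In the macro below, the bl into r2 is only there to capture the current code
 * address (depi clears the two privilege-level bits of the copied PC); that
 * address, together with the MonoOvfJump patch record, lets the runtime raise
 * the named exception at the right spot when the guarded condition trips. */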
1190 #define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(r1,r2,b0,b1,exc_name) \
1191 do { \
1192 MonoOvfJump *ovfj = mono_mempool_alloc (cfg->mempool, sizeof (MonoOvfJump)); \
1193 ovfj->data.exception = (exc_name); \
1194 ovfj->ip_offset = (guint8*)code - cfg->native_code; \
1195 hppa_bl (code, 8, hppa_r2); \
1196 hppa_depi (code, 0, 31, 2, hppa_r2); \
1197 hppa_ldo (code, 8, hppa_r2, hppa_r2); \
1198 if (b0) \
1199 hppa_combf (code, r1, r2, b1, 2); \
1200 else \
1201 hppa_combt (code, r1, r2, b1, 2); \
1202 hppa_nop (code); \
1203 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_EXC_OVF, ovfj); \
1204 hppa_bl (code, 0, hppa_r0); \
1205 hppa_nop (code); \
1206 } while (0)
1208 #define EMIT_COND_SYSTEM_EXCEPTION(r1,r2,cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(r1, r2, branch_b0_table [(cond)], branch_b1_table [(cond)], (exc_name))
1210 /* TODO: MEM_INDEX_REG - cannot be r1 */
1211 #define MEM_INDEX_REG hppa_r31
1212 /* *(ins->inst_destbasereg + ins->inst_offset) = ins->inst_imm */
1213 #define EMIT_STORE_MEMBASE_IMM(ins, op) do { \
1214 guint32 sreg; \
1215 if (ins->inst_imm == 0) \
1216 sreg = hppa_r0; \
1217 else { \
1218 hppa_set (code, ins->inst_imm, hppa_r1); \
1219 sreg = hppa_r1; \
1221 if (!hppa_check_bits (ins->inst_offset, 14)) { \
1222 hppa_set (code, ins->inst_offset, MEM_INDEX_REG); \
1223 hppa_addl (code, ins->inst_destbasereg, MEM_INDEX_REG, MEM_INDEX_REG); \
1224 hppa_ ## op (code, sreg, 0, MEM_INDEX_REG); \
1226 else { \
1227 hppa_ ## op (code, sreg, ins->inst_offset, ins->inst_destbasereg); \
1229 } while (0)
1231 /* *(ins->inst_destbasereg + ins->inst_offset) = ins->sreg1 */
1232 #define EMIT_STORE_MEMBASE_REG(ins, op) do { \
1233 if (!hppa_check_bits (ins->inst_offset, 14)) { \
1234 hppa_set (code, ins->inst_offset, MEM_INDEX_REG); \
1235 hppa_addl (code, ins->inst_destbasereg, MEM_INDEX_REG, MEM_INDEX_REG); \
1236 hppa_ ## op (code, ins->sreg1, 0, MEM_INDEX_REG); \
1238 else { \
1239 hppa_ ## op (code, ins->sreg1, ins->inst_offset, ins->inst_destbasereg); \
1241 } while (0)
1243 void
1244 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
1246 MonoInst *ins;
1247 MonoCallInst *call;
1248 guint offset;
1249 guint32 *code = (guint32*)(cfg->native_code + cfg->code_len);
1250 MonoInst *last_ins = NULL;
1251 int max_len, cpos;
1252 const char *spec;
1254 DEBUG_FUNC_ENTER();
1256 if (cfg->verbose_level > 2)
1257 g_print ("[%s::%s] Basic block %d starting at offset 0x%x\n", cfg->method->klass->name, cfg->method->name, bb->block_num, bb->native_offset);
1259 cpos = bb->max_offset;
1261 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
1262 NOT_IMPLEMENTED;
1265 MONO_BB_FOR_EACH_INS (bb, ins) {
1266 guint8* code_start;
1268 offset = (guint8*)code - cfg->native_code;
1270 spec = ins_get_spec (ins->opcode);
1272 max_len = ((guint8 *)spec) [MONO_INST_LEN];
1274 if (offset > (cfg->code_size - max_len - 16)) {
1275 cfg->code_size *= 2;
1276 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
1277 code = (guint32*)(cfg->native_code + offset);
1278 mono_jit_stats.code_reallocs++;
1280 code_start = (guint8*)code;
1281 // if (ins->cil_code)
1282 // g_print ("cil code\n");
1283 mono_debug_record_line_number (cfg, ins, offset);
1285 switch (ins->opcode) {
1286 case OP_RELAXED_NOP:
1287 break;
1288 case OP_STOREI1_MEMBASE_IMM:
1289 EMIT_STORE_MEMBASE_IMM (ins, stb);
1290 break;
1291 case OP_STOREI2_MEMBASE_IMM:
1292 EMIT_STORE_MEMBASE_IMM (ins, sth);
1293 break;
1294 case OP_STORE_MEMBASE_IMM:
1295 case OP_STOREI4_MEMBASE_IMM:
1296 EMIT_STORE_MEMBASE_IMM (ins, stw);
1297 break;
1298 case OP_STOREI1_MEMBASE_REG:
1299 EMIT_STORE_MEMBASE_REG (ins, stb);
1300 break;
1301 case OP_STOREI2_MEMBASE_REG:
1302 EMIT_STORE_MEMBASE_REG (ins, sth);
1303 break;
1304 case OP_STORE_MEMBASE_REG:
1305 case OP_STOREI4_MEMBASE_REG:
1306 EMIT_STORE_MEMBASE_REG (ins, stw);
1307 break;
1308 case OP_LOADU1_MEMBASE:
1309 EMIT_LOAD_MEMBASE (ins, ldb);
1310 break;
1311 case OP_LOADI1_MEMBASE:
1312 EMIT_LOAD_MEMBASE (ins, ldb);
1313 hppa_extrs (code, ins->dreg, 31, 8, ins->dreg);
1314 break;
1315 case OP_LOADU2_MEMBASE:
1316 EMIT_LOAD_MEMBASE (ins, ldh);
1317 break;
1318 case OP_LOADI2_MEMBASE:
1319 EMIT_LOAD_MEMBASE (ins, ldh);
1320 hppa_extrs (code, ins->dreg, 31, 16, ins->dreg);
1321 break;
1322 case OP_LOAD_MEMBASE:
1323 case OP_LOADI4_MEMBASE:
1324 case OP_LOADU4_MEMBASE:
1325 EMIT_LOAD_MEMBASE (ins, ldw);
1326 break;
1327 case CEE_CONV_I1:
1328 hppa_extrs (code, ins->sreg1, 31, 8, ins->dreg);
1329 break;
1330 case CEE_CONV_I2:
1331 hppa_extrs (code, ins->sreg1, 31, 16, ins->dreg);
1332 break;
1333 case CEE_CONV_U1:
1334 hppa_extru (code, ins->sreg1, 31, 8, ins->dreg);
1335 break;
1336 case CEE_CONV_U2:
1337 hppa_extru (code, ins->sreg1, 31, 16, ins->dreg);
1338 break;
1339 case CEE_CONV_U:
1340 case CEE_CONV_I4:
1341 case CEE_CONV_U4:
1342 case OP_MOVE:
1343 if (ins->sreg1 != ins->dreg)
1344 hppa_copy (code, ins->sreg1, ins->dreg);
1345 break;
1346 case OP_SETLRET:
1347 hppa_copy (code, ins->sreg1 + 1, ins->dreg);
1348 hppa_copy (code, ins->sreg1, ins->dreg + 1);
1349 break;
1351 case OP_BREAK:
1352 /* break 4,8 - this is what gdb normally uses... */
1353 *code++ = 0x00010004;
1354 break;
1355 case OP_ADDCC:
1356 case CEE_ADD:
1357 hppa_add (code, ins->sreg1, ins->sreg2, ins->dreg);
1358 break;
1359 case OP_ADC:
1360 hppa_addc (code, ins->sreg1, ins->sreg2, ins->dreg);
1361 break;
1362 case OP_ADDCC_IMM:
1363 case OP_ADD_IMM:
1364 hppa_addi (code, ins->inst_imm, ins->sreg1, ins->dreg);
1365 break;
1366 case OP_ADC_IMM:
1367 hppa_set (code, ins->inst_imm, hppa_r1);
1368 hppa_addc (code, ins->sreg1, hppa_r1, ins->dreg);
1369 break;
1370 case OP_HPPA_ADD_OVF: {
1371 MonoOvfJump *ovfj = mono_mempool_alloc (cfg->mempool, sizeof (MonoOvfJump));
1372 hppa_bl (code, 8, hppa_r2);
1373 hppa_depi (code, 0, 31, 2, hppa_r2);
1374 hppa_ldo (code, 12, hppa_r2, hppa_r2);
1376 if (ins->backend.reg3 == CEE_ADD_OVF)
1377 hppa_add_cond (code, HPPA_ADD_COND_NSV, ins->sreg1, ins->sreg2, ins->dreg);
1378 else
1379 hppa_add_cond (code, HPPA_ADD_COND_NUV, ins->sreg1, ins->sreg2, ins->dreg);
1381 ovfj->data.exception = "OverflowException";
1382 ovfj->ip_offset = (guint8*)code - cfg->native_code;
1383 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_EXC_OVF, ovfj);
1384 hppa_bl_n (code, 8, hppa_r0);
1385 break;
1387 case OP_HPPA_ADDC_OVF: {
1388 MonoOvfJump *ovfj = mono_mempool_alloc (cfg->mempool, sizeof (MonoOvfJump));
1389 hppa_bl (code, 8, hppa_r2);
1390 hppa_depi (code, 0, 31, 2, hppa_r2);
1391 hppa_ldo (code, 12, hppa_r2, hppa_r2);
1393 if (ins->backend.reg3 == OP_LADD_OVF)
1394 hppa_addc_cond (code, HPPA_ADD_COND_NSV, ins->sreg1, ins->sreg2, ins->dreg);
1395 else
1396 hppa_addc_cond (code, HPPA_ADD_COND_NUV, ins->sreg1, ins->sreg2, ins->dreg);
1398 ovfj->data.exception = "OverflowException";
1399 ovfj->ip_offset = (guint8*)code - cfg->native_code;
1400 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_EXC_OVF, ovfj);
1401 hppa_bl_n (code, 8, hppa_r0);
1402 break;
1404 case OP_SUBCC:
1405 case CEE_SUB:
1406 hppa_sub (code, ins->sreg1, ins->sreg2, ins->dreg);
1407 break;
1408 case OP_SUBCC_IMM:
1409 case OP_SUB_IMM:
1410 hppa_addi (code, -ins->inst_imm, ins->sreg1, ins->dreg);
1411 break;
1412 case OP_SBB:
1413 hppa_subb (code, ins->sreg1, ins->sreg2, ins->dreg);
1414 break;
1415 case OP_SBB_IMM:
1416 hppa_set (code, ins->inst_imm, hppa_r1);
1417 hppa_subb (code, ins->sreg1, hppa_r1, ins->dreg);
1418 break;
1419 case OP_HPPA_SUB_OVF: {
1420 MonoOvfJump *ovfj = mono_mempool_alloc (cfg->mempool, sizeof (MonoOvfJump));
1421 hppa_bl (code, 8, hppa_r2);
1422 hppa_depi (code, 0, 31, 2, hppa_r2);
1423 hppa_ldo (code, 12, hppa_r2, hppa_r2);
1424 hppa_sub_cond (code, HPPA_SUB_COND_NSV, ins->sreg1, ins->sreg2, ins->dreg);
1425 ovfj->data.exception = "OverflowException";
1426 ovfj->ip_offset = (guint8*)code - cfg->native_code;
1427 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_EXC_OVF, ovfj);
1428 hppa_bl_n (code, 8, hppa_r0);
1429 break;
1431 case OP_HPPA_SUBB_OVF: {
1432 MonoOvfJump *ovfj = mono_mempool_alloc (cfg->mempool, sizeof (MonoOvfJump));
1433 hppa_bl (code, 8, hppa_r2);
1434 hppa_depi (code, 0, 31, 2, hppa_r2);
1435 hppa_ldo (code, 12, hppa_r2, hppa_r2);
1437 hppa_subb_cond (code, HPPA_SUB_COND_NSV, ins->sreg1, ins->sreg2, ins->dreg);
1438 ovfj->data.exception = "OverflowException";
1439 ovfj->ip_offset = (guint8*)code - cfg->native_code;
1440 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_EXC_OVF, ovfj);
1441 hppa_bl_n (code, 8, hppa_r0);
1442 break;
1445 case CEE_AND:
1446 hppa_and (code, ins->sreg1, ins->sreg2, ins->dreg);
1447 break;
1448 case OP_AND_IMM:
1449 hppa_set (code, ins->inst_imm, hppa_r1);
1450 hppa_and (code, ins->sreg1, hppa_r1, ins->dreg);
1451 break;
1453 case CEE_OR:
1454 hppa_or (code, ins->sreg1, ins->sreg2, ins->dreg);
1455 break;
1457 case OP_OR_IMM:
1458 hppa_set (code, ins->inst_imm, hppa_r1);
1459 hppa_or (code, ins->sreg1, hppa_r1, ins->dreg);
1460 break;
1462 case CEE_XOR:
1463 hppa_xor (code, ins->sreg1, ins->sreg2, ins->dreg);
1464 break;
1465 case OP_XOR_IMM:
1466 hppa_set (code, ins->inst_imm, hppa_r1);
1467 hppa_xor (code, ins->sreg1, hppa_r1, ins->dreg);
1468 break;
1469 case CEE_SHL:
1470 if (ins->sreg1 != ins->dreg) {
1471 hppa_shl (code, ins->sreg1, ins->sreg2, ins->dreg);
1473 else {
1474 hppa_copy (code, ins->sreg1, hppa_r1);
1475 hppa_shl (code, hppa_r1, ins->sreg2, ins->dreg);
1477 break;
1478 case OP_SHL_IMM:
1479 case OP_ISHL_IMM:
1480 g_assert (ins->inst_imm < 32);
1481 if (ins->sreg1 != ins->dreg) {
1482 hppa_zdep (code, ins->sreg1, 31-ins->inst_imm, 32-ins->inst_imm, ins->dreg);
1484 else {
1485 hppa_copy (code, ins->sreg1, hppa_r1);
1486 hppa_zdep (code, hppa_r1, 31-ins->inst_imm, 32-ins->inst_imm, ins->dreg);
1488 break;
1489 case CEE_SHR:
1490 if (ins->sreg1 != ins->dreg) {
1491 hppa_shr (code, ins->sreg1, ins->sreg2, ins->dreg);
1493 else {
1494 hppa_copy (code, ins->sreg1, hppa_r1);
1495 hppa_shr (code, hppa_r1, ins->sreg2, ins->dreg);
1497 break;
1498 case OP_SHR_IMM:
1499 g_assert (ins->inst_imm < 32);
1500 if (ins->sreg1 != ins->dreg) {
1501 hppa_extrs (code, ins->sreg1, 31-ins->inst_imm, 32-ins->inst_imm, ins->dreg);
1503 else {
1504 hppa_copy (code, ins->sreg1, hppa_r1);
1505 hppa_extrs (code, hppa_r1, 31-ins->inst_imm, 32-ins->inst_imm, ins->dreg);
1507 break;
1508 case OP_SHR_UN_IMM:
1509 g_assert (ins->inst_imm < 32);
1510 if (ins->sreg1 != ins->dreg) {
1511 hppa_extru (code, ins->sreg1, 31-ins->inst_imm, 32-ins->inst_imm, ins->dreg);
1513 else {
1514 hppa_copy (code, ins->sreg1, hppa_r1);
1515 hppa_extru (code, hppa_r1, 31-ins->inst_imm, 32-ins->inst_imm, ins->dreg);
1517 break;
1518 case CEE_SHR_UN:
1519 if (ins->sreg1 != ins->dreg) {
1520 hppa_lshr (code, ins->sreg1, ins->sreg2, ins->dreg);
1522 else {
1523 hppa_copy (code, ins->sreg1, hppa_r1);
1524 hppa_lshr (code, hppa_r1, ins->sreg2, ins->dreg);
1526 break;
1527 case CEE_NOT:
1528 hppa_not (code, ins->sreg1, ins->dreg);
1529 break;
1530 case CEE_NEG:
1531 hppa_subi (code, 0, ins->sreg1, ins->dreg);
1532 break;
1534 case CEE_MUL:
1535 case OP_MUL_IMM:
1536 /* Should have been rewritten using xmpyu */
1537 g_assert_not_reached ();
1539 case OP_ICONST:
1540 if ((ins->inst_c0 > 0 && ins->inst_c0 >= (1 << 13)) ||
1541 (ins->inst_c0 < 0 && ins->inst_c0 < -(1 << 13))) {
1542 hppa_ldil (code, hppa_lsel (ins->inst_c0), ins->dreg);
1543 hppa_ldo (code, hppa_rsel (ins->inst_c0), ins->dreg, ins->dreg);
1544 } else {
1545 hppa_ldo (code, ins->inst_c0, hppa_r0, ins->dreg);
1547 break;
1548 case OP_AOTCONST:
1549 g_assert_not_reached ();
1551 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
1552 hppa_set_template (code, ins->dreg);
1554 g_warning ("unimplemented opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
1555 NOT_IMPLEMENTED;
1556 break;
1557 case OP_FMOVE:
1558 if (ins->sreg1 != ins->dreg)
1559 hppa_fcpy (code, HPPA_FP_FMT_DBL, ins->sreg1, ins->dreg);
1560 break;
1562 case OP_HPPA_OUTARG_R4CONST:
1563 hppa_set (code, (unsigned int)ins->inst_p0, hppa_r1);
1564 hppa_fldwx (code, hppa_r0, hppa_r1, ins->dreg, 0);
1565 break;
1567 case OP_HPPA_OUTARG_REGOFFSET:
1568 hppa_ldo (code, ins->inst_offset, ins->inst_basereg, ins->dreg);
1569 break;
1571 case OP_JMP:
1573 * Keep in sync with mono_arch_emit_epilog
1575 g_assert (!cfg->method->save_lmf);
1576 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
1577 hppa_bl (code, 8, hppa_r0);
1578 break;
1579 case OP_CHECK_THIS:
1580 /* ensure ins->sreg1 is not NULL */
1581 hppa_ldw (code, 0, ins->sreg1, hppa_r1);
1582 break;
1583 case OP_ARGLIST:
1584 break;
1585 case OP_FCALL:
1586 case OP_LCALL:
1587 case OP_VCALL:
1588 case OP_VOIDCALL:
1589 case OP_CALL:
1590 call = (MonoCallInst*)ins;
1591 if (ins->flags & MONO_INST_HAS_METHOD)
1592 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_METHOD, call->method);
1593 else
1594 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, call->fptr);
1595 hppa_ldil (code, 0, hppa_r1);
1596 hppa_ldo (code, 0, hppa_r1, hppa_r1);
1598 * We may have loaded an actual function address, or
1599 * it might be a plabel. Check to see if the plabel
1600 * bit is set, and load the actual fptr from it if
1601 * needed
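/* A plabel is the 32-bit HPPA function descriptor: bit 1 of the pointer marks
 * it, the word at offset 0 holds the real entry point and the word at offset 4
 * the linkage/GOT pointer that belongs in r19, which is what the ldw pair
 * below picks up. */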
1603 hppa_bb_n (code, HPPA_BIT_COND_MSB_CLR, hppa_r1, 30, 2);
1604 hppa_depi (code, 0, 31, 2, hppa_r1);
1605 hppa_ldw (code, 4, hppa_r1, hppa_r19);
1606 hppa_ldw (code, 0, hppa_r1, hppa_r1);
1607 hppa_ble (code, 0, hppa_r1);
1608 hppa_copy (code, hppa_r31, hppa_r2);
1609 if (call->signature->ret->type == MONO_TYPE_R4)
1610 hppa_fcnvff (code, HPPA_FP_FMT_SGL, HPPA_FP_FMT_DBL, hppa_fr4, hppa_fr4);
1611 break;
1612 case OP_FCALL_REG:
1613 case OP_LCALL_REG:
1614 case OP_VCALL_REG:
1615 case OP_VOIDCALL_REG:
1616 case OP_CALL_REG:
1617 call = (MonoCallInst*)ins;
1618 g_assert (!call->virtual);
1619 hppa_copy (code, ins->sreg1, hppa_r1);
1620 hppa_bb_n (code, HPPA_BIT_COND_MSB_CLR, hppa_r1, 30, 2);
1621 hppa_depi (code, 0, 31, 2, hppa_r1);
1622 hppa_ldw (code, 4, hppa_r1, hppa_r19);
1623 hppa_ldw (code, 0, hppa_r1, hppa_r1);
1624 hppa_ble (code, 0, hppa_r1);
1625 hppa_copy (code, hppa_r31, hppa_r2);
1626 if (call->signature->ret->type == MONO_TYPE_R4)
1627 hppa_fcnvff (code, HPPA_FP_FMT_SGL, HPPA_FP_FMT_DBL, hppa_fr4, hppa_fr4);
1628 break;
1629 case OP_FCALL_MEMBASE:
1630 case OP_LCALL_MEMBASE:
1631 case OP_VCALL_MEMBASE:
1632 case OP_VOIDCALL_MEMBASE:
1633 case OP_CALL_MEMBASE:
1634 call = (MonoCallInst*)ins;
1635 /* jump to ins->inst_sreg1 + ins->inst_offset */
1636 hppa_ldw (code, ins->inst_offset, ins->sreg1, hppa_r1);
1638 /* For virtual calls, emit a special token that can
1639 * be used by get_vcall_slot_addr
1641 if (call->virtual)
1642 hppa_ldo (code, 0x777, hppa_r0, hppa_r0);
1643 hppa_ble (code, 0, hppa_r1);
1644 hppa_copy (code, hppa_r31, hppa_r2);
1645 break;
1646 case OP_LOCALLOC: {
1647 guint32 size_reg;
1649 /* Keep alignment */
1650 hppa_ldo (code, MONO_ARCH_LOCALLOC_ALIGNMENT - 1, ins->sreg1, ins->dreg);
1651 hppa_depi (code, 0, 31, 6, ins->dreg);
1652 hppa_copy (code, hppa_sp, hppa_r1);
1653 hppa_addl (code, ins->dreg, hppa_sp, hppa_sp);
1654 hppa_copy (code, hppa_r1, ins->dreg);
1656 if (ins->flags & MONO_INST_INIT) {
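/* zero the freshly allocated area a word at a time: r1 walks up from the old
 * stack pointer, the combt loops back while it is still below the new hppa_sp,
 * and the ldo bumps r1 by 4 in the delay slot */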
1657 hppa_stw (code, hppa_r0, 0, hppa_r1);
1658 hppa_combt (code, hppa_r1, hppa_sp, HPPA_CMP_COND_ULT, -3);
1659 hppa_ldo (code, 4, hppa_r1, hppa_r1);
1661 break;
1664 case OP_THROW:
1665 hppa_copy (code, ins->sreg1, hppa_r26);
1666 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
1667 (gpointer)"mono_arch_throw_exception");
1668 hppa_ldil (code, 0, hppa_r1);
1669 hppa_ldo (code, 0, hppa_r1, hppa_r1);
1670 hppa_ble (code, 0, hppa_r1);
1671 hppa_copy (code, hppa_r31, hppa_r2);
1672 /* should never return */
1673 *code++ = 0xffeeddcc;
1674 break;
1675 case OP_RETHROW:
1676 hppa_copy (code, ins->sreg1, hppa_r26);
1677 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
1678 (gpointer)"mono_arch_rethrow_exception");
1679 hppa_ldil (code, 0, hppa_r1);
1680 hppa_ldo (code, 0, hppa_r1, hppa_r1);
1681 hppa_ble (code, 0, hppa_r1);
1682 hppa_copy (code, hppa_r31, hppa_r2);
1683 /* should never return */
1684 *code++ = 0xffeeddcc;
1685 break;
1686 case OP_START_HANDLER:
1687 if (hppa_check_bits (ins->inst_left->inst_offset, 14))
1688 hppa_stw (code, hppa_r2, ins->inst_left->inst_offset, ins->inst_left->inst_basereg);
1689 else {
1690 hppa_set (code, ins->inst_left->inst_offset, hppa_r1);
1691 hppa_addl (code, ins->inst_left->inst_basereg, hppa_r1, hppa_r1);
1692 hppa_stw (code, hppa_r2, 0, hppa_r1);
1694 break;
1695 case OP_ENDFILTER:
1696 if (ins->sreg1 != hppa_r26)
1697 hppa_copy (code, ins->sreg1, hppa_r26);
1698 if (hppa_check_bits (ins->inst_left->inst_offset, 14))
1699 hppa_ldw (code, ins->inst_left->inst_offset, ins->inst_left->inst_basereg, hppa_r2);
1700 else {
1701 hppa_set (code, ins->inst_left->inst_offset, hppa_r1);
1702 hppa_ldwx (code, hppa_r1, ins->inst_left->inst_basereg, hppa_r2);
1704 hppa_bv (code, hppa_r0, hppa_r2);
1705 hppa_nop (code);
1706 break;
1707 case OP_ENDFINALLY:
1708 if (hppa_check_bits (ins->inst_left->inst_offset, 14))
1709 hppa_ldw (code, ins->inst_left->inst_offset, ins->inst_left->inst_basereg, hppa_r1);
1710 else {
1711 hppa_set (code, ins->inst_left->inst_offset, hppa_r1);
1712 hppa_ldwx (code, hppa_r1, ins->inst_left->inst_basereg, hppa_r1);
1714 hppa_bv (code, hppa_r0, hppa_r1);
1715 hppa_nop (code);
1716 break;
1717 case OP_CALL_HANDLER:
1718 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
1719 hppa_bl (code, 0, hppa_r2);
1720 hppa_nop (code);
1721 break;
1722 case OP_LABEL:
1723 ins->inst_c0 = (guint8*)code - cfg->native_code;
1724 break;
1725 case OP_BR: {
1726 guint32 target;
1727 DEBUG (printf ("target: %p, next: %p, curr: %p, last: %p\n", ins->inst_target_bb, bb->next_bb, ins, bb->last_ins));
1728 if (ins->flags & MONO_INST_BRLABEL) {
1729 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_LABEL, ins->inst_i0);
1730 } else {
1731 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
1733 hppa_bl (code, 8, hppa_r0);
1734 /* TODO: if the branch is too long, we may need to
1735 * use a long-branch sequence:
1736 * hppa_ldil (code, 0, hppa_r1);
1737 * hppa_ldo (code, 0, hppa_r1, hppa_r1);
1738 * hppa_bv (code, hppa_r0, hppa_r1);
1740 hppa_nop (code);
1741 break;
1743 case OP_BR_REG:
1744 hppa_bv (code, hppa_r0, ins->sreg1);
1745 hppa_nop(code);
1746 break;
1748 case OP_SWITCH: {
1749 int i;
1751 max_len += 8 * GPOINTER_TO_INT (ins->klass);
1752 if (offset > (cfg->code_size - max_len - 16)) {
1753 cfg->code_size += max_len;
1754 cfg->code_size *= 2;
1755 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
1756 code = cfg->native_code + offset;
1757 code_start = (guint8*)code;
1759 hppa_blr (code, ins->sreg1, hppa_r0);
1760 hppa_nop (code);
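/* blr indexes into the table at 8 bytes per entry (the index is shifted left
 * by 3), which is why each case below reserves two placeholder words */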
1761 for (i = 0; i < GPOINTER_TO_INT (ins->klass); ++i) {
1762 *code++ = 0xdeadbeef;
1763 *code++ = 0xdeadbeef;
1765 break;
1768 /* comclr is cool :-) */
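/* comclr compares, clears the destination register and nullifies the next
 * instruction when its condition holds, so pairing it with "ldo 1(r0), dreg"
 * yields a branch-free 0/1 result; the conditions used below are therefore the
 * negations of the relations being computed. */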
1769 case OP_HPPA_CEQ:
1770 hppa_comclr_cond (code, HPPA_SUB_COND_NE, ins->sreg1, ins->sreg2, ins->dreg);
1771 hppa_ldo (code, 1, hppa_r0, ins->dreg);
1772 break;
1774 case OP_HPPA_CLT:
1775 hppa_comclr_cond (code, HPPA_SUB_COND_SGE, ins->sreg1, ins->sreg2, ins->dreg);
1776 hppa_ldo (code, 1, hppa_r0, ins->dreg);
1777 break;
1779 case OP_HPPA_CLT_UN:
1780 hppa_comclr_cond (code, HPPA_SUB_COND_UGE, ins->sreg1, ins->sreg2, ins->dreg);
1781 hppa_ldo (code, 1, hppa_r0, ins->dreg);
1782 break;
1784 case OP_HPPA_CGT:
1785 hppa_comclr_cond (code, HPPA_SUB_COND_SLE, ins->sreg1, ins->sreg2, ins->dreg);
1786 hppa_ldo (code, 1, hppa_r0, ins->dreg);
1787 break;
1789 case OP_HPPA_CGT_UN:
1790 hppa_comclr_cond (code, HPPA_SUB_COND_ULE, ins->sreg1, ins->sreg2, ins->dreg);
1791 hppa_ldo (code, 1, hppa_r0, ins->dreg);
1792 break;
1794 case OP_CEQ:
1795 case OP_CLT:
1796 case OP_CLT_UN:
1797 case OP_CGT:
1798 case OP_CGT_UN:
1799 case OP_COND_EXC_EQ:
1800 case OP_COND_EXC_NE_UN:
1801 case OP_COND_EXC_LT:
1802 case OP_COND_EXC_LT_UN:
1803 case OP_COND_EXC_GT:
1804 case OP_COND_EXC_GT_UN:
1805 case OP_COND_EXC_GE:
1806 case OP_COND_EXC_GE_UN:
1807 case OP_COND_EXC_LE:
1808 case OP_COND_EXC_LE_UN:
1809 case OP_COND_EXC_OV:
1810 case OP_COND_EXC_NO:
1811 case OP_COND_EXC_C:
1812 case OP_COND_EXC_NC:
1813 case OP_COND_EXC_IOV:
1814 case OP_COND_EXC_IC:
1815 case CEE_BEQ:
1816 case CEE_BNE_UN:
1817 case CEE_BLT:
1818 case CEE_BLT_UN:
1819 case CEE_BGT:
1820 case CEE_BGT_UN:
1821 case CEE_BGE:
1822 case CEE_BGE_UN:
1823 case CEE_BLE:
1824 case CEE_BLE_UN:
1825 case OP_COMPARE:
1826 case OP_LCOMPARE:
1827 case OP_ICOMPARE:
1828 case OP_COMPARE_IMM:
1829 case OP_ICOMPARE_IMM:
1830 g_warning ("got opcode %s in %s(), should be reduced\n", mono_inst_name (ins->opcode), __FUNCTION__);
1831 g_assert_not_reached ();
1832 break;
1834 case OP_HPPA_BEQ:
1835 case OP_HPPA_BNE:
1836 case OP_HPPA_BLT:
1837 case OP_HPPA_BLT_UN:
1838 case OP_HPPA_BGT:
1839 case OP_HPPA_BGT_UN:
1840 case OP_HPPA_BGE:
1841 case OP_HPPA_BGE_UN:
1842 case OP_HPPA_BLE:
1843 case OP_HPPA_BLE_UN:
1844 EMIT_COND_BRANCH (ins, ins->sreg1, ins->sreg2, ins->opcode - OP_HPPA_BEQ);
1845 break;
1847 case OP_HPPA_COND_EXC_EQ:
1848 case OP_HPPA_COND_EXC_GE:
1849 case OP_HPPA_COND_EXC_GT:
1850 case OP_HPPA_COND_EXC_LE:
1851 case OP_HPPA_COND_EXC_LT:
1852 case OP_HPPA_COND_EXC_NE_UN:
1853 case OP_HPPA_COND_EXC_GE_UN:
1854 case OP_HPPA_COND_EXC_GT_UN:
1855 case OP_HPPA_COND_EXC_LE_UN:
1856 case OP_HPPA_COND_EXC_LT_UN:
1857 EMIT_COND_SYSTEM_EXCEPTION (ins->sreg1, ins->sreg2, ins->opcode - OP_HPPA_COND_EXC_EQ, ins->inst_p1);
1858 break;
1860 case OP_HPPA_COND_EXC_OV:
1861 case OP_HPPA_COND_EXC_NO:
1862 case OP_HPPA_COND_EXC_C:
1863 case OP_HPPA_COND_EXC_NC:
1864 NOT_IMPLEMENTED;
1866 /* floating point opcodes */
1867 case OP_R8CONST:
1868 hppa_set (code, (unsigned int)ins->inst_p0, hppa_r1);
1869 hppa_flddx (code, hppa_r0, hppa_r1, ins->dreg);
1870 break;
1871 case OP_R4CONST:
1872 hppa_set (code, (unsigned int)ins->inst_p0, hppa_r1);
1873 hppa_fldwx (code, hppa_r0, hppa_r1, hppa_fr31, 0);
1874 hppa_fcnvff (code, HPPA_FP_FMT_SGL, HPPA_FP_FMT_DBL, hppa_fr31, ins->dreg);
1875 break;
1876 case OP_STORER8_MEMBASE_REG:
1877 hppa_set (code, ins->inst_offset, hppa_r1);
1878 hppa_fstdx (code, ins->sreg1, hppa_r1, ins->inst_destbasereg);
1879 break;
1880 case OP_LOADR8_MEMBASE:
1881 hppa_set (code, ins->inst_offset, hppa_r1);
1882 hppa_flddx (code, hppa_r1, ins->inst_basereg, ins->dreg);
1883 break;
1884 case OP_STORER4_MEMBASE_REG:
1885 hppa_fcnvff (code, HPPA_FP_FMT_DBL, HPPA_FP_FMT_SGL, ins->sreg1, hppa_fr31);
1886 if (hppa_check_bits (ins->inst_offset, 5)) {
1887 hppa_fstws (code, hppa_fr31, 0, ins->inst_offset, ins->inst_destbasereg);
1888 } else {
1889 hppa_set (code, ins->inst_offset, hppa_r1);
1890 hppa_fstwx (code, hppa_fr31, 0, hppa_r1, ins->inst_destbasereg);
1892 break;
1893 case OP_HPPA_STORER4_LEFT:
1894 case OP_HPPA_STORER4_RIGHT:
1895 if (hppa_check_bits (ins->inst_offset, 5)) {
1896 hppa_fstws (code, ins->sreg1, (ins->opcode == OP_HPPA_STORER4_RIGHT), ins->inst_offset, ins->inst_destbasereg);
1897 } else {
1898 hppa_set (code, ins->inst_offset, hppa_r1);
1899 hppa_fstwx (code, ins->sreg1, (ins->opcode == OP_HPPA_STORER4_RIGHT), hppa_r1, ins->inst_destbasereg);
1901 break;
1902 case OP_LOADR4_MEMBASE:
1903 if (hppa_check_bits (ins->inst_offset, 5)) {
1904 hppa_fldws (code, ins->inst_offset, ins->inst_basereg, hppa_fr31, 0);
1905 } else {
1906 hppa_set (code, ins->inst_offset, hppa_r1);
1907 hppa_fldwx (code, hppa_r1, ins->inst_basereg, hppa_fr31, 0);
1909 hppa_fcnvff (code, HPPA_FP_FMT_SGL, HPPA_FP_FMT_DBL, hppa_fr31, ins->dreg);
1910 break;
1911 case OP_HPPA_LOADR4_LEFT:
1912 case OP_HPPA_LOADR4_RIGHT:
1913 if (hppa_check_bits (ins->inst_offset, 5)) {
1914 hppa_fldws (code, ins->inst_offset, ins->inst_basereg, ins->dreg, (ins->opcode == OP_HPPA_LOADR4_RIGHT));
1915 } else {
1916 hppa_set (code, ins->inst_offset, hppa_r1);
1917 hppa_fldwx (code, hppa_r1, ins->inst_basereg, ins->dreg, (ins->opcode == OP_HPPA_LOADR4_RIGHT));
1919 break;
1921 case CEE_CONV_R4:
1922 hppa_stw (code, ins->sreg1, -16, hppa_sp);
1923 hppa_fldws (code, -16, hppa_sp, hppa_fr31, 0);
1924 hppa_fcnvxf (code, HPPA_FP_FMT_SGL, HPPA_FP_FMT_SGL, hppa_fr31, ins->dreg);
1925 hppa_fcnvff (code, HPPA_FP_FMT_SGL, HPPA_FP_FMT_DBL, ins->dreg, ins->dreg);
1926 break;
1928 case OP_FCONV_TO_R4:
1929 /* reduce precision */
1930 hppa_fcnvff (code, HPPA_FP_FMT_DBL, HPPA_FP_FMT_SGL, ins->sreg1, ins->dreg);
1931 hppa_fcnvff (code, HPPA_FP_FMT_SGL, HPPA_FP_FMT_DBL, ins->dreg, ins->dreg);
1932 break;
1934 case OP_HPPA_SETF4REG:
1935 hppa_fcnvff (code, HPPA_FP_FMT_DBL, HPPA_FP_FMT_SGL, ins->sreg1, ins->dreg);
1936 break;
1937 case CEE_CONV_R8:
1938 hppa_stw (code, ins->sreg1, -16, hppa_sp);
1939 hppa_fldws (code, -16, hppa_sp, hppa_fr31, 0);
1940 hppa_fcnvxf (code, HPPA_FP_FMT_SGL, HPPA_FP_FMT_DBL, hppa_fr31, ins->dreg);
1941 break;
1943 case OP_FCONV_TO_I1:
1944 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
1945 break;
1946 case OP_FCONV_TO_U1:
1947 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
1948 break;
1949 case OP_FCONV_TO_I2:
1950 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
1951 break;
1952 case OP_FCONV_TO_U2:
1953 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
1954 break;
1955 case OP_FCONV_TO_I4:
1956 case OP_FCONV_TO_I:
1957 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
1958 break;
1959 case OP_FCONV_TO_U4:
1960 case OP_FCONV_TO_U:
1961 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
1962 break;
1964 case OP_FCONV_TO_I8:
1965 case OP_FCONV_TO_U8:
1966 g_assert_not_reached ();
1967 /* Implemented as helper calls */
1968 break;
1969 case OP_LCONV_TO_R_UN:
1970 g_assert_not_reached ();
1971 /* Implemented as helper calls */
1972 break;
1974 case OP_LCONV_TO_OVF_I:
1975 NOT_IMPLEMENTED;
1976 break;
1978 case OP_FADD:
1979 hppa_fadd (code, HPPA_FP_FMT_DBL, ins->sreg1, ins->sreg2, ins->dreg);
1980 break;
1981 case OP_FSUB:
1982 hppa_fsub (code, HPPA_FP_FMT_DBL, ins->sreg1, ins->sreg2, ins->dreg);
1983 break;
1984 case OP_FMUL:
1985 hppa_fmul (code, HPPA_FP_FMT_DBL, ins->sreg1, ins->sreg2, ins->dreg);
1986 break;
1987 case OP_FDIV:
1988 hppa_fdiv (code, HPPA_FP_FMT_DBL, ins->sreg1, ins->sreg2, ins->dreg);
1989 break;
1990 case OP_FREM:
1991 NOT_IMPLEMENTED;
1992 break;
1994 case OP_FCOMPARE:
1995 g_assert_not_reached();
1996 break;
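/* The compare-and-set opcodes emit an fcmp to set the FP condition; ftest
 * conditionally nullifies the branch that follows, and the two ldo
 * instructions leave either 1 or 0 in dreg depending on which path is
 * taken. */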
1998 case OP_FCEQ:
1999 case OP_FCLT:
2000 case OP_FCLT_UN:
2001 case OP_FCGT:
2002 case OP_FCGT_UN:
2003 hppa_fcmp (code, HPPA_FP_FMT_DBL, float_ceq_table [ins->opcode - OP_FCEQ], ins->sreg1, ins->sreg2);
2004 hppa_ftest (code, 0);
2005 hppa_bl (code, 12, hppa_r0);
2006 hppa_ldo (code, 1, hppa_r0, ins->dreg);
2007 hppa_ldo (code, 0, hppa_r0, ins->dreg);
2008 break;
2010 case OP_FBEQ:
2011 case OP_FBLT:
2012 case OP_FBGT:
2013 case OP_FBGE:
2014 case OP_FBLE:
2015 case OP_FBNE_UN:
2016 case OP_FBLT_UN:
2017 case OP_FBGT_UN:
2018 case OP_FBGE_UN:
2019 case OP_FBLE_UN:
2020 EMIT_FLOAT_COND_BRANCH (ins, ins->sreg1, ins->sreg2, ins->opcode - OP_FBEQ);
2021 break;
2023 case OP_CKFINITE:
2024 case OP_MEMORY_BARRIER:
2025 break;
2027 case OP_HPPA_XMPYU:
2028 hppa_xmpyu (code, ins->sreg1, ins->sreg2, ins->dreg);
2029 break;
2031 default:
2032 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
2033 g_assert_not_reached ();
2034 }
2036 if ((((guint8*)code) - code_start) > max_len) {
2037 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
2038 mono_inst_name (ins->opcode), max_len, ((guint8*)code) - code_start);
2039 g_assert_not_reached ();
2040 }
2042 cpos += max_len;
2044 last_ins = ins;
2045 }
2047 cfg->code_len = (guint8*)code - cfg->native_code;
2048 DEBUG_FUNC_EXIT();
2049 }
2051 void
2052 mono_arch_register_lowlevel_calls (void)
2053 {
2054 }
2056 void
2057 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors)
2058 {
2059 MonoJumpInfo *patch_info;
2061 DEBUG_FUNC_ENTER();
2062 /* FIXME: Move part of this to arch independent code */
2063 for (patch_info = ji; patch_info; patch_info = patch_info->next) {
2064 unsigned char *ip = patch_info->ip.i + code;
2065 gpointer target;
2067 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
2068 DEBUG (printf ("patch_info->type = %d, target = %p\n", patch_info->type, target));
2070 switch (patch_info->type) {
2071 case MONO_PATCH_INFO_NONE:
2072 case MONO_PATCH_INFO_BB_OVF:
2073 case MONO_PATCH_INFO_EXC_OVF:
2074 continue;
2076 case MONO_PATCH_INFO_IP:
2077 hppa_patch ((guint32 *)ip, ip);
2078 continue;
2080 case MONO_PATCH_INFO_CLASS_INIT: {
2081 break;
2082 }
2083 case MONO_PATCH_INFO_METHOD_JUMP: {
2084 break;
2085 }
2086 case MONO_PATCH_INFO_SWITCH: {
2087 int i;
2088 gpointer *table = (gpointer *)target;
2089 ip += 8;
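/* Each switch table slot is rewritten as an ldil/be,n pair, i.e. an
 * absolute long branch to the resolved target table [i]. */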
2090 for (i = 0; i < patch_info->data.table->table_size; i++) {
2091 DEBUG (printf ("Patching switch table, table[%d] = %p\n", i, table[i]));
2092 hppa_ldil (ip, hppa_lsel (table [i]), hppa_r1);
2093 hppa_be_n (ip, hppa_rsel (table [i]), hppa_r1);
2094 }
2095 continue;
2096 }
2097 default:
2098 break;
2099 }
2100 hppa_patch ((guint32 *)ip, target);
2101 }
2103 DEBUG_FUNC_EXIT();
2104 }
2106 void*
2107 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
2108 {
2109 guint32 *code = (guint32*)p;
2111 DEBUG_FUNC_ENTER();
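/* The call sequence below treats func like a function descriptor: depi
 * clears the low flag bits, ldw fetches the actual entry point, and ble
 * makes the call with the return address delivered in r31 and copied
 * into r2. */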
2113 hppa_set (code, cfg->method, hppa_r26);
2114 hppa_copy (code, hppa_r0, hppa_r25); /* NULL sp for now */
2115 hppa_set (code, func, hppa_r1);
2116 hppa_depi (code, 0, 31, 2, hppa_r1);
2117 hppa_ldw (code, 0, hppa_r1, hppa_r1);
2118 hppa_ble (code, 0, hppa_r1);
2119 hppa_copy (code, hppa_r31, hppa_r2);
2121 DEBUG_FUNC_EXIT();
2122 return code;
2123 }
2125 enum {
2126 SAVE_NONE,
2127 SAVE_STRUCT,
2128 SAVE_ONE,
2129 SAVE_TWO,
2130 SAVE_FP
2131 };
2133 void*
2134 mono_arch_instrument_epilog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
2135 {
2136 guint32 *code = (guint32*)p;
2137 DEBUG_FUNC_ENTER();
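/* The block below is compiled out: it still uses the SPARC backend's
 * instrumentation sequence (sparc_* macros, SPARCV9 ifdefs) and presumably
 * only serves as a template for an HPPA implementation. */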
2138 #if 0
2139 int save_mode = SAVE_NONE;
2140 MonoMethod *method = cfg->method;
2142 switch (mono_type_get_underlying_type (mono_method_signature (method)->ret)->type) {
2143 case MONO_TYPE_VOID:
2144 /* special case string .ctor icall */
2145 if (!strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
2146 save_mode = SAVE_ONE;
2147 else
2148 save_mode = SAVE_NONE;
2149 break;
2150 case MONO_TYPE_I8:
2151 case MONO_TYPE_U8:
2152 #ifdef SPARCV9
2153 save_mode = SAVE_ONE;
2154 #else
2155 save_mode = SAVE_TWO;
2156 #endif
2157 break;
2158 case MONO_TYPE_R4:
2159 case MONO_TYPE_R8:
2160 save_mode = SAVE_FP;
2161 break;
2162 case MONO_TYPE_VALUETYPE:
2163 save_mode = SAVE_STRUCT;
2164 break;
2165 default:
2166 save_mode = SAVE_ONE;
2167 break;
2170 /* Save the result to the stack and also put it into the output registers */
2172 switch (save_mode) {
2173 case SAVE_TWO:
2174 /* V8 only */
2175 sparc_st_imm (code, sparc_i0, sparc_fp, 68);
2176 sparc_st_imm (code, sparc_i1, sparc_fp, 72);
2177 sparc_mov_reg_reg (code, sparc_i0, sparc_o1);
2178 sparc_mov_reg_reg (code, sparc_i1, sparc_o2);
2179 break;
2180 case SAVE_ONE:
2181 sparc_sti_imm (code, sparc_i0, sparc_fp, ARGS_OFFSET);
2182 sparc_mov_reg_reg (code, sparc_i0, sparc_o1);
2183 break;
2184 case SAVE_FP:
2185 #ifdef SPARCV9
2186 sparc_stdf_imm (code, sparc_f0, sparc_fp, ARGS_OFFSET);
2187 #else
2188 sparc_stdf_imm (code, sparc_f0, sparc_fp, 72);
2189 sparc_ld_imm (code, sparc_fp, 72, sparc_o1);
2190 sparc_ld_imm (code, sparc_fp, 72 + 4, sparc_o2);
2191 #endif
2192 break;
2193 case SAVE_STRUCT:
2194 #ifdef SPARCV9
2195 sparc_mov_reg_reg (code, sparc_i0, sparc_o1);
2196 #else
2197 sparc_ld_imm (code, sparc_fp, 64, sparc_o1);
2198 #endif
2199 break;
2200 case SAVE_NONE:
2201 default:
2202 break;
2205 sparc_set (code, cfg->method, sparc_o0);
2207 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_ABS, func);
2208 EMIT_CALL ();
2210 /* Restore result */
2212 switch (save_mode) {
2213 case SAVE_TWO:
2214 sparc_ld_imm (code, sparc_fp, 68, sparc_i0);
2215 sparc_ld_imm (code, sparc_fp, 72, sparc_i1);
2216 break;
2217 case SAVE_ONE:
2218 sparc_ldi_imm (code, sparc_fp, ARGS_OFFSET, sparc_i0);
2219 break;
2220 case SAVE_FP:
2221 sparc_lddf_imm (code, sparc_fp, ARGS_OFFSET, sparc_f0);
2222 break;
2223 case SAVE_NONE:
2224 default:
2225 break;
2227 #endif
2228 DEBUG_FUNC_EXIT();
2229 return code;
2230 }
2232 /*
2233 * The HPPA stack frame should look like this:
2235 * ---------------------
2236 * incoming params area
2237 * ---------------------
2238 * linkage area size = ARGS_OFFSET
2239 * --------------------- fp = psp
2240 * HPPA_STACK_LMF_OFFSET
2241 * ---------------------
2242 * MonoLMF structure or saved registers
2243 * -------------------
2244 * locals size = cfg->stack_offset - cfg->param_area
2245 * ---------------------
2246 * params area size = cfg->param_area - ARGS_OFFSET (aligned)
2247 * ---------------------
2248 * callee linkage area size = ARGS_OFFSET
2249 * --------------------- sp
2250 */
2251 guint8 *
2252 mono_arch_emit_prolog (MonoCompile *cfg)
2253 {
2254 MonoMethod *method = cfg->method;
2255 MonoBasicBlock *bb;
2256 MonoMethodSignature *sig;
2257 MonoInst *inst;
2258 int alloc_size, pos, max_offset, i;
2259 guint8 *code;
2260 CallInfo *cinfo;
2261 int tracing = 0;
2262 int lmf_offset = 0;
2264 DEBUG_FUNC_ENTER();
2265 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
2266 tracing = 1;
2268 sig = mono_method_signature (method);
2269 cfg->code_size = 512 + sig->param_count * 20;
2270 code = cfg->native_code = g_malloc (cfg->code_size);
2272 /* TODO: enable tail call optimization */
2273 if (1 || cfg->flags & MONO_CFG_HAS_CALLS) {
2274 hppa_stw (code, hppa_r2, -20, hppa_sp);
2277 /* locals area */
2278 pos = HPPA_STACK_LMF_OFFSET;
2280 /* figure out how much space we need for spilling */
2281 if (!method->save_lmf) {
2282 /* spill callee-save registers */
2283 guint32 mask = cfg->used_int_regs & MONO_ARCH_CALLEE_SAVED_REGS;
2284 for (i = 0; i < 32; i++) {
2285 if ((1 << i) & mask)
2286 pos += sizeof (gulong);
2288 } else {
2289 lmf_offset = pos;
2290 pos += sizeof (MonoLMF);
2293 alloc_size = ALIGN_TO (pos + cfg->stack_offset, MONO_ARCH_FRAME_ALIGNMENT);
2294 g_assert ((alloc_size & (MONO_ARCH_FRAME_ALIGNMENT - 1)) == 0);
2296 cfg->stack_usage = alloc_size;
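/* Allocate the frame: r1 temporarily holds the old r3, r3 becomes the new
 * frame pointer (the old sp), and stwm stores the old r3 at the frame base
 * while advancing sp. Frames too large for the 14-bit displacement are
 * grown in two steps (8100 bytes via stwm, the rest via addil/ldo). */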
2298 if (alloc_size) {
2299 hppa_copy (code, hppa_r3, hppa_r1);
2300 hppa_copy (code, hppa_sp, hppa_r3);
2301 if (hppa_check_bits (alloc_size, 14))
2302 hppa_stwm (code, hppa_r1, alloc_size, hppa_sp);
2303 else {
2304 hppa_stwm (code, hppa_r1, 8100, hppa_sp);
2305 hppa_addil (code, hppa_lsel (alloc_size - 8100), hppa_sp);
2306 hppa_ldo (code, hppa_rsel (alloc_size - 8100), hppa_r1, hppa_sp);
2310 /* compute max_offset in order to use short forward jumps
2311 * we always do it on hppa because the immediate displacement
2312 * for jumps is small
2313 */
2314 max_offset = 0;
2315 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
2316 MonoInst *ins = bb->code;
2317 bb->max_offset = max_offset;
2319 if (cfg->prof_options & MONO_PROFILE_COVERAGE)
2320 max_offset += 6;
2322 MONO_BB_FOR_EACH_INS (bb, ins)
2323 max_offset += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
2326 DEBUG (printf ("Incoming arguments: \n"));
2327 cinfo = get_call_info (sig, sig->pinvoke);
2329 /* We do this first so that we don't have to worry about the LMF-
2330 * saving code clobbering r28
2331 */
2332 if (cinfo->struct_return)
2333 hppa_stw (code, hppa_r28, cfg->ret->inst_offset, hppa_sp);
2335 /* Save the LMF or the spilled registers */
2336 pos = HPPA_STACK_LMF_OFFSET;
2337 if (!method->save_lmf) {
2338 /* spill callee-save registers */
2339 guint32 mask = cfg->used_int_regs & MONO_ARCH_CALLEE_SAVED_REGS;
2340 for (i = 0; i < 32; i++) {
2341 if ((1 << i) & mask) {
2342 if (i == hppa_r3) {
2343 hppa_ldw (code, 0, hppa_r3, hppa_r1);
2344 hppa_stw (code, hppa_r1, pos, hppa_r3);
2345 } else
2346 hppa_stw (code, i, pos, hppa_r3);
2347 pos += sizeof (gulong);
2350 } else {
2351 int ofs = lmf_offset + G_STRUCT_OFFSET (MonoLMF, regs);
2352 int reg;
2354 hppa_ldw (code, 0, hppa_r3, hppa_r1);
2355 hppa_stw (code, hppa_r1, ofs, hppa_r3);
2356 ofs += sizeof (gulong);
2357 for (reg = 4; reg < 32; reg++) {
2358 if (HPPA_IS_SAVED_GREG (reg)) {
2359 hppa_stw (code, reg, ofs, hppa_r3);
2360 ofs += sizeof (gulong);
2363 /* We shouldn't need to save the FP regs.... */
2364 ofs = ALIGN_TO (ofs, sizeof(double));
2365 hppa_set (code, ofs, hppa_r1);
2366 for (reg = 0; reg < 32; reg++) {
2367 if (HPPA_IS_SAVED_FREG (reg)) {
2368 hppa_fstdx (code, reg, hppa_r1, hppa_r3);
2369 hppa_ldo (code, sizeof(double), hppa_r1, hppa_r1);
2373 /* We also spill the arguments onto the stack, because
2374 * the call to hppa_get_lmf_addr below can clobber them
2376 * This goes in the param area that is always allocated
2377 */
2378 ofs = -36;
2379 for (reg = hppa_r26; reg >= hppa_r23; reg--) {
2380 hppa_stw (code, reg, ofs, hppa_sp);
2381 ofs -= 4;
2385 if (cfg->flags & MONO_CFG_HAS_ALLOCA)
2386 hppa_copy (code, hppa_r30, hppa_r4);
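/* The ldil/ldo pairs below emit a placeholder address that
 * mono_arch_patch_code rewrites once the internal method is resolved;
 * depi clears the descriptor flag bits, ldw fetches the entry point, and
 * ble performs the call with the return address in r31. */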
2388 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
2389 hppa_set (code, cfg->domain, hppa_r26);
2390 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD, (gpointer)"mono_jit_thread_attach");
2391 hppa_ldil (code, 0, hppa_r1);
2392 hppa_ldo (code, 0, hppa_r1, hppa_r1);
2393 hppa_depi (code, 0, 31, 2, hppa_r1);
2394 hppa_ldw (code, 0, hppa_r1, hppa_r1);
2395 hppa_ble (code, 0, hppa_r1);
2396 hppa_copy (code, hppa_r31, hppa_r2);
2399 if (method->save_lmf) {
2400 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
2401 (gpointer)"mono_get_lmf_addr");
2402 hppa_ldil (code, 0, hppa_r1);
2403 hppa_ldo (code, 0, hppa_r1, hppa_r1);
2404 hppa_depi (code, 0, 31, 2, hppa_r1);
2405 hppa_ldw (code, 0, hppa_r1, hppa_r1);
2406 hppa_ble (code, 0, hppa_r1);
2407 hppa_copy (code, hppa_r31, hppa_r2);
2409 /* lmf_offset is the offset from the previous stack pointer (r3).
2410 * The pointer to the struct is put in hppa_r22 (new_lmf).
2411 * The callee-saved registers are already in the MonoLMF
2412 * structure.
2413 */
2415 /* hppa_r22 = new_lmf (on the stack) */
2416 hppa_ldo (code, lmf_offset, hppa_r3, hppa_r22);
2417 /* new_lmf->lmf_addr = lmf_addr
2418 * (mono_get_lmf_addr returned lmf_addr in hppa_r28) */
2419 hppa_stw (code, hppa_r28, G_STRUCT_OFFSET(MonoLMF, lmf_addr), hppa_r22);
2420 /* new_lmf->previous_lmf = *lmf_addr */
2421 hppa_ldw (code, 0, hppa_r28, hppa_r1);
2422 hppa_stw (code, hppa_r1, G_STRUCT_OFFSET(MonoLMF, previous_lmf), hppa_r22);
2423 /* *(lmf_addr) = r22 */
2424 hppa_stw (code, hppa_r22, 0, hppa_r28);
2425 hppa_set (code, method, hppa_r1);
2426 hppa_stw (code, hppa_r1, G_STRUCT_OFFSET(MonoLMF, method), hppa_r22);
2427 hppa_stw (code, hppa_sp, G_STRUCT_OFFSET(MonoLMF, ebp), hppa_r22);
2428 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_IP, NULL);
2429 hppa_ldil (code, 0, hppa_r1);
2430 hppa_ldo (code, 0, hppa_r1, hppa_r1);
2431 hppa_stw (code, hppa_r1, G_STRUCT_OFFSET(MonoLMF, eip), hppa_r22);
2433 /* Now reload the arguments from the stack */
2434 hppa_ldw (code, -36, hppa_sp, hppa_r26);
2435 hppa_ldw (code, -40, hppa_sp, hppa_r25);
2436 hppa_ldw (code, -44, hppa_sp, hppa_r24);
2437 hppa_ldw (code, -48, hppa_sp, hppa_r23);
2440 /* load arguments allocated to register from the stack */
2441 pos = 0;
2443 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2444 ArgInfo *ainfo = cinfo->args + i;
2445 inst = cfg->args [pos];
2447 if (inst->opcode == OP_REGVAR) {
2448 /* Want the argument in a register */
2449 switch (ainfo->storage) {
2450 case ArgInIReg:
2451 if (ainfo->reg != inst->dreg)
2452 hppa_copy (code, ainfo->reg, inst->dreg);
2453 DEBUG (printf ("Argument %d assigned to register %s\n", pos, mono_arch_regname (inst->dreg)));
2454 break;
2456 case ArgInIRegPair:
2457 if (ainfo->reg != inst->dreg) {
2458 hppa_copy (code, ainfo->reg, inst->dreg);
2459 hppa_copy (code, ainfo->reg + 1, inst->dreg + 1);
2461 DEBUG (printf ("Argument %d assigned to register %s, %s\n", pos, mono_arch_regname (inst->dreg), mono_arch_regname (inst->dreg + 1)));
2462 break;
2464 case ArgInFReg:
2465 if (ainfo->reg != inst->dreg)
2466 hppa_fcpy (code, HPPA_FP_FMT_SGL, ainfo->reg, inst->dreg);
2467 DEBUG (printf ("Argument %d assigned to single register %s\n", pos, mono_arch_fregname (inst->dreg)));
2468 break;
2470 case ArgInDReg:
2471 if (ainfo->reg != inst->dreg)
2472 hppa_fcpy (code, HPPA_FP_FMT_DBL, ainfo->reg, inst->dreg);
2473 DEBUG (printf ("Argument %d assigned to double register %s\n", pos, mono_arch_fregname (inst->dreg)));
2474 break;
2476 case ArgOnStack:
2477 switch (ainfo->size) {
2478 case 1:
2479 hppa_ldb (code, ainfo->offset, hppa_r3, inst->dreg);
2480 break;
2481 case 2:
2482 hppa_ldh (code, ainfo->offset, hppa_r3, inst->dreg);
2483 break;
2484 case 4:
2485 hppa_ldw (code, ainfo->offset, hppa_r3, inst->dreg);
2486 break;
2487 default:
2488 g_assert_not_reached ();
2492 DEBUG (printf ("Argument %d loaded from the stack [%s - %d]\n", pos, mono_arch_regname (hppa_r3), -ainfo->offset));
2493 break;
2495 default:
2496 g_assert_not_reached ();
2497 }
2498 }
2499 else {
2500 /* Want the argument on the stack */
2501 switch (ainfo->storage)
2502 {
2503 case ArgInIReg: {
2504 int off, reg;
2505 DEBUG (printf ("Argument %d stored from register %s to stack [%s + %d]\n", pos, mono_arch_regname (ainfo->reg), mono_arch_regname (inst->inst_basereg), inst->inst_offset));
2506 if (hppa_check_bits (inst->inst_offset, 14)) {
2507 off = inst->inst_offset;
2508 reg = inst->inst_basereg;
2510 else {
2511 hppa_set (code, inst->inst_offset, hppa_r1);
2512 hppa_add (code, hppa_r1, inst->inst_basereg, hppa_r1);
2513 off = 0;
2514 reg = hppa_r1;
2516 switch (ainfo->size)
2517 {
2518 case 1:
2519 hppa_stb (code, ainfo->reg, off, reg);
2520 break;
2521 case 2:
2522 hppa_sth (code, ainfo->reg, off, reg);
2523 break;
2524 case 4:
2525 hppa_stw (code, ainfo->reg, off, reg);
2526 break;
2527 default:
2528 g_assert_not_reached ();
2529 }
2530 break;
2531 }
2532 case ArgInIRegPair:
2533 DEBUG (printf ("Argument %d stored from register (%s,%s) to stack [%s + %d]\n", pos, mono_arch_regname (ainfo->reg), mono_arch_regname (ainfo->reg+1), mono_arch_regname (inst->inst_basereg), inst->inst_offset));
2534 if (hppa_check_bits (inst->inst_offset + 4, 14)) {
2535 hppa_stw (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
2536 hppa_stw (code, ainfo->reg + 1, inst->inst_offset + 4, inst->inst_basereg);
2538 else {
2539 hppa_ldo (code, inst->inst_offset, inst->inst_basereg, hppa_r1);
2540 hppa_stw (code, ainfo->reg, 0, hppa_r1);
2541 hppa_stw (code, ainfo->reg + 1, 4, hppa_r1);
2543 break;
2545 case ArgInFReg:
2546 DEBUG (printf ("Argument %d (float) stored from register %s to stack [%s + %d]\n", pos, mono_arch_fregname (ainfo->reg), mono_arch_regname (inst->inst_basereg), inst->inst_offset));
2547 hppa_ldo (code, inst->inst_offset, inst->inst_basereg, hppa_r1);
2548 hppa_fstwx (code, ainfo->reg, 0, hppa_r0, hppa_r1);
2549 break;
2551 case ArgInDReg:
2552 DEBUG (printf ("Argument %d (double) stored from register %s to stack [%s + %d]\n", pos, mono_arch_fregname (ainfo->reg), mono_arch_regname (inst->inst_basereg), inst->inst_offset));
2553 hppa_ldo (code, inst->inst_offset, inst->inst_basereg, hppa_r1);
2554 hppa_fstdx (code, ainfo->reg, hppa_r0, hppa_r1);
2555 break;
2557 case ArgOnStack:
2558 DEBUG (printf ("Argument %d copied from [%s - %d] to [%s + %d] (size=%d)\n", pos, mono_arch_regname (hppa_r3), -ainfo->offset, mono_arch_regname (inst->inst_basereg), inst->inst_offset, ainfo->size));
2559 if (inst->inst_offset != ainfo->offset ||
2560 inst->inst_basereg != hppa_r3)
2561 code = emit_memcpy (code, inst->inst_offset, inst->inst_basereg, ainfo->offset, hppa_r3, ainfo->size);
2562 break;
2564 default:
2565 g_assert_not_reached ();
2566 }
2567 }
2569 pos++;
2570 }
2573 if (tracing)
2574 code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
2576 if (getenv("HPPA_BREAK")) {
2577 *(guint32*)code = 0x00010004;
2578 code += 4;
2579 }
2581 cfg->code_len = code - cfg->native_code;
2582 g_assert (cfg->code_len < cfg->code_size);
2583 g_free (cinfo);
2585 DEBUG_FUNC_EXIT();
2586 return code;
2587 }
2590 void
2591 mono_arch_emit_epilog (MonoCompile *cfg)
2592 {
2593 MonoMethod *method = cfg->method;
2594 MonoMethodSignature *sig;
2595 guint32 *code;
2596 int max_epilog_size = 16 + 20 * 4;
2597 int pos;
2599 DEBUG_FUNC_ENTER();
2600 sig = mono_method_signature (cfg->method);
2601 if (cfg->method->save_lmf)
2602 max_epilog_size += 128;
2604 if (mono_jit_trace_calls != NULL)
2605 max_epilog_size += 50;
2607 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
2608 max_epilog_size += 50;
2610 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
2611 cfg->code_size *= 2;
2612 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2613 mono_jit_stats.code_reallocs++;
2616 code = (guint32*)(cfg->native_code + cfg->code_len);
2618 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
2619 code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
2621 pos = HPPA_STACK_LMF_OFFSET;
2622 if (cfg->method->save_lmf) {
2623 int reg;
2624 hppa_ldo (code, pos, hppa_r3, hppa_r22);
2625 hppa_ldw (code, G_STRUCT_OFFSET(MonoLMF, previous_lmf), hppa_r22, hppa_r21);
2626 hppa_ldw (code, G_STRUCT_OFFSET(MonoLMF, lmf_addr), hppa_r22, hppa_r20);
2627 hppa_stw (code, hppa_r21, G_STRUCT_OFFSET(MonoLMF, previous_lmf), hppa_r20);
2629 pos += G_STRUCT_OFFSET(MonoLMF, regs) + sizeof (gulong);
2630 /* We skip the restore of r3 here; it is restored from the
2631 * stack anyway. This makes the code a bit easier.
2632 */
2633 for (reg = 4; reg < 31; reg++) {
2634 if (HPPA_IS_SAVED_GREG (reg)) {
2635 hppa_ldw (code, pos, hppa_r3, reg);
2636 pos += sizeof(gulong);
2640 pos = ALIGN_TO (pos, sizeof (double));
2641 hppa_set (code, pos, hppa_r1);
2642 for (reg = 0; reg < 31; reg++) {
2643 if (HPPA_IS_SAVED_FREG (reg)) {
2644 hppa_flddx (code, hppa_r1, hppa_r3, reg);
2645 hppa_ldo (code, sizeof (double), hppa_r1, hppa_r1);
2646 pos += sizeof (double);
2649 } else {
2650 guint32 mask = cfg->used_int_regs & MONO_ARCH_CALLEE_SAVED_REGS;
2651 int i;
2652 for (i = 0; i < 32; i++) {
2653 if (i == hppa_r3)
2654 continue;
2655 if ((1 << i) & mask) {
2656 hppa_ldw (code, pos, hppa_r3, i);
2657 pos += sizeof (gulong);
2658 }
2659 }
2660 }
2662 if (sig->ret->type != MONO_TYPE_VOID &&
2663 mono_type_to_stind (sig->ret) == CEE_STOBJ) {
2664 CallInfo *cinfo = get_call_info (sig, sig->pinvoke);
2666 switch (cinfo->ret.storage) {
2667 case ArgInIReg:
2668 hppa_ldw (code, cfg->ret->inst_offset, hppa_sp, hppa_r28);
2669 hppa_ldw (code, 0, hppa_r28, hppa_r28);
2670 break;
2671 case ArgInIRegPair:
2672 hppa_ldw (code, cfg->ret->inst_offset, hppa_sp, hppa_r28);
2673 hppa_ldw (code, 4, hppa_r28, hppa_r29);
2674 hppa_ldw (code, 0, hppa_r28, hppa_r28);
2675 break;
2676 case ArgOnStack:
2677 /* Nothing to do */
2678 break;
2679 default:
2680 g_assert_not_reached ();
2681 }
2682 g_free (cinfo);
2683 }
2685 if (1 || cfg->flags & MONO_CFG_HAS_CALLS)
2686 hppa_ldw (code, -20, hppa_r3, hppa_r2);
2687 hppa_ldo (code, 64, hppa_r3, hppa_sp);
2688 hppa_bv (code, hppa_r0, hppa_r2);
2689 hppa_ldwm (code, -64, hppa_sp, hppa_r3);
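/* The three instructions above tear down the frame: sp is moved to r3 + 64,
 * bv branches to the return address in r2, and the ldwm in the delay slot
 * restores the caller's r3 from the old frame base while moving sp back
 * down to it. */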
2691 cfg->code_len = (guint8*)code - cfg->native_code;
2693 g_assert (cfg->code_len < cfg->code_size);
2694 DEBUG_FUNC_EXIT();
2695 }
2697 /* remove once throw_exception_by_name is eliminated */
2698 static int
2699 exception_id_by_name (const char *name)
2700 {
2701 if (strcmp (name, "IndexOutOfRangeException") == 0)
2702 return MONO_EXC_INDEX_OUT_OF_RANGE;
2703 if (strcmp (name, "OverflowException") == 0)
2704 return MONO_EXC_OVERFLOW;
2705 if (strcmp (name, "ArithmeticException") == 0)
2706 return MONO_EXC_ARITHMETIC;
2707 if (strcmp (name, "DivideByZeroException") == 0)
2708 return MONO_EXC_DIVIDE_BY_ZERO;
2709 if (strcmp (name, "InvalidCastException") == 0)
2710 return MONO_EXC_INVALID_CAST;
2711 if (strcmp (name, "NullReferenceException") == 0)
2712 return MONO_EXC_NULL_REF;
2713 if (strcmp (name, "ArrayTypeMismatchException") == 0)
2714 return MONO_EXC_ARRAY_TYPE_MISMATCH;
2715 g_error ("Unknown intrinsic exception %s\n", name);
2716 return 0;
2717 }
2719 void
2720 mono_arch_emit_exceptions (MonoCompile *cfg)
2721 {
2722 MonoJumpInfo *patch_info;
2723 int i;
2724 guint8 *code;
2725 const guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM] = {NULL};
2726 guint8 exc_throw_found [MONO_EXC_INTRINS_NUM] = {0};
2727 int max_epilog_size = 50;
2729 DEBUG_FUNC_ENTER();
2731 /* count the number of exception infos
2732 */
2733 /*
2734 * make sure we have enough space for exceptions
2735 */
2736 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
2737 switch (patch_info->type) {
2738 case MONO_PATCH_INFO_BB_OVF:
2739 g_assert_not_reached ();
2740 break;
2742 case MONO_PATCH_INFO_EXC_OVF: {
2743 const MonoOvfJump *ovfj = patch_info->data.target;
2744 max_epilog_size += 8;
2745 i = exception_id_by_name (ovfj->data.exception);
2746 if (!exc_throw_found [i]) {
2747 max_epilog_size += 24;
2748 exc_throw_found [i] = TRUE;
2750 break;
2753 case MONO_PATCH_INFO_EXC:
2754 i = exception_id_by_name (patch_info->data.target);
2755 if (!exc_throw_found [i]) {
2756 max_epilog_size += 24;
2757 exc_throw_found [i] = TRUE;
2759 break;
2761 default:
2762 break;
2766 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
2767 cfg->code_size *= 2;
2768 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2769 mono_jit_stats.code_reallocs++;
2772 code = cfg->native_code + cfg->code_len;
2774 /* add code to raise exceptions */
2775 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
2776 switch (patch_info->type) {
2777 case MONO_PATCH_INFO_BB_OVF: {
2778 /* TODO */
2779 break;
2781 case MONO_PATCH_INFO_EXC_OVF: {
2782 const MonoOvfJump *ovfj = patch_info->data.target;
2783 MonoJumpInfo *newji;
2784 unsigned char *ip = patch_info->ip.i + cfg->native_code;
2785 unsigned char *stub = code;
2787 /* Patch original call, point it at the stub */
2788 hppa_patch ((guint32 *)ip, code);
2790 /* Write the stub */
2791 /* SUBTLE: this has to be PIC, because the code block
2792 * can be relocated
2793 */
2794 hppa_bl_n (code, 8, hppa_r0);
2795 hppa_nop (code);
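/* The stub is just a bl,n over its own nop; the MONO_PATCH_INFO_EXC entry
 * added below repoints it at the shared throw sequence once that code has
 * been emitted. */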
2797 /* Add a patch info to patch the stub to point to the exception code */
2798 newji = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfo));
2799 newji->type = MONO_PATCH_INFO_EXC;
2800 newji->ip.i = stub - cfg->native_code;
2801 newji->data.target = ovfj->data.exception;
2802 newji->next = patch_info->next;
2803 patch_info->next = newji;
2804 break;
2806 case MONO_PATCH_INFO_EXC: {
2807 unsigned char *ip = patch_info->ip.i + cfg->native_code;
2808 i = exception_id_by_name (patch_info->data.target);
2809 if (exc_throw_pos [i]) {
2810 hppa_patch ((guint32 *)ip, exc_throw_pos [i]);
2811 patch_info->type = MONO_PATCH_INFO_NONE;
2812 break;
2813 } else {
2814 exc_throw_pos [i] = code;
2816 hppa_patch ((guint32 *)ip, code);
2817 hppa_set (code, patch_info->data.target, hppa_r26);
2818 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
2819 patch_info->data.name = "mono_arch_throw_exception_by_name";
2820 patch_info->ip.i = code - cfg->native_code;
2822 /* Assume the caller has set r2; we can't set it
2823 * here based on ip, because the caller may
2824 * be relocated (also the "ip" may be from an overflow
2825 * stub)
2826 */
2827 hppa_ldil (code, 0, hppa_r1);
2828 hppa_ldo (code, 0, hppa_r1, hppa_r1);
2829 hppa_bv (code, hppa_r0, hppa_r1);
2830 hppa_nop (code);
2831 break;
2833 default:
2834 /* do nothing */
2835 break;
2839 cfg->code_len = code - cfg->native_code;
2841 g_assert (cfg->code_len < cfg->code_size);
2842 DEBUG_FUNC_EXIT();
2843 }
2845 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
2847 #error "--with-sigaltstack=yes not supported on hppa"
2849 #endif
2851 void
2852 mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
2853 {
2854 }
2856 void
2857 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
2858 {
2859 }
2861 void
2862 mono_arch_emit_this_vret_args (MonoCompile *cfg, MonoCallInst *inst, int this_reg, int this_type, int vt_reg)
2863 {
2864 /* add the this argument */
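/* r26 is the first integer argument register on HPPA, so the this pointer
 * is forced into it; a valuetype return buffer pointer is passed in r28. */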
2865 if (this_reg != -1) {
2866 MonoInst *this;
2867 MONO_INST_NEW (cfg, this, OP_MOVE);
2868 this->type = this_type;
2869 this->sreg1 = this_reg;
2870 this->dreg = mono_alloc_ireg (cfg);
2871 mono_bblock_add_inst (cfg->cbb, this);
2872 mono_call_inst_add_outarg_reg (cfg, inst, this->dreg, hppa_r26, FALSE);
2873 }
2875 if (vt_reg != -1) {
2876 MonoInst *vtarg;
2877 MONO_INST_NEW (cfg, vtarg, OP_MOVE);
2878 vtarg->type = STACK_MP;
2879 vtarg->sreg1 = vt_reg;
2880 vtarg->dreg = mono_alloc_ireg (cfg);
2881 mono_bblock_add_inst (cfg->cbb, vtarg);
2882 mono_call_inst_add_outarg_reg (cfg, inst, vtarg->dreg, hppa_r28, FALSE);
2883 }
2885 }
2887 MonoInst*
2888 mono_arch_get_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
2889 {
2890 MonoInst *ins = NULL;
2891 DEBUG_FUNC_ENTER();
2892 DEBUG_FUNC_EXIT();
2894 return ins;
2895 }
2897 /*
2898 * mono_arch_get_argument_info:
2899 * @csig: a method signature
2900 * @param_count: the number of parameters to consider
2901 * @arg_info: an array to store the result infos
2903 * Gathers information on parameters such as size, alignment and
2904 * padding. arg_info should be large enough to hold param_count + 1 entries.
2906 * Returns the size of the activation frame.
2907 */
2908 int
2909 mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
2910 {
2911 int k, align;
2912 CallInfo *cinfo;
2913 ArgInfo *ainfo;
2915 DEBUG_FUNC_ENTER();
2916 cinfo = get_call_info (csig, FALSE);
2918 if (csig->hasthis) {
2919 ainfo = &cinfo->args [0];
2920 arg_info [0].offset = ainfo->offset;
2921 }
2923 for (k = 0; k < param_count; k++) {
2924 ainfo = &cinfo->args [k + csig->hasthis];
2926 arg_info [k + 1].offset = ainfo->offset;
2927 arg_info [k + 1].size = mono_type_size (csig->params [k], &align);
2928 }
2930 g_free (cinfo);
2931 DEBUG_FUNC_EXIT();
2932 }
2934 gboolean
2935 mono_arch_print_tree (MonoInst *tree, int arity)
2936 {
2937 return 0;
2938 }
2940 MonoInst* mono_arch_get_domain_intrinsic (MonoCompile* cfg)
2941 {
2942 return NULL;
2943 }
2945 MonoInst* mono_arch_get_thread_intrinsic (MonoCompile* cfg)
2946 {
2947 return NULL;
2948 }
2950 gpointer
2951 mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
2952 {
2953 /* FIXME: implement */
2954 g_assert_not_reached ();