/*
 * mini-hppa.c: HPPA backend for the Mono code generator
 *
 * Copyright (c) 2007 Randolph Chung
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include <mono/metadata/appdomain.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/tokentype.h>
#include <mono/utils/mono-math.h>

#include "mini-hppa.h"
#define ALIGN_TO(val,align) (((val) + ((align) - 1)) & ~((align) - 1))
#define SIGNAL_STACK_SIZE (64 * 1024)
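/* ALIGN_TO rounds "val" up to the next multiple of "align", which must be a
 * power of two: e.g. ALIGN_TO (13, 8) == 16 and ALIGN_TO (16, 8) == 16. */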
#define DEBUG_FUNC_ENTER() // printf("Entering %s\n", __FUNCTION__)
#define DEBUG_FUNC_EXIT() // printf("Exiting %s\n", __FUNCTION__)
branch_b0_table [] = {
	TRUE,	/* OP_HPPA_BEQ */
	FALSE,	/* OP_HPPA_BGE */
	FALSE,	/* OP_HPPA_BGT */
	TRUE,	/* OP_HPPA_BLE */
	TRUE,	/* OP_HPPA_BLT */
	FALSE,	/* OP_HPPA_BNE */
	FALSE,	/* OP_HPPA_BGE_UN */
	FALSE,	/* OP_HPPA_BGT_UN */
	TRUE,	/* OP_HPPA_BLE_UN */
	TRUE,	/* OP_HPPA_BLT_UN */
};
branch_b1_table [] = {
	HPPA_CMP_COND_EQ,	/* OP_HPPA_BEQ */
	HPPA_CMP_COND_SLT,	/* OP_HPPA_BGE */
	HPPA_CMP_COND_SLE,	/* OP_HPPA_BGT */
	HPPA_CMP_COND_SLE,	/* OP_HPPA_BLE */
	HPPA_CMP_COND_SLT,	/* OP_HPPA_BLT */
	HPPA_CMP_COND_EQ,	/* OP_HPPA_BNE_UN */
	HPPA_CMP_COND_ULT,	/* OP_HPPA_BGE_UN */
	HPPA_CMP_COND_ULE,	/* OP_HPPA_BGT_UN */
	HPPA_CMP_COND_ULE,	/* OP_HPPA_BLE_UN */
	HPPA_CMP_COND_ULT,	/* OP_HPPA_BLT_UN */
};
/* Note that these are inverted from the OP_xxx, because we nullify
 * the branch if the condition is met
 */
float_branch_table [] = {
	/* ... */
};

float_ceq_table [] = {
	/* ... */
};
/*
 * Branches have short (14 or 17 bit) targets on HPPA. To make longer jumps,
 * we will need to rely on stubs - basically we create stub structures in
 * the epilogue that use a long branch to the destination, and any short
 * jumps inside a method that cannot reach the destination directly will
 * branch first to the stub.
 */
typedef struct MonoOvfJump {
	union {
		/* ... */
		const char *exception;
	} data;
	guint32 ip_offset;
} MonoOvfJump;
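/* The overflow-check emitters below (the OP_HPPA_*_OVF cases and the
 * EMIT_COND_SYSTEM_EXCEPTION_FLAGS macro) fill in data.exception with the
 * exception name and ip_offset with the code offset, and register the
 * struct with mono_add_patch_info () as MONO_PATCH_INFO_EXC_OVF data. */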
/* Create a literal 0.0 double for FNEG */
double hppa_zero = 0;
mono_arch_regname (int reg)
{
	static const char * rnames [] = {
		"hppa_r0", "hppa_r1", "hppa_rp", "hppa_r3", "hppa_r4",
		"hppa_r5", "hppa_r6", "hppa_r7", "hppa_r8", "hppa_r9",
		"hppa_r10", "hppa_r11", "hppa_r12", "hppa_r13", "hppa_r14",
		"hppa_r15", "hppa_r16", "hppa_r17", "hppa_r18", "hppa_r19",
		"hppa_r20", "hppa_r21", "hppa_r22", "hppa_r23", "hppa_r24",
		"hppa_r25", "hppa_r26", "hppa_r27", "hppa_r28", "hppa_r29",
		"hppa_sp", "hppa_r31"
	};
	if (reg >= 0 && reg < MONO_MAX_IREGS)
		return rnames [reg];
	return "unknown";
}
mono_arch_fregname (int reg)
{
	static const char *rnames [] = {
		"hppa_fr0", "hppa_fr1", "hppa_fr2", "hppa_fr3", "hppa_fr4",
		"hppa_fr5", "hppa_fr6", "hppa_fr7", "hppa_fr8", "hppa_fr9",
		"hppa_fr10", "hppa_fr11", "hppa_fr12", "hppa_fr13", "hppa_fr14",
		"hppa_fr15", "hppa_fr16", "hppa_fr17", "hppa_fr18", "hppa_fr19",
		"hppa_fr20", "hppa_fr21", "hppa_fr22", "hppa_fr23", "hppa_fr24",
		"hppa_fr25", "hppa_fr26", "hppa_fr27", "hppa_fr28", "hppa_fr29",
		"hppa_fr30", "hppa_fr31",
	};
	if (reg >= 0 && reg < MONO_MAX_FREGS)
		return rnames [reg];
	return "unknown";
}
/*
 * Initialize the cpu to execute managed code.
 */
mono_arch_cpu_init (void)
{
	guint32 dummy;
	mono_arch_cpu_optimizazions (&dummy);
}

/*
 * Initialize architecture specific code.
 */
mono_arch_init (void)
{
	/* ... */
}

/*
 * Cleanup architecture specific code.
 */
mono_arch_cleanup (void)
{
	/* ... */
}

/*
 * This function returns the optimizations supported on this cpu.
 */
mono_arch_cpu_optimizazions (guint32 *exclude_mask)
{
	/* ... */
}
mono_arch_flush_icache (guint8 *code, gint size)
{
	guint8 *p = (guint8 *)((guint32)code & ~(0x3f));
	guint8 *end = (guint8 *)((guint32)code + size);

	while (p < end) {
		__asm__ __volatile__ ("fdc %%r0(%%sr3, %0)\n"
			"sync\n"
			"fic %%r0(%%sr3, %0)\n"
			"sync\n"
			: : "r" (p));
		p += 32; /* can be 64 on pa20 cpus */
	}
}

mono_arch_flush_register_windows (void)
{
	/* No register windows on hppa */
}
#define ARGS_OFFSET 36
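/* Parameter layout, as implemented by add_parameter () below: the first
 * PARAM_REGS argument words are passed in integer registers counting down
 * from hppa_r26 (hppa_r26 - ofs), with floating point arguments going to
 * hppa_fr4 + ofs; the rest are passed on the stack at negative offsets from
 * the frame pointer (hppa_r3).  ARGS_OFFSET is the size of the fixed linkage
 * area (see the stack frame layout comment before mono_arch_emit_prolog ()). */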
add_parameter (CallInfo *cinfo, ArgInfo *ainfo, MonoType *type)
{
	int is_fp = (type->type == MONO_TYPE_R4 || type->type == MONO_TYPE_R8);
	/* ... */

	ainfo->size = mono_type_size (type, &align);
	ainfo->type = type->type;

	if (ainfo->size <= 4) {
		cinfo->stack_usage += 4;
		ainfo->offset = cinfo->stack_usage - (4 - ainfo->size);
	}
	else if (ainfo->size <= 8) {
		cinfo->stack_usage += 8;
		cinfo->stack_usage = ALIGN_TO (cinfo->stack_usage, 8);
		ainfo->offset = cinfo->stack_usage - (8 - ainfo->size);
	}
	else {
		cinfo->stack_usage += ainfo->size;
		cinfo->stack_usage = ALIGN_TO (cinfo->stack_usage, align);
		ainfo->offset = cinfo->stack_usage;
	}

	ofs = (ALIGN_TO (ainfo->offset, 4) - ARGS_OFFSET) / 4;
	if (ofs < PARAM_REGS) {
		if (!is_fp) {
			if (ainfo->size <= 4)
				ainfo->storage = ArgInIReg;
			else
				ainfo->storage = ArgInIRegPair;
			ainfo->reg = hppa_r26 - ofs;
		} else if (type->type == MONO_TYPE_R4) {
			ainfo->storage = ArgInFReg;
			ainfo->reg = hppa_fr4 + ofs;
		} else { /* type->type == MONO_TYPE_R8 */
			ainfo->storage = ArgInDReg;
			ainfo->reg = hppa_fr4 + ofs;
		}
	} else {
		/* frame pointer based offset */
		ainfo->reg = hppa_r3;
		ainfo->storage = ArgOnStack;
	}

	/* All offsets are negative relative to the frame pointer */
	ainfo->offset = -ainfo->offset;
}
analyze_return (CallInfo *cinfo, MonoMethodSignature *sig)
{
	MonoType *type = sig->ret;
	/* ... */

	size = mono_type_size (type, &align);

	/* ref: mono_type_to_stind */
	cinfo->ret.type = type->type;
	cinfo->ret.storage = ArgInIReg;
	cinfo->ret.reg = hppa_r28;

	switch (type->type) {
	case MONO_TYPE_BOOLEAN:
	/* ... */
	case MONO_TYPE_FNPTR:
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
		cinfo->ret.storage = ArgInIReg;
		cinfo->ret.reg = hppa_r28;
		break;
	/* ... */
		cinfo->ret.storage = ArgInIRegPair;
		cinfo->ret.reg = hppa_r28;
		break;
	/* ... */
		cinfo->ret.storage = ArgInFReg;
		cinfo->ret.reg = hppa_fr4;
		break;
	/* ... */
		cinfo->ret.storage = ArgInDReg;
		cinfo->ret.reg = hppa_fr4;
		break;
	case MONO_TYPE_GENERICINST:
		type = &type->data.generic_class->container_class->byval_arg;
		/* ... */
	case MONO_TYPE_VALUETYPE:
		if (type->data.klass->enumtype) {
			type = mono_class_enum_basetype (type->data.klass);
			/* ... */
		}
		/* ... */
	case MONO_TYPE_TYPEDBYREF:
		cinfo->struct_return = 1;
		/* cinfo->ret.storage tells us how the ABI expects
		 * the parameter to be returned
		 */
		if (size <= 4) {
			cinfo->ret.storage = ArgInIReg;
			cinfo->ret.reg = hppa_r28;
		} else if (size <= 8) {
			cinfo->ret.storage = ArgInIRegPair;
			cinfo->ret.reg = hppa_r28;
		} else {
			cinfo->ret.storage = ArgOnStack;
			cinfo->ret.reg = hppa_sp;
		}
		/* We always allocate stack space for this because the
		 * arch-indep code expects us to
		 */
		cinfo->stack_usage += size;
		cinfo->stack_usage = ALIGN_TO (cinfo->stack_usage, align);
		cinfo->ret.offset = -cinfo->stack_usage;
		break;

	/* ... */
	default:
		g_error ("Can't handle as return value 0x%x", sig->ret->type);
	}
}
/*
 * Obtain information about a call according to the calling convention.
 */
get_call_info (MonoMethodSignature *sig, gboolean is_pinvoke)
{
	int n = sig->hasthis + sig->param_count;
	/* ... */

	ptrtype.type = MONO_TYPE_PTR;

	cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));

	/* The area below ARGS_OFFSET is the linkage area... */
	cinfo->stack_usage = ARGS_OFFSET - 4;
	/* -4, because the first argument will allocate the area it needs */

	if (sig->hasthis) {
		add_parameter (cinfo, cinfo->args + 0, &ptrtype);
		DEBUG (printf ("param <this>: assigned to reg %s offset %d\n", mono_arch_regname (cinfo->args [0].reg), cinfo->args [0].offset));
	}

	/* TODO: What to do with varargs? */

	for (i = 0; i < sig->param_count; ++i) {
		ArgInfo *ainfo = &cinfo->args [sig->hasthis + i];
		if (sig->params [i]->byref) {
			/* ... */
		}
		type = mono_type_get_underlying_type (sig->params [i]);
		add_parameter (cinfo, ainfo, type);

		DEBUG (printf ("param %d: type %d size %d assigned to reg %s offset %d\n", i, type->type, mono_type_size (type, &dummy), mono_arch_regname (ainfo->reg), ainfo->offset));
	}

	analyze_return (cinfo, sig);

	return cinfo;
}
mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
{
	/* ... */

	for (i = 0; i < cfg->num_varinfo; i++) {
		MonoInst *ins = cfg->varinfo [i];
		MonoMethodVar *vmv = MONO_VARINFO (cfg, i);

		if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
			continue;

		if ((ins->flags & (MONO_INST_IS_DEAD|MONO_INST_VOLATILE|MONO_INST_INDIRECT)) ||
				(ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
			continue;

		if (mono_is_regsize_var (ins->inst_vtype)) {
			g_assert (MONO_VARINFO (cfg, i)->reg == -1);
			g_assert (i == vmv->idx);
			vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
		}
	}

	return vars;
}
mono_arch_get_global_int_regs (MonoCompile *cfg)
{
	/* ... */

	/* r3 is sometimes used as our frame pointer, so don't allocate it
	 * r19 is the GOT pointer, don't allocate it either
	 */
	for (i = 4; i <= 18; i++)
		regs = g_list_prepend (regs, GUINT_TO_POINTER (i));

	return regs;
}
/*
 * mono_arch_regalloc_cost:
 *
 * Return the cost, in number of memory references, of the action of
 * allocating the variable VMV into a register during global register
 * allocation.
 */
mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
{
	/* ... */
}
/*
 * Set var information according to the calling convention.
 * The locals var stuff should most likely be split in another method.
 *
 * updates m->stack_offset based on the amount of stack space needed for
 * local vars
 */
mono_arch_allocate_vars (MonoCompile *m)
{
	MonoMethodSignature *sig;
	MonoMethodHeader *header;
	int i, offset, size, align, curinst;
	/* ... */

	m->flags |= MONO_CFG_HAS_SPILLUP;

	header = mono_method_get_header (m->method);

	sig = mono_method_signature (m->method);
	DEBUG (printf ("Allocating locals - incoming params:\n"));
	cinfo = get_call_info (sig, FALSE);

	/* We use the ABI calling conventions for managed code as well. */
	if (m->flags & MONO_CFG_HAS_ALLOCA) {
		/* ... */
		m->used_int_regs |= 1 << hppa_r4;
	}

	/* Before this function is called, we would have looked at all
	 * calls from this method and figured out how much space is needed
	 * for the param area.
	 *
	 * Locals are allocated backwards, right before the param area
	 */
	/* TODO: in some cases we don't need the frame pointer... */
	m->frame_reg = hppa_r3;
	offset = m->param_area;

	/* Return values can be passed back in one of four ways:
	 * r28 is used for data <= 4 bytes (32-bit ABI)
	 * r28/r29 are used for data >4 && <= 8 bytes
	 * fr4 is used for floating point data
	 * data larger than 8 bytes is returned on the stack pointed to
	 * by r28
	 *
	 * This code needs to be in sync with how CEE_RET is handled
	 * in mono_method_to_ir (). In some cases when we return small
	 * structs, the ABI specifies that they should be returned in
	 * registers, but the code in mono_method_to_ir () always emits
	 * a memcpy for valuetype returns, so we need to make sure we
	 * allocate space on the stack for this copy.
	 */
	if (cinfo->struct_return) {
		/* this is used to stash the incoming r28 pointer */
		offset += sizeof (gpointer);
		m->ret->opcode = OP_REGOFFSET;
		m->ret->inst_basereg = stack_ptr;
		m->ret->inst_offset = -offset;
	} else if (sig->ret->type != MONO_TYPE_VOID) {
		m->ret->opcode = OP_REGVAR;
		m->ret->inst_c0 = cinfo->ret.reg;
	}

	curinst = m->locals_start;
	for (i = curinst; i < m->num_varinfo; ++i) {
		inst = m->varinfo [i];

		if (inst->opcode == OP_REGVAR) {
			DEBUG (printf ("allocating local %d to %s\n", i, mono_arch_regname (inst->dreg)));
			continue;
		}

		if (inst->flags & MONO_INST_IS_DEAD)
			continue;

		/* inst->backend.is_pinvoke indicates native sized value types, this is used by the
		 * pinvoke wrappers when they call functions returning structure */
		if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (inst->inst_vtype) && inst->inst_vtype->type != MONO_TYPE_TYPEDBYREF)
			size = mono_class_native_size (inst->inst_vtype->data.klass, &align);
		else
			size = mini_type_stack_size (cfg->generic_sharing_context, inst->inst_vtype, &align);

		/*
		 * This is needed since structures containing doubles must be doubleword
		 * aligned.
		 * FIXME: Do this only if needed.
		 */
		if (MONO_TYPE_ISSTRUCT (inst->inst_vtype))
			align = 8;

		/*
		 * variables are accessed as negative offsets from hppa_sp
		 */
		inst->opcode = OP_REGOFFSET;
		inst->inst_basereg = stack_ptr;
		offset += size;
		offset = ALIGN_TO (offset, align);
		inst->inst_offset = -offset;

		DEBUG (printf ("allocating local %d (size = %d) to [%s - %d]\n", i, size, mono_arch_regname (inst->inst_basereg), -inst->inst_offset));
	}

	if (sig->call_convention == MONO_CALL_VARARG) {
		/* ... */
	}

	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
		ArgInfo *ainfo = &cinfo->args [i];
		/* ... */
		if (inst->opcode != OP_REGVAR) {
			switch (ainfo->storage) {
			/* ... */
				/* Currently mono requests all incoming registers
				 * be assigned to a stack location :-(
				 */
				if (!(inst->flags & (MONO_INST_VOLATILE | MONO_INST_INDIRECT))) {
					inst->opcode = OP_REGVAR;
					inst->dreg = ainfo->reg;
					DEBUG (printf ("param %d in register %s\n", i, mono_arch_regname (inst->dreg)));
				}
				/* ... */
				inst->opcode = OP_REGOFFSET;
				inst->inst_basereg = hppa_r3;
				inst->inst_offset = ainfo->offset;
				DEBUG (printf ("param %d stored on stack [%s - %d]\n", i, mono_arch_regname (hppa_r3), -inst->inst_offset));
				break;
			}
		}
	}

	m->stack_offset = offset; /* Includes cfg->param_area */
}
/*
 * take the arguments and generate the arch-specific
 * instructions to properly call the function in call.
 * This includes pushing, moving arguments to the right register
 * ...
 * sets call->stack_usage and cfg->param_area
 */
mono_arch_call_opcode (MonoCompile *cfg, MonoBasicBlock* bb, MonoCallInst *call, int is_virtual)
{
	MonoMethodSignature *sig;
	/* ... */

	DEBUG (printf ("is_virtual = %d\n", is_virtual));

	sig = call->signature;
	n = sig->param_count + sig->hasthis;

	DEBUG (printf ("Calling method with %d parameters\n", n));

	cinfo = get_call_info (sig, sig->pinvoke);

	g_assert (sig->call_convention != MONO_CALL_VARARG);

	for (i = 0; i < n; ++i) {
		ainfo = &cinfo->args [i];

		if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
			/* ... */
		}

		if (is_virtual && i == 0) {
			/* the argument will be attached to the call instruction */
			/* ... */
			call->used_iregs |= 1 << ainfo->reg;
		} else {
			MONO_INST_NEW (cfg, arg, OP_OUTARG);
			/* ... */
			arg->cil_code = in->cil_code;
			arg->inst_call = call;
			arg->type = in->type;

			/* prepend, we'll need to reverse them later */
			arg->next = call->out_args;
			call->out_args = arg;

			switch (ainfo->storage) {
			case ArgInIReg:
			case ArgInIRegPair: {
				MonoHPPAArgInfo *ai = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoHPPAArgInfo));
				ai->reg = ainfo->reg;
				ai->size = ainfo->size;
				ai->offset = ainfo->offset;
				arg->backend.data = ai;

				call->used_iregs |= 1 << ainfo->reg;
				if (ainfo->storage == ArgInIRegPair)
					call->used_iregs |= 1 << (ainfo->reg + 1);
				if (ainfo->type == MONO_TYPE_VALUETYPE)
					arg->opcode = OP_OUTARG_VT;
				break;
			}
			case ArgOnStack: {
				MonoHPPAArgInfo *ai = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoHPPAArgInfo));
				/* ... */
				ai->size = ainfo->size;
				ai->offset = ainfo->offset;
				arg->backend.data = ai;
				if (ainfo->type == MONO_TYPE_VALUETYPE)
					arg->opcode = OP_OUTARG_VT;
				else
					arg->opcode = OP_OUTARG_MEMBASE;
				call->used_iregs |= 1 << ainfo->reg;
				break;
			}
			case ArgInFReg:
				arg->backend.reg3 = ainfo->reg;
				arg->opcode = OP_OUTARG_R4;
				call->used_fregs |= 1 << ainfo->reg;
				break;
			case ArgInDReg:
				arg->backend.reg3 = ainfo->reg;
				arg->opcode = OP_OUTARG_R8;
				call->used_fregs |= 1 << ainfo->reg;
				break;
			/* ... */
			}
		}
	}

	/*
	 * Reverse the call->out_args list.
	 */
	{
		MonoInst *prev = NULL, *list = call->out_args, *next;
		/* ... */
		call->out_args = prev;
	}

	call->stack_usage = cinfo->stack_usage;
	cfg->param_area = MAX (cfg->param_area, call->stack_usage);
	cfg->param_area = ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT);

	cfg->flags |= MONO_CFG_HAS_CALLS;

	/* ... */
	return call;
}
mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
{
	/* ... */
}

mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
{
	/* ... */
}

insert_after_ins (MonoBasicBlock *bb, MonoInst *ins, MonoInst *to_insert)
{
	if (ins == NULL) {
		bb->code = to_insert;
		to_insert->next = ins;
	} else {
		to_insert->next = ins->next;
		ins->next = to_insert;
	}
}

#define NEW_INS(cfg,dest,op) do {					\
	(dest) = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoInst)); \
	(dest)->opcode = (op);						\
	insert_after_ins (bb, last_ins, (dest));			\
} while (0)
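/* NEW_INS allocates a zeroed MonoInst from the compile-time mempool, sets its
 * opcode and links it into the current basic block right after last_ins via
 * insert_after_ins (); the lowering pass below uses it to materialize
 * immediates that do not fit in an instruction into OP_ICONST temporaries. */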
map_to_reg_reg_op (int op)
{
	switch (op) {
	/* ... */
	case OP_LOAD_MEMBASE:
		return OP_LOAD_MEMINDEX;
	case OP_LOADI4_MEMBASE:
		return OP_LOADI4_MEMINDEX;
	case OP_LOADU4_MEMBASE:
		return OP_LOADU4_MEMINDEX;
	case OP_LOADU1_MEMBASE:
		return OP_LOADU1_MEMINDEX;
	case OP_LOADI2_MEMBASE:
		return OP_LOADI2_MEMINDEX;
	case OP_LOADU2_MEMBASE:
		return OP_LOADU2_MEMINDEX;
	case OP_LOADI1_MEMBASE:
		return OP_LOADI1_MEMINDEX;
	case OP_LOADR4_MEMBASE:
		return OP_LOADR4_MEMINDEX;
	case OP_LOADR8_MEMBASE:
		return OP_LOADR8_MEMINDEX;
	case OP_STOREI1_MEMBASE_REG:
		return OP_STOREI1_MEMINDEX;
	case OP_STOREI2_MEMBASE_REG:
		return OP_STOREI2_MEMINDEX;
	case OP_STOREI4_MEMBASE_REG:
		return OP_STOREI4_MEMINDEX;
	case OP_STORE_MEMBASE_REG:
		return OP_STORE_MEMINDEX;
	case OP_STORER4_MEMBASE_REG:
		return OP_STORER4_MEMINDEX;
	case OP_STORER8_MEMBASE_REG:
		return OP_STORER8_MEMINDEX;
	case OP_STORE_MEMBASE_IMM:
		return OP_STORE_MEMBASE_REG;
	case OP_STOREI1_MEMBASE_IMM:
		return OP_STOREI1_MEMBASE_REG;
	case OP_STOREI2_MEMBASE_IMM:
		return OP_STOREI2_MEMBASE_REG;
	case OP_STOREI4_MEMBASE_IMM:
		return OP_STOREI4_MEMBASE_REG;
	}
	g_assert_not_reached ();
}
/*
 * Remove from the instruction list the instructions that can't be
 * represented with very simple instructions with no register
 * requirements.
 */
mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
{
	MonoInst *ins, *next, *temp, *last_ins = NULL;
	/* ... */

	MONO_BB_FOR_EACH_INS (bb, ins) {
		switch (ins->opcode) {
		/* ... */
			if (!hppa_check_bits (ins->inst_imm, 11)) {
				NEW_INS (cfg, temp, OP_ICONST);
				temp->inst_c0 = ins->inst_imm;
				temp->dreg = mono_alloc_ireg (cfg);
				ins->sreg2 = temp->dreg;
				ins->opcode = map_to_reg_reg_op (ins->opcode);
			}
			break;
		/* ... */
			if (!hppa_check_bits (ins->inst_imm, 11)) {
				NEW_INS (cfg, temp, OP_ICONST);
				temp->inst_c0 = ins->inst_imm;
				temp->dreg = mono_alloc_ireg (cfg);
				ins->sreg2 = temp->dreg;
				ins->opcode = map_to_reg_reg_op (ins->opcode);
			}
			break;
		/* ... */
			if (ins->inst_imm == 1) {
				ins->opcode = OP_MOVE;
				break;
			}
			if (ins->inst_imm == 0) {
				ins->opcode = OP_ICONST;
				/* ... */
				break;
			}
			imm = mono_is_power_of_two (ins->inst_imm);
			if (imm > 0) {
				ins->opcode = OP_SHL_IMM;
				/* ... */
			}
			/* ... */
			{
				int tmp = mono_alloc_ireg (cfg);
				NEW_INS (cfg, temp, OP_ICONST);
				temp->inst_c0 = ins->inst_c0;
				temp->dreg = tmp;

				ins->opcode = CEE_MUL;
				ins->sreg2 = tmp;
				/* Need to rewrite the CEE_MUL too... */
			}
			break;
		/* ... */
		{
			int freg1 = mono_alloc_freg (cfg);
			int freg2 = mono_alloc_freg (cfg);

			NEW_INS (cfg, temp, OP_STORE_MEMBASE_REG);
			temp->sreg1 = ins->sreg1;
			temp->inst_destbasereg = hppa_sp;
			temp->inst_offset = -16;

			NEW_INS (cfg, temp, OP_LOADR4_MEMBASE);
			temp->dreg = freg1;
			temp->inst_basereg = hppa_sp;
			temp->inst_offset = -16;

			NEW_INS (cfg, temp, OP_STORE_MEMBASE_REG);
			temp->sreg1 = ins->sreg2;
			temp->inst_destbasereg = hppa_sp;
			temp->inst_offset = -16;

			NEW_INS (cfg, temp, OP_LOADR4_MEMBASE);
			temp->dreg = freg2;
			temp->inst_basereg = hppa_sp;
			temp->inst_offset = -16;

			NEW_INS (cfg, temp, OP_HPPA_XMPYU);
			/* ... */

			NEW_INS (cfg, temp, OP_HPPA_STORER4_RIGHT);
			/* ... */
			temp->inst_destbasereg = hppa_sp;
			temp->inst_offset = -16;

			ins->opcode = OP_LOAD_MEMBASE;
			ins->inst_basereg = hppa_sp;
			ins->inst_offset = -16;
			break;
		}
		/* ... */
		}
		last_ins = ins;
	}
	bb->last_ins = last_ins;
	bb->max_vreg = cfg->next_vreg;
}
hppa_patch (guint32 *code, const gpointer target)
{
	gint32 val = (gint32)target;
	gint32 disp = (val - (gint32)code - 8) >> 2;
	DEBUG (printf ("patching 0x%08x (0x%08x) to point to 0x%08x (disp = %d)\n", code, ins, val, disp));

	switch (*code >> 26) {
	case 0x08: /* ldil, next insn can be a ldo, ldw, or ble */
		*code = *code & ~0x1fffff;
		*code = *code | hppa_op_imm21 (hppa_lsel (val));
		/* ... */
		if ((*code >> 26) == 0x0D) { /* ldo */
			*code = *code & ~0x3fff;
			*code = *code | hppa_op_imm14 (hppa_rsel (val));
		} else if ((*code >> 26) == 0x12) { /* ldw */
			*code = *code & ~0x3fff;
			*code = *code | hppa_op_imm14 (hppa_rsel (val));
		} else if ((*code >> 26) == 0x39) { /* ble */
			*code = *code & ~0x1f1ffd;
			*code = *code | hppa_op_imm17 (hppa_rsel (val));
		}
		/* ... */
		break;

	/* ... */
		if (!hppa_check_bits (disp, 17))
			/* ... */;
		reg1 = (*code >> 21) & 0x1f;
		*code = (*code & ~0x1f1ffd) | hppa_op_imm17(disp);
		break;

	case 0x20: /* combt */
	case 0x22: /* combf */
		if (!hppa_check_bits (disp >> 2, 12))
			/* ... */;
		*code = (*code & ~0x1ffd) | hppa_op_imm12(disp);
		break;

	default:
		g_warning ("Unpatched opcode %x\n", *code >> 26);
		break;
	}

	/* ... */
	g_warning ("cannot branch to target, insn is %08x, displacement is %d\n", (int)*code, (int)disp);
	g_assert_not_reached ();
}
emit_float_to_int (MonoCompile *cfg, guint32 *code, int dreg, int sreg, int size, gboolean is_signed)
{
	/* sreg is a float, dreg is an integer reg. */
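	/* The conversion bounces through memory: fcnvfxt truncates the double
	 * in sreg to a 32-bit integer (still in the FP register), fstws spills
	 * it to the scratch slot at sp-16, ldw reloads it into the integer
	 * dreg, and for 1- and 2-byte results the value is then zero-extended
	 * (extru) or sign-extended (extrs) according to is_signed. */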
	hppa_fcnvfxt (code, HPPA_FP_FMT_DBL, HPPA_FP_FMT_SGL, sreg, sreg);
	hppa_fstws (code, sreg, 0, -16, hppa_sp);
	hppa_ldw (code, -16, hppa_sp, dreg);
	if (!is_signed) {
		if (size == 1)
			hppa_extru (code, dreg, 31, 8, dreg);
		else if (size == 2)
			hppa_extru (code, dreg, 31, 16, dreg);
	} else {
		if (size == 1)
			hppa_extrs (code, dreg, 31, 8, dreg);
		else if (size == 2)
			hppa_extrs (code, dreg, 31, 16, dreg);
	}

	return code;
}
/* Clobbers r1, r20, r21 */
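/* emit_memcpy copies "size" bytes from soff(sreg) to doff(dreg): r20 and r21
 * are set up as destination and source cursors, r1 is the data scratch, and
 * the copy proceeds in word (ldw/stw), then halfword (ldh/sth), then byte
 * (ldb/stb) sized chunks. */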
emit_memcpy (guint32 *code, int doff, int dreg, int soff, int sreg, int size)
{
	/* r20 is the destination */
	hppa_set (code, doff, hppa_r20);
	hppa_add (code, hppa_r20, dreg, hppa_r20);

	/* r21 is the source */
	hppa_set (code, soff, hppa_r21);
	hppa_add (code, hppa_r21, sreg, hppa_r21);

	while (size >= 4) {
		hppa_ldw (code, 0, hppa_r21, hppa_r1);
		hppa_stw (code, hppa_r1, 0, hppa_r20);
		hppa_ldo (code, 4, hppa_r21, hppa_r21);
		hppa_ldo (code, 4, hppa_r20, hppa_r20);
		size -= 4;
	}
	while (size >= 2) {
		hppa_ldh (code, 0, hppa_r21, hppa_r1);
		hppa_sth (code, hppa_r1, 0, hppa_r20);
		hppa_ldo (code, 2, hppa_r21, hppa_r21);
		hppa_ldo (code, 2, hppa_r20, hppa_r20);
		size -= 2;
	}
	while (size > 0) {
		hppa_ldb (code, 0, hppa_r21, hppa_r1);
		hppa_stb (code, hppa_r1, 0, hppa_r20);
		hppa_ldo (code, 1, hppa_r21, hppa_r21);
		hppa_ldo (code, 1, hppa_r20, hppa_r20);
		size -= 1;
	}

	return code;
}
/*
 * mono_arch_get_vcall_slot_addr:
 *
 * Determine the vtable slot used by a virtual call.
 */
mono_arch_get_vcall_slot_addr (guint8 *code8, gpointer *regs)
{
	guint32 *code = (guint32 *)((unsigned long)code8 & ~3);
	/* ... */

	/* This is the special virtual call token */
	if (code [-1] != 0x34000eee) /* ldo 0x777(r0),r0 */
		return NULL;

	if ((code [0] >> 26) == 0x39 &&		/* ble */
	    (code [-2] >> 26) == 0x12) {	/* ldw */
		guint32 ldw = code [-2];
		guint32 reg = (ldw >> 21) & 0x1f;
		gint32 disp = ((ldw & 1) ? (-1 << 13) : 0) | ((ldw & 0x3fff) >> 1);
		/* FIXME: we are not guaranteed that reg is saved in the LMF.
		 * In fact, it probably isn't, since it is allocated as a
		 * callee register. Right now just return an address; this
		 * is sufficient for non-AOT operation
		 */
		// return (gpointer)((guint8*)regs [reg] + disp);
		/* ... */
	}

	g_assert_not_reached ();
}
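/* The magic word 0x34000eee checked above is the encoding of
 * "ldo 0x777(r0),r0", an effective no-op marker emitted right before the ble
 * in the OP_*CALL_MEMBASE cases of mono_arch_output_basic_block (); it is
 * what identifies the call site as a virtual call. */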
1145 /* ins->dreg = *(ins->inst_desgbasereg + ins->inst_offset) */
1146 #define EMIT_LOAD_MEMBASE(ins, op) do { \
1147 if (!hppa_check_bits (ins->inst_offset, 14)) { \
1148 hppa_set (code, ins->inst_offset, hppa_r1); \
1149 hppa_ ## op ## x (code, hppa_r1, ins->inst_basereg, ins->dreg); \
1152 hppa_ ## op (code, ins->inst_offset, ins->inst_basereg, ins->dreg); \
#define EMIT_COND_BRANCH_FLAGS(ins,r1,r2,b0,b1) do {	\
	if (ins->flags & MONO_INST_BRLABEL) {		\
		mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_LABEL, ins->inst_i0); \
		if (b0)					\
			hppa_combt (code, r1, r2, b1, 0); \
		else					\
			hppa_combf (code, r1, r2, b1, 0); \
	} else {					\
		if (b0)					\
			hppa_combf (code, r1, r2, b1, 2); \
		else					\
			hppa_combt (code, r1, r2, b1, 2); \
		mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
		hppa_bl (code, 0, hppa_r0);		\
		/* ... */				\
	}						\
} while (0)

#define EMIT_COND_BRANCH(ins,r1,r2,cond) EMIT_COND_BRANCH_FLAGS(ins, r1, r2, branch_b0_table [(cond)], branch_b1_table [(cond)])
#define EMIT_FLOAT_COND_BRANCH_FLAGS(ins,r1,r2,b0) do {	\
	hppa_fcmp (code, HPPA_FP_FMT_DBL, b0, r1, r2);	\
	hppa_ftest (code, 0);				\
	if (ins->flags & MONO_INST_BRLABEL)		\
		mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_LABEL, ins->inst_i0); \
	else						\
		mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
	hppa_bl (code, 8, hppa_r0);			\
	/* ... */					\
} while (0)

#define EMIT_FLOAT_COND_BRANCH(ins,r1,r2,cond) EMIT_FLOAT_COND_BRANCH_FLAGS(ins, r1, r2, float_branch_table [cond])
#define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(r1,r2,b0,b1,exc_name) \
do {							\
	MonoOvfJump *ovfj = mono_mempool_alloc (cfg->mempool, sizeof (MonoOvfJump)); \
	ovfj->data.exception = (exc_name);		\
	ovfj->ip_offset = (guint8*)code - cfg->native_code; \
	hppa_bl (code, 8, hppa_r2);			\
	hppa_depi (code, 0, 31, 2, hppa_r2);		\
	hppa_ldo (code, 8, hppa_r2, hppa_r2);		\
	if (b0)						\
		hppa_combf (code, r1, r2, b1, 2);	\
	else						\
		hppa_combt (code, r1, r2, b1, 2);	\
	/* ... */					\
	mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_EXC_OVF, ovfj); \
	hppa_bl (code, 0, hppa_r0);			\
	/* ... */					\
} while (0)

#define EMIT_COND_SYSTEM_EXCEPTION(r1,r2,cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(r1, r2, branch_b0_table [(cond)], branch_b1_table [(cond)], (exc_name))
/* TODO: MEM_INDEX_REG - cannot be r1 */
#define MEM_INDEX_REG hppa_r31

/* *(ins->inst_destbasereg + ins->inst_offset) = ins->inst_imm */
#define EMIT_STORE_MEMBASE_IMM(ins, op) do {		\
	int sreg;					\
	if (ins->inst_imm == 0)				\
		sreg = hppa_r0;				\
	else {						\
		hppa_set (code, ins->inst_imm, hppa_r1); \
		sreg = hppa_r1;				\
	}						\
	if (!hppa_check_bits (ins->inst_offset, 14)) {	\
		hppa_set (code, ins->inst_offset, MEM_INDEX_REG); \
		hppa_addl (code, ins->inst_destbasereg, MEM_INDEX_REG, MEM_INDEX_REG); \
		hppa_ ## op (code, sreg, 0, MEM_INDEX_REG); \
	}						\
	else						\
		hppa_ ## op (code, sreg, ins->inst_offset, ins->inst_destbasereg); \
} while (0)

/* *(ins->inst_destbasereg + ins->inst_offset) = ins->sreg1 */
#define EMIT_STORE_MEMBASE_REG(ins, op) do {		\
	if (!hppa_check_bits (ins->inst_offset, 14)) {	\
		hppa_set (code, ins->inst_offset, MEM_INDEX_REG); \
		hppa_addl (code, ins->inst_destbasereg, MEM_INDEX_REG, MEM_INDEX_REG); \
		hppa_ ## op (code, ins->sreg1, 0, MEM_INDEX_REG); \
	}						\
	else						\
		hppa_ ## op (code, ins->sreg1, ins->inst_offset, ins->inst_destbasereg); \
} while (0)
1244 mono_arch_output_basic_block (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
1249 guint32
*code
= (guint32
*)(cfg
->native_code
+ cfg
->code_len
);
1250 MonoInst
*last_ins
= NULL
;
1256 if (cfg
->verbose_level
> 2)
1257 g_print ("[%s::%s] Basic block %d starting at offset 0x%x\n", cfg
->method
->klass
->name
, cfg
->method
->name
, bb
->block_num
, bb
->native_offset
);
1259 cpos
= bb
->max_offset
;
1261 if (cfg
->prof_options
& MONO_PROFILE_COVERAGE
) {
1265 MONO_BB_FOR_EACH_INS (bb
, ins
) {
1268 offset
= (guint8
*)code
- cfg
->native_code
;
1270 spec
= ins_get_spec (ins
->opcode
);
1272 max_len
= ((guint8
*)spec
) [MONO_INST_LEN
];
1274 if (offset
> (cfg
->code_size
- max_len
- 16)) {
1275 cfg
->code_size
*= 2;
1276 cfg
->native_code
= g_realloc (cfg
->native_code
, cfg
->code_size
);
1277 code
= (guint32
*)(cfg
->native_code
+ offset
);
1278 mono_jit_stats
.code_reallocs
++;
1280 code_start
= (guint8
*)code
;
1281 // if (ins->cil_code)
1282 // g_print ("cil code\n");
1283 mono_debug_record_line_number (cfg
, ins
, offset
);
1285 switch (ins
->opcode
) {
1286 case OP_RELAXED_NOP
:
1288 case OP_STOREI1_MEMBASE_IMM
:
1289 EMIT_STORE_MEMBASE_IMM (ins
, stb
);
1291 case OP_STOREI2_MEMBASE_IMM
:
1292 EMIT_STORE_MEMBASE_IMM (ins
, sth
);
1294 case OP_STORE_MEMBASE_IMM
:
1295 case OP_STOREI4_MEMBASE_IMM
:
1296 EMIT_STORE_MEMBASE_IMM (ins
, stw
);
1298 case OP_STOREI1_MEMBASE_REG
:
1299 EMIT_STORE_MEMBASE_REG (ins
, stb
);
1301 case OP_STOREI2_MEMBASE_REG
:
1302 EMIT_STORE_MEMBASE_REG (ins
, sth
);
1304 case OP_STORE_MEMBASE_REG
:
1305 case OP_STOREI4_MEMBASE_REG
:
1306 EMIT_STORE_MEMBASE_REG (ins
, stw
);
1308 case OP_LOADU1_MEMBASE
:
1309 EMIT_LOAD_MEMBASE (ins
, ldb
);
1311 case OP_LOADI1_MEMBASE
:
1312 EMIT_LOAD_MEMBASE (ins
, ldb
);
1313 hppa_extrs (code
, ins
->dreg
, 31, 8, ins
->dreg
);
1315 case OP_LOADU2_MEMBASE
:
1316 EMIT_LOAD_MEMBASE (ins
, ldh
);
1318 case OP_LOADI2_MEMBASE
:
1319 EMIT_LOAD_MEMBASE (ins
, ldh
);
1320 hppa_extrs (code
, ins
->dreg
, 31, 16, ins
->dreg
);
1322 case OP_LOAD_MEMBASE
:
1323 case OP_LOADI4_MEMBASE
:
1324 case OP_LOADU4_MEMBASE
:
1325 EMIT_LOAD_MEMBASE (ins
, ldw
);
1328 hppa_extrs (code
, ins
->sreg1
, 31, 8, ins
->dreg
);
1331 hppa_extrs (code
, ins
->sreg1
, 31, 16, ins
->dreg
);
1334 hppa_extru (code
, ins
->sreg1
, 31, 8, ins
->dreg
);
1337 hppa_extru (code
, ins
->sreg1
, 31, 16, ins
->dreg
);
1343 if (ins
->sreg1
!= ins
->dreg
)
1344 hppa_copy (code
, ins
->sreg1
, ins
->dreg
);
1347 hppa_copy (code
, ins
->sreg1
+ 1, ins
->dreg
);
1348 hppa_copy (code
, ins
->sreg1
, ins
->dreg
+ 1);
1352 /* break 4,8 - this is what gdb normally uses... */
1353 *code
++ = 0x00010004;
1357 hppa_add (code
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
1360 hppa_addc (code
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
1364 hppa_addi (code
, ins
->inst_imm
, ins
->sreg1
, ins
->dreg
);
1367 hppa_set (code
, ins
->inst_imm
, hppa_r1
);
1368 hppa_addc (code
, ins
->sreg1
, hppa_r1
, ins
->dreg
);
1370 case OP_HPPA_ADD_OVF
: {
1371 MonoOvfJump
*ovfj
= mono_mempool_alloc (cfg
->mempool
, sizeof (MonoOvfJump
));
1372 hppa_bl (code
, 8, hppa_r2
);
1373 hppa_depi (code
, 0, 31, 2, hppa_r2
);
1374 hppa_ldo (code
, 12, hppa_r2
, hppa_r2
);
1376 if (ins
->backend
.reg3
== CEE_ADD_OVF
)
1377 hppa_add_cond (code
, HPPA_ADD_COND_NSV
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
1379 hppa_add_cond (code
, HPPA_ADD_COND_NUV
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
1381 ovfj
->data
.exception
= "OverflowException";
1382 ovfj
->ip_offset
= (guint8
*)code
- cfg
->native_code
;
1383 mono_add_patch_info (cfg
, (guint8
*)code
- cfg
->native_code
, MONO_PATCH_INFO_EXC_OVF
, ovfj
);
1384 hppa_bl_n (code
, 8, hppa_r0
);
1387 case OP_HPPA_ADDC_OVF
: {
1388 MonoOvfJump
*ovfj
= mono_mempool_alloc (cfg
->mempool
, sizeof (MonoOvfJump
));
1389 hppa_bl (code
, 8, hppa_r2
);
1390 hppa_depi (code
, 0, 31, 2, hppa_r2
);
1391 hppa_ldo (code
, 12, hppa_r2
, hppa_r2
);
1393 if (ins
->backend
.reg3
== OP_LADD_OVF
)
1394 hppa_addc_cond (code
, HPPA_ADD_COND_NSV
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
1396 hppa_addc_cond (code
, HPPA_ADD_COND_NUV
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
1398 ovfj
->data
.exception
= "OverflowException";
1399 ovfj
->ip_offset
= (guint8
*)code
- cfg
->native_code
;
1400 mono_add_patch_info (cfg
, (guint8
*)code
- cfg
->native_code
, MONO_PATCH_INFO_EXC_OVF
, ovfj
);
1401 hppa_bl_n (code
, 8, hppa_r0
);
1406 hppa_sub (code
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
1410 hppa_addi (code
, -ins
->inst_imm
, ins
->sreg1
, ins
->dreg
);
1413 hppa_subb (code
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
1416 hppa_set (code
, ins
->inst_imm
, hppa_r1
);
1417 hppa_subb (code
, ins
->sreg1
, hppa_r1
, ins
->dreg
);
1419 case OP_HPPA_SUB_OVF
: {
1420 MonoOvfJump
*ovfj
= mono_mempool_alloc (cfg
->mempool
, sizeof (MonoOvfJump
));
1421 hppa_bl (code
, 8, hppa_r2
);
1422 hppa_depi (code
, 0, 31, 2, hppa_r2
);
1423 hppa_ldo (code
, 12, hppa_r2
, hppa_r2
);
1424 hppa_sub_cond (code
, HPPA_SUB_COND_NSV
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
1425 ovfj
->data
.exception
= "OverflowException";
1426 ovfj
->ip_offset
= (guint8
*)code
- cfg
->native_code
;
1427 mono_add_patch_info (cfg
, (guint8
*)code
- cfg
->native_code
, MONO_PATCH_INFO_EXC_OVF
, ovfj
);
1428 hppa_bl_n (code
, 8, hppa_r0
);
1431 case OP_HPPA_SUBB_OVF
: {
1432 MonoOvfJump
*ovfj
= mono_mempool_alloc (cfg
->mempool
, sizeof (MonoOvfJump
));
1433 hppa_bl (code
, 8, hppa_r2
);
1434 hppa_depi (code
, 0, 31, 2, hppa_r2
);
1435 hppa_ldo (code
, 12, hppa_r2
, hppa_r2
);
1437 hppa_subb_cond (code
, HPPA_SUB_COND_NSV
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
1438 ovfj
->data
.exception
= "OverflowException";
1439 ovfj
->ip_offset
= (guint8
*)code
- cfg
->native_code
;
1440 mono_add_patch_info (cfg
, (guint8
*)code
- cfg
->native_code
, MONO_PATCH_INFO_EXC_OVF
, ovfj
);
1441 hppa_bl_n (code
, 8, hppa_r0
);
1446 hppa_and (code
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
1449 hppa_set (code
, ins
->inst_imm
, hppa_r1
);
1450 hppa_and (code
, ins
->sreg1
, hppa_r1
, ins
->dreg
);
1454 hppa_or (code
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
1458 hppa_set (code
, ins
->inst_imm
, hppa_r1
);
1459 hppa_or (code
, ins
->sreg1
, hppa_r1
, ins
->dreg
);
1463 hppa_xor (code
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
1466 hppa_set (code
, ins
->inst_imm
, hppa_r1
);
1467 hppa_xor (code
, ins
->sreg1
, hppa_r1
, ins
->dreg
);
1470 if (ins
->sreg1
!= ins
->dreg
) {
1471 hppa_shl (code
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
1474 hppa_copy (code
, ins
->sreg1
, hppa_r1
);
1475 hppa_shl (code
, hppa_r1
, ins
->sreg2
, ins
->dreg
);
1480 g_assert (ins
->inst_imm
< 32);
1481 if (ins
->sreg1
!= ins
->dreg
) {
1482 hppa_zdep (code
, ins
->sreg1
, 31-ins
->inst_imm
, 32-ins
->inst_imm
, ins
->dreg
);
1485 hppa_copy (code
, ins
->sreg1
, hppa_r1
);
1486 hppa_zdep (code
, hppa_r1
, 31-ins
->inst_imm
, 32-ins
->inst_imm
, ins
->dreg
);
1490 if (ins
->sreg1
!= ins
->dreg
) {
1491 hppa_shr (code
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
1494 hppa_copy (code
, ins
->sreg1
, hppa_r1
);
1495 hppa_shr (code
, hppa_r1
, ins
->sreg2
, ins
->dreg
);
1499 g_assert (ins
->inst_imm
< 32);
1500 if (ins
->sreg1
!= ins
->dreg
) {
1501 hppa_extrs (code
, ins
->sreg1
, 31-ins
->inst_imm
, 32-ins
->inst_imm
, ins
->dreg
);
1504 hppa_copy (code
, ins
->sreg1
, hppa_r1
);
1505 hppa_extrs (code
, hppa_r1
, 31-ins
->inst_imm
, 32-ins
->inst_imm
, ins
->dreg
);
1509 g_assert (ins
->inst_imm
< 32);
1510 if (ins
->sreg1
!= ins
->dreg
) {
1511 hppa_extru (code
, ins
->sreg1
, 31-ins
->inst_imm
, 32-ins
->inst_imm
, ins
->dreg
);
1514 hppa_copy (code
, ins
->sreg1
, hppa_r1
);
1515 hppa_extru (code
, hppa_r1
, 31-ins
->inst_imm
, 32-ins
->inst_imm
, ins
->dreg
);
1519 if (ins
->sreg1
!= ins
->dreg
) {
1520 hppa_lshr (code
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
1523 hppa_copy (code
, ins
->sreg1
, hppa_r1
);
1524 hppa_lshr (code
, hppa_r1
, ins
->sreg2
, ins
->dreg
);
1528 hppa_not (code
, ins
->sreg1
, ins
->dreg
);
1531 hppa_subi (code
, 0, ins
->sreg1
, ins
->dreg
);
1536 /* Should have been rewritten using xmpyu */
1537 g_assert_not_reached ();
			if ((ins->inst_c0 > 0 && ins->inst_c0 >= (1 << 13)) ||
					(ins->inst_c0 < 0 && ins->inst_c0 < -(1 << 13))) {
				hppa_ldil (code, hppa_lsel (ins->inst_c0), ins->dreg);
				hppa_ldo (code, hppa_rsel (ins->inst_c0), ins->dreg, ins->dreg);
			} else
				hppa_ldo (code, ins->inst_c0, hppa_r0, ins->dreg);
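			/* Constants that do not fit in the signed 14-bit immediate of
			 * ldo (checked against 1 << 13 above) are built in two
			 * instructions: ldil deposits the high ("left", hppa_lsel) part
			 * and ldo adds the low ("right", hppa_rsel) part; small constants
			 * need only a single ldo relative to r0. */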
1549 g_assert_not_reached ();
1551 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
1552 hppa_set_template (code, ins->dreg);
1554 g_warning ("unimplemented opcode %s in %s()\n", mono_inst_name (ins
->opcode
), __FUNCTION__
);
1558 if (ins
->sreg1
!= ins
->dreg
)
1559 hppa_fcpy (code
, HPPA_FP_FMT_DBL
, ins
->sreg1
, ins
->dreg
);
1562 case OP_HPPA_OUTARG_R4CONST
:
1563 hppa_set (code
, (unsigned int)ins
->inst_p0
, hppa_r1
);
1564 hppa_fldwx (code
, hppa_r0
, hppa_r1
, ins
->dreg
, 0);
1567 case OP_HPPA_OUTARG_REGOFFSET
:
1568 hppa_ldo (code
, ins
->inst_offset
, ins
->inst_basereg
, ins
->dreg
);
1573 * Keep in sync with mono_arch_emit_epilog
1575 g_assert (!cfg
->method
->save_lmf
);
1576 mono_add_patch_info (cfg
, (guint8
*) code
- cfg
->native_code
, MONO_PATCH_INFO_METHOD_JUMP
, ins
->inst_p0
);
1577 hppa_bl (code
, 8, hppa_r0
);
1580 /* ensure ins->sreg1 is not NULL */
1581 hppa_ldw (code
, 0, ins
->sreg1
, hppa_r1
);
1590 call
= (MonoCallInst
*)ins
;
1591 if (ins
->flags
& MONO_INST_HAS_METHOD
)
1592 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_METHOD
, call
->method
);
1594 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_ABS
, call
->fptr
);
1595 hppa_ldil (code
, 0, hppa_r1
);
1596 hppa_ldo (code
, 0, hppa_r1
, hppa_r1
);
1598 * We may have loaded an actual function address, or
1599 * it might be a plabel. Check to see if the plabel
1600 * bit is set, and load the actual fptr from it if
1603 hppa_bb_n (code
, HPPA_BIT_COND_MSB_CLR
, hppa_r1
, 30, 2);
1604 hppa_depi (code
, 0, 31, 2, hppa_r1
);
1605 hppa_ldw (code
, 4, hppa_r1
, hppa_r19
);
1606 hppa_ldw (code
, 0, hppa_r1
, hppa_r1
);
1607 hppa_ble (code
, 0, hppa_r1
);
1608 hppa_copy (code
, hppa_r31
, hppa_r2
);
1609 if (call
->signature
->ret
->type
== MONO_TYPE_R4
)
1610 hppa_fcnvff (code
, HPPA_FP_FMT_SGL
, HPPA_FP_FMT_DBL
, hppa_fr4
, hppa_fr4
);
1615 case OP_VOIDCALL_REG
:
1617 call
= (MonoCallInst
*)ins
;
1618 g_assert (!call
->virtual);
1619 hppa_copy (code
, ins
->sreg1
, hppa_r1
);
1620 hppa_bb_n (code
, HPPA_BIT_COND_MSB_CLR
, hppa_r1
, 30, 2);
1621 hppa_depi (code
, 0, 31, 2, hppa_r1
);
1622 hppa_ldw (code
, 4, hppa_r1
, hppa_r19
);
1623 hppa_ldw (code
, 0, hppa_r1
, hppa_r1
);
1624 hppa_ble (code
, 0, hppa_r1
);
1625 hppa_copy (code
, hppa_r31
, hppa_r2
);
1626 if (call
->signature
->ret
->type
== MONO_TYPE_R4
)
1627 hppa_fcnvff (code
, HPPA_FP_FMT_SGL
, HPPA_FP_FMT_DBL
, hppa_fr4
, hppa_fr4
);
1629 case OP_FCALL_MEMBASE
:
1630 case OP_LCALL_MEMBASE
:
1631 case OP_VCALL_MEMBASE
:
1632 case OP_VOIDCALL_MEMBASE
:
1633 case OP_CALL_MEMBASE
:
1634 call
= (MonoCallInst
*)ins
;
1635 /* jump to ins->inst_sreg1 + ins->inst_offset */
1636 hppa_ldw (code
, ins
->inst_offset
, ins
->sreg1
, hppa_r1
);
1638 /* For virtual calls, emit a special token that can
1639 * be used by get_vcall_slot_addr
1642 hppa_ldo (code
, 0x777, hppa_r0
, hppa_r0
);
1643 hppa_ble (code
, 0, hppa_r1
);
1644 hppa_copy (code
, hppa_r31
, hppa_r2
);
1649 /* Keep alignment */
1650 hppa_ldo (code
, MONO_ARCH_LOCALLOC_ALIGNMENT
- 1, ins
->sreg1
, ins
->dreg
);
1651 hppa_depi (code
, 0, 31, 6, ins
->dreg
);
1652 hppa_copy (code
, hppa_sp
, hppa_r1
);
1653 hppa_addl (code
, ins
->dreg
, hppa_sp
, hppa_sp
);
1654 hppa_copy (code
, hppa_r1
, ins
->dreg
);
1656 if (ins
->flags
& MONO_INST_INIT
) {
1657 hppa_stw (code
, hppa_r0
, 0, hppa_r1
);
1658 hppa_combt (code
, hppa_r1
, hppa_sp
, HPPA_CMP_COND_ULT
, -3);
1659 hppa_ldo (code
, 4, hppa_r1
, hppa_r1
);
1665 hppa_copy (code
, ins
->sreg1
, hppa_r26
);
1666 mono_add_patch_info (cfg
, (guint8
*)code
- cfg
->native_code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
1667 (gpointer
)"mono_arch_throw_exception");
1668 hppa_ldil (code
, 0, hppa_r1
);
1669 hppa_ldo (code
, 0, hppa_r1
, hppa_r1
);
1670 hppa_ble (code
, 0, hppa_r1
);
1671 hppa_copy (code
, hppa_r31
, hppa_r2
);
1672 /* should never return */
1673 *code
++ = 0xffeeddcc;
1676 hppa_copy (code
, ins
->sreg1
, hppa_r26
);
1677 mono_add_patch_info (cfg
, (guint8
*)code
- cfg
->native_code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
1678 (gpointer
)"mono_arch_rethrow_exception");
1679 hppa_ldil (code
, 0, hppa_r1
);
1680 hppa_ldo (code
, 0, hppa_r1
, hppa_r1
);
1681 hppa_ble (code
, 0, hppa_r1
);
1682 hppa_copy (code
, hppa_r31
, hppa_r2
);
1683 /* should never return */
1684 *code
++ = 0xffeeddcc;
1686 case OP_START_HANDLER
:
1687 if (hppa_check_bits (ins
->inst_left
->inst_offset
, 14))
1688 hppa_stw (code
, hppa_r2
, ins
->inst_left
->inst_offset
, ins
->inst_left
->inst_basereg
);
1690 hppa_set (code
, ins
->inst_left
->inst_offset
, hppa_r1
);
1691 hppa_addl (code
, ins
->inst_left
->inst_basereg
, hppa_r1
, hppa_r1
);
1692 hppa_stw (code
, hppa_r2
, 0, hppa_r1
);
1696 if (ins
->sreg1
!= hppa_r26
)
1697 hppa_copy (code
, ins
->sreg1
, hppa_r26
);
1698 if (hppa_check_bits (ins
->inst_left
->inst_offset
, 14))
1699 hppa_ldw (code
, ins
->inst_left
->inst_offset
, ins
->inst_left
->inst_basereg
, hppa_r2
);
1701 hppa_set (code
, ins
->inst_left
->inst_offset
, hppa_r1
);
1702 hppa_ldwx (code
, hppa_r1
, ins
->inst_left
->inst_basereg
, hppa_r2
);
1704 hppa_bv (code
, hppa_r0
, hppa_r2
);
1708 if (hppa_check_bits (ins
->inst_left
->inst_offset
, 14))
1709 hppa_ldw (code
, ins
->inst_left
->inst_offset
, ins
->inst_left
->inst_basereg
, hppa_r1
);
1711 hppa_set (code
, ins
->inst_left
->inst_offset
, hppa_r1
);
1712 hppa_ldwx (code
, hppa_r1
, ins
->inst_left
->inst_basereg
, hppa_r1
);
1714 hppa_bv (code
, hppa_r0
, hppa_r1
);
1717 case OP_CALL_HANDLER
:
1718 mono_add_patch_info (cfg
, (guint8
*)code
- cfg
->native_code
, MONO_PATCH_INFO_BB
, ins
->inst_target_bb
);
1719 hppa_bl (code
, 0, hppa_r2
);
1723 ins
->inst_c0
= (guint8
*)code
- cfg
->native_code
;
1727 DEBUG (printf ("target: %p, next: %p, curr: %p, last: %p\n", ins
->inst_target_bb
, bb
->next_bb
, ins
, bb
->last_ins
));
1728 if (ins
->flags
& MONO_INST_BRLABEL
) {
1729 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_LABEL
, ins
->inst_i0
);
1731 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_BB
, ins
->inst_target_bb
);
1733 hppa_bl (code
, 8, hppa_r0
);
1734 /* TODO: if the branch is too long, we may need to
1735 * use a long-branch sequence:
1736 * hppa_ldil (code, 0, hppa_r1);
1737 * hppa_ldo (code, 0, hppa_r1, hppa_r1);
1738 * hppa_bv (code, hppa_r0, hppa_r1);
1744 hppa_bv (code
, hppa_r0
, ins
->sreg1
);
1751 max_len
+= 8 * GPOINTER_TO_INT (ins
->klass
);
1752 if (offset
> (cfg
->code_size
- max_len
- 16)) {
1753 cfg
->code_size
+= max_len
;
1754 cfg
->code_size
*= 2;
1755 cfg
->native_code
= g_realloc (cfg
->native_code
, cfg
->code_size
);
1756 code
= cfg
->native_code
+ offset
;
1757 code_start
= (guint8
*)code
;
1759 hppa_blr (code
, ins
->sreg1
, hppa_r0
);
1761 for (i
= 0; i
< GPOINTER_TO_INT (ins
->klass
); ++i
) {
1762 *code
++ = 0xdeadbeef;
1763 *code
++ = 0xdeadbeef;
1768 /* comclr is cool :-) */
1770 hppa_comclr_cond (code
, HPPA_SUB_COND_NE
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
1771 hppa_ldo (code
, 1, hppa_r0
, ins
->dreg
);
1775 hppa_comclr_cond (code
, HPPA_SUB_COND_SGE
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
1776 hppa_ldo (code
, 1, hppa_r0
, ins
->dreg
);
1779 case OP_HPPA_CLT_UN
:
1780 hppa_comclr_cond (code
, HPPA_SUB_COND_UGE
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
1781 hppa_ldo (code
, 1, hppa_r0
, ins
->dreg
);
1785 hppa_comclr_cond (code
, HPPA_SUB_COND_SLE
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
1786 hppa_ldo (code
, 1, hppa_r0
, ins
->dreg
);
1789 case OP_HPPA_CGT_UN
:
1790 hppa_comclr_cond (code
, HPPA_SUB_COND_ULE
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
1791 hppa_ldo (code
, 1, hppa_r0
, ins
->dreg
);
1799 case OP_COND_EXC_EQ
:
1800 case OP_COND_EXC_NE_UN
:
1801 case OP_COND_EXC_LT
:
1802 case OP_COND_EXC_LT_UN
:
1803 case OP_COND_EXC_GT
:
1804 case OP_COND_EXC_GT_UN
:
1805 case OP_COND_EXC_GE
:
1806 case OP_COND_EXC_GE_UN
:
1807 case OP_COND_EXC_LE
:
1808 case OP_COND_EXC_LE_UN
:
1809 case OP_COND_EXC_OV
:
1810 case OP_COND_EXC_NO
:
1812 case OP_COND_EXC_NC
:
1813 case OP_COND_EXC_IOV
:
1814 case OP_COND_EXC_IC
:
1828 case OP_COMPARE_IMM
:
1829 case OP_ICOMPARE_IMM
:
1830 g_warning ("got opcode %s in %s(), should be reduced\n", mono_inst_name (ins
->opcode
), __FUNCTION__
);
1831 g_assert_not_reached ();
1837 case OP_HPPA_BLT_UN
:
1839 case OP_HPPA_BGT_UN
:
1841 case OP_HPPA_BGE_UN
:
1843 case OP_HPPA_BLE_UN
:
1844 EMIT_COND_BRANCH (ins
, ins
->sreg1
, ins
->sreg2
, ins
->opcode
- OP_HPPA_BEQ
);
1847 case OP_HPPA_COND_EXC_EQ
:
1848 case OP_HPPA_COND_EXC_GE
:
1849 case OP_HPPA_COND_EXC_GT
:
1850 case OP_HPPA_COND_EXC_LE
:
1851 case OP_HPPA_COND_EXC_LT
:
1852 case OP_HPPA_COND_EXC_NE_UN
:
1853 case OP_HPPA_COND_EXC_GE_UN
:
1854 case OP_HPPA_COND_EXC_GT_UN
:
1855 case OP_HPPA_COND_EXC_LE_UN
:
1856 case OP_HPPA_COND_EXC_LT_UN
:
1857 EMIT_COND_SYSTEM_EXCEPTION (ins
->sreg1
, ins
->sreg2
, ins
->opcode
- OP_HPPA_COND_EXC_EQ
, ins
->inst_p1
);
1860 case OP_HPPA_COND_EXC_OV
:
1861 case OP_HPPA_COND_EXC_NO
:
1862 case OP_HPPA_COND_EXC_C
:
1863 case OP_HPPA_COND_EXC_NC
:
1866 /* floating point opcodes */
1868 hppa_set (code
, (unsigned int)ins
->inst_p0
, hppa_r1
);
1869 hppa_flddx (code
, hppa_r0
, hppa_r1
, ins
->dreg
);
1872 hppa_set (code
, (unsigned int)ins
->inst_p0
, hppa_r1
);
1873 hppa_fldwx (code
, hppa_r0
, hppa_r1
, hppa_fr31
, 0);
1874 hppa_fcnvff (code
, HPPA_FP_FMT_SGL
, HPPA_FP_FMT_DBL
, hppa_fr31
, ins
->dreg
);
1876 case OP_STORER8_MEMBASE_REG
:
1877 hppa_set (code
, ins
->inst_offset
, hppa_r1
);
1878 hppa_fstdx (code
, ins
->sreg1
, hppa_r1
, ins
->inst_destbasereg
);
1880 case OP_LOADR8_MEMBASE
:
1881 hppa_set (code
, ins
->inst_offset
, hppa_r1
);
1882 hppa_flddx (code
, hppa_r1
, ins
->inst_basereg
, ins
->dreg
);
1884 case OP_STORER4_MEMBASE_REG
:
1885 hppa_fcnvff (code
, HPPA_FP_FMT_DBL
, HPPA_FP_FMT_SGL
, ins
->sreg1
, hppa_fr31
);
1886 if (hppa_check_bits (ins
->inst_offset
, 5)) {
1887 hppa_fstws (code
, hppa_fr31
, 0, ins
->inst_offset
, ins
->inst_destbasereg
);
1889 hppa_set (code
, ins
->inst_offset
, hppa_r1
);
1890 hppa_fstwx (code
, hppa_fr31
, 0, hppa_r1
, ins
->inst_destbasereg
);
1893 case OP_HPPA_STORER4_LEFT
:
1894 case OP_HPPA_STORER4_RIGHT
:
1895 if (hppa_check_bits (ins
->inst_offset
, 5)) {
1896 hppa_fstws (code
, ins
->sreg1
, (ins
->opcode
== OP_HPPA_STORER4_RIGHT
), ins
->inst_offset
, ins
->inst_destbasereg
);
1898 hppa_set (code
, ins
->inst_offset
, hppa_r1
);
1899 hppa_fstwx (code
, ins
->sreg1
, (ins
->opcode
== OP_HPPA_STORER4_RIGHT
), hppa_r1
, ins
->inst_destbasereg
);
1902 case OP_LOADR4_MEMBASE
:
1903 if (hppa_check_bits (ins
->inst_offset
, 5)) {
1904 hppa_fldws (code
, ins
->inst_offset
, ins
->inst_basereg
, hppa_fr31
, 0);
1906 hppa_set (code
, ins
->inst_offset
, hppa_r1
);
1907 hppa_fldwx (code
, hppa_r1
, ins
->inst_basereg
, hppa_fr31
, 0);
1909 hppa_fcnvff (code
, HPPA_FP_FMT_SGL
, HPPA_FP_FMT_DBL
, hppa_fr31
, ins
->dreg
);
1911 case OP_HPPA_LOADR4_LEFT
:
1912 case OP_HPPA_LOADR4_RIGHT
:
1913 if (hppa_check_bits (ins
->inst_offset
, 5)) {
1914 hppa_fldws (code
, ins
->inst_offset
, ins
->inst_basereg
, ins
->dreg
, (ins
->opcode
== OP_HPPA_LOADR4_RIGHT
));
1916 hppa_set (code
, ins
->inst_offset
, hppa_r1
);
1917 hppa_fldwx (code
, hppa_r1
, ins
->inst_basereg
, ins
->dreg
, (ins
->opcode
== OP_HPPA_LOADR4_RIGHT
));
1922 hppa_stw (code
, ins
->sreg1
, -16, hppa_sp
);
1923 hppa_fldws (code
, -16, hppa_sp
, hppa_fr31
, 0);
1924 hppa_fcnvxf (code
, HPPA_FP_FMT_SGL
, HPPA_FP_FMT_SGL
, hppa_fr31
, ins
->dreg
);
1925 hppa_fcnvff (code
, HPPA_FP_FMT_SGL
, HPPA_FP_FMT_DBL
, ins
->dreg
, ins
->dreg
);
1928 case OP_FCONV_TO_R4
:
1929 /* reduce precision */
1930 hppa_fcnvff (code
, HPPA_FP_FMT_DBL
, HPPA_FP_FMT_SGL
, ins
->sreg1
, ins
->dreg
);
1931 hppa_fcnvff (code
, HPPA_FP_FMT_SGL
, HPPA_FP_FMT_DBL
, ins
->dreg
, ins
->dreg
);
1934 case OP_HPPA_SETF4REG
:
1935 hppa_fcnvff (code
, HPPA_FP_FMT_DBL
, HPPA_FP_FMT_SGL
, ins
->sreg1
, ins
->dreg
);
1938 hppa_stw (code
, ins
->sreg1
, -16, hppa_sp
);
1939 hppa_fldws (code
, -16, hppa_sp
, hppa_fr31
, 0);
1940 hppa_fcnvxf (code
, HPPA_FP_FMT_SGL
, HPPA_FP_FMT_DBL
, hppa_fr31
, ins
->dreg
);
1943 case OP_FCONV_TO_I1
:
1944 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 1, TRUE
);
1946 case OP_FCONV_TO_U1
:
1947 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 1, FALSE
);
1949 case OP_FCONV_TO_I2
:
1950 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 2, TRUE
);
1952 case OP_FCONV_TO_U2
:
1953 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 2, FALSE
);
1955 case OP_FCONV_TO_I4
:
1957 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 4, TRUE
);
1959 case OP_FCONV_TO_U4
:
1961 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 4, FALSE
);
1964 case OP_FCONV_TO_I8
:
1965 case OP_FCONV_TO_U8
:
1966 g_assert_not_reached ();
1967 /* Implemented as helper calls */
1969 case OP_LCONV_TO_R_UN
:
1970 g_assert_not_reached ();
1971 /* Implemented as helper calls */
1974 case OP_LCONV_TO_OVF_I
:
1979 hppa_fadd (code
, HPPA_FP_FMT_DBL
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
1982 hppa_fsub (code
, HPPA_FP_FMT_DBL
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
1985 hppa_fmul (code
, HPPA_FP_FMT_DBL
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
1988 hppa_fdiv (code
, HPPA_FP_FMT_DBL
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
1995 g_assert_not_reached();
2003 hppa_fcmp (code
, HPPA_FP_FMT_DBL
, float_ceq_table
[ins
->opcode
- OP_FCEQ
], ins
->sreg1
, ins
->sreg2
);
2004 hppa_ftest (code
, 0);
2005 hppa_bl (code
, 12, hppa_r0
);
2006 hppa_ldo (code
, 1, hppa_r0
, ins
->dreg
);
2007 hppa_ldo (code
, 0, hppa_r0
, ins
->dreg
);
2020 EMIT_FLOAT_COND_BRANCH (ins
, ins
->sreg1
, ins
->sreg2
, ins
->opcode
- OP_FBEQ
);
2024 case OP_MEMORY_BARRIER
:
2028 hppa_xmpyu (code
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
2032 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins
->opcode
), __FUNCTION__
);
2033 g_assert_not_reached ();
2036 if ((((guint8
*)code
) - code_start
) > max_len
) {
2037 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
2038 mono_inst_name (ins
->opcode
), max_len
, ((guint8
*)code
) - code_start
);
2039 g_assert_not_reached ();
2047 cfg
->code_len
= (guint8
*)code
- cfg
->native_code
;
2052 mono_arch_register_lowlevel_calls (void)
2057 mono_arch_patch_code (MonoMethod
*method
, MonoDomain
*domain
, guint8
*code
, MonoJumpInfo
*ji
, gboolean run_cctors
)
2059 MonoJumpInfo
*patch_info
;
2062 /* FIXME: Move part of this to arch independent code */
2063 for (patch_info
= ji
; patch_info
; patch_info
= patch_info
->next
) {
2064 unsigned char *ip
= patch_info
->ip
.i
+ code
;
2067 target
= mono_resolve_patch_target (method
, domain
, code
, patch_info
, run_cctors
);
2068 DEBUG (printf ("patch_info->type = %d, target = %p\n", patch_info
->type
, target
));
2070 switch (patch_info
->type
) {
2071 case MONO_PATCH_INFO_NONE
:
2072 case MONO_PATCH_INFO_BB_OVF
:
2073 case MONO_PATCH_INFO_EXC_OVF
:
2076 case MONO_PATCH_INFO_IP
:
2077 hppa_patch ((guint32
*)ip
, ip
);
2080 case MONO_PATCH_INFO_CLASS_INIT
: {
2083 case MONO_PATCH_INFO_METHOD_JUMP
: {
2086 case MONO_PATCH_INFO_SWITCH
: {
2088 gpointer
*table
= (gpointer
*)target
;
2090 for (i
= 0; i
< patch_info
->data
.table
->table_size
; i
++) {
2091 DEBUG (printf ("Patching switch table, table[%d] = %p\n", i
, table
[i
]));
2092 hppa_ldil (ip
, hppa_lsel (table
[i
]), hppa_r1
);
2093 hppa_be_n (ip
, hppa_rsel (table
[i
]), hppa_r1
);
2100 hppa_patch ((guint32
*)ip
, target
);
2107 mono_arch_instrument_prolog (MonoCompile
*cfg
, void *func
, void *p
, gboolean enable_arguments
)
2109 guint32
*code
= (guint32
*)p
;
2113 hppa_set (code
, cfg
->method
, hppa_r26
);
2114 hppa_copy (code
, hppa_r0
, hppa_r25
); /* NULL sp for now */
2115 hppa_set (code
, func
, hppa_r1
);
2116 hppa_depi (code
, 0, 31, 2, hppa_r1
);
2117 hppa_ldw (code
, 0, hppa_r1
, hppa_r1
);
2118 hppa_ble (code
, 0, hppa_r1
);
2119 hppa_copy (code
, hppa_r31
, hppa_r2
);
2134 mono_arch_instrument_epilog (MonoCompile
*cfg
, void *func
, void *p
, gboolean enable_arguments
)
2136 guint32
*code
= (guint32
*)p
;
2139 int save_mode
= SAVE_NONE
;
2140 MonoMethod
*method
= cfg
->method
;
2142 switch (mono_type_get_underlying_type (mono_method_signature (method
)->ret
)->type
) {
2143 case MONO_TYPE_VOID
:
2144 /* special case string .ctor icall */
2145 if (strcmp (".ctor", method
->name
) && method
->klass
== mono_defaults
.string_class
)
2146 save_mode
= SAVE_ONE
;
2148 save_mode
= SAVE_NONE
;
2153 save_mode
= SAVE_ONE
;
2155 save_mode
= SAVE_TWO
;
2160 save_mode
= SAVE_FP
;
2162 case MONO_TYPE_VALUETYPE
:
2163 save_mode
= SAVE_STRUCT
;
2166 save_mode
= SAVE_ONE
;
2170 /* Save the result to the stack and also put it into the output registers */
2172 switch (save_mode
) {
2175 sparc_st_imm (code
, sparc_i0
, sparc_fp
, 68);
2176 sparc_st_imm (code
, sparc_i0
, sparc_fp
, 72);
2177 sparc_mov_reg_reg (code
, sparc_i0
, sparc_o1
);
2178 sparc_mov_reg_reg (code
, sparc_i1
, sparc_o2
);
2181 sparc_sti_imm (code
, sparc_i0
, sparc_fp
, ARGS_OFFSET
);
2182 sparc_mov_reg_reg (code
, sparc_i0
, sparc_o1
);
2186 sparc_stdf_imm (code
, sparc_f0
, sparc_fp
, ARGS_OFFSET
);
2188 sparc_stdf_imm (code
, sparc_f0
, sparc_fp
, 72);
2189 sparc_ld_imm (code
, sparc_fp
, 72, sparc_o1
);
2190 sparc_ld_imm (code
, sparc_fp
, 72 + 4, sparc_o2
);
2195 sparc_mov_reg_reg (code
, sparc_i0
, sparc_o1
);
2197 sparc_ld_imm (code
, sparc_fp
, 64, sparc_o1
);
2205 sparc_set (code
, cfg
->method
, sparc_o0
);
2207 mono_add_patch_info (cfg
, (guint8
*)code
- cfg
->native_code
, MONO_PATCH_INFO_ABS
, func
);
2210 /* Restore result */
2212 switch (save_mode
) {
2214 sparc_ld_imm (code
, sparc_fp
, 68, sparc_i0
);
2215 sparc_ld_imm (code
, sparc_fp
, 72, sparc_i0
);
2218 sparc_ldi_imm (code
, sparc_fp
, ARGS_OFFSET
, sparc_i0
);
2221 sparc_lddf_imm (code
, sparc_fp
, ARGS_OFFSET
, sparc_f0
);
/*
 * The HPPA stack frame should look like this:
 *
 * ---------------------
 * incoming params area
 * ---------------------
 * linkage area			size = ARGS_OFFSET
 * --------------------- fp = psp
 * HPPA_STACK_LMF_OFFSET
 * ---------------------
 * MonoLMF structure or saved registers
 * ---------------------
 * locals			size = cfg->stack_offset - cfg->param_area
 * ---------------------
 * params area			size = cfg->param_area - ARGS_OFFSET (aligned)
 * ---------------------
 * callee linkage area		size = ARGS_OFFSET
 * --------------------- sp
 */
mono_arch_emit_prolog (MonoCompile *cfg)
{
2254 MonoMethod
*method
= cfg
->method
;
2256 MonoMethodSignature
*sig
;
2258 int alloc_size
, pos
, max_offset
, i
;
2265 if (mono_jit_trace_calls
!= NULL
&& mono_trace_eval (method
))
2268 sig
= mono_method_signature (method
);
2269 cfg
->code_size
= 512 + sig
->param_count
* 20;
2270 code
= cfg
->native_code
= g_malloc (cfg
->code_size
);
2272 /* TODO: enable tail call optimization */
2273 if (1 || cfg
->flags
& MONO_CFG_HAS_CALLS
) {
2274 hppa_stw (code
, hppa_r2
, -20, hppa_sp
);
2278 pos
= HPPA_STACK_LMF_OFFSET
;
2280 /* figure out how much space we need for spilling */
2281 if (!method
->save_lmf
) {
2282 /* spill callee-save registers */
2283 guint32 mask
= cfg
->used_int_regs
& MONO_ARCH_CALLEE_SAVED_REGS
;
2284 for (i
= 0; i
< 32; i
++) {
2285 if ((1 << i
) & mask
)
2286 pos
+= sizeof (gulong
);
2290 pos
+= sizeof (MonoLMF
);
2293 alloc_size
= ALIGN_TO (pos
+ cfg
->stack_offset
, MONO_ARCH_FRAME_ALIGNMENT
);
2294 g_assert ((alloc_size
& (MONO_ARCH_FRAME_ALIGNMENT
- 1)) == 0);
2296 cfg
->stack_usage
= alloc_size
;
2299 hppa_copy (code
, hppa_r3
, hppa_r1
);
2300 hppa_copy (code
, hppa_sp
, hppa_r3
);
2301 if (hppa_check_bits (alloc_size
, 14))
2302 hppa_stwm (code
, hppa_r1
, alloc_size
, hppa_sp
);
2304 hppa_stwm (code
, hppa_r1
, 8100, hppa_sp
);
2305 hppa_addil (code
, hppa_lsel (alloc_size
- 8100), hppa_sp
);
2306 hppa_ldo (code
, hppa_rsel (alloc_size
- 8100), hppa_r1
, hppa_sp
);
	/* compute max_offset in order to use short forward jumps;
	 * we always do it on hppa because the immediate displacement
	 * for jumps is small
	 */
	max_offset = 0;
	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
		MonoInst *ins = bb->code;
		bb->max_offset = max_offset;

		if (cfg->prof_options & MONO_PROFILE_COVERAGE)
			max_offset += 6;	/* assumed cost of the coverage probe; original line elided */

		MONO_BB_FOR_EACH_INS (bb, ins)
			max_offset += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
	}
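	/*
	 * Sketch of how this estimate is typically consumed (illustrative,
	 * not part of the original code): when a forward branch to target_bb
	 * is emitted, the worst-case distance target_bb->max_offset minus the
	 * current offset can be compared against the reach of the short
	 * branch form, falling back to a longer sequence only when the short
	 * form might not reach.
	 */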
	DEBUG (printf ("Incoming arguments: \n"));
	cinfo = get_call_info (sig, sig->pinvoke);

	/* We do this first so that we don't have to worry about the LMF-
	 * saving code clobbering r28
	 */
	if (cinfo->struct_return)
		hppa_stw (code, hppa_r28, cfg->ret->inst_offset, hppa_sp);
	/* Save the LMF or the spilled registers */
	pos = HPPA_STACK_LMF_OFFSET;
	if (!method->save_lmf) {
		/* spill callee-save registers */
		guint32 mask = cfg->used_int_regs & MONO_ARCH_CALLEE_SAVED_REGS;
		for (i = 0; i < 32; i++) {
			if ((1 << i) & mask) {
				if (i == hppa_r3) {
					/* the original r3 was stored at 0(r3) by the frame setup above */
					hppa_ldw (code, 0, hppa_r3, hppa_r1);
					hppa_stw (code, hppa_r1, pos, hppa_r3);
				} else
					hppa_stw (code, i, pos, hppa_r3);
				pos += sizeof (gulong);
			}
		}
	} else {
		int ofs = lmf_offset + G_STRUCT_OFFSET (MonoLMF, regs);

		hppa_ldw (code, 0, hppa_r3, hppa_r1);
		hppa_stw (code, hppa_r1, ofs, hppa_r3);
		ofs += sizeof (gulong);
		for (reg = 4; reg < 32; reg++) {
			if (HPPA_IS_SAVED_GREG (reg)) {
				hppa_stw (code, reg, ofs, hppa_r3);
				ofs += sizeof (gulong);
			}
		}

		/* We shouldn't need to save the FP regs.... */
		ofs = ALIGN_TO (ofs, sizeof (double));
		hppa_set (code, ofs, hppa_r1);
		for (reg = 0; reg < 32; reg++) {
			if (HPPA_IS_SAVED_FREG (reg)) {
				hppa_fstdx (code, reg, hppa_r1, hppa_r3);
				hppa_ldo (code, sizeof (double), hppa_r1, hppa_r1);
			}
		}

		/* We also spill the arguments onto the stack, because
		 * the call to mono_get_lmf_addr below can clobber them.
		 *
		 * This goes in the param area that is always allocated
		 */
		ofs = -36;	/* matches the reload at -36..-48 further down */
		for (reg = hppa_r26; reg >= hppa_r23; reg--) {
			hppa_stw (code, reg, ofs, hppa_sp);
			ofs -= sizeof (gulong);
		}
	}
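	/*
	 * Note (illustrative, based on the code above and on the reload
	 * sequence after the mono_get_lmf_addr call): r26-r23 are the four
	 * argument registers of the 32-bit HPPA calling convention, and the
	 * words at sp-36, -40, -44 and -48 are their standard spill slots in
	 * the frame marker, which is why the arguments survive the helper
	 * call and can simply be reloaded from there afterwards.
	 */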
	if (cfg->flags & MONO_CFG_HAS_ALLOCA)
		hppa_copy (code, hppa_r30, hppa_r4);

	if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
		hppa_set (code, cfg->domain, hppa_r26);
		mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD, (gpointer)"mono_jit_thread_attach");
		hppa_ldil (code, 0, hppa_r1);
		hppa_ldo (code, 0, hppa_r1, hppa_r1);
		hppa_depi (code, 0, 31, 2, hppa_r1);
		hppa_ldw (code, 0, hppa_r1, hppa_r1);
		hppa_ble (code, 0, hppa_r1);
		hppa_copy (code, hppa_r31, hppa_r2);
	}
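	/*
	 * Illustrative description of the call sequence above (and of the
	 * identical one used for mono_get_lmf_addr below), not original text:
	 * ldil/ldo materialize a 32-bit constant that is filled in later via
	 * the MONO_PATCH_INFO_INTERNAL_METHOD patch, depi clears the low two
	 * bits of that value, ldw then loads the real target address through
	 * the resulting pointer, ble performs the call leaving the return
	 * address in r31, and the copy in the branch delay slot moves it into
	 * the conventional return-pointer register r2.
	 */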
	if (method->save_lmf) {
		mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
				     (gpointer)"mono_get_lmf_addr");
		hppa_ldil (code, 0, hppa_r1);
		hppa_ldo (code, 0, hppa_r1, hppa_r1);
		hppa_depi (code, 0, 31, 2, hppa_r1);
		hppa_ldw (code, 0, hppa_r1, hppa_r1);
		hppa_ble (code, 0, hppa_r1);
		hppa_copy (code, hppa_r31, hppa_r2);

		/* lmf_offset is the offset from the previous stack pointer
		 * (which is now in hppa_r3).
		 * The pointer to the struct is put in hppa_r22 (new_lmf).
		 * The callee-saved registers are already in the MonoLMF.
		 */

		/* hppa_r22 = new_lmf (on the stack) */
		hppa_ldo (code, lmf_offset, hppa_r3, hppa_r22);
		/* new_lmf->lmf_addr = lmf_addr */
		hppa_stw (code, hppa_r28, G_STRUCT_OFFSET(MonoLMF, lmf_addr), hppa_r22);
		/* new_lmf->previous_lmf = *lmf_addr */
		hppa_ldw (code, 0, hppa_r28, hppa_r1);
		hppa_stw (code, hppa_r1, G_STRUCT_OFFSET(MonoLMF, previous_lmf), hppa_r22);
		/* *(lmf_addr) = r22 */
		hppa_stw (code, hppa_r22, 0, hppa_r28);
		hppa_set (code, method, hppa_r1);
		hppa_stw (code, hppa_r1, G_STRUCT_OFFSET(MonoLMF, method), hppa_r22);
		hppa_stw (code, hppa_sp, G_STRUCT_OFFSET(MonoLMF, ebp), hppa_r22);
		mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_IP, NULL);
		hppa_ldil (code, 0, hppa_r1);
		hppa_ldo (code, 0, hppa_r1, hppa_r1);
		hppa_stw (code, hppa_r1, G_STRUCT_OFFSET(MonoLMF, eip), hppa_r22);

		/* Now reload the arguments from the stack */
		hppa_ldw (code, -36, hppa_sp, hppa_r26);
		hppa_ldw (code, -40, hppa_sp, hppa_r25);
		hppa_ldw (code, -44, hppa_sp, hppa_r24);
		hppa_ldw (code, -48, hppa_sp, hppa_r23);
	}
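	/*
	 * C-level equivalent of the LMF linking performed above (illustrative
	 * sketch only; names follow the generated code, this is not runtime
	 * code):
	 *
	 *     MonoLMF **lmf_addr = mono_get_lmf_addr ();
	 *     new_lmf->lmf_addr = lmf_addr;
	 *     new_lmf->previous_lmf = *lmf_addr;
	 *     *lmf_addr = new_lmf;
	 *     new_lmf->method = method;
	 *     new_lmf->ebp = sp;
	 *     new_lmf->eip = <address patched in via MONO_PATCH_INFO_IP>;
	 */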
	/* load arguments allocated to register from the stack */
	pos = 0;
	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
		ArgInfo *ainfo = cinfo->args + i;
		inst = cfg->args [pos];

		if (inst->opcode == OP_REGVAR) {
			/* Want the argument in a register */
			switch (ainfo->storage) {

				if (ainfo->reg != inst->dreg)
					hppa_copy (code, ainfo->reg, inst->dreg);
				DEBUG (printf ("Argument %d assigned to register %s\n", pos, mono_arch_regname (inst->dreg)));

				if (ainfo->reg != inst->dreg) {
					hppa_copy (code, ainfo->reg, inst->dreg);
					hppa_copy (code, ainfo->reg + 1, inst->dreg + 1);
				}
				DEBUG (printf ("Argument %d assigned to register %s, %s\n", pos, mono_arch_regname (inst->dreg), mono_arch_regname (inst->dreg + 1)));

				if (ainfo->reg != inst->dreg)
					hppa_fcpy (code, HPPA_FP_FMT_SGL, ainfo->reg, inst->dreg);
				DEBUG (printf ("Argument %d assigned to single register %s\n", pos, mono_arch_fregname (inst->dreg)));

				if (ainfo->reg != inst->dreg)
					hppa_fcpy (code, HPPA_FP_FMT_DBL, ainfo->reg, inst->dreg);
				DEBUG (printf ("Argument %d assigned to double register %s\n", pos, mono_arch_fregname (inst->dreg)));

				switch (ainfo->size) {
				case 1:
					hppa_ldb (code, ainfo->offset, hppa_r3, inst->dreg);
					break;
				case 2:
					hppa_ldh (code, ainfo->offset, hppa_r3, inst->dreg);
					break;
				case 4:
					hppa_ldw (code, ainfo->offset, hppa_r3, inst->dreg);
					break;
				default:
					g_assert_not_reached ();
				}
				DEBUG (printf ("Argument %d loaded from the stack [%s - %d]\n", pos, mono_arch_regname (hppa_r3), -ainfo->offset));

				g_assert_not_reached ();
			}
		} else {
			/* Want the argument on the stack */
			switch (ainfo->storage) {

				DEBUG (printf ("Argument %d stored from register %s to stack [%s + %d]\n", pos, mono_arch_regname (ainfo->reg), mono_arch_regname (inst->inst_basereg), inst->inst_offset));
				if (hppa_check_bits (inst->inst_offset, 14)) {
					off = inst->inst_offset;
					reg = inst->inst_basereg;
				} else {
					hppa_set (code, inst->inst_offset, hppa_r1);
					hppa_add (code, hppa_r1, inst->inst_basereg, hppa_r1);
					off = 0;
					reg = hppa_r1;
				}
				switch (ainfo->size) {
				case 1:
					hppa_stb (code, ainfo->reg, off, reg);
					break;
				case 2:
					hppa_sth (code, ainfo->reg, off, reg);
					break;
				case 4:
					hppa_stw (code, ainfo->reg, off, reg);
					break;
				default:
					g_assert_not_reached ();
				}

				DEBUG (printf ("Argument %d stored from register (%s,%s) to stack [%s + %d]\n", pos, mono_arch_regname (ainfo->reg), mono_arch_regname (ainfo->reg+1), mono_arch_regname (inst->inst_basereg), inst->inst_offset));
				if (hppa_check_bits (inst->inst_offset + 4, 14)) {
					hppa_stw (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
					hppa_stw (code, ainfo->reg + 1, inst->inst_offset + 4, inst->inst_basereg);
				} else {
					hppa_ldo (code, inst->inst_offset, inst->inst_basereg, hppa_r1);
					hppa_stw (code, ainfo->reg, 0, hppa_r1);
					hppa_stw (code, ainfo->reg + 1, 4, hppa_r1);
				}

				DEBUG (printf ("Argument %d (float) stored from register %s to stack [%s + %d]\n", pos, mono_arch_fregname (ainfo->reg), mono_arch_regname (inst->inst_basereg), inst->inst_offset));
				hppa_ldo (code, inst->inst_offset, inst->inst_basereg, hppa_r1);
				hppa_fstwx (code, ainfo->reg, 0, hppa_r0, hppa_r1);

				DEBUG (printf ("Argument %d (double) stored from register %s to stack [%s + %d]\n", pos, mono_arch_fregname (ainfo->reg), mono_arch_regname (inst->inst_basereg), inst->inst_offset));
				hppa_ldo (code, inst->inst_offset, inst->inst_basereg, hppa_r1);
				hppa_fstdx (code, ainfo->reg, hppa_r0, hppa_r1);

				DEBUG (printf ("Argument %d copied from [%s - %d] to [%s + %d] (size=%d)\n", pos, mono_arch_regname (hppa_r3), -ainfo->offset, mono_arch_regname (inst->inst_basereg), inst->inst_offset, ainfo->size));
				if (inst->inst_offset != ainfo->offset ||
				    inst->inst_basereg != hppa_r3)
					code = emit_memcpy (code, inst->inst_offset, inst->inst_basereg, ainfo->offset, hppa_r3, ainfo->size);

				g_assert_not_reached ();
			}
		}
		pos++;
	}
	if (tracing)
		code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);

	if (getenv("HPPA_BREAK")) {
		*(guint32 *)code = 0x00010004;	/* break instruction */
		code += 4;
	}

	cfg->code_len = code - cfg->native_code;
	g_assert (cfg->code_len < cfg->code_size);

	return code;
}
void
mono_arch_emit_epilog (MonoCompile *cfg)
{
	MonoMethod *method = cfg->method;
	MonoMethodSignature *sig;
	int max_epilog_size = 16 + 20 * 4;

	sig = mono_method_signature (cfg->method);
	if (cfg->method->save_lmf)
		max_epilog_size += 128;

	if (mono_jit_trace_calls != NULL)
		max_epilog_size += 50;

	if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
		max_epilog_size += 50;

	while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
		cfg->code_size *= 2;
		cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
		mono_jit_stats.code_reallocs++;
	}

	code = (guint32 *)(cfg->native_code + cfg->code_len);

	if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
		code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);

	pos = HPPA_STACK_LMF_OFFSET;
	if (cfg->method->save_lmf) {
		hppa_ldo (code, pos, hppa_r3, hppa_r22);
		hppa_ldw (code, G_STRUCT_OFFSET(MonoLMF, previous_lmf), hppa_r22, hppa_r21);
		hppa_ldw (code, G_STRUCT_OFFSET(MonoLMF, lmf_addr), hppa_r22, hppa_r20);
		hppa_stw (code, hppa_r21, G_STRUCT_OFFSET(MonoLMF, previous_lmf), hppa_r20);

		pos += G_STRUCT_OFFSET(MonoLMF, regs) + sizeof (gulong);
		/* We skip the restore of r3 here, it is restored from the
		 * stack anyway. This makes the code a bit easier.
		 */
		for (reg = 4; reg < 31; reg++) {
			if (HPPA_IS_SAVED_GREG (reg)) {
				hppa_ldw (code, pos, hppa_r3, reg);
				pos += sizeof (gulong);
			}
		}

		pos = ALIGN_TO (pos, sizeof (double));
		hppa_set (code, pos, hppa_r1);
		for (reg = 0; reg < 31; reg++) {
			if (HPPA_IS_SAVED_FREG (reg)) {
				hppa_flddx (code, hppa_r1, hppa_r3, reg);
				hppa_ldo (code, sizeof (double), hppa_r1, hppa_r1);
				pos += sizeof (double);
			}
		}
	} else {
		guint32 mask = cfg->used_int_regs & MONO_ARCH_CALLEE_SAVED_REGS;

		for (i = 0; i < 32; i++) {
			if ((1 << i) & mask) {
				hppa_ldw (code, pos, hppa_r3, i);
				pos += sizeof (gulong);
			}
		}
	}

	if (sig->ret->type != MONO_TYPE_VOID &&
	    mono_type_to_stind (sig->ret) == CEE_STOBJ) {
		CallInfo *cinfo = get_call_info (sig, sig->pinvoke);

		switch (cinfo->ret.storage) {

			hppa_ldw (code, cfg->ret->inst_offset, hppa_sp, hppa_r28);
			hppa_ldw (code, 0, hppa_r28, hppa_r28);

			hppa_ldw (code, cfg->ret->inst_offset, hppa_sp, hppa_r28);
			hppa_ldw (code, 4, hppa_r28, hppa_r29);
			hppa_ldw (code, 0, hppa_r28, hppa_r28);

			g_assert_not_reached ();
		}
	}

	if (1 || cfg->flags & MONO_CFG_HAS_CALLS)
		hppa_ldw (code, -20, hppa_r3, hppa_r2);
	hppa_ldo (code, 64, hppa_r3, hppa_sp);
	hppa_bv (code, hppa_r0, hppa_r2);
	hppa_ldwm (code, -64, hppa_sp, hppa_r3);

	cfg->code_len = (guint8*)code - cfg->native_code;

	g_assert (cfg->code_len < cfg->code_size);
}
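/*
 * Note on the frame teardown above (illustrative, not from the original
 * source): r2 is reloaded from -20(r3), the standard return-pointer slot
 * filled by the prologue; "ldo 64(r3), sp" then parks sp 64 bytes above the
 * frame base so that "ldwm -64(sp), r3", executed in the delay slot of the
 * "bv r0(r2)" return branch, can both reload the caller's r3 (stored at
 * 0(r3) by the prologue's stwm) and leave sp equal to its value at entry.
 */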
/* remove once throw_exception_by_name is eliminated */
static int
exception_id_by_name (const char *name)
{
	if (strcmp (name, "IndexOutOfRangeException") == 0)
		return MONO_EXC_INDEX_OUT_OF_RANGE;
	if (strcmp (name, "OverflowException") == 0)
		return MONO_EXC_OVERFLOW;
	if (strcmp (name, "ArithmeticException") == 0)
		return MONO_EXC_ARITHMETIC;
	if (strcmp (name, "DivideByZeroException") == 0)
		return MONO_EXC_DIVIDE_BY_ZERO;
	if (strcmp (name, "InvalidCastException") == 0)
		return MONO_EXC_INVALID_CAST;
	if (strcmp (name, "NullReferenceException") == 0)
		return MONO_EXC_NULL_REF;
	if (strcmp (name, "ArrayTypeMismatchException") == 0)
		return MONO_EXC_ARRAY_TYPE_MISMATCH;
	g_error ("Unknown intrinsic exception %s\n", name);
	return -1;
}
void
mono_arch_emit_exceptions (MonoCompile *cfg)
{
	MonoJumpInfo *patch_info;
	const guint8 *exc_throw_pos [MONO_EXC_INTRINS_NUM] = {NULL};
	guint8 exc_throw_found [MONO_EXC_INTRINS_NUM] = {0};
	int max_epilog_size = 50;

	/* count the number of exception infos */

	/*
	 * make sure we have enough space for exceptions
	 */
	for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
		switch (patch_info->type) {
		case MONO_PATCH_INFO_BB_OVF:
			g_assert_not_reached ();
			break;
		case MONO_PATCH_INFO_EXC_OVF: {
			const MonoOvfJump *ovfj = patch_info->data.target;
			max_epilog_size += 8;
			i = exception_id_by_name (ovfj->data.exception);
			if (!exc_throw_found [i]) {
				max_epilog_size += 24;
				exc_throw_found [i] = TRUE;
			}
			break;
		}
		case MONO_PATCH_INFO_EXC:
			i = exception_id_by_name (patch_info->data.target);
			if (!exc_throw_found [i]) {
				max_epilog_size += 24;
				exc_throw_found [i] = TRUE;
			}
			break;
		default:
			break;
		}
	}

	while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
		cfg->code_size *= 2;
		cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
		mono_jit_stats.code_reallocs++;
	}

	code = cfg->native_code + cfg->code_len;

	/* add code to raise exceptions */
	for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
		switch (patch_info->type) {
		case MONO_PATCH_INFO_BB_OVF: {
			g_assert_not_reached ();
			break;
		}
		case MONO_PATCH_INFO_EXC_OVF: {
			const MonoOvfJump *ovfj = patch_info->data.target;
			MonoJumpInfo *newji;
			unsigned char *ip = patch_info->ip.i + cfg->native_code;
			unsigned char *stub = code;

			/* Patch original call, point it at the stub */
			hppa_patch ((guint32 *)ip, code);

			/* Write the stub */
			/* SUBTLE: this has to be PIC, because the code block
			 * may be relocated
			 */
			hppa_bl_n (code, 8, hppa_r0);

			/* Add a patch info to patch the stub to point to the exception code */
			newji = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfo));
			newji->type = MONO_PATCH_INFO_EXC;
			newji->ip.i = stub - cfg->native_code;
			newji->data.target = ovfj->data.exception;
			newji->next = patch_info->next;
			patch_info->next = newji;
			break;
		}
		case MONO_PATCH_INFO_EXC: {
			unsigned char *ip = patch_info->ip.i + cfg->native_code;
			i = exception_id_by_name (patch_info->data.target);
			if (exc_throw_pos [i]) {
				hppa_patch ((guint32 *)ip, exc_throw_pos [i]);
				patch_info->type = MONO_PATCH_INFO_NONE;
				break;
			} else {
				exc_throw_pos [i] = code;
			}

			hppa_patch ((guint32 *)ip, code);
			hppa_set (code, patch_info->data.target, hppa_r26);
			patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
			patch_info->data.name = "mono_arch_throw_exception_by_name";
			patch_info->ip.i = code - cfg->native_code;

			/* Assume the caller has set r2, we can't set it
			 * here based on ip, because the caller may
			 * be relocated (also the "ip" may be from an overflow
			 * stub)
			 */
			hppa_ldil (code, 0, hppa_r1);
			hppa_ldo (code, 0, hppa_r1, hppa_r1);
			hppa_bv (code, hppa_r0, hppa_r1);
			break;
		}
		default:
			break;
		}
	}

	cfg->code_len = code - cfg->native_code;

	g_assert (cfg->code_len < cfg->code_size);
}
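/*
 * Note (illustrative summary of the code above, not original text): only one
 * throw sequence per exception type is emitted; exc_throw_pos[] remembers
 * where it was placed so that further MONO_PATCH_INFO_EXC sites for the same
 * type are simply patched to branch to the existing sequence, while
 * MONO_PATCH_INFO_EXC_OVF entries are rewritten into ordinary
 * MONO_PATCH_INFO_EXC entries that point at the freshly written PIC stub.
 */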
#ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK

#error "--with-sigaltstack=yes not supported on hppa"

#endif

void
mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
{
}

void
mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
{
}
void
mono_arch_emit_this_vret_args (MonoCompile *cfg, MonoCallInst *inst, int this_reg, int this_type, int vt_reg)
{
	/* add the this argument */
	if (this_reg != -1) {
		MonoInst *this;
		MONO_INST_NEW (cfg, this, OP_MOVE);
		this->type = this_type;
		this->sreg1 = this_reg;
		this->dreg = mono_alloc_ireg (cfg);
		mono_bblock_add_inst (cfg->cbb, this);
		mono_call_inst_add_outarg_reg (cfg, inst, this->dreg, hppa_r26, FALSE);
	}

	if (vt_reg != -1) {
		MonoInst *vtarg;
		MONO_INST_NEW (cfg, vtarg, OP_MOVE);
		vtarg->type = STACK_MP;
		vtarg->sreg1 = vt_reg;
		vtarg->dreg = mono_alloc_ireg (cfg);
		mono_bblock_add_inst (cfg->cbb, vtarg);
		mono_call_inst_add_outarg_reg (cfg, inst, vtarg->dreg, hppa_r28, FALSE);
	}
}
MonoInst *
mono_arch_get_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
	MonoInst *ins = NULL;

	return ins;
}
/*
 * mono_arch_get_argument_info:
 * @csig: a method signature
 * @param_count: the number of parameters to consider
 * @arg_info: an array to store the result infos
 *
 * Gathers information on parameters such as size, alignment and
 * padding. arg_info should be large enough to hold param_count + 1 entries.
 *
 * Returns the size of the activation frame.
 */
int
mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
{
	cinfo = get_call_info (csig, FALSE);

	if (csig->hasthis) {
		ainfo = &cinfo->args [0];
		arg_info [0].offset = ainfo->offset;
	}

	for (k = 0; k < param_count; k++) {
		ainfo = &cinfo->args [k + csig->hasthis];

		arg_info [k + 1].offset = ainfo->offset;
		arg_info [k + 1].size = mono_type_size (csig->params [k], &align);
	}
}
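/*
 * Illustrative use of the function above (assumption, not from the original
 * source):
 *
 *     MonoJitArgumentInfo *ai = g_newa (MonoJitArgumentInfo, sig->param_count + 1);
 *     mono_arch_get_argument_info (sig, sig->param_count, ai);
 *
 * After the call, ai [k + 1].offset and ai [k + 1].size describe parameter k.
 */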
gboolean
mono_arch_print_tree (MonoInst *tree, int arity)
{
	return 0;
}

MonoInst* mono_arch_get_domain_intrinsic (MonoCompile* cfg)
{
	return NULL;
}

MonoInst* mono_arch_get_thread_intrinsic (MonoCompile* cfg)
{
	return NULL;
}

gpointer
mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
{
	/* FIXME: implement */
	g_assert_not_reached ();
	return NULL;
}