2 * ARMv4 code generator for TCC
4 * Copyright (c) 2003 Daniel Glöckner
5 * Copyright (c) 2012 Thomas Preud'homme
7 * Based on i386-gen.c by Fabrice Bellard
9 * This library is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public
11 * License as published by the Free Software Foundation; either
12 * version 2 of the License, or (at your option) any later version.
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with this library; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 #ifdef TARGET_DEFS_ONLY
26 #if defined(TCC_ARM_EABI) && !defined(TCC_ARM_VFP)
27 #error "Currently TinyCC only supports float computation with VFP instructions"
30 /* number of available registers */
37 #ifndef TCC_CPU_VERSION
38 # define TCC_CPU_VERSION 5
41 /* a register can belong to several classes. The classes must be
42 sorted from more general to more precise (see gv2() code which does
43 assumptions on it). */
44 #define RC_INT 0x0001 /* generic integer register */
45 #define RC_FLOAT 0x0002 /* generic float register */
61 #define RC_IRET RC_R0 /* function return: integer register */
62 #define RC_LRET RC_R1 /* function return: second integer register */
63 #define RC_FRET RC_F0 /* function return: float register */
65 /* pretty names for the registers */
87 #define T2CPR(t) (((t) & VT_BTYPE) != VT_FLOAT ? 0x100 : 0)
90 /* return registers for function */
91 #define REG_IRET TREG_R0 /* single word int return register */
92 #define REG_LRET TREG_R1 /* second word return register (for long long) */
93 #define REG_FRET TREG_F0 /* float return register */
96 #define TOK___divdi3 TOK___aeabi_ldivmod
97 #define TOK___moddi3 TOK___aeabi_ldivmod
98 #define TOK___udivdi3 TOK___aeabi_uldivmod
99 #define TOK___umoddi3 TOK___aeabi_uldivmod
102 /* defined if function parameters must be evaluated in reverse order */
103 #define INVERT_FUNC_PARAMS
105 /* defined if structures are passed as pointers. Otherwise structures
106 are directly pushed on stack. */
107 /* #define FUNC_STRUCT_PARAM_AS_PTR */
109 /* pointer size, in bytes */
112 /* long double size and alignment, in bytes */
114 #define LDOUBLE_SIZE 8
118 #define LDOUBLE_SIZE 8
122 #define LDOUBLE_ALIGN 8
124 #define LDOUBLE_ALIGN 4
127 /* maximum alignment (for aligned attribute support) */
130 #define CHAR_IS_UNSIGNED
132 /******************************************************/
133 #else /* ! TARGET_DEFS_ONLY */
134 /******************************************************/
137 enum float_abi float_abi
;
139 ST_DATA
const int reg_classes
[NB_REGS
] = {
140 /* r0 */ RC_INT
| RC_R0
,
141 /* r1 */ RC_INT
| RC_R1
,
142 /* r2 */ RC_INT
| RC_R2
,
143 /* r3 */ RC_INT
| RC_R3
,
144 /* r12 */ RC_INT
| RC_R12
,
145 /* f0 */ RC_FLOAT
| RC_F0
,
146 /* f1 */ RC_FLOAT
| RC_F1
,
147 /* f2 */ RC_FLOAT
| RC_F2
,
148 /* f3 */ RC_FLOAT
| RC_F3
,
150 /* d4/s8 */ RC_FLOAT
| RC_F4
,
151 /* d5/s10 */ RC_FLOAT
| RC_F5
,
152 /* d6/s12 */ RC_FLOAT
| RC_F6
,
153 /* d7/s14 */ RC_FLOAT
| RC_F7
,
157 static int func_sub_sp_offset
, last_itod_magic
;
160 #if defined(TCC_ARM_EABI) && defined(TCC_ARM_VFP)
161 static CType float_type
, double_type
, func_float_type
, func_double_type
;
162 ST_FUNC
void arm_init(struct TCCState
*s
)
164 float_type
.t
= VT_FLOAT
;
165 double_type
.t
= VT_DOUBLE
;
166 func_float_type
.t
= VT_FUNC
;
167 func_float_type
.ref
= sym_push(SYM_FIELD
, &float_type
, FUNC_CDECL
, FUNC_OLD
);
168 func_double_type
.t
= VT_FUNC
;
169 func_double_type
.ref
= sym_push(SYM_FIELD
, &double_type
, FUNC_CDECL
, FUNC_OLD
);
171 float_abi
= s
->float_abi
;
172 #ifndef TCC_ARM_HARDFLOAT
173 tcc_warning("soft float ABI currently not supported: default to softfp");
177 #define func_float_type func_old_type
178 #define func_double_type func_old_type
179 #define func_ldouble_type func_old_type
180 ST_FUNC
void arm_init(struct TCCState
*s
)
183 #if !defined (TCC_ARM_VFP)
184 tcc_warning("Support for FPA is deprecated and will be removed in next"
187 #if !defined (TCC_ARM_EABI)
188 tcc_warning("Support for OABI is deprecated and will be removed in next"
195 static int two2mask(int a
,int b
) {
196 return (reg_classes
[a
]|reg_classes
[b
])&~(RC_INT
|RC_FLOAT
);
199 static int regmask(int r
) {
200 return reg_classes
[r
]&~(RC_INT
|RC_FLOAT
);
203 /******************************************************/
205 #if defined(TCC_ARM_EABI) && !defined(CONFIG_TCC_ELFINTERP)
206 const char *default_elfinterp(struct TCCState
*s
)
208 if (s
->float_abi
== ARM_HARD_FLOAT
)
209 return "/lib/ld-linux-armhf.so.3";
211 return "/lib/ld-linux.so.3";
217 /* this is a good place to start adding big-endian support*/
222 if (!cur_text_section
)
223 tcc_error("compiler error! This happens f.ex. if the compiler\n"
224 "can't evaluate constant expressions outside of a function.");
225 if (ind1
> cur_text_section
->data_allocated
)
226 section_realloc(cur_text_section
, ind1
);
227 cur_text_section
->data
[ind
++] = i
&255;
229 cur_text_section
->data
[ind
++] = i
&255;
231 cur_text_section
->data
[ind
++] = i
&255;
233 cur_text_section
->data
[ind
++] = i
;
236 static uint32_t stuff_const(uint32_t op
, uint32_t c
)
239 uint32_t nc
= 0, negop
= 0;
249 case 0x1A00000: //mov
250 case 0x1E00000: //mvn
257 return (op
&0xF010F000)|((op
>>16)&0xF)|0x1E00000;
261 return (op
&0xF010F000)|((op
>>16)&0xF)|0x1A00000;
262 case 0x1C00000: //bic
267 case 0x1800000: //orr
269 return (op
&0xFFF0FFFF)|0x1E00000;
275 if(c
<256) /* catch undefined <<32 */
278 m
=(0xff>>i
)|(0xff<<(32-i
));
280 return op
|(i
<<7)|(c
<<i
)|(c
>>(32-i
));
290 void stuff_const_harder(uint32_t op
, uint32_t v
) {
296 uint32_t a
[16], nv
, no
, o2
, n2
;
299 o2
=(op
&0xfff0ffff)|((op
&0xf000)<<4);;
301 a
[i
]=(a
[i
-1]>>2)|(a
[i
-1]<<30);
303 for(j
=i
<4?i
+12:15;j
>=i
+4;j
--)
304 if((v
&(a
[i
]|a
[j
]))==v
) {
305 o(stuff_const(op
,v
&a
[i
]));
306 o(stuff_const(o2
,v
&a
[j
]));
313 for(j
=i
<4?i
+12:15;j
>=i
+4;j
--)
314 if((nv
&(a
[i
]|a
[j
]))==nv
) {
315 o(stuff_const(no
,nv
&a
[i
]));
316 o(stuff_const(n2
,nv
&a
[j
]));
321 for(k
=i
<4?i
+12:15;k
>=j
+4;k
--)
322 if((v
&(a
[i
]|a
[j
]|a
[k
]))==v
) {
323 o(stuff_const(op
,v
&a
[i
]));
324 o(stuff_const(o2
,v
&a
[j
]));
325 o(stuff_const(o2
,v
&a
[k
]));
332 for(k
=i
<4?i
+12:15;k
>=j
+4;k
--)
333 if((nv
&(a
[i
]|a
[j
]|a
[k
]))==nv
) {
334 o(stuff_const(no
,nv
&a
[i
]));
335 o(stuff_const(n2
,nv
&a
[j
]));
336 o(stuff_const(n2
,nv
&a
[k
]));
339 o(stuff_const(op
,v
&a
[0]));
340 o(stuff_const(o2
,v
&a
[4]));
341 o(stuff_const(o2
,v
&a
[8]));
342 o(stuff_const(o2
,v
&a
[12]));
346 uint32_t encbranch(int pos
, int addr
, int fail
)
350 if(addr
>=0x1000000 || addr
<-0x1000000) {
352 tcc_error("FIXME: function bigger than 32MB");
355 return 0x0A000000|(addr
&0xffffff);
358 int decbranch(int pos
)
361 x
=*(uint32_t *)(cur_text_section
->data
+ pos
);
368 /* output a symbol and patch all calls to it */
369 void gsym_addr(int t
, int a
)
374 x
=(uint32_t *)(cur_text_section
->data
+ t
);
377 *x
=0xE1A00000; // nop
380 *x
|= encbranch(lt
,a
,1);
391 static uint32_t vfpr(int r
)
393 if(r
<TREG_F0
|| r
>TREG_F7
)
394 tcc_error("compiler error! register %i is no vfp register",r
);
398 static uint32_t fpr(int r
)
400 if(r
<TREG_F0
|| r
>TREG_F3
)
401 tcc_error("compiler error! register %i is no fpa register",r
);
406 static uint32_t intr(int r
)
410 if(r
>= TREG_R0
&& r
<= TREG_R3
)
412 if (r
>= TREG_SP
&& r
<= TREG_LR
)
413 return r
+ (13 - TREG_SP
);
414 tcc_error("compiler error! register %i is no int register",r
);
417 static void calcaddr(uint32_t *base
, int *off
, int *sgn
, int maxoff
, unsigned shift
)
419 if(*off
>maxoff
|| *off
&((1<<shift
)-1)) {
426 y
=stuff_const(x
,*off
&~maxoff
);
432 y
=stuff_const(x
,(*off
+maxoff
)&~maxoff
);
436 *off
=((*off
+maxoff
)&~maxoff
)-*off
;
439 stuff_const_harder(x
,*off
&~maxoff
);
444 static uint32_t mapcc(int cc
)
449 return 0x30000000; /* CC/LO */
451 return 0x20000000; /* CS/HS */
453 return 0x00000000; /* EQ */
455 return 0x10000000; /* NE */
457 return 0x90000000; /* LS */
459 return 0x80000000; /* HI */
461 return 0x40000000; /* MI */
463 return 0x50000000; /* PL */
465 return 0xB0000000; /* LT */
467 return 0xA0000000; /* GE */
469 return 0xD0000000; /* LE */
471 return 0xC0000000; /* GT */
473 tcc_error("unexpected condition code");
474 return 0xE0000000; /* AL */
477 static int negcc(int cc
)
506 tcc_error("unexpected condition code");
510 /* load 'r' from value 'sv' */
511 void load(int r
, SValue
*sv
)
513 int v
, ft
, fc
, fr
, sign
;
530 uint32_t base
= 0xB; // fp
533 v1
.r
= VT_LOCAL
| VT_LVAL
;
539 } else if(v
== VT_CONST
) {
548 } else if(v
< VT_CONST
) {
555 calcaddr(&base
,&fc
,&sign
,1020,2);
557 op
=0xED100A00; /* flds */
560 if ((ft
& VT_BTYPE
) != VT_FLOAT
)
561 op
|=0x100; /* flds -> fldd */
562 o(op
|(vfpr(r
)<<12)|(fc
>>2)|(base
<<16));
567 #if LDOUBLE_SIZE == 8
568 if ((ft
& VT_BTYPE
) != VT_FLOAT
)
571 if ((ft
& VT_BTYPE
) == VT_DOUBLE
)
573 else if ((ft
& VT_BTYPE
) == VT_LDOUBLE
)
576 o(op
|(fpr(r
)<<12)|(fc
>>2)|(base
<<16));
578 } else if((ft
& (VT_BTYPE
|VT_UNSIGNED
)) == VT_BYTE
579 || (ft
& VT_BTYPE
) == VT_SHORT
) {
580 calcaddr(&base
,&fc
,&sign
,255,0);
582 if ((ft
& VT_BTYPE
) == VT_SHORT
)
584 if ((ft
& VT_UNSIGNED
) == 0)
588 o(op
|(intr(r
)<<12)|(base
<<16)|((fc
&0xf0)<<4)|(fc
&0xf));
590 calcaddr(&base
,&fc
,&sign
,4095,0);
594 if ((ft
& VT_BTYPE
) == VT_BYTE
|| (ft
& VT_BTYPE
) == VT_BOOL
)
596 o(op
|(intr(r
)<<12)|fc
|(base
<<16));
602 op
=stuff_const(0xE3A00000|(intr(r
)<<12),sv
->c
.i
);
603 if (fr
& VT_SYM
|| !op
) {
604 o(0xE59F0000|(intr(r
)<<12));
607 greloc(cur_text_section
, sv
->sym
, ind
, R_ARM_ABS32
);
612 } else if (v
== VT_LOCAL
) {
613 op
=stuff_const(0xE28B0000|(intr(r
)<<12),sv
->c
.i
);
614 if (fr
& VT_SYM
|| !op
) {
615 o(0xE59F0000|(intr(r
)<<12));
617 if(fr
& VT_SYM
) // needed ?
618 greloc(cur_text_section
, sv
->sym
, ind
, R_ARM_ABS32
);
620 o(0xE08B0000|(intr(r
)<<12)|intr(r
));
624 } else if(v
== VT_CMP
) {
625 o(mapcc(sv
->c
.i
)|0x3A00001|(intr(r
)<<12));
626 o(mapcc(negcc(sv
->c
.i
))|0x3A00000|(intr(r
)<<12));
628 } else if (v
== VT_JMP
|| v
== VT_JMPI
) {
631 o(0xE3A00000|(intr(r
)<<12)|t
);
634 o(0xE3A00000|(intr(r
)<<12)|(t
^1));
636 } else if (v
< VT_CONST
) {
639 o(0xEEB00A40|(vfpr(r
)<<12)|vfpr(v
)|T2CPR(ft
)); /* fcpyX */
641 o(0xEE008180|(fpr(r
)<<12)|fpr(v
));
644 o(0xE1A00000|(intr(r
)<<12)|intr(v
));
648 tcc_error("load unimplemented!");
651 /* store register 'r' in lvalue 'v' */
652 void store(int r
, SValue
*sv
)
655 int v
, ft
, fc
, fr
, sign
;
670 if (fr
& VT_LVAL
|| fr
== VT_LOCAL
) {
671 uint32_t base
= 0xb; /* fp */
676 } else if(v
== VT_CONST
) {
688 calcaddr(&base
,&fc
,&sign
,1020,2);
690 op
=0xED000A00; /* fsts */
693 if ((ft
& VT_BTYPE
) != VT_FLOAT
)
694 op
|=0x100; /* fsts -> fstd */
695 o(op
|(vfpr(r
)<<12)|(fc
>>2)|(base
<<16));
700 #if LDOUBLE_SIZE == 8
701 if ((ft
& VT_BTYPE
) != VT_FLOAT
)
704 if ((ft
& VT_BTYPE
) == VT_DOUBLE
)
706 if ((ft
& VT_BTYPE
) == VT_LDOUBLE
)
709 o(op
|(fpr(r
)<<12)|(fc
>>2)|(base
<<16));
712 } else if((ft
& VT_BTYPE
) == VT_SHORT
) {
713 calcaddr(&base
,&fc
,&sign
,255,0);
717 o(op
|(intr(r
)<<12)|(base
<<16)|((fc
&0xf0)<<4)|(fc
&0xf));
719 calcaddr(&base
,&fc
,&sign
,4095,0);
723 if ((ft
& VT_BTYPE
) == VT_BYTE
|| (ft
& VT_BTYPE
) == VT_BOOL
)
725 o(op
|(intr(r
)<<12)|fc
|(base
<<16));
730 tcc_error("store unimplemented");
733 static void gadd_sp(int val
)
735 stuff_const_harder(0xE28DD000,val
);
738 /* 'is_jmp' is '1' if it is a jump */
739 static void gcall_or_jmp(int is_jmp
)
742 if ((vtop
->r
& (VT_VALMASK
| VT_LVAL
)) == VT_CONST
) {
745 x
=encbranch(ind
,ind
+vtop
->c
.i
,0);
747 if (vtop
->r
& VT_SYM
) {
748 /* relocation case */
749 greloc(cur_text_section
, vtop
->sym
, ind
, R_ARM_PC24
);
751 put_elf_reloc(symtab_section
, cur_text_section
, ind
, R_ARM_PC24
, 0);
752 o(x
|(is_jmp
?0xE0000000:0xE1000000));
755 o(0xE28FE004); // add lr,pc,#4
756 o(0xE51FF004); // ldr pc,[pc,#-4]
757 if (vtop
->r
& VT_SYM
)
758 greloc(cur_text_section
, vtop
->sym
, ind
, R_ARM_ABS32
);
762 /* otherwise, indirect call */
765 o(0xE1A0E00F); // mov lr,pc
766 o(0xE1A0F000|intr(r
)); // mov pc,r
770 static int unalias_ldbl(int btype
)
772 #if LDOUBLE_SIZE == 8
773 if (btype
== VT_LDOUBLE
)
779 /* Return whether a structure is an homogeneous float aggregate or not.
780 The answer is true if all the elements of the structure are of the same
781 primitive float type and there is less than 4 elements.
783 type: the type corresponding to the structure to be tested */
784 static int is_hgen_float_aggr(CType
*type
)
786 if ((type
->t
& VT_BTYPE
) == VT_STRUCT
) {
788 int btype
, nb_fields
= 0;
790 ref
= type
->ref
->next
;
791 btype
= unalias_ldbl(ref
->type
.t
& VT_BTYPE
);
792 if (btype
== VT_FLOAT
|| btype
== VT_DOUBLE
) {
793 for(; ref
&& btype
== unalias_ldbl(ref
->type
.t
& VT_BTYPE
); ref
= ref
->next
, nb_fields
++);
794 return !ref
&& nb_fields
<= 4;
801 signed char avail
[3]; /* 3 holes max with only float and double alignments */
802 int first_hole
; /* first available hole */
803 int last_hole
; /* last available hole (none if equal to first_hole) */
804 int first_free_reg
; /* next free register in the sequence, hole excluded */
807 #define AVAIL_REGS_INITIALIZER (struct avail_regs) { { 0, 0, 0}, 0, 0, 0 }
809 /* Find suitable registers for a VFP Co-Processor Register Candidate (VFP CPRC
810 param) according to the rules described in the procedure call standard for
811 the ARM architecture (AAPCS). If found, the registers are assigned to this
812 VFP CPRC parameter. Registers are allocated in sequence unless a hole exists
813 and the parameter is a single float.
815 avregs: opaque structure to keep track of available VFP co-processor regs
816 align: alignment constraints for the param, as returned by type_size()
817 size: size of the parameter, as returned by type_size() */
818 int assign_vfpreg(struct avail_regs
*avregs
, int align
, int size
)
822 if (avregs
->first_free_reg
== -1)
824 if (align
>> 3) { /* double alignment */
825 first_reg
= avregs
->first_free_reg
;
826 /* alignment constraint not respected so use next reg and record hole */
828 avregs
->avail
[avregs
->last_hole
++] = first_reg
++;
829 } else { /* no special alignment (float or array of float) */
830 /* if single float and a hole is available, assign the param to it */
831 if (size
== 4 && avregs
->first_hole
!= avregs
->last_hole
)
832 return avregs
->avail
[avregs
->first_hole
++];
834 first_reg
= avregs
->first_free_reg
;
836 if (first_reg
+ size
/ 4 <= 16) {
837 avregs
->first_free_reg
= first_reg
+ size
/ 4;
840 avregs
->first_free_reg
= -1;
844 /* Returns whether all params need to be passed in core registers or not.
845 This is the case for function part of the runtime ABI. */
846 int floats_in_core_regs(SValue
*sval
)
851 switch (sval
->sym
->v
) {
852 case TOK___floatundisf
:
853 case TOK___floatundidf
:
854 case TOK___fixunssfdi
:
855 case TOK___fixunsdfdi
:
857 case TOK___fixunsxfdi
:
859 case TOK___floatdisf
:
860 case TOK___floatdidf
:
870 /* Return the number of registers needed to return the struct, or 0 if
871 returning via struct pointer. */
872 ST_FUNC
int gfunc_sret(CType
*vt
, int variadic
, CType
*ret
, int *ret_align
, int *regsize
) {
875 size
= type_size(vt
, &align
);
876 if (float_abi
== ARM_HARD_FLOAT
&& !variadic
&&
877 (is_float(vt
->t
) || is_hgen_float_aggr(vt
))) {
882 return (size
+ 7) >> 3;
883 } else if (size
<= 4) {
896 /* Parameters are classified according to how they are copied to their final
897 destination for the function call. Because the copying is performed class
898 after class according to the order in the union below, it is important that
899 some constraints about the order of the members of this union are respected:
900 - CORE_STRUCT_CLASS must come after STACK_CLASS;
901 - CORE_CLASS must come after STACK_CLASS, CORE_STRUCT_CLASS and
903 - VFP_STRUCT_CLASS must come after VFP_CLASS.
904 See the comment for the main loop in copy_params() for the reason. */
915 int start
; /* first reg or addr used depending on the class */
916 int end
; /* last reg used or next free addr depending on the class */
917 SValue
*sval
; /* pointer to SValue on the value stack */
918 struct param_plan
*prev
; /* previous element in this class */
922 struct param_plan
*pplans
; /* array of all the param plans */
923 struct param_plan
*clsplans
[NB_CLASSES
]; /* per class lists of param plans */
926 #define add_param_plan(plan,pplan,class) \
928 pplan.prev = plan->clsplans[class]; \
929 plan->pplans[plan ## _nb] = pplan; \
930 plan->clsplans[class] = &plan->pplans[plan ## _nb++]; \
933 /* Assign parameters to registers and stack with alignment according to the
934 rules in the procedure call standard for the ARM architecture (AAPCS).
935 The overall assignment is recorded in an array of per parameter structures
936 called parameter plans. The parameter plans are also further organized in a
937 number of linked lists, one per class of parameter (see the comment for the
938 definition of union reg_class).
940 nb_args: number of parameters of the function for which a call is generated
941 float_abi: float ABI in use for this function call
942 plan: the structure where the overall assignment is recorded
943 todo: a bitmap that record which core registers hold a parameter
945 Returns the amount of stack space needed for parameter passing
947 Note: this function allocated an array in plan->pplans with tcc_malloc. It
948 is the responsibility of the caller to free this array once used (ie not
949 before copy_params). */
950 static int assign_regs(int nb_args
, int float_abi
, struct plan
*plan
, int *todo
)
953 int ncrn
/* next core register number */, nsaa
/* next stacked argument address*/;
955 struct param_plan pplan
;
956 struct avail_regs avregs
= AVAIL_REGS_INITIALIZER
;
960 plan
->pplans
= tcc_malloc(nb_args
* sizeof(*plan
->pplans
));
961 memset(plan
->clsplans
, 0, sizeof(plan
->clsplans
));
962 for(i
= nb_args
; i
-- ;) {
963 int j
, start_vfpreg
= 0;
964 CType type
= vtop
[-i
].type
;
966 size
= type_size(&type
, &align
);
967 size
= (size
+ 3) & ~3;
968 align
= (align
+ 3) & ~3;
969 switch(vtop
[-i
].type
.t
& VT_BTYPE
) {
974 if (float_abi
== ARM_HARD_FLOAT
) {
975 int is_hfa
= 0; /* Homogeneous float aggregate */
977 if (is_float(vtop
[-i
].type
.t
)
978 || (is_hfa
= is_hgen_float_aggr(&vtop
[-i
].type
))) {
981 start_vfpreg
= assign_vfpreg(&avregs
, align
, size
);
982 end_vfpreg
= start_vfpreg
+ ((size
- 1) >> 2);
983 if (start_vfpreg
>= 0) {
984 pplan
= (struct param_plan
) {start_vfpreg
, end_vfpreg
, &vtop
[-i
]};
986 add_param_plan(plan
, pplan
, VFP_STRUCT_CLASS
);
988 add_param_plan(plan
, pplan
, VFP_CLASS
);
994 ncrn
= (ncrn
+ (align
-1)/4) & ~((align
/4) - 1);
995 if (ncrn
+ size
/4 <= 4 || (ncrn
< 4 && start_vfpreg
!= -1)) {
996 /* The parameter is allocated both in core register and on stack. As
997 * such, it can be of either class: it would either be the last of
998 * CORE_STRUCT_CLASS or the first of STACK_CLASS. */
999 for (j
= ncrn
; j
< 4 && j
< ncrn
+ size
/ 4; j
++)
1001 pplan
= (struct param_plan
) {ncrn
, j
, &vtop
[-i
]};
1002 add_param_plan(plan
, pplan
, CORE_STRUCT_CLASS
);
1005 nsaa
= (ncrn
- 4) * 4;
1013 int is_long
= (vtop
[-i
].type
.t
& VT_BTYPE
) == VT_LLONG
;
1016 ncrn
= (ncrn
+ 1) & -2;
1020 pplan
= (struct param_plan
) {ncrn
, ncrn
, &vtop
[-i
]};
1024 add_param_plan(plan
, pplan
, CORE_CLASS
);
1028 nsaa
= (nsaa
+ (align
- 1)) & ~(align
- 1);
1029 pplan
= (struct param_plan
) {nsaa
, nsaa
+ size
, &vtop
[-i
]};
1030 add_param_plan(plan
, pplan
, STACK_CLASS
);
1031 nsaa
+= size
; /* size already rounded up before */
1036 #undef add_param_plan
1038 /* Copy parameters to their final destination (core reg, VFP reg or stack) for
1041 nb_args: number of parameters the function take
1042 plan: the overall assignment plan for parameters
1043 todo: a bitmap indicating what core reg will hold a parameter
1045 Returns the number of SValue added by this function on the value stack */
1046 static int copy_params(int nb_args
, struct plan
*plan
, int todo
)
1048 int size
, align
, r
, i
, nb_extra_sval
= 0;
1049 struct param_plan
*pplan
;
1052 /* Several constraints require parameters to be copied in a specific order:
1053 - structures are copied to the stack before being loaded in a reg;
1054 - floats loaded to an odd numbered VFP reg are first copied to the
1055 preceding even numbered VFP reg and then moved to the next VFP reg.
1057 It is thus important that:
1058 - structures assigned to core regs must be copied after parameters
1059 assigned to the stack but before structures assigned to VFP regs because
1060 a structure can lie partly in core registers and partly on the stack;
1061 - parameters assigned to the stack and all structures be copied before
1062 parameters assigned to a core reg since copying a parameter to the stack
1063 require using a core reg;
1064 - parameters assigned to VFP regs be copied before structures assigned to
1065 VFP regs as the copy might use an even numbered VFP reg that already
1066 holds part of a structure. */
1068 for(i
= 0; i
< NB_CLASSES
; i
++) {
1069 for(pplan
= plan
->clsplans
[i
]; pplan
; pplan
= pplan
->prev
) {
1072 && (i
!= CORE_CLASS
|| pplan
->sval
->r
< VT_CONST
))
1075 vpushv(pplan
->sval
);
1076 pplan
->sval
->r
= pplan
->sval
->r2
= VT_CONST
; /* disable entry */
1079 case CORE_STRUCT_CLASS
:
1080 case VFP_STRUCT_CLASS
:
1081 if ((pplan
->sval
->type
.t
& VT_BTYPE
) == VT_STRUCT
) {
1083 size
= type_size(&pplan
->sval
->type
, &align
);
1084 /* align to stack align size */
1085 size
= (size
+ 3) & ~3;
1086 if (i
== STACK_CLASS
&& pplan
->prev
)
1087 padding
= pplan
->start
- pplan
->prev
->end
;
1088 size
+= padding
; /* Add padding if any */
1089 /* allocate the necessary size on stack */
1091 /* generate structure store */
1092 r
= get_reg(RC_INT
);
1093 o(0xE28D0000|(intr(r
)<<12)|padding
); /* add r, sp, padding */
1094 vset(&vtop
->type
, r
| VT_LVAL
, 0);
1096 vstore(); /* memcpy to current sp + potential padding */
1098 /* Homogeneous float aggregate are loaded to VFP registers
1099 immediately since there is no way of loading data in multiple
1100 non consecutive VFP registers as what is done for other
1101 structures (see the use of todo). */
1102 if (i
== VFP_STRUCT_CLASS
) {
1103 int first
= pplan
->start
, nb
= pplan
->end
- first
+ 1;
1104 /* vpop.32 {pplan->start, ..., pplan->end} */
1105 o(0xECBD0A00|(first
&1)<<22|(first
>>1)<<12|nb
);
1106 /* No need to write the register used to a SValue since VFP regs
1107 cannot be used for gcall_or_jmp */
1110 if (is_float(pplan
->sval
->type
.t
)) {
1112 r
= vfpr(gv(RC_FLOAT
)) << 12;
1113 if ((pplan
->sval
->type
.t
& VT_BTYPE
) == VT_FLOAT
)
1117 r
|= 0x101; /* vpush.32 -> vpush.64 */
1119 o(0xED2D0A01 + r
); /* vpush */
1121 r
= fpr(gv(RC_FLOAT
)) << 12;
1122 if ((pplan
->sval
->type
.t
& VT_BTYPE
) == VT_FLOAT
)
1124 else if ((pplan
->sval
->type
.t
& VT_BTYPE
) == VT_DOUBLE
)
1127 size
= LDOUBLE_SIZE
;
1134 o(0xED2D0100|r
|(size
>>2)); /* some kind of vpush for FPA */
1137 /* simple type (currently always same size) */
1138 /* XXX: implicit cast ? */
1140 if ((pplan
->sval
->type
.t
& VT_BTYPE
) == VT_LLONG
) {
1144 o(0xE52D0004|(intr(r
)<<12)); /* push r */
1148 o(0xE52D0004|(intr(r
)<<12)); /* push r */
1150 if (i
== STACK_CLASS
&& pplan
->prev
)
1151 gadd_sp(pplan
->prev
->end
- pplan
->start
); /* Add padding if any */
1156 gv(regmask(TREG_F0
+ (pplan
->start
>> 1)));
1157 if (pplan
->start
& 1) { /* Must be in upper part of double register */
1158 o(0xEEF00A40|((pplan
->start
>>1)<<12)|(pplan
->start
>>1)); /* vmov.f32 s(n+1), sn */
1159 vtop
->r
= VT_CONST
; /* avoid being saved on stack by gv for next float */
1164 if ((pplan
->sval
->type
.t
& VT_BTYPE
) == VT_LLONG
) {
1166 gv(regmask(pplan
->end
));
1167 pplan
->sval
->r2
= vtop
->r
;
1170 gv(regmask(pplan
->start
));
1171 /* Mark register as used so that gcall_or_jmp use another one
1172 (regs >=4 are free as never used to pass parameters) */
1173 pplan
->sval
->r
= vtop
->r
;
1180 /* second pass to restore registers that were saved on stack by accident.
1181 Maybe redundant after the "lvalue_save" patch in tccgen.c:gv() */
1185 /* Manually free remaining registers since next parameters are loaded
1186 * manually, without the help of gv(int). */
1190 o(0xE8BD0000|todo
); /* pop {todo} */
1191 for(pplan
= plan
->clsplans
[CORE_STRUCT_CLASS
]; pplan
; pplan
= pplan
->prev
) {
1193 pplan
->sval
->r
= pplan
->start
;
1194 /* An SValue can only pin 2 registers at best (r and r2) but a structure
1195 can occupy more than 2 registers. Thus, we need to push on the value
1196 stack some fake parameter to have on SValue for each registers used
1197 by a structure (r2 is not used). */
1198 for (r
= pplan
->start
+ 1; r
<= pplan
->end
; r
++) {
1199 if (todo
& (1 << r
)) {
1207 return nb_extra_sval
;
1210 /* Generate function call. The function address is pushed first, then
1211 all the parameters in call order. This functions pops all the
1212 parameters and the function address. */
1213 void gfunc_call(int nb_args
)
1216 int def_float_abi
= float_abi
;
1223 if (float_abi
== ARM_HARD_FLOAT
) {
1224 variadic
= (vtop
[-nb_args
].type
.ref
->f
.func_type
== FUNC_ELLIPSIS
);
1225 if (variadic
|| floats_in_core_regs(&vtop
[-nb_args
]))
1226 float_abi
= ARM_SOFTFP_FLOAT
;
1229 /* cannot let cpu flags if other instruction are generated. Also avoid leaving
1230 VT_JMP anywhere except on the top of the stack because it would complicate
1231 the code generator. */
1232 r
= vtop
->r
& VT_VALMASK
;
1233 if (r
== VT_CMP
|| (r
& ~1) == VT_JMP
)
1236 args_size
= assign_regs(nb_args
, float_abi
, &plan
, &todo
);
1239 if (args_size
& 7) { /* Stack must be 8 byte aligned at fct call for EABI */
1240 args_size
= (args_size
+ 7) & ~7;
1241 o(0xE24DD004); /* sub sp, sp, #4 */
1245 nb_args
+= copy_params(nb_args
, &plan
, todo
);
1246 tcc_free(plan
.pplans
);
1248 /* Move fct SValue on top as required by gcall_or_jmp */
1252 gadd_sp(args_size
); /* pop all parameters passed on the stack */
1253 #if defined(TCC_ARM_EABI) && defined(TCC_ARM_VFP)
1254 if(float_abi
== ARM_SOFTFP_FLOAT
&& is_float(vtop
->type
.ref
->type
.t
)) {
1255 if((vtop
->type
.ref
->type
.t
& VT_BTYPE
) == VT_FLOAT
) {
1256 o(0xEE000A10); /*vmov s0, r0 */
1258 o(0xEE000B10); /* vmov.32 d0[0], r0 */
1259 o(0xEE201B10); /* vmov.32 d0[1], r1 */
1263 vtop
-= nb_args
+ 1; /* Pop all params and fct address from value stack */
1264 leaffunc
= 0; /* we are calling a function, so we aren't in a leaf function */
1265 float_abi
= def_float_abi
;
1268 /* generate function prolog of type 't' */
1269 void gfunc_prolog(CType
*func_type
)
1272 int n
, nf
, size
, align
, rs
, struct_ret
= 0;
1273 int addr
, pn
, sn
; /* pn=core, sn=stack */
1277 struct avail_regs avregs
= AVAIL_REGS_INITIALIZER
;
1280 sym
= func_type
->ref
;
1281 func_vt
= sym
->type
;
1282 func_var
= (func_type
->ref
->f
.func_type
== FUNC_ELLIPSIS
);
1285 if ((func_vt
.t
& VT_BTYPE
) == VT_STRUCT
&&
1286 !gfunc_sret(&func_vt
, func_var
, &ret_type
, &align
, &rs
))
1290 func_vc
= 12; /* Offset from fp of the place to store the result */
1292 for(sym2
= sym
->next
; sym2
&& (n
< 4 || nf
< 16); sym2
= sym2
->next
) {
1293 size
= type_size(&sym2
->type
, &align
);
1295 if (float_abi
== ARM_HARD_FLOAT
&& !func_var
&&
1296 (is_float(sym2
->type
.t
) || is_hgen_float_aggr(&sym2
->type
))) {
1297 int tmpnf
= assign_vfpreg(&avregs
, align
, size
);
1298 tmpnf
+= (size
+ 3) / 4;
1299 nf
= (tmpnf
> nf
) ? tmpnf
: nf
;
1303 n
+= (size
+ 3) / 4;
1305 o(0xE1A0C00D); /* mov ip,sp */
1314 o(0xE92D0000|((1<<n
)-1)); /* save r0-r4 on stack if needed */
1319 nf
=(nf
+1)&-2; /* nf => HARDFLOAT => EABI */
1320 o(0xED2D0A00|nf
); /* save s0-s15 on stack if needed */
1322 o(0xE92D5800); /* save fp, ip, lr */
1323 o(0xE1A0B00D); /* mov fp, sp */
1324 func_sub_sp_offset
= ind
;
1325 o(0xE1A00000); /* nop, leave space for stack adjustment in epilog */
1328 if (float_abi
== ARM_HARD_FLOAT
) {
1330 avregs
= AVAIL_REGS_INITIALIZER
;
1333 pn
= struct_ret
, sn
= 0;
1334 while ((sym
= sym
->next
)) {
1337 size
= type_size(type
, &align
);
1338 size
= (size
+ 3) >> 2;
1339 align
= (align
+ 3) & ~3;
1341 if (float_abi
== ARM_HARD_FLOAT
&& !func_var
&& (is_float(sym
->type
.t
)
1342 || is_hgen_float_aggr(&sym
->type
))) {
1343 int fpn
= assign_vfpreg(&avregs
, align
, size
<< 2);
1352 pn
= (pn
+ (align
-1)/4) & -(align
/4);
1354 addr
= (nf
+ pn
) * 4;
1361 sn
= (sn
+ (align
-1)/4) & -(align
/4);
1363 addr
= (n
+ nf
+ sn
) * 4;
1366 sym_push(sym
->v
& ~SYM_FIELD
, type
, VT_LOCAL
| lvalue_type(type
->t
),
1374 /* generate function epilog */
1375 void gfunc_epilog(void)
1379 /* Copy float return value to core register if base standard is used and
1380 float computation is made with VFP */
1381 #if defined(TCC_ARM_EABI) && defined(TCC_ARM_VFP)
1382 if ((float_abi
== ARM_SOFTFP_FLOAT
|| func_var
) && is_float(func_vt
.t
)) {
1383 if((func_vt
.t
& VT_BTYPE
) == VT_FLOAT
)
1384 o(0xEE100A10); /* fmrs r0, s0 */
1386 o(0xEE100B10); /* fmrdl r0, d0 */
1387 o(0xEE301B10); /* fmrdh r1, d0 */
1391 o(0xE89BA800); /* restore fp, sp, pc */
1392 diff
= (-loc
+ 3) & -4;
1395 diff
= ((diff
+ 11) & -8) - 4;
1398 x
=stuff_const(0xE24BD000, diff
); /* sub sp,fp,# */
1400 *(uint32_t *)(cur_text_section
->data
+ func_sub_sp_offset
) = x
;
1404 o(0xE59FC004); /* ldr ip,[pc+4] */
1405 o(0xE04BD00C); /* sub sp,fp,ip */
1406 o(0xE1A0F00E); /* mov pc,lr */
1408 *(uint32_t *)(cur_text_section
->data
+ func_sub_sp_offset
) = 0xE1000000|encbranch(func_sub_sp_offset
,addr
,1);
/* Pad the code section with nops; 'bytes' must be a multiple of 4 on ARM.
   NOTE(review): only the alignment guard is visible — the nop-emitting loop
   (original lines after 1416) is missing from this extraction. */
1413 ST_FUNC
void gen_fill_nops(int bytes
)
1416 tcc_error("alignment of code section not multiple of 4");
1423 /* generate a jump to a label */
/* NOTE(review): the function header (original line ~1424, presumably
   'ST_FUNC int gjmp(int t)') and lines 1425-1429 are missing from this
   extraction; only the unconditional-branch emission survives. 0xE0000000
   is combined with the encoded branch offset returned by encbranch(). */
1430 o(0xE0000000|encbranch(r
,t
,1));
1434 /* generate a jump to a fixed address */
1435 void gjmp_addr(int a
)
/* NOTE(review): the body (original lines 1436-1439) is missing from this
   extraction. */
1440 /* generate a test. set 'inv' to invert test. Stack entry is popped */
/* Returns the (possibly extended) forward-jump chain label 't'.
   NOTE(review): lossy extraction — original lines 1442-1445, 1447-1450,
   1454-1455, 1458-1465, 1467, 1469 and 1471-1480 are missing; the branch
   emission and chain patching below lack their enclosing statements. */
1441 int gtst(int inv
, int t
)
/* classify the top-of-stack value: condition flags (VT_CMP), jump chain
   (VT_JMP/VT_JMPI), or something else */
1446 v
= vtop
->r
& VT_VALMASK
;
1449 if (nocode_wanted
) {
1451 } else if (v
== VT_CMP
) {
/* map the comparison token (negated when 'inv' is set) to an ARM
   condition code, then OR in the encoded branch to 't' */
1452 op
=mapcc(inv
?negcc(vtop
->c
.i
):vtop
->c
.i
);
1453 op
|=encbranch(r
,t
,1);
1456 } else if (v
== VT_JMP
|| v
== VT_JMPI
) {
1457 if ((v
& 1) == inv
) {
/* walk the chain of pending forward branches, patching each one */
1466 p
= decbranch(lp
=p
);
1468 x
= (uint32_t *)(cur_text_section
->data
+ lp
);
1470 *x
|= encbranch(lp
,t
,1);
1483 /* generate an integer binary operation */
/* Dispatches on the operation token 'op': add/sub (with and without carry),
   multiply, division via EABI runtime helpers, shifts, and comparisons.
   Operands come from the value stack (vtop); results replace them.
   NOTE(review): lossy extraction — the switch statement's case labels for
   the plain +,-,*,/,%,shift tokens, most braces/else lines, and the final
   comparison handling (original lines between the ones shown) are missing.
   Comments below are scoped to the visible fragments only. */
1484 void gen_opi(int op
)
1487 uint32_t opc
= 0, r
, fr
;
1488 unsigned short retreg
= REG_IRET
;
1496 case TOK_ADDC1
: /* add with carry generation */
1504 case TOK_SUBC1
: /* sub with carry generation */
1508 case TOK_ADDC2
: /* add with carry use */
1512 case TOK_SUBC2
: /* sub with carry use */
/* force both operands into integer registers */
1529 gv2(RC_INT
, RC_INT
);
/* presumably the MUL encoding (bits [7:4] = 1001) — TODO confirm operands */
1533 o(0xE0000090|(intr(r
)<<16)|(intr(r
)<<8)|intr(fr
));
/* signed division/modulo goes through the EABI runtime helper */
1558 func
=TOK___aeabi_idivmod
;
/* unsigned division/modulo helper */
1567 func
=TOK___aeabi_uidivmod
;
1575 gv2(RC_INT
, RC_INT
);
1576 r
=intr(vtop
[-1].r2
=get_reg(RC_INT
));
1578 vtop
[-1].r
=get_reg_ex(RC_INT
,regmask(c
));
/* long-multiply family encoding producing a 64-bit result pair */
1580 o(0xE0800090|(r
<<16)|(intr(vtop
->r
)<<12)|(intr(c
)<<8)|intr(vtop
[1].r
));
/* constant on the left: commute, or turn sub into rsb */
1589 if((vtop
[-1].r
& (VT_VALMASK
| VT_LVAL
| VT_SYM
)) == VT_CONST
) {
1590 if(opc
== 4 || opc
== 5 || opc
== 0xc) {
1592 opc
|=2; // sub -> rsb
/* operand lives in the condition flags or a jump chain: must materialize */
1595 if ((vtop
->r
& VT_VALMASK
) == VT_CMP
||
1596 (vtop
->r
& (VT_VALMASK
& ~1)) == VT_JMP
)
1601 opc
=0xE0000000|(opc
<<20)|(c
<<16);
1602 if((vtop
->r
& (VT_VALMASK
| VT_LVAL
| VT_SYM
)) == VT_CONST
) {
/* try to encode the constant as an immediate data-processing operand */
1604 x
=stuff_const(opc
|0x2000000,vtop
->c
.i
);
1606 r
=intr(vtop
[-1].r
=get_reg_ex(RC_INT
,regmask(vtop
[-1].r
)));
1611 fr
=intr(gv(RC_INT
));
1612 r
=intr(vtop
[-1].r
=get_reg_ex(RC_INT
,two2mask(vtop
->r
,vtop
[-1].r
)));
/* comparison tokens leave their result in the flags (VT_CMP) */
1616 if (op
>= TOK_ULT
&& op
<= TOK_GT
) {
/* shift: mov with shifter operand; opc selects lsl/lsr/asr/ror */
1622 opc
=0xE1A00000|(opc
<<5);
1623 if ((vtop
->r
& VT_VALMASK
) == VT_CMP
||
1624 (vtop
->r
& (VT_VALMASK
& ~1)) == VT_JMP
)
1630 if ((vtop
->r
& (VT_VALMASK
| VT_LVAL
| VT_SYM
)) == VT_CONST
) {
1631 fr
=intr(vtop
[-1].r
=get_reg_ex(RC_INT
,regmask(vtop
[-1].r
)));
/* ARM shift immediates are 5 bits */
1632 c
= vtop
->c
.i
& 0x1f;
1633 o(opc
|(c
<<7)|(fr
<<12));
1635 fr
=intr(gv(RC_INT
));
1636 c
=intr(vtop
[-1].r
=get_reg_ex(RC_INT
,two2mask(vtop
->r
,vtop
[-1].r
)));
/* register-specified shift form */
1637 o(opc
|(c
<<12)|(fr
<<8)|0x10);
/* operations with no inline encoding call a runtime helper */
1642 vpush_global_sym(&func_old_type
, func
);
1649 tcc_error("gen_opi %i unimplemented!",op
);
/* Return nonzero when the value-stack entry vtop[i] is a floating-point
   constant equal to zero (checked per stored type: float, double, or long
   double). NOTE(review): original lines 1655, 1657 and 1663 are missing
   from this extraction — 1657 presumably returns 0 for non-constants. */
1654 static int is_zero(int i
)
/* anything that is not a plain constant cannot be folded */
1656 if((vtop
[i
].r
& (VT_VALMASK
| VT_LVAL
| VT_SYM
)) != VT_CONST
)
1658 if (vtop
[i
].type
.t
== VT_FLOAT
)
1659 return (vtop
[i
].c
.f
== 0.f
);
1660 else if (vtop
[i
].type
.t
== VT_DOUBLE
)
1661 return (vtop
[i
].c
.d
== 0.0);
/* remaining case: long double constant */
1662 return (vtop
[i
].c
.ld
== 0.l
);
1665 /* generate a floating point operation 'v = t1 op t2' instruction. The
1666 * two operands are guaranteed to have the same floating point type */
/* VFP variant (presumably compiled under TCC_ARM_VFP — the guarding #ifdef
   is not visible; a second, FPA variant of gen_opf appears later in this
   file). Builds a VFP data-processing opcode in 'x', selecting single or
   double precision via T2CPR() from the operand type.
   NOTE(review): lossy extraction — the arithmetic-op switch, several
   braces/else lines, and the comparison condition mapping context around
   the visible lines are missing. */
1667 void gen_opf(int op
)
/* base VFP opcode; T2CPR flips the coprocessor field for double precision */
1671 x
=0xEE000A00|T2CPR(vtop
->type
.t
);
1689 x
|=0x810000; /* fsubX -> fnegX */
/* anything outside the comparison token range is not a known fp op */
1702 if(op
< TOK_ULT
|| op
> TOK_GT
) {
1703 tcc_error("unknown fp op %x!",op
);
/* swap operand order for these comparisons (operands reversed) */
1709 case TOK_LT
: op
=TOK_GT
; break;
1710 case TOK_GE
: op
=TOK_ULE
; break;
1711 case TOK_LE
: op
=TOK_GE
; break;
1712 case TOK_GT
: op
=TOK_ULT
; break;
1715 x
|=0xB40040; /* fcmpX */
/* use the exception-raising compare except for pure (in)equality */
1716 if(op
!=TOK_EQ
&& op
!=TOK_NE
)
1717 x
|=0x80; /* fcmpX -> fcmpeX */
/* compare against zero when one operand is a zero constant */
1720 o(x
|0x10000|(vfpr(gv(RC_FLOAT
))<<12)); /* fcmp(e)X -> fcmp(e)zX */
1722 x
|=vfpr(gv(RC_FLOAT
));
1724 o(x
|(vfpr(gv(RC_FLOAT
))<<12));
/* transfer VFP status flags to the ARM CPSR */
1727 o(0xEEF1FA10); /* fmstat */
/* remap tokens after fmstat so NaN-unordered results behave as intended */
1730 case TOK_LE
: op
=TOK_ULE
; break;
1731 case TOK_LT
: op
=TOK_ULT
; break;
1732 case TOK_UGE
: op
=TOK_GE
; break;
1733 case TOK_UGT
: op
=TOK_GT
; break;
1750 vtop
->r
=get_reg_ex(RC_FLOAT
,r
);
/* emit the arithmetic op with the destination register in bits [15:12] */
1753 o(x
|(vfpr(vtop
->r
)<<12));
/* FPA helper: if vtop is a floating constant representable as an FPA
   immediate, return its encoding; otherwise 0 (original return lines are
   missing from this extraction). Note the K&R-style empty parameter list —
   modern code would spell it (void). */
1757 static uint32_t is_fconst()
1761 if((vtop
->r
& (VT_VALMASK
| VT_LVAL
| VT_SYM
)) != VT_CONST
)
1763 if (vtop
->type
.t
== VT_FLOAT
)
1765 else if (vtop
->type
.t
== VT_DOUBLE
)
1795 /* generate a floating point operation 'v = t1 op t2' instruction. The
1796 two operands are guaranteed to have the same floating point type */
/* FPA (legacy floating-point accelerator) variant of gen_opf — presumably
   the #else counterpart of the VFP variant earlier in this file (the
   guarding preprocessor lines are not visible in this extraction).
   Builds an FPA opcode in 'x', using constant-immediate forms when
   is_fconst() reports an encodable constant (c1/c2).
   NOTE(review): lossy extraction — the operator switch, precision
   selection, and most braces/else lines are missing. */
1797 void gen_opf(int op
)
1799 uint32_t x
, r
, r2
, c1
, c2
;
1800 //fputs("gen_opf\n",stderr);
1806 #if LDOUBLE_SIZE == 8
1807 if ((vtop
->type
.t
& VT_BTYPE
) != VT_FLOAT
)
1810 if ((vtop
->type
.t
& VT_BTYPE
) == VT_DOUBLE
)
1812 else if ((vtop
->type
.t
& VT_BTYPE
) == VT_LDOUBLE
)
1823 r
=fpr(gv(RC_FLOAT
));
1830 r2
=fpr(gv(RC_FLOAT
));
1839 r
=fpr(gv(RC_FLOAT
));
/* small constant (<= 0xf) can be used as an immediate operand */
1841 } else if(c1
&& c1
<=0xf) {
1844 r
=fpr(gv(RC_FLOAT
));
1849 r
=fpr(gv(RC_FLOAT
));
1851 r2
=fpr(gv(RC_FLOAT
));
1860 r
=fpr(gv(RC_FLOAT
));
1865 r2
=fpr(gv(RC_FLOAT
));
1873 r
=fpr(gv(RC_FLOAT
));
1875 } else if(c1
&& c1
<=0xf) {
1878 r
=fpr(gv(RC_FLOAT
));
1883 r
=fpr(gv(RC_FLOAT
));
1885 r2
=fpr(gv(RC_FLOAT
));
/* comparison tokens: emit a compare and leave the result in the flags */
1889 if(op
>= TOK_ULT
&& op
<= TOK_GT
) {
1890 x
|=0xd0f110; // cmfe
1891 /* bug (intention?) in Linux FPU emulator
1892 doesn't set carry if equal */
1898 tcc_error("unsigned comparison on floats?");
1904 op
=TOK_ULE
; /* correct in unordered case only if AC bit in FPSR set */
1908 x
&=~0x400000; // cmfe -> cmf
1930 r
=fpr(gv(RC_FLOAT
));
1937 r2
=fpr(gv(RC_FLOAT
));
1939 vtop
[-1].r
= VT_CMP
;
1942 tcc_error("unknown fp op %x!",op
);
/* comparisons keep VT_CMP; arithmetic results need a float register */
1946 if(vtop
[-1].r
== VT_CMP
)
1952 vtop
[-1].r
=get_reg_ex(RC_FLOAT
,two2mask(vtop
[-1].r
,c1
));
/* final emit: Rn in bits [19:16], Rd in [15:12], Rm/immediate in low bits */
1956 o(x
|(r
<<16)|(c1
<<12)|r2
);
1960 /* convert integers to fp 't' type. Must handle 'int', 'unsigned int'
1961 and 'long long' cases. */
/* 32-bit integers are converted inline (VFP fsito/fuito, or FPA flt plus a
   magic-constant fixup for unsigned); 64-bit integers go through libgcc
   __floatdi* / __floatundi* runtime helpers selected by target type.
   NOTE(review): lossy extraction — locals, several braces/else lines, and
   (presumably) #else/#endif around original lines 2021/2023 are missing. */
1962 ST_FUNC
void gen_cvt_itof1(int t
)
1966 bt
=vtop
->type
.t
& VT_BTYPE
;
1967 if(bt
== VT_INT
|| bt
== VT_SHORT
|| bt
== VT_BYTE
) {
/* VFP path: move the int into a VFP single register, then convert */
1973 r2
=vfpr(vtop
->r
=get_reg(RC_FLOAT
));
1974 o(0xEE000A10|(r
<<12)|(r2
<<16)); /* fmsr */
1976 if(!(vtop
->type
.t
& VT_UNSIGNED
))
1977 r2
|=0x80; /* fuitoX -> fsituX */
1978 o(0xEEB80A40|r2
|T2CPR(t
)); /* fYitoX*/
/* FPA path */
1980 r2
=fpr(vtop
->r
=get_reg(RC_FLOAT
));
1981 if((t
& VT_BTYPE
) != VT_FLOAT
)
1982 dsize
=0x80; /* flts -> fltd */
1983 o(0xEE000110|dsize
|(r2
<<16)|(r
<<12)); /* flts */
/* unsigned int on FPA: flt is signed, so negative inputs need +2^32 */
1984 if((vtop
->type
.t
& (VT_UNSIGNED
|VT_BTYPE
)) == (VT_UNSIGNED
|VT_INT
)) {
1986 o(0xE3500000|(r
<<12)); /* cmp */
1987 r
=fpr(get_reg(RC_FLOAT
));
/* reuse a previously-emitted 4294967296.0f literal when in reach */
1988 if(last_itod_magic
) {
1989 off
=ind
+8-last_itod_magic
;
1994 o(0xBD1F0100|(r
<<12)|off
); /* ldflts */
1996 o(0xEA000000); /* b */
1997 last_itod_magic
=ind
;
1998 o(0x4F800000); /* 4294967296.0f */
2000 o(0xBE000100|dsize
|(r2
<<16)|(r2
<<12)|r
); /* adflt */
2004 } else if(bt
== VT_LLONG
) {
2006 CType
*func_type
= 0;
/* choose the helper whose name encodes signedness and the target type */
2007 if((t
& VT_BTYPE
) == VT_FLOAT
) {
2008 func_type
= &func_float_type
;
2009 if(vtop
->type
.t
& VT_UNSIGNED
)
2010 func
=TOK___floatundisf
;
2012 func
=TOK___floatdisf
;
2013 #if LDOUBLE_SIZE != 8
2014 } else if((t
& VT_BTYPE
) == VT_LDOUBLE
) {
2015 func_type
= &func_ldouble_type
;
2016 if(vtop
->type
.t
& VT_UNSIGNED
)
2017 func
=TOK___floatundixf
;
2019 func
=TOK___floatdixf
;
2020 } else if((t
& VT_BTYPE
) == VT_DOUBLE
) {
/* presumably the #else branch when long double aliases double */
2022 } else if((t
& VT_BTYPE
) == VT_DOUBLE
|| (t
& VT_BTYPE
) == VT_LDOUBLE
) {
2024 func_type
= &func_double_type
;
2025 if(vtop
->type
.t
& VT_UNSIGNED
)
2026 func
=TOK___floatundidf
;
2028 func
=TOK___floatdidf
;
2031 vpush_global_sym(func_type
, func
);
2039 tcc_error("unimplemented gen_cvt_itof %x!",vtop
->type
.t
);
2042 /* convert fp to int 't' type */
/* Converts inline where the FPU can (VFP ftoXiz / FPA fix), otherwise via
   libgcc __fixuns*fsi / __fixuns*dfsi helpers for unsigned, and __fixXfdi
   helpers for long long (names below). 'r2' first holds the source float
   base type, later the destination integer register.
   NOTE(review): lossy extraction — locals, braces/else lines and the
   presumed #else/#endif counterparts of the #if at original 2062/2080 are
   missing. */
2043 void gen_cvt_ftoi(int t
)
2049 r2
=vtop
->type
.t
& VT_BTYPE
;
/* VFP path: convert in place, then move to an integer register */
2052 r
=vfpr(gv(RC_FLOAT
));
2054 o(0xEEBC0AC0|(r
<<12)|r
|T2CPR(r2
)|u
); /* ftoXizY */
2055 r2
=intr(vtop
->r
=get_reg(RC_INT
));
2056 o(0xEE100A10|(r
<<16)|(r2
<<12));
/* unsigned 32-bit results need runtime helpers */
2061 func
=TOK___fixunssfsi
;
2062 #if LDOUBLE_SIZE != 8
2063 else if(r2
== VT_LDOUBLE
)
2064 func
=TOK___fixunsxfsi
;
2065 else if(r2
== VT_DOUBLE
)
/* presumably the #else branch when long double aliases double */
2067 else if(r2
== VT_LDOUBLE
|| r2
== VT_DOUBLE
)
2069 func
=TOK___fixunsdfsi
;
/* FPA path: fix instruction moves the truncated value to a core register */
2071 r
=fpr(gv(RC_FLOAT
));
2072 r2
=intr(vtop
->r
=get_reg(RC_INT
));
2073 o(0xEE100170|(r2
<<12)|r
);
2077 } else if(t
== VT_LLONG
) { // unsigned handled in gen_cvt_ftoi1
2080 #if LDOUBLE_SIZE != 8
2081 else if(r2
== VT_LDOUBLE
)
2083 else if(r2
== VT_DOUBLE
)
2085 else if(r2
== VT_LDOUBLE
|| r2
== VT_DOUBLE
)
2090 vpush_global_sym(&func_old_type
, func
);
/* 64-bit result comes back in the r0/r1 pair */
2095 vtop
->r2
= REG_LRET
;
2099 tcc_error("unimplemented gen_cvt_ftoi!");
2102 /* convert from one floating point type to another */
/* On VFP a conversion instruction is only needed when crossing the
   single/double boundary; same-width conversions are no-ops.
   NOTE(review): the #ifdef TCC_ARM_VFP / #else structure and the FPA
   branch's gv() call (original lines 2104-2105, 2109-2110, 2112-2115) are
   missing from this extraction. */
2103 void gen_cvt_ftof(int t
)
/* precision of source differs from precision of destination? */
2106 if(((vtop
->type
.t
& VT_BTYPE
) == VT_FLOAT
) != ((t
& VT_BTYPE
) == VT_FLOAT
)) {
2107 uint32_t r
= vfpr(gv(RC_FLOAT
));
/* fcvt between single and double, direction chosen by source type */
2108 o(0xEEB70AC0|(r
<<12)|r
|T2CPR(vtop
->type
.t
));
2111 /* all we have to do on i386 and FPA ARM is to put the float in a register */
2116 /* computed goto support */
2123 /* Save the stack pointer onto the stack and return the location of its address */
/* NOTE(review): only the SValue.r setup is visible — the SValue declaration,
   type/offset assignment and the store of the SP register (original lines
   2125-2126, 2128-2130) are missing from this extraction. */
2124 ST_FUNC
void gen_vla_sp_save(int addr
) {
/* the save slot is an lvalue at a frame-pointer-relative offset */
2127 v
.r
= VT_LOCAL
| VT_LVAL
;
2132 /* Restore the SP from a location on the stack */
/* NOTE(review): only the SValue.r setup is visible — the SValue declaration
   and the load back into SP (original lines 2134-2135, 2137-2139) are
   missing from this extraction. */
2133 ST_FUNC
void gen_vla_sp_restore(int addr
) {
/* the saved SP lives in an lvalue at a frame-pointer-relative offset */
2136 v
.r
= VT_LOCAL
| VT_LVAL
;
2141 /* Subtract from the stack pointer, and push the resulting value onto the stack */
/* Pops the allocation size (an int on the value stack), subtracts it from
   SP, and re-aligns SP downward to 'align' (which must be a power of two).
   NOTE(review): original lines 2145-2151 and 2155-2157 (the intermediate
   SP update and stack bookkeeping) are missing from this extraction. */
2142 ST_FUNC
void gen_vla_alloc(CType
*type
, int align
) {
2143 int r
= intr(gv(RC_INT
));
2144 o(0xE04D0000|(r
<<12)|r
); /* sub r, sp, r */
/* power-of-two check: a valid alignment has no bits in common with align-1 */
2152 if (align
& (align
- 1))
2153 tcc_error("alignment is not a power of 2: %i", align
);
2154 o(stuff_const(0xE3C0D000|(r
<<16), align
- 1)); /* bic sp, r, #align-1 */
2158 /* end of ARM code generator */
2159 /*************************************************************/
2161 /*************************************************************/