2 * x86-64 code generator for TCC
4 * Copyright (c) 2008 Shinichiro Hamaji
6 * Based on i386-gen.c by Fabrice Bellard
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 #ifdef TARGET_DEFS_ONLY
25 /* number of available registers */
29 /* a register can belong to several classes. The classes must be
30 sorted from more general to more precise (see gv2() code which does
31 assumptions on it). */
32 #define RC_INT 0x0001 /* generic integer register */
33 #define RC_FLOAT 0x0002 /* generic float register */
41 #define RC_XMM0 0x0020
42 #define RC_ST0 0x0040 /* only for long double */
43 #define RC_IRET RC_RAX /* function return: integer register */
44 #define RC_LRET RC_RDX /* function return: second integer register */
45 #define RC_FRET RC_XMM0 /* function return: float register */
47 /* pretty names for the registers */
66 #define REX_BASE(reg) (((reg) >> 3) & 1)
67 #define REG_VALUE(reg) ((reg) & 7)
69 /* return registers for function */
70 #define REG_IRET TREG_RAX /* single word int return register */
71 #define REG_LRET TREG_RDX /* second word return register (for long long) */
72 #define REG_FRET TREG_XMM0 /* float return register */
74 /* defined if function parameters must be evaluated in reverse order */
75 #define INVERT_FUNC_PARAMS
77 /* pointer size, in bytes */
80 /* long double size and alignment, in bytes */
81 #define LDOUBLE_SIZE 16
82 #define LDOUBLE_ALIGN 8
83 /* maximum alignment (for aligned attribute support) */
86 ST_FUNC
void gen_opl(int op
);
87 ST_FUNC
void gen_le64(int64_t c
);
89 /******************************************************/
92 #define EM_TCC_TARGET EM_X86_64
94 /* relocation type for 32 bit data relocation */
95 #define R_DATA_32 R_X86_64_32
96 #define R_DATA_PTR R_X86_64_64
97 #define R_JMP_SLOT R_X86_64_JUMP_SLOT
98 #define R_COPY R_X86_64_COPY
100 #define ELF_START_ADDR 0x08048000
101 #define ELF_PAGE_SIZE 0x1000
103 /******************************************************/
104 #else /* ! TARGET_DEFS_ONLY */
105 /******************************************************/
109 ST_DATA
const int reg_classes
[] = {
110 /* eax */ RC_INT
| RC_RAX
,
111 /* ecx */ RC_INT
| RC_RCX
,
112 /* edx */ RC_INT
| RC_RDX
,
113 /* xmm0 */ RC_FLOAT
| RC_XMM0
,
124 static unsigned long func_sub_sp_offset
;
125 static int func_ret_sub
;
127 /* XXX: make it faster ? */
132 if (ind1
> cur_text_section
->data_allocated
)
133 section_realloc(cur_text_section
, ind1
);
134 cur_text_section
->data
[ind
] = c
;
138 void o(unsigned int c
)
160 void gen_le64(int64_t c
)
172 void orex(int ll
, int r
, int r2
, int b
)
174 if ((r
& VT_VALMASK
) >= VT_CONST
)
176 if ((r2
& VT_VALMASK
) >= VT_CONST
)
178 if (ll
|| REX_BASE(r
) || REX_BASE(r2
))
179 o(0x40 | REX_BASE(r
) | (REX_BASE(r2
) << 2) | (ll
<< 3));
183 /* output a symbol and patch all calls to it */
184 void gsym_addr(int t
, int a
)
188 ptr
= (int *)(cur_text_section
->data
+ t
);
189 n
= *ptr
; /* next value */
200 /* psym is used to put an instruction with a data field which is a
201 reference to a symbol. It is in fact the same as oad ! */
204 static int is64_type(int t
)
206 return ((t
& VT_BTYPE
) == VT_PTR
||
207 (t
& VT_BTYPE
) == VT_FUNC
||
208 (t
& VT_BTYPE
) == VT_LLONG
);
211 static int is_sse_float(int t
) {
214 return bt
== VT_DOUBLE
|| bt
== VT_FLOAT
;
218 /* instruction + 4 bytes data. Return the address of the data */
219 ST_FUNC
int oad(int c
, int s
)
225 if (ind1
> cur_text_section
->data_allocated
)
226 section_realloc(cur_text_section
, ind1
);
227 *(int *)(cur_text_section
->data
+ ind
) = s
;
233 ST_FUNC
void gen_addr32(int r
, Sym
*sym
, int c
)
236 greloc(cur_text_section
, sym
, ind
, R_X86_64_32
);
240 /* output constant with relocation if 'r & VT_SYM' is true */
241 ST_FUNC
void gen_addr64(int r
, Sym
*sym
, int64_t c
)
244 greloc(cur_text_section
, sym
, ind
, R_X86_64_64
);
248 /* output constant with relocation if 'r & VT_SYM' is true */
249 ST_FUNC
void gen_addrpc32(int r
, Sym
*sym
, int c
)
252 greloc(cur_text_section
, sym
, ind
, R_X86_64_PC32
);
256 /* output got address with relocation */
257 static void gen_gotpcrel(int r
, Sym
*sym
, int c
)
259 #ifndef TCC_TARGET_PE
262 greloc(cur_text_section
, sym
, ind
, R_X86_64_GOTPCREL
);
263 sr
= cur_text_section
->reloc
;
264 rel
= (ElfW(Rela
) *)(sr
->data
+ sr
->data_offset
- sizeof(ElfW(Rela
)));
267 printf("picpic: %s %x %x | %02x %02x %02x\n", get_tok_str(sym
->v
, NULL
), c
, r
,
268 cur_text_section
->data
[ind
-3],
269 cur_text_section
->data
[ind
-2],
270 cur_text_section
->data
[ind
-1]
272 greloc(cur_text_section
, sym
, ind
, R_X86_64_PC32
);
276 /* we use add c, %xxx for displacement */
278 o(0xc0 + REG_VALUE(r
));
283 static void gen_modrm_impl(int op_reg
, int r
, Sym
*sym
, int c
, int is_got
)
285 op_reg
= REG_VALUE(op_reg
) << 3;
286 if ((r
& VT_VALMASK
) == VT_CONST
) {
287 /* constant memory reference */
290 gen_gotpcrel(r
, sym
, c
);
292 gen_addrpc32(r
, sym
, c
);
294 } else if ((r
& VT_VALMASK
) == VT_LOCAL
) {
295 /* currently, we use only ebp as base */
297 /* short reference */
301 oad(0x85 | op_reg
, c
);
303 } else if ((r
& VT_VALMASK
) >= TREG_MEM
) {
305 g(0x80 | op_reg
| REG_VALUE(r
));
308 g(0x00 | op_reg
| REG_VALUE(r
));
311 g(0x00 | op_reg
| REG_VALUE(r
));
315 /* generate a modrm reference. 'op_reg' contains the additional 3
317 static void gen_modrm(int op_reg
, int r
, Sym
*sym
, int c
)
319 gen_modrm_impl(op_reg
, r
, sym
, c
, 0);
322 /* generate a modrm reference. 'op_reg' contains the additional 3
324 static void gen_modrm64(int opcode
, int op_reg
, int r
, Sym
*sym
, int c
)
327 is_got
= (op_reg
& TREG_MEM
) && !(sym
->type
.t
& VT_STATIC
);
328 orex(1, r
, op_reg
, opcode
);
329 gen_modrm_impl(op_reg
, r
, sym
, c
, is_got
);
333 /* load 'r' from value 'sv' */
334 void load(int r
, SValue
*sv
)
336 int v
, t
, ft
, fc
, fr
;
341 sv
= pe_getimport(sv
, &v2
);
348 #ifndef TCC_TARGET_PE
349 /* we use indirect access via got */
350 if ((fr
& VT_VALMASK
) == VT_CONST
&& (fr
& VT_SYM
) &&
351 (fr
& VT_LVAL
) && !(sv
->sym
->type
.t
& VT_STATIC
)) {
352 /* use the result register as a temporary register */
353 int tr
= r
| TREG_MEM
;
355 /* we cannot use float registers as a temporary register */
356 tr
= get_reg(RC_INT
) | TREG_MEM
;
358 gen_modrm64(0x8b, tr
, fr
, sv
->sym
, 0);
360 /* load from the temporary register */
368 if (v
== VT_LLOCAL
) {
370 v1
.r
= VT_LOCAL
| VT_LVAL
;
373 if (!(reg_classes
[fr
] & RC_INT
))
374 fr
= get_reg(RC_INT
);
378 if ((ft
& VT_BTYPE
) == VT_FLOAT
) {
379 b
= 0x6e0f66, r
= 0; /* movd */
380 } else if ((ft
& VT_BTYPE
) == VT_DOUBLE
) {
381 b
= 0x7e0ff3, r
= 0; /* movq */
382 } else if ((ft
& VT_BTYPE
) == VT_LDOUBLE
) {
383 b
= 0xdb, r
= 5; /* fldt */
384 } else if ((ft
& VT_TYPE
) == VT_BYTE
) {
385 b
= 0xbe0f; /* movsbl */
386 } else if ((ft
& VT_TYPE
) == (VT_BYTE
| VT_UNSIGNED
)) {
387 b
= 0xb60f; /* movzbl */
388 } else if ((ft
& VT_TYPE
) == VT_SHORT
) {
389 b
= 0xbf0f; /* movswl */
390 } else if ((ft
& VT_TYPE
) == (VT_SHORT
| VT_UNSIGNED
)) {
391 b
= 0xb70f; /* movzwl */
397 gen_modrm64(b
, r
, fr
, sv
->sym
, fc
);
400 gen_modrm(r
, fr
, sv
->sym
, fc
);
407 o(0x05 + REG_VALUE(r
) * 8); /* lea xx(%rip), r */
408 gen_addrpc32(fr
, sv
->sym
, fc
);
410 if (sv
->sym
->type
.t
& VT_STATIC
) {
412 o(0x05 + REG_VALUE(r
) * 8); /* lea xx(%rip), r */
413 gen_addrpc32(fr
, sv
->sym
, fc
);
416 o(0x05 + REG_VALUE(r
) * 8); /* mov xx(%rip), r */
417 gen_gotpcrel(r
, sv
->sym
, fc
);
420 } else if (is64_type(ft
)) {
421 orex(1,r
,0, 0xb8 + REG_VALUE(r
)); /* mov $xx, r */
424 orex(0,r
,0, 0xb8 + REG_VALUE(r
)); /* mov $xx, r */
427 } else if (v
== VT_LOCAL
) {
428 orex(1,0,r
,0x8d); /* lea xxx(%ebp), r */
429 gen_modrm(r
, VT_LOCAL
, sv
->sym
, fc
);
430 } else if (v
== VT_CMP
) {
432 if ((fc
& ~0x100) != TOK_NE
)
433 oad(0xb8 + REG_VALUE(r
), 0); /* mov $0, r */
435 oad(0xb8 + REG_VALUE(r
), 1); /* mov $1, r */
438 /* This was a float compare. If the parity bit is
439 set the result was unordered, meaning false for everything
440 except TOK_NE, and true for TOK_NE. */
442 o(0x037a + (REX_BASE(r
) << 8));
444 orex(0,r
,0, 0x0f); /* setxx %br */
446 o(0xc0 + REG_VALUE(r
));
447 } else if (v
== VT_JMP
|| v
== VT_JMPI
) {
450 oad(0xb8 + REG_VALUE(r
), t
); /* mov $1, r */
451 o(0x05eb + (REX_BASE(r
) << 8)); /* jmp after */
454 oad(0xb8 + REG_VALUE(r
), t
^ 1); /* mov $0, r */
456 if (r
== TREG_XMM0
) {
457 assert(v
== TREG_ST0
);
458 /* gen_cvt_ftof(VT_DOUBLE); */
459 o(0xf0245cdd); /* fstpl -0x10(%rsp) */
460 /* movsd -0x10(%rsp),%xmm0 */
463 } else if (r
== TREG_ST0
) {
464 assert(v
== TREG_XMM0
);
465 /* gen_cvt_ftof(VT_LDOUBLE); */
466 /* movsd %xmm0,-0x10(%rsp) */
469 o(0xf02444dd); /* fldl -0x10(%rsp) */
472 o(0xc0 + REG_VALUE(r
) + REG_VALUE(v
) * 8); /* mov v, r */
478 /* store register 'r' in lvalue 'v' */
479 void store(int r
, SValue
*v
)
483 /* store the REX prefix in this variable when PIC is enabled */
488 v
= pe_getimport(v
, &v2
);
493 fr
= v
->r
& VT_VALMASK
;
496 #ifndef TCC_TARGET_PE
497 /* we need to access the variable via got */
498 if (fr
== VT_CONST
&& (v
->r
& VT_SYM
)) {
499 /* mov xx(%rip), %r11 */
501 gen_gotpcrel(TREG_R11
, v
->sym
, v
->c
.ul
);
502 pic
= is64_type(bt
) ? 0x49 : 0x41;
506 /* XXX: incorrect if float reg to reg */
507 if (bt
== VT_FLOAT
) {
510 o(0x7e0f); /* movd */
512 } else if (bt
== VT_DOUBLE
) {
515 o(0xd60f); /* movq */
517 } else if (bt
== VT_LDOUBLE
) {
518 o(0xc0d9); /* fld %st(0) */
526 if (bt
== VT_BYTE
|| bt
== VT_BOOL
)
528 else if (is64_type(bt
))
534 /* xxx r, (%r11) where xxx is mov, movq, fld, or etc */
539 if (fr
== VT_CONST
|| fr
== VT_LOCAL
|| (v
->r
& VT_LVAL
)) {
540 gen_modrm64(op64
, r
, v
->r
, v
->sym
, fc
);
541 } else if (fr
!= r
) {
542 /* XXX: do we ever actually reach this case? */
544 o(0xc0 + fr
+ r
* 8); /* mov r, fr */
547 if (fr
== VT_CONST
|| fr
== VT_LOCAL
|| (v
->r
& VT_LVAL
)) {
548 gen_modrm(r
, v
->r
, v
->sym
, fc
);
549 } else if (fr
!= r
) {
550 /* XXX: do we ever actually reach this case? */
552 o(0xc0 + fr
+ r
* 8); /* mov r, fr */
557 /* 'is_jmp' is '1' if it is a jump */
558 static void gcall_or_jmp(int is_jmp
)
561 if ((vtop
->r
& (VT_VALMASK
| VT_LVAL
)) == VT_CONST
) {
563 if (vtop
->r
& VT_SYM
) {
564 /* relocation case */
565 greloc(cur_text_section
, vtop
->sym
,
566 ind
+ 1, R_X86_64_PC32
);
568 /* put an empty PC32 relocation */
569 put_elf_reloc(symtab_section
, cur_text_section
,
570 ind
+ 1, R_X86_64_PC32
, 0);
572 oad(0xe8 + is_jmp
, vtop
->c
.ul
- 4); /* call/jmp im */
574 /* otherwise, indirect call */
578 o(0xff); /* call/jmp *r */
579 o(0xd0 + REG_VALUE(r
) + (is_jmp
<< 4));
586 static const uint8_t arg_regs
[] = {
587 TREG_RCX
, TREG_RDX
, TREG_R8
, TREG_R9
590 static int func_scratch
;
592 /* Generate function call. The function address is pushed first, then
593 all the parameters in call order. This functions pops all the
594 parameters and the function address. */
596 void gen_offs_sp(int b
, int r
, int d
)
598 orex(1,0,r
& 0x100 ? 0 : r
, b
);
600 o(0x2444 | (REG_VALUE(r
) << 3));
603 o(0x2484 | (REG_VALUE(r
) << 3));
608 void gfunc_call(int nb_args
)
610 int size
, align
, r
, args_size
, i
, d
, j
, bt
, struct_size
;
611 int nb_reg_args
, gen_reg
;
613 nb_reg_args
= nb_args
;
614 args_size
= (nb_reg_args
< REGN
? REGN
: nb_reg_args
) * PTR_SIZE
;
616 /* for struct arguments, we need to call memcpy and the function
617 call breaks register passing arguments we are preparing.
618 So, we process arguments which will be passed by stack first. */
619 struct_size
= args_size
;
620 for(i
= 0; i
< nb_args
; i
++) {
621 SValue
*sv
= &vtop
[-i
];
622 bt
= (sv
->type
.t
& VT_BTYPE
);
623 if (bt
== VT_STRUCT
) {
624 size
= type_size(&sv
->type
, &align
);
625 /* align to stack align size */
626 size
= (size
+ 15) & ~15;
627 /* generate structure store */
629 gen_offs_sp(0x8d, r
, struct_size
);
632 /* generate memcpy call */
633 vset(&sv
->type
, r
| VT_LVAL
, 0);
638 } else if (bt
== VT_LDOUBLE
) {
641 gen_offs_sp(0xdb, 0x107, struct_size
);
647 if (func_scratch
< struct_size
)
648 func_scratch
= struct_size
;
650 for (i
= 0; i
< REGN
; ++i
)
651 save_reg(arg_regs
[i
]);
654 gen_reg
= nb_reg_args
;
655 struct_size
= args_size
;
657 for(i
= 0; i
< nb_args
; i
++) {
658 bt
= (vtop
->type
.t
& VT_BTYPE
);
660 if (bt
== VT_STRUCT
|| bt
== VT_LDOUBLE
) {
661 if (bt
== VT_LDOUBLE
)
664 size
= type_size(&vtop
->type
, &align
);
665 /* align to stack align size */
666 size
= (size
+ 15) & ~15;
670 gen_offs_sp(0x8d, d
, struct_size
);
671 gen_offs_sp(0x89, d
, j
*8);
674 gen_offs_sp(0x8d, d
, struct_size
);
678 } else if (is_sse_float(vtop
->type
.t
)) {
679 gv(RC_FLOAT
); /* only one float register */
682 /* movq %xmm0, j*8(%rsp) */
683 gen_offs_sp(0xd60f66, 0x100, j
*8);
685 /* movaps %xmm0, %xmmN */
689 /* mov %xmm0, %rxx */
692 o(0xc0 + REG_VALUE(d
));
698 gen_offs_sp(0x89, r
, j
*8);
702 gv(reg_classes
[d
] & ~RC_INT
);
707 o(0xc0 + REG_VALUE(d
) + REG_VALUE(r
) * 8);
721 #define FUNC_PROLOG_SIZE 11
723 /* generate function prolog of type 't' */
724 void gfunc_prolog(CType
*func_type
)
726 int addr
, reg_param_index
, bt
;
735 ind
+= FUNC_PROLOG_SIZE
;
736 func_sub_sp_offset
= ind
;
739 sym
= func_type
->ref
;
741 /* if the function returns a structure, then add an
742 implicit pointer parameter */
744 if ((func_vt
.t
& VT_BTYPE
) == VT_STRUCT
) {
745 gen_modrm64(0x89, arg_regs
[reg_param_index
], VT_LOCAL
, NULL
, addr
);
750 /* define parameters */
751 while ((sym
= sym
->next
) != NULL
) {
753 bt
= type
->t
& VT_BTYPE
;
754 if (reg_param_index
< REGN
) {
755 /* save arguments passed by register */
756 gen_modrm64(0x89, arg_regs
[reg_param_index
], VT_LOCAL
, NULL
, addr
);
758 if (bt
== VT_STRUCT
|| bt
== VT_LDOUBLE
) {
759 sym_push(sym
->v
& ~SYM_FIELD
, type
, VT_LOCAL
| VT_LVAL
| VT_REF
, addr
);
761 sym_push(sym
->v
& ~SYM_FIELD
, type
, VT_LOCAL
| VT_LVAL
, addr
);
767 while (reg_param_index
< REGN
) {
768 if (func_type
->ref
->c
== FUNC_ELLIPSIS
)
769 gen_modrm64(0x89, arg_regs
[reg_param_index
], VT_LOCAL
, NULL
, addr
);
775 /* generate function epilog */
776 void gfunc_epilog(void)
781 if (func_ret_sub
== 0) {
786 g(func_ret_sub
>> 8);
790 ind
= func_sub_sp_offset
- FUNC_PROLOG_SIZE
;
791 /* align local size to word & save local variables */
792 v
= (func_scratch
+ -loc
+ 15) & -16;
795 Sym
*sym
= external_global_sym(TOK___chkstk
, &func_old_type
, 0);
796 oad(0xb8, v
); /* mov stacksize, %eax */
797 oad(0xe8, -4); /* call __chkstk, (does the stackframe too) */
798 greloc(cur_text_section
, sym
, ind
-4, R_X86_64_PC32
);
799 o(0x90); /* fill for FUNC_PROLOG_SIZE = 11 bytes */
801 o(0xe5894855); /* push %rbp, mov %rsp, %rbp */
802 o(0xec8148); /* sub rsp, stacksize */
806 cur_text_section
->data_offset
= saved_ind
;
807 pe_add_unwind_data(ind
, saved_ind
, v
);
808 ind
= cur_text_section
->data_offset
;
813 static void gadd_sp(int val
)
815 if (val
== (char)val
) {
819 oad(0xc48148, val
); /* add $xxx, %rsp */
824 static const uint8_t arg_regs
[REGN
] = {
825 TREG_RDI
, TREG_RSI
, TREG_RDX
, TREG_RCX
, TREG_R8
, TREG_R9
828 /* Generate function call. The function address is pushed first, then
829 all the parameters in call order. This functions pops all the
830 parameters and the function address. */
831 void gfunc_call(int nb_args
)
833 int size
, align
, r
, args_size
, i
;
836 int sse_reg
, gen_reg
;
838 /* calculate the number of integer/float arguments */
840 for(i
= 0; i
< nb_args
; i
++) {
841 if ((vtop
[-i
].type
.t
& VT_BTYPE
) == VT_STRUCT
) {
842 args_size
+= type_size(&vtop
[-i
].type
, &align
);
843 args_size
= (args_size
+ 7) & ~7;
844 } else if ((vtop
[-i
].type
.t
& VT_BTYPE
) == VT_LDOUBLE
) {
846 } else if (is_sse_float(vtop
[-i
].type
.t
)) {
848 if (nb_sse_args
> 8) args_size
+= 8;
851 if (nb_reg_args
> REGN
) args_size
+= 8;
855 /* for struct arguments, we need to call memcpy and the function
856 call breaks register passing arguments we are preparing.
857 So, we process arguments which will be passed by stack first. */
858 gen_reg
= nb_reg_args
;
859 sse_reg
= nb_sse_args
;
861 /* adjust stack to align SSE boundary */
862 if (args_size
&= 15) {
863 /* fetch cpu flag before the following sub will change the value */
864 if (vtop
>= vstack
&& (vtop
->r
& VT_VALMASK
) == VT_CMP
)
867 args_size
= 16 - args_size
;
869 oad(0xec81, args_size
); /* sub $xxx, %rsp */
872 for(i
= 0; i
< nb_args
; i
++) {
873 /* Swap argument to top, it will possibly be changed here,
874 and might use more temps. All arguments must remain on the
875 stack, so that get_reg can correctly evict some of them onto
876 stack. We could also use a vrott(nb_args) at the end
877 of this loop, but this seems faster. */
878 SValue tmp
= vtop
[0];
881 if ((vtop
->type
.t
& VT_BTYPE
) == VT_STRUCT
) {
882 size
= type_size(&vtop
->type
, &align
);
883 /* align to stack align size */
884 size
= (size
+ 7) & ~7;
885 /* allocate the necessary size on stack */
887 oad(0xec81, size
); /* sub $xxx, %rsp */
888 /* generate structure store */
890 orex(1, r
, 0, 0x89); /* mov %rsp, r */
891 o(0xe0 + REG_VALUE(r
));
892 vset(&vtop
->type
, r
| VT_LVAL
, 0);
896 } else if ((vtop
->type
.t
& VT_BTYPE
) == VT_LDOUBLE
) {
899 oad(0xec8148, size
); /* sub $xxx, %rsp */
900 o(0x7cdb); /* fstpt 0(%rsp) */
904 } else if (is_sse_float(vtop
->type
.t
)) {
908 o(0x50); /* push $rax */
909 /* movq %xmm0, (%rsp) */
917 /* XXX: implicit cast ? */
920 orex(0,r
,0,0x50 + REG_VALUE(r
)); /* push r */
925 /* And swap the argument back to it's original position. */
931 /* XXX This should be superfluous. */
932 save_regs(0); /* save used temporary registers */
934 /* then, we prepare register passing arguments.
935 Note that we cannot set RDX and RCX in this loop because gv()
936 may break these temporary registers. Let's use R10 and R11
938 gen_reg
= nb_reg_args
;
939 sse_reg
= nb_sse_args
;
940 for(i
= 0; i
< nb_args
; i
++) {
941 if ((vtop
->type
.t
& VT_BTYPE
) == VT_STRUCT
||
942 (vtop
->type
.t
& VT_BTYPE
) == VT_LDOUBLE
) {
943 } else if (is_sse_float(vtop
->type
.t
)) {
946 gv(RC_FLOAT
); /* only one float register */
947 /* movaps %xmm0, %xmmN */
949 o(0xc0 + (sse_reg
<< 3));
954 /* XXX: implicit cast ? */
958 if (j
== 2 || j
== 3)
959 /* j=2: r10, j=3: r11 */
961 orex(1,d
,r
,0x89); /* mov */
962 o(0xc0 + REG_VALUE(r
) * 8 + REG_VALUE(d
));
968 /* We shouldn't have many operands on the stack anymore, but the
969 call address itself is still there, and it might be in %eax
970 (or edx/ecx) currently, which the below writes would clobber.
971 So evict all remaining operands here. */
974 /* Copy R10 and R11 into RDX and RCX, respectively */
975 if (nb_reg_args
> 2) {
976 o(0xd2894c); /* mov %r10, %rdx */
977 if (nb_reg_args
> 3) {
978 o(0xd9894c); /* mov %r11, %rcx */
982 oad(0xb8, nb_sse_args
< 8 ? nb_sse_args
: 8); /* mov nb_sse_args, %eax */
990 #define FUNC_PROLOG_SIZE 11
992 static void push_arg_reg(int i
) {
994 gen_modrm64(0x89, arg_regs
[i
], VT_LOCAL
, NULL
, loc
);
997 /* generate function prolog of type 't' */
998 void gfunc_prolog(CType
*func_type
)
1000 int i
, addr
, align
, size
;
1001 int param_index
, param_addr
, reg_param_index
, sse_param_index
;
1005 sym
= func_type
->ref
;
1006 addr
= PTR_SIZE
* 2;
1008 ind
+= FUNC_PROLOG_SIZE
;
1009 func_sub_sp_offset
= ind
;
1012 if (func_type
->ref
->c
== FUNC_ELLIPSIS
) {
1013 int seen_reg_num
, seen_sse_num
, seen_stack_size
;
1014 seen_reg_num
= seen_sse_num
= 0;
1015 /* frame pointer and return address */
1016 seen_stack_size
= PTR_SIZE
* 2;
1017 /* count the number of seen parameters */
1018 sym
= func_type
->ref
;
1019 while ((sym
= sym
->next
) != NULL
) {
1021 if (is_sse_float(type
->t
)) {
1022 if (seen_sse_num
< 8) {
1025 seen_stack_size
+= 8;
1027 } else if ((type
->t
& VT_BTYPE
) == VT_STRUCT
) {
1028 size
= type_size(type
, &align
);
1029 size
= (size
+ 7) & ~7;
1030 seen_stack_size
+= size
;
1031 } else if ((type
->t
& VT_BTYPE
) == VT_LDOUBLE
) {
1032 seen_stack_size
+= LDOUBLE_SIZE
;
1034 if (seen_reg_num
< REGN
) {
1037 seen_stack_size
+= 8;
1043 /* movl $0x????????, -0x10(%rbp) */
1045 gen_le32(seen_reg_num
* 8);
1046 /* movl $0x????????, -0xc(%rbp) */
1048 gen_le32(seen_sse_num
* 16 + 48);
1049 /* movl $0x????????, -0x8(%rbp) */
1051 gen_le32(seen_stack_size
);
1053 /* save all register passing arguments */
1054 for (i
= 0; i
< 8; i
++) {
1056 o(0xd60f66); /* movq */
1057 gen_modrm(7 - i
, VT_LOCAL
, NULL
, loc
);
1058 /* movq $0, loc+8(%rbp) */
1063 for (i
= 0; i
< REGN
; i
++) {
1064 push_arg_reg(REGN
-1-i
);
1068 sym
= func_type
->ref
;
1070 reg_param_index
= 0;
1071 sse_param_index
= 0;
1073 /* if the function returns a structure, then add an
1074 implicit pointer parameter */
1075 func_vt
= sym
->type
;
1076 if ((func_vt
.t
& VT_BTYPE
) == VT_STRUCT
) {
1077 push_arg_reg(reg_param_index
);
1084 /* define parameters */
1085 while ((sym
= sym
->next
) != NULL
) {
1087 size
= type_size(type
, &align
);
1088 size
= (size
+ 7) & ~7;
1089 if (is_sse_float(type
->t
)) {
1090 if (sse_param_index
< 8) {
1091 /* save arguments passed by register */
1093 o(0xd60f66); /* movq */
1094 gen_modrm(sse_param_index
, VT_LOCAL
, NULL
, loc
);
1102 } else if ((type
->t
& VT_BTYPE
) == VT_STRUCT
||
1103 (type
->t
& VT_BTYPE
) == VT_LDOUBLE
) {
1107 if (reg_param_index
< REGN
) {
1108 /* save arguments passed by register */
1109 push_arg_reg(reg_param_index
);
1117 sym_push(sym
->v
& ~SYM_FIELD
, type
,
1118 VT_LOCAL
| VT_LVAL
, param_addr
);
1123 /* generate function epilog */
1124 void gfunc_epilog(void)
1128 o(0xc9); /* leave */
1129 if (func_ret_sub
== 0) {
1132 o(0xc2); /* ret n */
1134 g(func_ret_sub
>> 8);
1136 /* align local size to word & save local variables */
1137 v
= (-loc
+ 15) & -16;
1139 ind
= func_sub_sp_offset
- FUNC_PROLOG_SIZE
;
1140 o(0xe5894855); /* push %rbp, mov %rsp, %rbp */
1141 o(0xec8148); /* sub rsp, stacksize */
1148 /* generate a jump to a label */
1151 return psym(0xe9, t
);
1154 /* generate a jump to a fixed address */
1155 void gjmp_addr(int a
)
1163 oad(0xe9, a
- ind
- 5);
1167 /* generate a test. set 'inv' to invert test. Stack entry is popped */
1168 int gtst(int inv
, int t
)
1172 v
= vtop
->r
& VT_VALMASK
;
1174 /* fast case : can jump directly since flags are set */
1175 if (vtop
->c
.i
& 0x100)
1177 /* This was a float compare. If the parity flag is set
1178 the result was unordered. For anything except != this
1179 means false and we don't jump (anding both conditions).
1180 For != this means true (oring both).
1181 Take care about inverting the test. We need to jump
1182 to our target if the result was unordered and test wasn't NE,
1183 otherwise if unordered we don't want to jump. */
1184 vtop
->c
.i
&= ~0x100;
1185 if (!inv
== (vtop
->c
.i
!= TOK_NE
))
1186 o(0x067a); /* jp +6 */
1190 t
= psym(0x8a, t
); /* jp t */
1194 t
= psym((vtop
->c
.i
- 16) ^ inv
, t
);
1195 } else if (v
== VT_JMP
|| v
== VT_JMPI
) {
1196 /* && or || optimization */
1197 if ((v
& 1) == inv
) {
1198 /* insert vtop->c jump list in t */
1201 p
= (int *)(cur_text_section
->data
+ *p
);
1209 if (is_float(vtop
->type
.t
) ||
1210 (vtop
->type
.t
& VT_BTYPE
) == VT_LLONG
) {
1214 if ((vtop
->r
& (VT_VALMASK
| VT_LVAL
| VT_SYM
)) == VT_CONST
) {
1215 /* constant jmp optimization */
1216 if ((vtop
->c
.i
!= 0) != inv
)
1221 o(0xc0 + REG_VALUE(v
) * 9);
1223 t
= psym(0x85 ^ inv
, t
);
1230 /* generate an integer binary operation */
1231 void gen_opi(int op
)
1236 ll
= is64_type(vtop
[-1].type
.t
);
1237 uu
= (vtop
[-1].type
.t
& VT_UNSIGNED
) != 0;
1238 cc
= (vtop
->r
& (VT_VALMASK
| VT_LVAL
| VT_SYM
)) == VT_CONST
;
1242 case TOK_ADDC1
: /* add with carry generation */
1245 if (cc
&& (!ll
|| (int)vtop
->c
.ll
== vtop
->c
.ll
)) {
1252 /* XXX: generate inc and dec for smaller code ? */
1253 orex(ll
, r
, 0, 0x83);
1254 o(0xc0 | (opc
<< 3) | REG_VALUE(r
));
1257 orex(ll
, r
, 0, 0x81);
1258 oad(0xc0 | (opc
<< 3) | REG_VALUE(r
), c
);
1261 gv2(RC_INT
, RC_INT
);
1264 orex(ll
, r
, fr
, (opc
<< 3) | 0x01);
1265 o(0xc0 + REG_VALUE(r
) + REG_VALUE(fr
) * 8);
1268 if (op
>= TOK_ULT
&& op
<= TOK_GT
) {
1274 case TOK_SUBC1
: /* sub with carry generation */
1277 case TOK_ADDC2
: /* add with carry use */
1280 case TOK_SUBC2
: /* sub with carry use */
1293 gv2(RC_INT
, RC_INT
);
1296 orex(ll
, fr
, r
, 0xaf0f); /* imul fr, r */
1297 o(0xc0 + REG_VALUE(fr
) + REG_VALUE(r
) * 8);
1309 opc
= 0xc0 | (opc
<< 3);
1315 orex(ll
, r
, 0, 0xc1); /* shl/shr/sar $xxx, r */
1316 o(opc
| REG_VALUE(r
));
1317 g(vtop
->c
.i
& (ll
? 63 : 31));
1319 /* we generate the shift in ecx */
1320 gv2(RC_INT
, RC_RCX
);
1322 orex(ll
, r
, 0, 0xd3); /* shl/shr/sar %cl, r */
1323 o(opc
| REG_VALUE(r
));
1336 /* first operand must be in eax */
1337 /* XXX: need better constraint for second operand */
1338 gv2(RC_RAX
, RC_RCX
);
1343 orex(ll
, 0, 0, uu
? 0xd231 : 0x99); /* xor %edx,%edx : cqto */
1344 orex(ll
, fr
, 0, 0xf7); /* div fr, %eax */
1345 o((uu
? 0xf0 : 0xf8) + REG_VALUE(fr
));
1346 if (op
== '%' || op
== TOK_UMOD
)
1358 void gen_opl(int op
)
1363 /* generate a floating point operation 'v = t1 op t2' instruction. The
1364 two operands are guaranteed to have the same floating point type */
1365 /* XXX: need to use ST1 too */
1366 void gen_opf(int op
)
1368 int a
, ft
, fc
, swapped
, r
;
1370 (vtop
->type
.t
& VT_BTYPE
) == VT_LDOUBLE
? RC_ST0
: RC_FLOAT
;
1372 /* convert constants to memory references */
1373 if ((vtop
[-1].r
& (VT_VALMASK
| VT_LVAL
)) == VT_CONST
) {
1378 if ((vtop
[0].r
& (VT_VALMASK
| VT_LVAL
)) == VT_CONST
)
1381 /* must put at least one value in the floating point register */
1382 if ((vtop
[-1].r
& VT_LVAL
) &&
1383 (vtop
[0].r
& VT_LVAL
)) {
1389 /* swap the stack if needed so that t1 is the register and t2 is
1390 the memory reference */
1391 if (vtop
[-1].r
& VT_LVAL
) {
1395 if ((vtop
->type
.t
& VT_BTYPE
) == VT_LDOUBLE
) {
1396 if (op
>= TOK_ULT
&& op
<= TOK_GT
) {
1397 /* load on stack second operand */
1398 load(TREG_ST0
, vtop
);
1399 save_reg(TREG_RAX
); /* eax is used by FP comparison code */
1400 if (op
== TOK_GE
|| op
== TOK_GT
)
1402 else if (op
== TOK_EQ
|| op
== TOK_NE
)
1405 o(0xc9d9); /* fxch %st(1) */
1406 o(0xe9da); /* fucompp */
1407 o(0xe0df); /* fnstsw %ax */
1409 o(0x45e480); /* and $0x45, %ah */
1410 o(0x40fC80); /* cmp $0x40, %ah */
1411 } else if (op
== TOK_NE
) {
1412 o(0x45e480); /* and $0x45, %ah */
1413 o(0x40f480); /* xor $0x40, %ah */
1415 } else if (op
== TOK_GE
|| op
== TOK_LE
) {
1416 o(0x05c4f6); /* test $0x05, %ah */
1419 o(0x45c4f6); /* test $0x45, %ah */
1426 /* no memory reference possible for long double operations */
1427 load(TREG_ST0
, vtop
);
1451 o(0xde); /* fxxxp %st, %st(1) */
1456 if (op
>= TOK_ULT
&& op
<= TOK_GT
) {
1457 /* if saved lvalue, then we must reload it */
1460 if ((r
& VT_VALMASK
) == VT_LLOCAL
) {
1462 r
= get_reg(RC_INT
);
1464 v1
.r
= VT_LOCAL
| VT_LVAL
;
1470 if (op
== TOK_EQ
|| op
== TOK_NE
) {
1473 if (op
== TOK_LE
|| op
== TOK_LT
)
1475 if (op
== TOK_LE
|| op
== TOK_GE
) {
1476 op
= 0x93; /* setae */
1478 op
= 0x97; /* seta */
1483 o(0x7e0ff3); /* movq */
1484 gen_modrm(1, r
, vtop
->sym
, fc
);
1486 if ((vtop
->type
.t
& VT_BTYPE
) == VT_DOUBLE
) {
1489 o(0x2e0f); /* ucomisd %xmm0, %xmm1 */
1492 if ((vtop
->type
.t
& VT_BTYPE
) == VT_DOUBLE
) {
1495 o(0x2e0f); /* ucomisd */
1496 gen_modrm(0, r
, vtop
->sym
, fc
);
1501 vtop
->c
.i
= op
| 0x100;
1503 /* no memory reference possible for long double operations */
1504 if ((vtop
->type
.t
& VT_BTYPE
) == VT_LDOUBLE
) {
1505 load(TREG_XMM0
, vtop
);
1525 if ((ft
& VT_BTYPE
) == VT_LDOUBLE
) {
1526 o(0xde); /* fxxxp %st, %st(1) */
1529 /* if saved lvalue, then we must reload it */
1531 if ((r
& VT_VALMASK
) == VT_LLOCAL
) {
1533 r
= get_reg(RC_INT
);
1535 v1
.r
= VT_LOCAL
| VT_LVAL
;
1541 /* movq %xmm0,%xmm1 */
1544 load(TREG_XMM0
, vtop
);
1545 /* subsd %xmm1,%xmm0 (f2 0f 5c c1) */
1546 if ((ft
& VT_BTYPE
) == VT_DOUBLE
) {
1555 if ((ft
& VT_BTYPE
) == VT_DOUBLE
) {
1562 gen_modrm(0, r
, vtop
->sym
, fc
);
1570 /* convert integers to fp 't' type. Must handle 'int', 'unsigned int'
1571 and 'long long' cases. */
1572 void gen_cvt_itof(int t
)
1574 if ((t
& VT_BTYPE
) == VT_LDOUBLE
) {
1577 if ((vtop
->type
.t
& VT_BTYPE
) == VT_LLONG
) {
1578 /* signed long long to float/double/long double (unsigned case
1579 is handled generically) */
1580 o(0x50 + (vtop
->r
& VT_VALMASK
)); /* push r */
1581 o(0x242cdf); /* fildll (%rsp) */
1582 o(0x08c48348); /* add $8, %rsp */
1583 } else if ((vtop
->type
.t
& (VT_BTYPE
| VT_UNSIGNED
)) ==
1584 (VT_INT
| VT_UNSIGNED
)) {
1585 /* unsigned int to float/double/long double */
1586 o(0x6a); /* push $0 */
1588 o(0x50 + (vtop
->r
& VT_VALMASK
)); /* push r */
1589 o(0x242cdf); /* fildll (%rsp) */
1590 o(0x10c48348); /* add $16, %rsp */
1592 /* int to float/double/long double */
1593 o(0x50 + (vtop
->r
& VT_VALMASK
)); /* push r */
1594 o(0x2404db); /* fildl (%rsp) */
1595 o(0x08c48348); /* add $8, %rsp */
1599 save_reg(TREG_XMM0
);
1601 o(0xf2 + ((t
& VT_BTYPE
) == VT_FLOAT
));
1602 if ((vtop
->type
.t
& (VT_BTYPE
| VT_UNSIGNED
)) ==
1603 (VT_INT
| VT_UNSIGNED
) ||
1604 (vtop
->type
.t
& VT_BTYPE
) == VT_LLONG
) {
1608 o(0xc0 + (vtop
->r
& VT_VALMASK
)); /* cvtsi2sd */
1609 vtop
->r
= TREG_XMM0
;
1613 /* convert from one floating point type to another */
1614 void gen_cvt_ftof(int t
)
1622 if (bt
== VT_FLOAT
) {
1624 if (tbt
== VT_DOUBLE
) {
1625 o(0xc0140f); /* unpcklps */
1626 o(0xc05a0f); /* cvtps2pd */
1627 } else if (tbt
== VT_LDOUBLE
) {
1628 /* movss %xmm0,-0x10(%rsp) */
1631 o(0xf02444d9); /* flds -0x10(%rsp) */
1634 } else if (bt
== VT_DOUBLE
) {
1636 if (tbt
== VT_FLOAT
) {
1637 o(0xc0140f66); /* unpcklpd */
1638 o(0xc05a0f66); /* cvtpd2ps */
1639 } else if (tbt
== VT_LDOUBLE
) {
1640 /* movsd %xmm0,-0x10(%rsp) */
1643 o(0xf02444dd); /* fldl -0x10(%rsp) */
1648 if (tbt
== VT_DOUBLE
) {
1649 o(0xf0245cdd); /* fstpl -0x10(%rsp) */
1650 /* movsd -0x10(%rsp),%xmm0 */
1653 vtop
->r
= TREG_XMM0
;
1654 } else if (tbt
== VT_FLOAT
) {
1655 o(0xf0245cd9); /* fstps -0x10(%rsp) */
1656 /* movss -0x10(%rsp),%xmm0 */
1659 vtop
->r
= TREG_XMM0
;
1664 /* convert fp to int 't' type */
1665 void gen_cvt_ftoi(int t
)
1667 int ft
, bt
, size
, r
;
1670 if (bt
== VT_LDOUBLE
) {
1671 gen_cvt_ftof(VT_DOUBLE
);
1681 r
= get_reg(RC_INT
);
1682 if (bt
== VT_FLOAT
) {
1684 } else if (bt
== VT_DOUBLE
) {
1689 orex(size
== 8, r
, 0, 0x2c0f); /* cvttss2si or cvttsd2si */
1690 o(0xc0 + (REG_VALUE(r
) << 3));
1694 /* computed goto support */
1701 /* end of x86-64 code generator */
1702 /*************************************************************/
1703 #endif /* ! TARGET_DEFS_ONLY */
1704 /******************************************************/