; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-win32 | FileCheck %s -check-prefix=WIN64
; RUN: llc < %s -mtriple=x86_64-mingw32 | FileCheck %s -check-prefix=WIN64

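; Check the Win64 lowering of strict (constrained) FP <-> i128 conversions.
; They are never expanded inline: each one becomes a libcall (__fixdfti,
; __floattidf, etc.), with the i128 result returned in %xmm0 and i128
; arguments passed by pointer, per the Windows x64 calling convention.
;
; Roughly the C these correspond to, as a sketch (assuming a MinGW-style
; target where long double is x86_fp80, and strict-FP semantics via
; #pragma STDC FENV_ACCESS ON; the trunc to i64 merely keeps i128 return
; handling out of the checks):
;
;   __int128 double_to_i128(double d) { return (__int128)d; }
;   unsigned __int128 double_to_ui128(double d) { return (unsigned __int128)d; }
;   double i128_to_double(__int128 i) { return (double)i; }
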
define i64 @double_to_i128(double %d) nounwind strictfp {
; WIN64-LABEL: double_to_i128:
; WIN64:       # %bb.0:
; WIN64-NEXT:    subq $40, %rsp
; WIN64-NEXT:    callq __fixdfti
; WIN64-NEXT:    movq %xmm0, %rax
; WIN64-NEXT:    addq $40, %rsp
; WIN64-NEXT:    retq
  %1 = tail call i128 @llvm.experimental.constrained.fptosi.i128.f64(double %d, metadata !"fpexcept.strict")
  %2 = trunc i128 %1 to i64
  ret i64 %2
}

define i64 @double_to_ui128(double %d) nounwind strictfp {
; WIN64-LABEL: double_to_ui128:
; WIN64:       # %bb.0:
; WIN64-NEXT:    subq $40, %rsp
; WIN64-NEXT:    callq __fixunsdfti
; WIN64-NEXT:    movq %xmm0, %rax
; WIN64-NEXT:    addq $40, %rsp
; WIN64-NEXT:    retq
  %1 = tail call i128 @llvm.experimental.constrained.fptoui.i128.f64(double %d, metadata !"fpexcept.strict")
  %2 = trunc i128 %1 to i64
  ret i64 %2
}

define i64 @float_to_i128(float %d) nounwind strictfp {
; WIN64-LABEL: float_to_i128:
; WIN64:       # %bb.0:
; WIN64-NEXT:    subq $40, %rsp
; WIN64-NEXT:    callq __fixsfti
; WIN64-NEXT:    movq %xmm0, %rax
; WIN64-NEXT:    addq $40, %rsp
; WIN64-NEXT:    retq
  %1 = tail call i128 @llvm.experimental.constrained.fptosi.i128.f32(float %d, metadata !"fpexcept.strict")
  %2 = trunc i128 %1 to i64
  ret i64 %2
}

define i64 @float_to_ui128(float %d) nounwind strictfp {
; WIN64-LABEL: float_to_ui128:
; WIN64:       # %bb.0:
; WIN64-NEXT:    subq $40, %rsp
; WIN64-NEXT:    callq __fixunssfti
; WIN64-NEXT:    movq %xmm0, %rax
; WIN64-NEXT:    addq $40, %rsp
; WIN64-NEXT:    retq
  %1 = tail call i128 @llvm.experimental.constrained.fptoui.i128.f32(float %d, metadata !"fpexcept.strict")
  %2 = trunc i128 %1 to i64
  ret i64 %2
}

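; An x86_fp80 operand cannot be passed in a register either: it is spilled
; with fstpt and its stack address is handed to __fixxfti/__fixunsxfti in
; %rcx.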
define i64 @longdouble_to_i128(ptr nocapture readonly %0) nounwind strictfp {
; WIN64-LABEL: longdouble_to_i128:
; WIN64:       # %bb.0:
; WIN64-NEXT:    subq $56, %rsp
; WIN64-NEXT:    fldt (%rcx)
; WIN64-NEXT:    fstpt {{[0-9]+}}(%rsp)
; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rcx
; WIN64-NEXT:    callq __fixxfti
; WIN64-NEXT:    movq %xmm0, %rax
; WIN64-NEXT:    addq $56, %rsp
; WIN64-NEXT:    retq
  %2 = load x86_fp80, ptr %0, align 16
  %3 = tail call i128 @llvm.experimental.constrained.fptosi.i128.f80(x86_fp80 %2, metadata !"fpexcept.strict")
  %4 = trunc i128 %3 to i64
  ret i64 %4
}

define i64 @longdouble_to_ui128(ptr nocapture readonly %0) nounwind strictfp {
; WIN64-LABEL: longdouble_to_ui128:
; WIN64:       # %bb.0:
; WIN64-NEXT:    subq $56, %rsp
; WIN64-NEXT:    fldt (%rcx)
; WIN64-NEXT:    fstpt {{[0-9]+}}(%rsp)
; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rcx
; WIN64-NEXT:    callq __fixunsxfti
; WIN64-NEXT:    movq %xmm0, %rax
; WIN64-NEXT:    addq $56, %rsp
; WIN64-NEXT:    retq
  %2 = load x86_fp80, ptr %0, align 16
  %3 = tail call i128 @llvm.experimental.constrained.fptoui.i128.f80(x86_fp80 %2, metadata !"fpexcept.strict")
  %4 = trunc i128 %3 to i64
  ret i64 %4
}

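; Going the other way, the i128 operand is likewise indirect: it is copied
; to a 16-byte-aligned stack slot with movaps and its address is passed to
; the __floatti*/__floatunti* libcalls in %rcx.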
define double @i128_to_double(ptr nocapture readonly %0) nounwind strictfp {
; WIN64-LABEL: i128_to_double:
; WIN64:       # %bb.0:
; WIN64-NEXT:    subq $56, %rsp
; WIN64-NEXT:    movaps (%rcx), %xmm0
; WIN64-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rcx
; WIN64-NEXT:    callq __floattidf
; WIN64-NEXT:    addq $56, %rsp
; WIN64-NEXT:    retq
  %2 = load i128, ptr %0, align 16
  %3 = tail call double @llvm.experimental.constrained.sitofp.f64.i128(i128 %2, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret double %3
}

define double @ui128_to_double(ptr nocapture readonly %0) nounwind strictfp {
; WIN64-LABEL: ui128_to_double:
; WIN64:       # %bb.0:
; WIN64-NEXT:    subq $56, %rsp
; WIN64-NEXT:    movaps (%rcx), %xmm0
; WIN64-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rcx
; WIN64-NEXT:    callq __floatuntidf
; WIN64-NEXT:    addq $56, %rsp
; WIN64-NEXT:    retq
  %2 = load i128, ptr %0, align 16
  %3 = tail call double @llvm.experimental.constrained.uitofp.f64.i128(i128 %2, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret double %3
}

define float @i128_to_float(ptr nocapture readonly %0) nounwind strictfp {
; WIN64-LABEL: i128_to_float:
; WIN64:       # %bb.0:
; WIN64-NEXT:    subq $56, %rsp
; WIN64-NEXT:    movaps (%rcx), %xmm0
; WIN64-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rcx
; WIN64-NEXT:    callq __floattisf
; WIN64-NEXT:    addq $56, %rsp
; WIN64-NEXT:    retq
  %2 = load i128, ptr %0, align 16
  %3 = tail call float @llvm.experimental.constrained.sitofp.f32.i128(i128 %2, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret float %3
}

define float @ui128_to_float(ptr nocapture readonly %0) nounwind strictfp {
; WIN64-LABEL: ui128_to_float:
; WIN64:       # %bb.0:
; WIN64-NEXT:    subq $56, %rsp
; WIN64-NEXT:    movaps (%rcx), %xmm0
; WIN64-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rcx
; WIN64-NEXT:    callq __floatuntisf
; WIN64-NEXT:    addq $56, %rsp
; WIN64-NEXT:    retq
  %2 = load i128, ptr %0, align 16
  %3 = tail call float @llvm.experimental.constrained.uitofp.f32.i128(i128 %2, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret float %3
}

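; For i128 -> x86_fp80 both sides are indirect: the two leaq instructions
; below materialize pointers to the x86_fp80 return slot and to the i128
; stack copy before the call, and the result is reloaded with fldt and
; stored through the incoming sret pointer.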
define void @i128_to_longdouble(ptr noalias nocapture sret(x86_fp80) align 16 %agg.result, ptr nocapture readonly %0) nounwind strictfp {
; WIN64-LABEL: i128_to_longdouble:
; WIN64:       # %bb.0:
; WIN64-NEXT:    pushq %rsi
; WIN64-NEXT:    subq $64, %rsp
; WIN64-NEXT:    movq %rcx, %rsi
; WIN64-NEXT:    movaps (%rdx), %xmm0
; WIN64-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rcx
; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rdx
; WIN64-NEXT:    callq __floattixf
; WIN64-NEXT:    fldt {{[0-9]+}}(%rsp)
; WIN64-NEXT:    fstpt (%rsi)
; WIN64-NEXT:    movq %rsi, %rax
; WIN64-NEXT:    addq $64, %rsp
; WIN64-NEXT:    popq %rsi
; WIN64-NEXT:    retq
  %2 = load i128, ptr %0, align 16
  %3 = tail call x86_fp80 @llvm.experimental.constrained.sitofp.f80.i128(i128 %2, metadata !"round.dynamic", metadata !"fpexcept.strict")
  store x86_fp80 %3, ptr %agg.result, align 16
  ret void
}

define void @ui128_to_longdouble(ptr noalias nocapture sret(x86_fp80) align 16 %agg.result, ptr nocapture readonly %0) nounwind strictfp {
; WIN64-LABEL: ui128_to_longdouble:
; WIN64:       # %bb.0:
; WIN64-NEXT:    pushq %rsi
; WIN64-NEXT:    subq $64, %rsp
; WIN64-NEXT:    movq %rcx, %rsi
; WIN64-NEXT:    movaps (%rdx), %xmm0
; WIN64-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rcx
; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rdx
; WIN64-NEXT:    callq __floatuntixf
; WIN64-NEXT:    fldt {{[0-9]+}}(%rsp)
; WIN64-NEXT:    fstpt (%rsi)
; WIN64-NEXT:    movq %rsi, %rax
; WIN64-NEXT:    addq $64, %rsp
; WIN64-NEXT:    popq %rsi
; WIN64-NEXT:    retq
  %2 = load i128, ptr %0, align 16
  %3 = tail call x86_fp80 @llvm.experimental.constrained.uitofp.f80.i128(i128 %2, metadata !"round.dynamic", metadata !"fpexcept.strict")
  store x86_fp80 %3, ptr %agg.result, align 16
  ret void
}

declare i128 @llvm.experimental.constrained.fptosi.i128.f64(double, metadata)
declare i128 @llvm.experimental.constrained.fptoui.i128.f64(double, metadata)
declare i128 @llvm.experimental.constrained.fptosi.i128.f32(float, metadata)
declare i128 @llvm.experimental.constrained.fptoui.i128.f32(float, metadata)
declare i128 @llvm.experimental.constrained.fptosi.i128.f80(x86_fp80, metadata)
declare i128 @llvm.experimental.constrained.fptoui.i128.f80(x86_fp80, metadata)
declare double @llvm.experimental.constrained.sitofp.f64.i128(i128, metadata, metadata)
declare double @llvm.experimental.constrained.uitofp.f64.i128(i128, metadata, metadata)
declare float @llvm.experimental.constrained.sitofp.f32.i128(i128, metadata, metadata)
declare float @llvm.experimental.constrained.uitofp.f32.i128(i128, metadata, metadata)
declare x86_fp80 @llvm.experimental.constrained.sitofp.f80.i128(i128, metadata, metadata)
declare x86_fp80 @llvm.experimental.constrained.uitofp.f80.i128(i128, metadata, metadata)