// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 3
// RUN: %clang_cc1 -triple riscv32 -emit-llvm %s -o - | FileCheck --check-prefix=RV32 %s
// RUN: %clang_cc1 -triple riscv64 -emit-llvm %s -o - | FileCheck --check-prefix=RV64 %s

float ceilf(float);
double ceil(double);
long double ceill(long double);
float copysignf(float, float);
double copysign(double, double);
long double copysignl(long double, long double);
float cosf(float);
double cos(double);
long double cosl(long double);
float expf(float);
double exp(double);
long double expl(long double);
float exp2f(float);
double exp2(double);
long double exp2l(long double);
float fabsf(float);
double fabs(double);
long double fabsl(long double);
float floorf(float);
double floor(double);
long double floorl(long double);
float fmaxf(float, float);
double fmax(double, double);
long double fmaxl(long double, long double);
float fminf(float, float);
double fmin(double, double);
long double fminl(long double, long double);
float fmodf(float, float);
double fmod(double, double);
long double fmodl(long double, long double);
float logf(float);
double log(double);
long double logl(long double);
float log10f(float);
double log10(double);
long double log10l(long double);
float log2f(float);
double log2(double);
long double log2l(long double);
float nearbyintf(float);
double nearbyint(double);
long double nearbyintl(long double);
float powf(float, float);
double pow(double, double);
long double powl(long double, long double);
float rintf(float);
double rint(double);
long double rintl(long double);
long lrintf(float);
long lrint(double);
long lrintl(long double);
long long llrintf(float);
long long llrint(double);
long long llrintl(long double);
float roundf(float);
double round(double);
long double roundl(long double);
long lroundf(float);
long lround(double);
long lroundl(long double);
long long llroundf(float);
long long llround(double);
long long llroundl(long double);
float roundevenf(float);
double roundeven(double);
long double roundevenl(long double);
float sinf(float);
double sin(double);
long double sinl(long double);
float sqrtf(float);
double sqrt(double);
long double sqrtl(long double);
float truncf(float);
double trunc(double);
long double truncl(long double);

// RV32-LABEL: define dso_local void @test(
// RV32-SAME: float noundef [[FARG:%.*]], double noundef [[DARG:%.*]], fp128 noundef [[LDARG:%.*]]) #[[ATTR0:[0-9]+]] {
// RV32-NEXT: entry:
// RV32-NEXT: [[FARG_ADDR:%.*]] = alloca float, align 4
// RV32-NEXT: [[DARG_ADDR:%.*]] = alloca double, align 8
// RV32-NEXT: [[LDARG_ADDR:%.*]] = alloca fp128, align 16
// RV32-NEXT: store float [[FARG]], ptr [[FARG_ADDR]], align 4
// RV32-NEXT: store double [[DARG]], ptr [[DARG_ADDR]], align 8
// RV32-NEXT: store fp128 [[LDARG]], ptr [[LDARG_ADDR]], align 16
// RV32-NEXT: [[TMP0:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV32-NEXT: [[TMP1:%.*]] = call float @llvm.ceil.f32(float [[TMP0]])
// RV32-NEXT: [[TMP2:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV32-NEXT: [[TMP3:%.*]] = call double @llvm.ceil.f64(double [[TMP2]])
// RV32-NEXT: [[TMP4:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV32-NEXT: [[TMP5:%.*]] = call fp128 @llvm.ceil.f128(fp128 [[TMP4]])
// RV32-NEXT: [[TMP6:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV32-NEXT: [[TMP7:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV32-NEXT: [[TMP8:%.*]] = call float @llvm.copysign.f32(float [[TMP6]], float [[TMP7]])
// RV32-NEXT: [[TMP9:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV32-NEXT: [[TMP10:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV32-NEXT: [[TMP11:%.*]] = call double @llvm.copysign.f64(double [[TMP9]], double [[TMP10]])
// RV32-NEXT: [[TMP12:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV32-NEXT: [[TMP13:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV32-NEXT: [[TMP14:%.*]] = call fp128 @llvm.copysign.f128(fp128 [[TMP12]], fp128 [[TMP13]])
// RV32-NEXT: [[TMP15:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV32-NEXT: [[TMP16:%.*]] = call float @llvm.cos.f32(float [[TMP15]])
// RV32-NEXT: [[TMP17:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV32-NEXT: [[TMP18:%.*]] = call double @llvm.cos.f64(double [[TMP17]])
// RV32-NEXT: [[TMP19:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV32-NEXT: [[TMP20:%.*]] = call fp128 @llvm.cos.f128(fp128 [[TMP19]])
// RV32-NEXT: [[TMP21:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV32-NEXT: [[TMP22:%.*]] = call float @llvm.exp.f32(float [[TMP21]])
// RV32-NEXT: [[TMP23:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV32-NEXT: [[TMP24:%.*]] = call double @llvm.exp.f64(double [[TMP23]])
// RV32-NEXT: [[TMP25:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV32-NEXT: [[TMP26:%.*]] = call fp128 @llvm.exp.f128(fp128 [[TMP25]])
// RV32-NEXT: [[TMP27:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV32-NEXT: [[TMP28:%.*]] = call float @llvm.exp2.f32(float [[TMP27]])
// RV32-NEXT: [[TMP29:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV32-NEXT: [[TMP30:%.*]] = call double @llvm.exp2.f64(double [[TMP29]])
// RV32-NEXT: [[TMP31:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV32-NEXT: [[TMP32:%.*]] = call fp128 @llvm.exp2.f128(fp128 [[TMP31]])
// RV32-NEXT: [[TMP33:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV32-NEXT: [[TMP34:%.*]] = call float @llvm.fabs.f32(float [[TMP33]])
// RV32-NEXT: [[TMP35:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV32-NEXT: [[TMP36:%.*]] = call double @llvm.fabs.f64(double [[TMP35]])
// RV32-NEXT: [[TMP37:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV32-NEXT: [[TMP38:%.*]] = call fp128 @llvm.fabs.f128(fp128 [[TMP37]])
// RV32-NEXT: [[TMP39:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV32-NEXT: [[TMP40:%.*]] = call float @llvm.floor.f32(float [[TMP39]])
// RV32-NEXT: [[TMP41:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV32-NEXT: [[TMP42:%.*]] = call double @llvm.floor.f64(double [[TMP41]])
// RV32-NEXT: [[TMP43:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV32-NEXT: [[TMP44:%.*]] = call fp128 @llvm.floor.f128(fp128 [[TMP43]])
// RV32-NEXT: [[TMP45:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV32-NEXT: [[TMP46:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV32-NEXT: [[TMP47:%.*]] = call float @llvm.maxnum.f32(float [[TMP45]], float [[TMP46]])
// RV32-NEXT: [[TMP48:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV32-NEXT: [[TMP49:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV32-NEXT: [[TMP50:%.*]] = call double @llvm.maxnum.f64(double [[TMP48]], double [[TMP49]])
// RV32-NEXT: [[TMP51:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV32-NEXT: [[TMP52:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV32-NEXT: [[TMP53:%.*]] = call fp128 @llvm.maxnum.f128(fp128 [[TMP51]], fp128 [[TMP52]])
// RV32-NEXT: [[TMP54:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV32-NEXT: [[TMP55:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV32-NEXT: [[TMP56:%.*]] = call float @llvm.minnum.f32(float [[TMP54]], float [[TMP55]])
// RV32-NEXT: [[TMP57:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV32-NEXT: [[TMP58:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV32-NEXT: [[TMP59:%.*]] = call double @llvm.minnum.f64(double [[TMP57]], double [[TMP58]])
// RV32-NEXT: [[TMP60:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV32-NEXT: [[TMP61:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV32-NEXT: [[TMP62:%.*]] = call fp128 @llvm.minnum.f128(fp128 [[TMP60]], fp128 [[TMP61]])
// RV32-NEXT: [[TMP63:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV32-NEXT: [[TMP64:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV32-NEXT: [[FMOD:%.*]] = frem float [[TMP63]], [[TMP64]]
// RV32-NEXT: [[TMP65:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV32-NEXT: [[TMP66:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV32-NEXT: [[FMOD1:%.*]] = frem double [[TMP65]], [[TMP66]]
// RV32-NEXT: [[TMP67:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV32-NEXT: [[TMP68:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV32-NEXT: [[FMOD2:%.*]] = frem fp128 [[TMP67]], [[TMP68]]
// RV32-NEXT: [[TMP69:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV32-NEXT: [[TMP70:%.*]] = call float @llvm.log.f32(float [[TMP69]])
// RV32-NEXT: [[TMP71:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV32-NEXT: [[TMP72:%.*]] = call double @llvm.log.f64(double [[TMP71]])
// RV32-NEXT: [[TMP73:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV32-NEXT: [[TMP74:%.*]] = call fp128 @llvm.log.f128(fp128 [[TMP73]])
// RV32-NEXT: [[TMP75:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV32-NEXT: [[TMP76:%.*]] = call float @llvm.log10.f32(float [[TMP75]])
// RV32-NEXT: [[TMP77:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV32-NEXT: [[TMP78:%.*]] = call double @llvm.log10.f64(double [[TMP77]])
// RV32-NEXT: [[TMP79:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV32-NEXT: [[TMP80:%.*]] = call fp128 @llvm.log10.f128(fp128 [[TMP79]])
// RV32-NEXT: [[TMP81:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV32-NEXT: [[TMP82:%.*]] = call float @llvm.log2.f32(float [[TMP81]])
// RV32-NEXT: [[TMP83:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV32-NEXT: [[TMP84:%.*]] = call double @llvm.log2.f64(double [[TMP83]])
// RV32-NEXT: [[TMP85:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV32-NEXT: [[TMP86:%.*]] = call fp128 @llvm.log2.f128(fp128 [[TMP85]])
// RV32-NEXT: [[TMP87:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV32-NEXT: [[TMP88:%.*]] = call float @llvm.nearbyint.f32(float [[TMP87]])
// RV32-NEXT: [[TMP89:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV32-NEXT: [[TMP90:%.*]] = call double @llvm.nearbyint.f64(double [[TMP89]])
// RV32-NEXT: [[TMP91:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV32-NEXT: [[TMP92:%.*]] = call fp128 @llvm.nearbyint.f128(fp128 [[TMP91]])
// RV32-NEXT: [[TMP93:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV32-NEXT: [[TMP94:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV32-NEXT: [[TMP95:%.*]] = call float @llvm.pow.f32(float [[TMP93]], float [[TMP94]])
// RV32-NEXT: [[TMP96:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV32-NEXT: [[TMP97:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV32-NEXT: [[TMP98:%.*]] = call double @llvm.pow.f64(double [[TMP96]], double [[TMP97]])
// RV32-NEXT: [[TMP99:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV32-NEXT: [[TMP100:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV32-NEXT: [[TMP101:%.*]] = call fp128 @llvm.pow.f128(fp128 [[TMP99]], fp128 [[TMP100]])
// RV32-NEXT: [[TMP102:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV32-NEXT: [[TMP103:%.*]] = call float @llvm.rint.f32(float [[TMP102]])
// RV32-NEXT: [[TMP104:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV32-NEXT: [[TMP105:%.*]] = call double @llvm.rint.f64(double [[TMP104]])
// RV32-NEXT: [[TMP106:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV32-NEXT: [[TMP107:%.*]] = call fp128 @llvm.rint.f128(fp128 [[TMP106]])
// RV32-NEXT: [[TMP108:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV32-NEXT: [[TMP109:%.*]] = call i32 @llvm.lrint.i32.f32(float [[TMP108]])
// RV32-NEXT: [[TMP110:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV32-NEXT: [[TMP111:%.*]] = call i32 @llvm.lrint.i32.f64(double [[TMP110]])
// RV32-NEXT: [[TMP112:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV32-NEXT: [[TMP113:%.*]] = call i32 @llvm.lrint.i32.f128(fp128 [[TMP112]])
// RV32-NEXT: [[TMP114:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV32-NEXT: [[TMP115:%.*]] = call i32 @llvm.lrint.i32.f32(float [[TMP114]])
// RV32-NEXT: [[TMP116:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV32-NEXT: [[TMP117:%.*]] = call i64 @llvm.llrint.i64.f64(double [[TMP116]])
// RV32-NEXT: [[TMP118:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV32-NEXT: [[TMP119:%.*]] = call i64 @llvm.llrint.i64.f128(fp128 [[TMP118]])
// RV32-NEXT: [[TMP120:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV32-NEXT: [[TMP121:%.*]] = call float @llvm.round.f32(float [[TMP120]])
// RV32-NEXT: [[TMP122:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV32-NEXT: [[TMP123:%.*]] = call double @llvm.round.f64(double [[TMP122]])
// RV32-NEXT: [[TMP124:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV32-NEXT: [[TMP125:%.*]] = call fp128 @llvm.round.f128(fp128 [[TMP124]])
// RV32-NEXT: [[TMP126:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV32-NEXT: [[TMP127:%.*]] = call i32 @llvm.lround.i32.f32(float [[TMP126]])
// RV32-NEXT: [[TMP128:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV32-NEXT: [[TMP129:%.*]] = call i32 @llvm.lround.i32.f64(double [[TMP128]])
// RV32-NEXT: [[TMP130:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV32-NEXT: [[TMP131:%.*]] = call i32 @llvm.lround.i32.f128(fp128 [[TMP130]])
// RV32-NEXT: [[TMP132:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV32-NEXT: [[TMP133:%.*]] = call i64 @llvm.llround.i64.f32(float [[TMP132]])
// RV32-NEXT: [[TMP134:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV32-NEXT: [[TMP135:%.*]] = call i64 @llvm.llround.i64.f64(double [[TMP134]])
// RV32-NEXT: [[TMP136:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV32-NEXT: [[TMP137:%.*]] = call i64 @llvm.llround.i64.f128(fp128 [[TMP136]])
// RV32-NEXT: [[TMP138:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV32-NEXT: [[TMP139:%.*]] = call float @llvm.roundeven.f32(float [[TMP138]])
// RV32-NEXT: [[TMP140:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV32-NEXT: [[TMP141:%.*]] = call double @llvm.roundeven.f64(double [[TMP140]])
// RV32-NEXT: [[TMP142:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV32-NEXT: [[TMP143:%.*]] = call fp128 @llvm.roundeven.f128(fp128 [[TMP142]])
// RV32-NEXT: [[TMP144:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV32-NEXT: [[TMP145:%.*]] = call float @llvm.sin.f32(float [[TMP144]])
// RV32-NEXT: [[TMP146:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV32-NEXT: [[TMP147:%.*]] = call double @llvm.sin.f64(double [[TMP146]])
// RV32-NEXT: [[TMP148:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV32-NEXT: [[TMP149:%.*]] = call fp128 @llvm.sin.f128(fp128 [[TMP148]])
// RV32-NEXT: [[TMP150:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV32-NEXT: [[TMP151:%.*]] = call float @llvm.sqrt.f32(float [[TMP150]])
// RV32-NEXT: [[TMP152:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV32-NEXT: [[TMP153:%.*]] = call double @llvm.sqrt.f64(double [[TMP152]])
// RV32-NEXT: [[TMP154:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV32-NEXT: [[TMP155:%.*]] = call fp128 @llvm.sqrt.f128(fp128 [[TMP154]])
// RV32-NEXT: [[TMP156:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV32-NEXT: [[TMP157:%.*]] = call float @llvm.trunc.f32(float [[TMP156]])
// RV32-NEXT: [[TMP158:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV32-NEXT: [[TMP159:%.*]] = call double @llvm.trunc.f64(double [[TMP158]])
// RV32-NEXT: [[TMP160:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV32-NEXT: [[TMP161:%.*]] = call fp128 @llvm.trunc.f128(fp128 [[TMP160]])
// RV32-NEXT: ret void
//
// RV64-LABEL: define dso_local void @test(
// RV64-SAME: float noundef [[FARG:%.*]], double noundef [[DARG:%.*]], fp128 noundef [[LDARG:%.*]]) #[[ATTR0:[0-9]+]] {
// RV64-NEXT: entry:
// RV64-NEXT: [[FARG_ADDR:%.*]] = alloca float, align 4
// RV64-NEXT: [[DARG_ADDR:%.*]] = alloca double, align 8
// RV64-NEXT: [[LDARG_ADDR:%.*]] = alloca fp128, align 16
// RV64-NEXT: store float [[FARG]], ptr [[FARG_ADDR]], align 4
// RV64-NEXT: store double [[DARG]], ptr [[DARG_ADDR]], align 8
// RV64-NEXT: store fp128 [[LDARG]], ptr [[LDARG_ADDR]], align 16
// RV64-NEXT: [[TMP0:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV64-NEXT: [[TMP1:%.*]] = call float @llvm.ceil.f32(float [[TMP0]])
// RV64-NEXT: [[TMP2:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV64-NEXT: [[TMP3:%.*]] = call double @llvm.ceil.f64(double [[TMP2]])
// RV64-NEXT: [[TMP4:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV64-NEXT: [[TMP5:%.*]] = call fp128 @llvm.ceil.f128(fp128 [[TMP4]])
// RV64-NEXT: [[TMP6:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV64-NEXT: [[TMP7:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV64-NEXT: [[TMP8:%.*]] = call float @llvm.copysign.f32(float [[TMP6]], float [[TMP7]])
// RV64-NEXT: [[TMP9:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV64-NEXT: [[TMP10:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV64-NEXT: [[TMP11:%.*]] = call double @llvm.copysign.f64(double [[TMP9]], double [[TMP10]])
// RV64-NEXT: [[TMP12:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV64-NEXT: [[TMP13:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV64-NEXT: [[TMP14:%.*]] = call fp128 @llvm.copysign.f128(fp128 [[TMP12]], fp128 [[TMP13]])
// RV64-NEXT: [[TMP15:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV64-NEXT: [[TMP16:%.*]] = call float @llvm.cos.f32(float [[TMP15]])
// RV64-NEXT: [[TMP17:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV64-NEXT: [[TMP18:%.*]] = call double @llvm.cos.f64(double [[TMP17]])
// RV64-NEXT: [[TMP19:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV64-NEXT: [[TMP20:%.*]] = call fp128 @llvm.cos.f128(fp128 [[TMP19]])
// RV64-NEXT: [[TMP21:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV64-NEXT: [[TMP22:%.*]] = call float @llvm.exp.f32(float [[TMP21]])
// RV64-NEXT: [[TMP23:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV64-NEXT: [[TMP24:%.*]] = call double @llvm.exp.f64(double [[TMP23]])
// RV64-NEXT: [[TMP25:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV64-NEXT: [[TMP26:%.*]] = call fp128 @llvm.exp.f128(fp128 [[TMP25]])
// RV64-NEXT: [[TMP27:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV64-NEXT: [[TMP28:%.*]] = call float @llvm.exp2.f32(float [[TMP27]])
// RV64-NEXT: [[TMP29:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV64-NEXT: [[TMP30:%.*]] = call double @llvm.exp2.f64(double [[TMP29]])
// RV64-NEXT: [[TMP31:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV64-NEXT: [[TMP32:%.*]] = call fp128 @llvm.exp2.f128(fp128 [[TMP31]])
// RV64-NEXT: [[TMP33:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV64-NEXT: [[TMP34:%.*]] = call float @llvm.fabs.f32(float [[TMP33]])
// RV64-NEXT: [[TMP35:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV64-NEXT: [[TMP36:%.*]] = call double @llvm.fabs.f64(double [[TMP35]])
// RV64-NEXT: [[TMP37:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV64-NEXT: [[TMP38:%.*]] = call fp128 @llvm.fabs.f128(fp128 [[TMP37]])
// RV64-NEXT: [[TMP39:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV64-NEXT: [[TMP40:%.*]] = call float @llvm.floor.f32(float [[TMP39]])
// RV64-NEXT: [[TMP41:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV64-NEXT: [[TMP42:%.*]] = call double @llvm.floor.f64(double [[TMP41]])
// RV64-NEXT: [[TMP43:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV64-NEXT: [[TMP44:%.*]] = call fp128 @llvm.floor.f128(fp128 [[TMP43]])
// RV64-NEXT: [[TMP45:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV64-NEXT: [[TMP46:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV64-NEXT: [[TMP47:%.*]] = call float @llvm.maxnum.f32(float [[TMP45]], float [[TMP46]])
// RV64-NEXT: [[TMP48:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV64-NEXT: [[TMP49:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV64-NEXT: [[TMP50:%.*]] = call double @llvm.maxnum.f64(double [[TMP48]], double [[TMP49]])
// RV64-NEXT: [[TMP51:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV64-NEXT: [[TMP52:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV64-NEXT: [[TMP53:%.*]] = call fp128 @llvm.maxnum.f128(fp128 [[TMP51]], fp128 [[TMP52]])
// RV64-NEXT: [[TMP54:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV64-NEXT: [[TMP55:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV64-NEXT: [[TMP56:%.*]] = call float @llvm.minnum.f32(float [[TMP54]], float [[TMP55]])
// RV64-NEXT: [[TMP57:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV64-NEXT: [[TMP58:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV64-NEXT: [[TMP59:%.*]] = call double @llvm.minnum.f64(double [[TMP57]], double [[TMP58]])
// RV64-NEXT: [[TMP60:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV64-NEXT: [[TMP61:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV64-NEXT: [[TMP62:%.*]] = call fp128 @llvm.minnum.f128(fp128 [[TMP60]], fp128 [[TMP61]])
// RV64-NEXT: [[TMP63:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV64-NEXT: [[TMP64:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV64-NEXT: [[FMOD:%.*]] = frem float [[TMP63]], [[TMP64]]
// RV64-NEXT: [[TMP65:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV64-NEXT: [[TMP66:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV64-NEXT: [[FMOD1:%.*]] = frem double [[TMP65]], [[TMP66]]
// RV64-NEXT: [[TMP67:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV64-NEXT: [[TMP68:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV64-NEXT: [[FMOD2:%.*]] = frem fp128 [[TMP67]], [[TMP68]]
// RV64-NEXT: [[TMP69:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV64-NEXT: [[TMP70:%.*]] = call float @llvm.log.f32(float [[TMP69]])
// RV64-NEXT: [[TMP71:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV64-NEXT: [[TMP72:%.*]] = call double @llvm.log.f64(double [[TMP71]])
// RV64-NEXT: [[TMP73:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV64-NEXT: [[TMP74:%.*]] = call fp128 @llvm.log.f128(fp128 [[TMP73]])
// RV64-NEXT: [[TMP75:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV64-NEXT: [[TMP76:%.*]] = call float @llvm.log10.f32(float [[TMP75]])
// RV64-NEXT: [[TMP77:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV64-NEXT: [[TMP78:%.*]] = call double @llvm.log10.f64(double [[TMP77]])
// RV64-NEXT: [[TMP79:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV64-NEXT: [[TMP80:%.*]] = call fp128 @llvm.log10.f128(fp128 [[TMP79]])
// RV64-NEXT: [[TMP81:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV64-NEXT: [[TMP82:%.*]] = call float @llvm.log2.f32(float [[TMP81]])
// RV64-NEXT: [[TMP83:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV64-NEXT: [[TMP84:%.*]] = call double @llvm.log2.f64(double [[TMP83]])
// RV64-NEXT: [[TMP85:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV64-NEXT: [[TMP86:%.*]] = call fp128 @llvm.log2.f128(fp128 [[TMP85]])
// RV64-NEXT: [[TMP87:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV64-NEXT: [[TMP88:%.*]] = call float @llvm.nearbyint.f32(float [[TMP87]])
// RV64-NEXT: [[TMP89:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV64-NEXT: [[TMP90:%.*]] = call double @llvm.nearbyint.f64(double [[TMP89]])
// RV64-NEXT: [[TMP91:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV64-NEXT: [[TMP92:%.*]] = call fp128 @llvm.nearbyint.f128(fp128 [[TMP91]])
// RV64-NEXT: [[TMP93:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV64-NEXT: [[TMP94:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV64-NEXT: [[TMP95:%.*]] = call float @llvm.pow.f32(float [[TMP93]], float [[TMP94]])
// RV64-NEXT: [[TMP96:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV64-NEXT: [[TMP97:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV64-NEXT: [[TMP98:%.*]] = call double @llvm.pow.f64(double [[TMP96]], double [[TMP97]])
// RV64-NEXT: [[TMP99:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV64-NEXT: [[TMP100:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV64-NEXT: [[TMP101:%.*]] = call fp128 @llvm.pow.f128(fp128 [[TMP99]], fp128 [[TMP100]])
// RV64-NEXT: [[TMP102:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV64-NEXT: [[TMP103:%.*]] = call float @llvm.rint.f32(float [[TMP102]])
// RV64-NEXT: [[TMP104:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV64-NEXT: [[TMP105:%.*]] = call double @llvm.rint.f64(double [[TMP104]])
// RV64-NEXT: [[TMP106:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV64-NEXT: [[TMP107:%.*]] = call fp128 @llvm.rint.f128(fp128 [[TMP106]])
// RV64-NEXT: [[TMP108:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV64-NEXT: [[TMP109:%.*]] = call i64 @llvm.lrint.i64.f32(float [[TMP108]])
// RV64-NEXT: [[TMP110:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV64-NEXT: [[TMP111:%.*]] = call i64 @llvm.lrint.i64.f64(double [[TMP110]])
// RV64-NEXT: [[TMP112:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV64-NEXT: [[TMP113:%.*]] = call i64 @llvm.lrint.i64.f128(fp128 [[TMP112]])
// RV64-NEXT: [[TMP114:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV64-NEXT: [[TMP115:%.*]] = call i64 @llvm.lrint.i64.f32(float [[TMP114]])
// RV64-NEXT: [[TMP116:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV64-NEXT: [[TMP117:%.*]] = call i64 @llvm.llrint.i64.f64(double [[TMP116]])
// RV64-NEXT: [[TMP118:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV64-NEXT: [[TMP119:%.*]] = call i64 @llvm.llrint.i64.f128(fp128 [[TMP118]])
// RV64-NEXT: [[TMP120:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV64-NEXT: [[TMP121:%.*]] = call float @llvm.round.f32(float [[TMP120]])
// RV64-NEXT: [[TMP122:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV64-NEXT: [[TMP123:%.*]] = call double @llvm.round.f64(double [[TMP122]])
// RV64-NEXT: [[TMP124:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV64-NEXT: [[TMP125:%.*]] = call fp128 @llvm.round.f128(fp128 [[TMP124]])
// RV64-NEXT: [[TMP126:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV64-NEXT: [[TMP127:%.*]] = call i64 @llvm.lround.i64.f32(float [[TMP126]])
// RV64-NEXT: [[TMP128:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV64-NEXT: [[TMP129:%.*]] = call i64 @llvm.lround.i64.f64(double [[TMP128]])
// RV64-NEXT: [[TMP130:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV64-NEXT: [[TMP131:%.*]] = call i64 @llvm.lround.i64.f128(fp128 [[TMP130]])
// RV64-NEXT: [[TMP132:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV64-NEXT: [[TMP133:%.*]] = call i64 @llvm.llround.i64.f32(float [[TMP132]])
// RV64-NEXT: [[TMP134:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV64-NEXT: [[TMP135:%.*]] = call i64 @llvm.llround.i64.f64(double [[TMP134]])
// RV64-NEXT: [[TMP136:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV64-NEXT: [[TMP137:%.*]] = call i64 @llvm.llround.i64.f128(fp128 [[TMP136]])
// RV64-NEXT: [[TMP138:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV64-NEXT: [[TMP139:%.*]] = call float @llvm.roundeven.f32(float [[TMP138]])
// RV64-NEXT: [[TMP140:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV64-NEXT: [[TMP141:%.*]] = call double @llvm.roundeven.f64(double [[TMP140]])
// RV64-NEXT: [[TMP142:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV64-NEXT: [[TMP143:%.*]] = call fp128 @llvm.roundeven.f128(fp128 [[TMP142]])
// RV64-NEXT: [[TMP144:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV64-NEXT: [[TMP145:%.*]] = call float @llvm.sin.f32(float [[TMP144]])
// RV64-NEXT: [[TMP146:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV64-NEXT: [[TMP147:%.*]] = call double @llvm.sin.f64(double [[TMP146]])
// RV64-NEXT: [[TMP148:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV64-NEXT: [[TMP149:%.*]] = call fp128 @llvm.sin.f128(fp128 [[TMP148]])
// RV64-NEXT: [[TMP150:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV64-NEXT: [[TMP151:%.*]] = call float @llvm.sqrt.f32(float [[TMP150]])
// RV64-NEXT: [[TMP152:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV64-NEXT: [[TMP153:%.*]] = call double @llvm.sqrt.f64(double [[TMP152]])
// RV64-NEXT: [[TMP154:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV64-NEXT: [[TMP155:%.*]] = call fp128 @llvm.sqrt.f128(fp128 [[TMP154]])
// RV64-NEXT: [[TMP156:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV64-NEXT: [[TMP157:%.*]] = call float @llvm.trunc.f32(float [[TMP156]])
// RV64-NEXT: [[TMP158:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV64-NEXT: [[TMP159:%.*]] = call double @llvm.trunc.f64(double [[TMP158]])
// RV64-NEXT: [[TMP160:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV64-NEXT: [[TMP161:%.*]] = call fp128 @llvm.trunc.f128(fp128 [[TMP160]])
// RV64-NEXT: ret void
//
void test(float farg, double darg, long double ldarg) {
  ceilf(farg); ceil(darg); ceill(ldarg);
  copysignf(farg, farg); copysign(darg, darg); copysignl(ldarg, ldarg);
  cosf(farg); cos(darg); cosl(ldarg);
  expf(farg); exp(darg); expl(ldarg);
  exp2f(farg); exp2(darg); exp2l(ldarg);
  fabsf(farg); fabs(darg); fabsl(ldarg);
  floorf(farg); floor(darg); floorl(ldarg);
  fmaxf(farg, farg); fmax(darg, darg); fmaxl(ldarg, ldarg);
  fminf(farg, farg); fmin(darg, darg); fminl(ldarg, ldarg);
  fmodf(farg, farg); fmod(darg, darg); fmodl(ldarg, ldarg);
  logf(farg); log(darg); logl(ldarg);
  log10f(farg); log10(darg); log10l(ldarg);
  log2f(farg); log2(darg); log2l(ldarg);
  nearbyintf(farg); nearbyint(darg); nearbyintl(ldarg);
  powf(farg, farg); pow(darg, darg); powl(ldarg, ldarg);
  rintf(farg); rint(darg); rintl(ldarg);
  lrintf(farg); lrint(darg); lrintl(ldarg);
  lrintf(farg); llrint(darg); llrintl(ldarg);
  roundf(farg); round(darg); roundl(ldarg);
  lroundf(farg); lround(darg); lroundl(ldarg);
  llroundf(farg); llround(darg); llroundl(ldarg);
  roundevenf(farg); roundeven(darg); roundevenl(ldarg);
  sinf(farg); sin(darg); sinl(ldarg);
  sqrtf(farg); sqrt(darg); sqrtl(ldarg);
  truncf(farg); trunc(darg); truncl(ldarg);
}