1 ; RUN: opt -vector-library=SVML -inject-tli-mappings -loop-vectorize -S < %s | FileCheck %s
3 ; Test to verify that when math headers are built with
4 ; __FINITE_MATH_ONLY__ enabled, causing use of __<func>_finite
5 ; function versions, vectorization can map these to vector versions.
7 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
8 target triple = "x86_64-unknown-linux-gnu"
10 declare float @__expf_finite(float) #0

; Single-precision exp: the loop stores __expf_finite((float)i) into
; varray[i] for i = 0..999. Vectorization (forced to width 4 by !1)
; should map the finite call to the SVML vector version checked below.
12 ; CHECK-LABEL: @exp_f32
13 ; CHECK: <4 x float> @__svml_expf4
15 define void @exp_f32(float* nocapture %varray) {
19 for.body: ; preds = %for.body, %entry
20 %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
21 %tmp = trunc i64 %indvars.iv to i32
22 %conv = sitofp i32 %tmp to float
23 %call = tail call fast float @__expf_finite(float %conv)
24 %arrayidx = getelementptr inbounds float, float* %varray, i64 %indvars.iv
25 store float %call, float* %arrayidx, align 4
26 %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
27 %exitcond = icmp eq i64 %indvars.iv.next, 1000
28 br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !1
30 for.end: ; preds = %for.body

; Loop metadata: self-referential loop ID forcing vectorization at width 4.
34 !1 = distinct !{!1, !2, !3}
35 !2 = !{!"llvm.loop.vectorize.width", i32 4}
36 !3 = !{!"llvm.loop.vectorize.enable", i1 true}
39 declare double @__exp_finite(double) #0

; Double-precision exp: same loop shape as @exp_f32 but on double;
; expects the 4-wide double SVML variant (width 4 forced by !11).
41 ; CHECK-LABEL: @exp_f64
42 ; CHECK: <4 x double> @__svml_exp4
44 define void @exp_f64(double* nocapture %varray) {
48 for.body: ; preds = %for.body, %entry
49 %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
50 %tmp = trunc i64 %indvars.iv to i32
51 %conv = sitofp i32 %tmp to double
52 %call = tail call fast double @__exp_finite(double %conv)
53 %arrayidx = getelementptr inbounds double, double* %varray, i64 %indvars.iv
54 store double %call, double* %arrayidx, align 4
55 %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
56 %exitcond = icmp eq i64 %indvars.iv.next, 1000
57 br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !11
59 for.end: ; preds = %for.body

; Loop metadata: self-referential loop ID forcing vectorization at width 4.
63 !11 = distinct !{!11, !12, !13}
64 !12 = !{!"llvm.loop.vectorize.width", i32 4}
65 !13 = !{!"llvm.loop.vectorize.enable", i1 true}
70 declare float @__logf_finite(float) #0

; Single-precision log: loop computes varray[i] = __logf_finite((float)i);
; should vectorize to the SVML call checked below (width 4 via !21).
72 ; CHECK-LABEL: @log_f32
73 ; CHECK: <4 x float> @__svml_logf4
75 define void @log_f32(float* nocapture %varray) {
79 for.body: ; preds = %for.body, %entry
80 %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
81 %tmp = trunc i64 %indvars.iv to i32
82 %conv = sitofp i32 %tmp to float
83 %call = tail call fast float @__logf_finite(float %conv)
84 %arrayidx = getelementptr inbounds float, float* %varray, i64 %indvars.iv
85 store float %call, float* %arrayidx, align 4
86 %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
87 %exitcond = icmp eq i64 %indvars.iv.next, 1000
88 br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !21
90 for.end: ; preds = %for.body

; Loop metadata: self-referential loop ID forcing vectorization at width 4.
94 !21 = distinct !{!21, !22, !23}
95 !22 = !{!"llvm.loop.vectorize.width", i32 4}
96 !23 = !{!"llvm.loop.vectorize.enable", i1 true}
99 declare double @__log_finite(double) #0

; Double-precision log: same loop shape as @log_f32 but on double;
; expects the 4-wide double SVML variant (width 4 forced by !31).
101 ; CHECK-LABEL: @log_f64
102 ; CHECK: <4 x double> @__svml_log4
104 define void @log_f64(double* nocapture %varray) {
108 for.body: ; preds = %for.body, %entry
109 %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
110 %tmp = trunc i64 %indvars.iv to i32
111 %conv = sitofp i32 %tmp to double
112 %call = tail call fast double @__log_finite(double %conv)
113 %arrayidx = getelementptr inbounds double, double* %varray, i64 %indvars.iv
114 store double %call, double* %arrayidx, align 4
115 %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
116 %exitcond = icmp eq i64 %indvars.iv.next, 1000
117 br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !31
119 for.end: ; preds = %for.body

; Loop metadata: self-referential loop ID forcing vectorization at width 4.
123 !31 = distinct !{!31, !32, !33}
124 !32 = !{!"llvm.loop.vectorize.width", i32 4}
125 !33 = !{!"llvm.loop.vectorize.enable", i1 true}
128 declare float @__powf_finite(float, float) #0

; Single-precision pow, two-operand case: the exponent is loaded from a
; second array (%exp), so the vectorizer must widen both operands into
; the SVML call checked below (width 4 forced by !41).
130 ; CHECK-LABEL: @pow_f32
131 ; CHECK: <4 x float> @__svml_powf4
133 define void @pow_f32(float* nocapture %varray, float* nocapture readonly %exp) {
137 for.body: ; preds = %for.body, %entry
138 %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
139 %tmp = trunc i64 %indvars.iv to i32
140 %conv = sitofp i32 %tmp to float
141 %arrayidx = getelementptr inbounds float, float* %exp, i64 %indvars.iv
142 %tmp1 = load float, float* %arrayidx, align 4
143 %tmp2 = tail call fast float @__powf_finite(float %conv, float %tmp1)
144 %arrayidx2 = getelementptr inbounds float, float* %varray, i64 %indvars.iv
145 store float %tmp2, float* %arrayidx2, align 4
146 %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
147 %exitcond = icmp eq i64 %indvars.iv.next, 1000
148 br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !41
150 for.end: ; preds = %for.body

; Loop metadata: self-referential loop ID forcing vectorization at width 4.
154 !41 = distinct !{!41, !42, !43}
155 !42 = !{!"llvm.loop.vectorize.width", i32 4}
156 !43 = !{!"llvm.loop.vectorize.enable", i1 true}
159 declare double @__pow_finite(double, double) #0

; Double-precision pow, two-operand case: same shape as @pow_f32 but on
; double; expects the 4-wide double SVML variant (width 4 forced by !51).
161 ; CHECK-LABEL: @pow_f64
162 ; CHECK: <4 x double> @__svml_pow4
164 define void @pow_f64(double* nocapture %varray, double* nocapture readonly %exp) {
168 for.body: ; preds = %for.body, %entry
169 %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
170 %tmp = trunc i64 %indvars.iv to i32
171 %conv = sitofp i32 %tmp to double
172 %arrayidx = getelementptr inbounds double, double* %exp, i64 %indvars.iv
173 %tmp1 = load double, double* %arrayidx, align 4
174 %tmp2 = tail call fast double @__pow_finite(double %conv, double %tmp1)
175 %arrayidx2 = getelementptr inbounds double, double* %varray, i64 %indvars.iv
176 store double %tmp2, double* %arrayidx2, align 4
177 %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
178 %exitcond = icmp eq i64 %indvars.iv.next, 1000
179 br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !51
181 for.end: ; preds = %for.body

; Loop metadata: self-referential loop ID forcing vectorization at width 4.
185 !51 = distinct !{!51, !52, !53}
186 !52 = !{!"llvm.loop.vectorize.width", i32 4}
187 !53 = !{!"llvm.loop.vectorize.enable", i1 true}
189 declare float @__exp2f_finite(float) #0

; Single-precision exp2. Note: unlike the earlier tests, the call here
; carries no 'fast' flag, so this also checks that the mapping does not
; depend on fast-math flags (width 4 forced by !61).
191 define void @exp2f_finite(float* nocapture %varray) {
192 ; CHECK-LABEL: @exp2f_finite(
193 ; CHECK: call <4 x float> @__svml_exp2f4(<4 x float> %{{.*}})
200 %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
201 %tmp = trunc i64 %iv to i32
202 %conv = sitofp i32 %tmp to float
203 %call = tail call float @__exp2f_finite(float %conv)
204 %arrayidx = getelementptr inbounds float, float* %varray, i64 %iv
205 store float %call, float* %arrayidx, align 4
206 %iv.next = add nuw nsw i64 %iv, 1
207 %exitcond = icmp eq i64 %iv.next, 1000
208 br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !61
214 !61 = distinct !{!61, !62, !63}
215 !62 = !{!"llvm.loop.vectorize.width", i32 4}
216 !63 = !{!"llvm.loop.vectorize.enable", i1 true}
218 declare double @__exp2_finite(double) #0

; Double-precision exp2: same shape as @exp2f_finite (no 'fast' flag on
; the call); expects the 4-wide double SVML variant (width 4 via !71).
220 define void @exp2_finite(double* nocapture %varray) {
221 ; CHECK-LABEL: @exp2_finite(
222 ; CHECK: call <4 x double> @__svml_exp24(<4 x double> {{.*}})
229 %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
230 %tmp = trunc i64 %iv to i32
231 %conv = sitofp i32 %tmp to double
232 %call = tail call double @__exp2_finite(double %conv)
233 %arrayidx = getelementptr inbounds double, double* %varray, i64 %iv
234 store double %call, double* %arrayidx, align 4
235 %iv.next = add nuw nsw i64 %iv, 1
236 %exitcond = icmp eq i64 %iv.next, 1000
237 br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !71
243 !71 = distinct !{!71, !72, !73}
244 !72 = !{!"llvm.loop.vectorize.width", i32 4}
245 !73 = !{!"llvm.loop.vectorize.enable", i1 true}
247 declare float @__log2f_finite(float) #0

; Single-precision log2: loop computes varray[i] = __log2f_finite((float)i);
; should vectorize to the SVML call checked below (width 4 via !81).
; NOTE(review): fixed a copy-paste error — the branch referenced !21
; (log_f32's loop ID) and !81 was not self-referential, leaving !82/!83
; orphaned. Now follows the same pattern as the earlier functions.
249 ; CHECK-LABEL: @log2_f32
250 ; CHECK: <4 x float> @__svml_log2f4
252 define void @log2_f32(float* nocapture %varray) {
256 for.body: ; preds = %for.body, %entry
257 %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
258 %tmp = trunc i64 %indvars.iv to i32
259 %conv = sitofp i32 %tmp to float
260 %call = tail call fast float @__log2f_finite(float %conv)
261 %arrayidx = getelementptr inbounds float, float* %varray, i64 %indvars.iv
262 store float %call, float* %arrayidx, align 4
263 %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
264 %exitcond = icmp eq i64 %indvars.iv.next, 1000
265 br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !81
267 for.end: ; preds = %for.body

; Loop metadata: self-referential loop ID forcing vectorization at width 4.
271 !81 = distinct !{!81, !82, !83}
272 !82 = !{!"llvm.loop.vectorize.width", i32 4}
273 !83 = !{!"llvm.loop.vectorize.enable", i1 true}
276 declare double @__log2_finite(double) #0

; Double-precision log2: same loop shape as @log2_f32 but on double;
; expects the 4-wide double SVML variant (width 4 forced by !91).
; NOTE(review): fixed a copy-paste error — the branch referenced !31
; (log_f64's loop ID) and !91 was not self-referential, leaving !92/!93
; orphaned. Now follows the same pattern as the earlier functions.
278 ; CHECK-LABEL: @log2_f64
279 ; CHECK: <4 x double> @__svml_log24
281 define void @log2_f64(double* nocapture %varray) {
285 for.body: ; preds = %for.body, %entry
286 %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
287 %tmp = trunc i64 %indvars.iv to i32
288 %conv = sitofp i32 %tmp to double
289 %call = tail call fast double @__log2_finite(double %conv)
290 %arrayidx = getelementptr inbounds double, double* %varray, i64 %indvars.iv
291 store double %call, double* %arrayidx, align 4
292 %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
293 %exitcond = icmp eq i64 %indvars.iv.next, 1000
294 br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !91
296 for.end: ; preds = %for.body

; Loop metadata: self-referential loop ID forcing vectorization at width 4.
300 !91 = distinct !{!91, !92, !93}
301 !92 = !{!"llvm.loop.vectorize.width", i32 4}
302 !93 = !{!"llvm.loop.vectorize.enable", i1 true}
304 declare float @__log10f_finite(float) #0

; Single-precision log10: loop computes varray[i] = __log10f_finite((float)i);
; should vectorize to the SVML call checked below (width 4 via !101).
; NOTE(review): fixed a copy-paste error — the branch referenced !21
; (log_f32's loop ID) and !101 was not self-referential, leaving
; !102/!103 orphaned. Now follows the pattern of the earlier functions.
306 ; CHECK-LABEL: @log10_f32
307 ; CHECK: <4 x float> @__svml_log10f4
309 define void @log10_f32(float* nocapture %varray) {
313 for.body: ; preds = %for.body, %entry
314 %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
315 %tmp = trunc i64 %indvars.iv to i32
316 %conv = sitofp i32 %tmp to float
317 %call = tail call fast float @__log10f_finite(float %conv)
318 %arrayidx = getelementptr inbounds float, float* %varray, i64 %indvars.iv
319 store float %call, float* %arrayidx, align 4
320 %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
321 %exitcond = icmp eq i64 %indvars.iv.next, 1000
322 br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !101
324 for.end: ; preds = %for.body

; Loop metadata: self-referential loop ID forcing vectorization at width 4.
328 !101 = distinct !{!101, !102, !103}
329 !102 = !{!"llvm.loop.vectorize.width", i32 4}
330 !103 = !{!"llvm.loop.vectorize.enable", i1 true}
333 declare double @__log10_finite(double) #0

; Double-precision log10: same loop shape as @log10_f32 but on double;
; expects the 4-wide double SVML variant (width 4 forced by !111).
; NOTE(review): fixed a copy-paste error — the branch referenced !31
; (log_f64's loop ID) and !111 was not self-referential, leaving
; !112/!113 orphaned. Now follows the pattern of the earlier functions.
335 ; CHECK-LABEL: @log10_f64
336 ; CHECK: <4 x double> @__svml_log104
338 define void @log10_f64(double* nocapture %varray) {
342 for.body: ; preds = %for.body, %entry
343 %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
344 %tmp = trunc i64 %indvars.iv to i32
345 %conv = sitofp i32 %tmp to double
346 %call = tail call fast double @__log10_finite(double %conv)
347 %arrayidx = getelementptr inbounds double, double* %varray, i64 %indvars.iv
348 store double %call, double* %arrayidx, align 4
349 %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
350 %exitcond = icmp eq i64 %indvars.iv.next, 1000
351 br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !111
353 for.end: ; preds = %for.body

; Loop metadata: self-referential loop ID forcing vectorization at width 4.
357 !111 = distinct !{!111, !112, !113}
358 !112 = !{!"llvm.loop.vectorize.width", i32 4}
359 !113 = !{!"llvm.loop.vectorize.enable", i1 true}
361 declare float @__sqrtf_finite(float) #0

; Single-precision sqrt: loop computes varray[i] = __sqrtf_finite((float)i);
; should vectorize to the SVML call checked below (width 4 via !121).
; NOTE(review): fixed a copy-paste error — the branch referenced !21
; (log_f32's loop ID) and !121 was not self-referential, leaving
; !122/!123 orphaned. Now follows the pattern of the earlier functions.
363 ; CHECK-LABEL: @sqrt_f32
364 ; CHECK: <4 x float> @__svml_sqrtf4
366 define void @sqrt_f32(float* nocapture %varray) {
370 for.body: ; preds = %for.body, %entry
371 %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
372 %tmp = trunc i64 %indvars.iv to i32
373 %conv = sitofp i32 %tmp to float
374 %call = tail call fast float @__sqrtf_finite(float %conv)
375 %arrayidx = getelementptr inbounds float, float* %varray, i64 %indvars.iv
376 store float %call, float* %arrayidx, align 4
377 %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
378 %exitcond = icmp eq i64 %indvars.iv.next, 1000
379 br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !121
381 for.end: ; preds = %for.body

; Loop metadata: self-referential loop ID forcing vectorization at width 4.
385 !121 = distinct !{!121, !122, !123}
386 !122 = !{!"llvm.loop.vectorize.width", i32 4}
387 !123 = !{!"llvm.loop.vectorize.enable", i1 true}
390 declare double @__sqrt_finite(double) #0

; Double-precision sqrt: same loop shape as @sqrt_f32 but on double;
; expects the 4-wide double SVML variant (width 4 forced by !131).
; NOTE(review): fixed a copy-paste error — the branch referenced !31
; (log_f64's loop ID) and !131 was not self-referential, leaving
; !132/!133 orphaned. Now follows the pattern of the earlier functions.
392 ; CHECK-LABEL: @sqrt_f64
393 ; CHECK: <4 x double> @__svml_sqrt4
395 define void @sqrt_f64(double* nocapture %varray) {
399 for.body: ; preds = %for.body, %entry
400 %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
401 %tmp = trunc i64 %indvars.iv to i32
402 %conv = sitofp i32 %tmp to double
403 %call = tail call fast double @__sqrt_finite(double %conv)
404 %arrayidx = getelementptr inbounds double, double* %varray, i64 %indvars.iv
405 store double %call, double* %arrayidx, align 4
406 %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
407 %exitcond = icmp eq i64 %indvars.iv.next, 1000
408 br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !131
410 for.end: ; preds = %for.body

; Loop metadata: self-referential loop ID forcing vectorization at width 4.
414 !131 = distinct !{!131, !132, !133}
415 !132 = !{!"llvm.loop.vectorize.width", i32 4}
416 !133 = !{!"llvm.loop.vectorize.enable", i1 true}