; RUN: opt -vector-library=SVML -loop-vectorize -S < %s | FileCheck %s

; Test that when math headers are built with __FINITE_MATH_ONLY__
; enabled, causing calls to lower to the __<func>_finite function
; versions, the vectorizer can map them to the SVML vector versions.
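
; For illustration only (a sketch, not part of the test): C source along
; these lines, compiled with __FINITE_MATH_ONLY__ defined, is the kind of
; code that lowers expf/logf/powf calls to the __<func>_finite declarations
; exercised below. The function name and loop bound here are assumptions
; chosen to mirror the IR:
;
;   #include <math.h>
;   void exp_f32(float *varray) {
;     for (int i = 0; i < 1000; i++)
;       varray[i] = expf((float)i);   /* becomes @__expf_finite */
;   }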

target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"

declare float @__expf_finite(float) #0

; CHECK-LABEL: @exp_f32
; CHECK: <4 x float> @__svml_expf4

define void @exp_f32(float* nocapture %varray) {
entry:
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %tmp = trunc i64 %indvars.iv to i32
  %conv = sitofp i32 %tmp to float
  %call = tail call fast float @__expf_finite(float %conv)
  %arrayidx = getelementptr inbounds float, float* %varray, i64 %indvars.iv
  store float %call, float* %arrayidx, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !1

for.end:                                          ; preds = %for.body
  ret void
}

!1 = distinct !{!1, !2, !3}
!2 = !{!"llvm.loop.vectorize.width", i32 4}
!3 = !{!"llvm.loop.vectorize.enable", i1 true}
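
; Note: each !llvm.loop metadata group (like !1-!3 above) forces
; vectorization and pins the width to 4, so the CHECK lines can match a
; fixed-width <4 x ...> SVML call.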

declare double @__exp_finite(double) #0

; CHECK-LABEL: @exp_f64
; CHECK: <4 x double> @__svml_exp4

define void @exp_f64(double* nocapture %varray) {
entry:
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %tmp = trunc i64 %indvars.iv to i32
  %conv = sitofp i32 %tmp to double
  %call = tail call fast double @__exp_finite(double %conv)
  %arrayidx = getelementptr inbounds double, double* %varray, i64 %indvars.iv
  store double %call, double* %arrayidx, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !11

for.end:                                          ; preds = %for.body
  ret void
}

!11 = distinct !{!11, !12, !13}
!12 = !{!"llvm.loop.vectorize.width", i32 4}
!13 = !{!"llvm.loop.vectorize.enable", i1 true}

declare float @__logf_finite(float) #0

; CHECK-LABEL: @log_f32
; CHECK: <4 x float> @__svml_logf4

define void @log_f32(float* nocapture %varray) {
entry:
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %tmp = trunc i64 %indvars.iv to i32
  %conv = sitofp i32 %tmp to float
  %call = tail call fast float @__logf_finite(float %conv)
  %arrayidx = getelementptr inbounds float, float* %varray, i64 %indvars.iv
  store float %call, float* %arrayidx, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !21

for.end:                                          ; preds = %for.body
  ret void
}

!21 = distinct !{!21, !22, !23}
!22 = !{!"llvm.loop.vectorize.width", i32 4}
!23 = !{!"llvm.loop.vectorize.enable", i1 true}

declare double @__log_finite(double) #0

; CHECK-LABEL: @log_f64
; CHECK: <4 x double> @__svml_log4

define void @log_f64(double* nocapture %varray) {
entry:
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %tmp = trunc i64 %indvars.iv to i32
  %conv = sitofp i32 %tmp to double
  %call = tail call fast double @__log_finite(double %conv)
  %arrayidx = getelementptr inbounds double, double* %varray, i64 %indvars.iv
  store double %call, double* %arrayidx, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !31

for.end:                                          ; preds = %for.body
  ret void
}

!31 = distinct !{!31, !32, !33}
!32 = !{!"llvm.loop.vectorize.width", i32 4}
!33 = !{!"llvm.loop.vectorize.enable", i1 true}

declare float @__powf_finite(float, float) #0

; CHECK-LABEL: @pow_f32
; CHECK: <4 x float> @__svml_powf4

define void @pow_f32(float* nocapture %varray, float* nocapture readonly %exp) {
entry:
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %tmp = trunc i64 %indvars.iv to i32
  %conv = sitofp i32 %tmp to float
  %arrayidx = getelementptr inbounds float, float* %exp, i64 %indvars.iv
  %tmp1 = load float, float* %arrayidx, align 4
  %tmp2 = tail call fast float @__powf_finite(float %conv, float %tmp1)
  %arrayidx2 = getelementptr inbounds float, float* %varray, i64 %indvars.iv
  store float %tmp2, float* %arrayidx2, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !41

for.end:                                          ; preds = %for.body
  ret void
}

!41 = distinct !{!41, !42, !43}
!42 = !{!"llvm.loop.vectorize.width", i32 4}
!43 = !{!"llvm.loop.vectorize.enable", i1 true}

declare double @__pow_finite(double, double) #0

; CHECK-LABEL: @pow_f64
; CHECK: <4 x double> @__svml_pow4

define void @pow_f64(double* nocapture %varray, double* nocapture readonly %exp) {
entry:
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %tmp = trunc i64 %indvars.iv to i32
  %conv = sitofp i32 %tmp to double
  %arrayidx = getelementptr inbounds double, double* %exp, i64 %indvars.iv
  %tmp1 = load double, double* %arrayidx, align 4
  %tmp2 = tail call fast double @__pow_finite(double %conv, double %tmp1)
  %arrayidx2 = getelementptr inbounds double, double* %varray, i64 %indvars.iv
  store double %tmp2, double* %arrayidx2, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !51

for.end:                                          ; preds = %for.body
  ret void
}

!51 = distinct !{!51, !52, !53}
!52 = !{!"llvm.loop.vectorize.width", i32 4}
!53 = !{!"llvm.loop.vectorize.enable", i1 true}