1 ; RUN: opt -vector-library=AMDLIBM -passes=inject-tli-mappings,loop-vectorize -S < %s | FileCheck %s
3 ; Test to verify that when math headers are built with
4 ; __FINITE_MATH_ONLY__ enabled, causing use of __<func>_finite
5 ; function versions, vectorization can map these to vector versions.
7 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
8 target triple = "x86_64-unknown-linux-gnu"
; expf: the fast-math scalar __expf_finite call in a 1000-iteration loop
; must be mapped to the 4-wide AMDLIBM routine amd_vrs4_expf (VF=4 is
; forced by loop metadata !1).
10 declare float @__expf_finite(float) #0
12 ; CHECK-LABEL: @exp_f32
13 ; CHECK: <4 x float> @amd_vrs4_expf
15 define void @exp_f32(ptr nocapture %varray) {
19 for.body: ; preds = %for.body, %entry
; The i64 induction value, truncated and converted to float, feeds the call.
20 %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
21 %tmp = trunc i64 %indvars.iv to i32
22 %conv = sitofp i32 %tmp to float
23 %call = tail call fast float @__expf_finite(float %conv)
24 %arrayidx = getelementptr inbounds float, ptr %varray, i64 %indvars.iv
25 store float %call, ptr %arrayidx, align 4
26 %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
27 %exitcond = icmp eq i64 %indvars.iv.next, 1000
28 br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !1
30 for.end: ; preds = %for.body
; Loop metadata: force vectorization at width 4.
34 !1 = distinct !{!1, !2, !3}
35 !2 = !{!"llvm.loop.vectorize.width", i32 4}
36 !3 = !{!"llvm.loop.vectorize.enable", i1 true}
; exp (double): __exp_finite must be vectorized to amd_vrd4_exp
; (VF=4 forced by loop metadata !11).
39 declare double @__exp_finite(double) #0
41 ; CHECK-LABEL: @exp_f64
42 ; CHECK: <4 x double> @amd_vrd4_exp
44 define void @exp_f64(ptr nocapture %varray) {
48 for.body: ; preds = %for.body, %entry
49 %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
50 %tmp = trunc i64 %indvars.iv to i32
51 %conv = sitofp i32 %tmp to double
52 %call = tail call fast double @__exp_finite(double %conv)
53 %arrayidx = getelementptr inbounds double, ptr %varray, i64 %indvars.iv
54 store double %call, ptr %arrayidx, align 4
55 %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
56 %exitcond = icmp eq i64 %indvars.iv.next, 1000
57 br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !11
59 for.end: ; preds = %for.body
; Loop metadata: force vectorization at width 4.
63 !11 = distinct !{!11, !12, !13}
64 !12 = !{!"llvm.loop.vectorize.width", i32 4}
65 !13 = !{!"llvm.loop.vectorize.enable", i1 true}
; logf: __logf_finite must be vectorized to amd_vrs4_logf
; (VF=4 forced by loop metadata !21).
70 declare float @__logf_finite(float) #0
72 ; CHECK-LABEL: @log_f32
73 ; CHECK: <4 x float> @amd_vrs4_logf
75 define void @log_f32(ptr nocapture %varray) {
79 for.body: ; preds = %for.body, %entry
80 %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
81 %tmp = trunc i64 %indvars.iv to i32
82 %conv = sitofp i32 %tmp to float
83 %call = tail call fast float @__logf_finite(float %conv)
84 %arrayidx = getelementptr inbounds float, ptr %varray, i64 %indvars.iv
85 store float %call, ptr %arrayidx, align 4
86 %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
87 %exitcond = icmp eq i64 %indvars.iv.next, 1000
88 br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !21
90 for.end: ; preds = %for.body
; Loop metadata: force vectorization at width 4.
94 !21 = distinct !{!21, !22, !23}
95 !22 = !{!"llvm.loop.vectorize.width", i32 4}
96 !23 = !{!"llvm.loop.vectorize.enable", i1 true}
; log (double): __log_finite must be vectorized to amd_vrd4_log
; (VF=4 forced by loop metadata !31).
99 declare double @__log_finite(double) #0
101 ; CHECK-LABEL: @log_f64
102 ; CHECK: <4 x double> @amd_vrd4_log
104 define void @log_f64(ptr nocapture %varray) {
108 for.body: ; preds = %for.body, %entry
109 %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
110 %tmp = trunc i64 %indvars.iv to i32
111 %conv = sitofp i32 %tmp to double
112 %call = tail call fast double @__log_finite(double %conv)
113 %arrayidx = getelementptr inbounds double, ptr %varray, i64 %indvars.iv
114 store double %call, ptr %arrayidx, align 4
115 %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
116 %exitcond = icmp eq i64 %indvars.iv.next, 1000
117 br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !31
119 for.end: ; preds = %for.body
; Loop metadata: force vectorization at width 4.
123 !31 = distinct !{!31, !32, !33}
124 !32 = !{!"llvm.loop.vectorize.width", i32 4}
125 !33 = !{!"llvm.loop.vectorize.enable", i1 true}
; powf: two-argument __powf_finite (base from the induction variable,
; exponent loaded from %exp) must be vectorized to amd_vrs4_powf
; (VF=4 forced by loop metadata !41).
128 declare float @__powf_finite(float, float) #0
130 ; CHECK-LABEL: @pow_f32
131 ; CHECK: <4 x float> @amd_vrs4_powf
133 define void @pow_f32(ptr nocapture %varray, ptr nocapture readonly %exp) {
137 for.body: ; preds = %for.body, %entry
138 %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
139 %tmp = trunc i64 %indvars.iv to i32
140 %conv = sitofp i32 %tmp to float
; Per-iteration exponent load makes the second operand a vector as well.
141 %arrayidx = getelementptr inbounds float, ptr %exp, i64 %indvars.iv
142 %tmp1 = load float, ptr %arrayidx, align 4
143 %tmp2 = tail call fast float @__powf_finite(float %conv, float %tmp1)
144 %arrayidx2 = getelementptr inbounds float, ptr %varray, i64 %indvars.iv
145 store float %tmp2, ptr %arrayidx2, align 4
146 %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
147 %exitcond = icmp eq i64 %indvars.iv.next, 1000
148 br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !41
150 for.end: ; preds = %for.body
; Loop metadata: force vectorization at width 4.
154 !41 = distinct !{!41, !42, !43}
155 !42 = !{!"llvm.loop.vectorize.width", i32 4}
156 !43 = !{!"llvm.loop.vectorize.enable", i1 true}
; pow (double): two-argument __pow_finite must be vectorized to
; amd_vrd4_pow (VF=4 forced by loop metadata !51).
159 declare double @__pow_finite(double, double) #0
161 ; CHECK-LABEL: @pow_f64
162 ; CHECK: <4 x double> @amd_vrd4_pow
164 define void @pow_f64(ptr nocapture %varray, ptr nocapture readonly %exp) {
168 for.body: ; preds = %for.body, %entry
169 %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
170 %tmp = trunc i64 %indvars.iv to i32
171 %conv = sitofp i32 %tmp to double
172 %arrayidx = getelementptr inbounds double, ptr %exp, i64 %indvars.iv
173 %tmp1 = load double, ptr %arrayidx, align 4
174 %tmp2 = tail call fast double @__pow_finite(double %conv, double %tmp1)
175 %arrayidx2 = getelementptr inbounds double, ptr %varray, i64 %indvars.iv
176 store double %tmp2, ptr %arrayidx2, align 4
177 %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
178 %exitcond = icmp eq i64 %indvars.iv.next, 1000
179 br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !51
181 for.end: ; preds = %for.body
; Loop metadata: force vectorization at width 4.
185 !51 = distinct !{!51, !52, !53}
186 !52 = !{!"llvm.loop.vectorize.width", i32 4}
187 !53 = !{!"llvm.loop.vectorize.enable", i1 true}
; exp2f: __exp2f_finite must be vectorized to amd_vrs4_exp2f.
; Note: unlike the earlier tests, this call carries no `fast` flag.
189 declare float @__exp2f_finite(float) #0
191 define void @exp2f_finite(ptr nocapture %varray) {
192 ; CHECK-LABEL: @exp2f_finite(
193 ; CHECK: call <4 x float> @amd_vrs4_exp2f(<4 x float> %{{.*}})
200 %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
201 %tmp = trunc i64 %iv to i32
202 %conv = sitofp i32 %tmp to float
203 %call = tail call float @__exp2f_finite(float %conv)
204 %arrayidx = getelementptr inbounds float, ptr %varray, i64 %iv
205 store float %call, ptr %arrayidx, align 4
206 %iv.next = add nuw nsw i64 %iv, 1
207 %exitcond = icmp eq i64 %iv.next, 1000
208 br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !61
; Loop metadata: force vectorization at width 4.
214 !61 = distinct !{!61, !62, !63}
215 !62 = !{!"llvm.loop.vectorize.width", i32 4}
216 !63 = !{!"llvm.loop.vectorize.enable", i1 true}
; exp2 (double): __exp2_finite must be vectorized to amd_vrd4_exp2.
; As with exp2f_finite above, the call carries no `fast` flag.
218 declare double @__exp2_finite(double) #0
220 define void @exp2_finite(ptr nocapture %varray) {
221 ; CHECK-LABEL: @exp2_finite(
222 ; CHECK: call <4 x double> @amd_vrd4_exp2(<4 x double> {{.*}})
229 %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
230 %tmp = trunc i64 %iv to i32
231 %conv = sitofp i32 %tmp to double
232 %call = tail call double @__exp2_finite(double %conv)
233 %arrayidx = getelementptr inbounds double, ptr %varray, i64 %iv
234 store double %call, ptr %arrayidx, align 4
235 %iv.next = add nuw nsw i64 %iv, 1
236 %exitcond = icmp eq i64 %iv.next, 1000
237 br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !71
; Loop metadata: force vectorization at width 4.
243 !71 = distinct !{!71, !72, !73}
244 !72 = !{!"llvm.loop.vectorize.width", i32 4}
245 !73 = !{!"llvm.loop.vectorize.enable", i1 true}
; log2f: __log2f_finite must be vectorized to amd_vrs4_log2f
; (VF=4 forced by loop metadata !81).
247 declare float @__log2f_finite(float) #0
249 ; CHECK-LABEL: @log2_f32
250 ; CHECK: <4 x float> @amd_vrs4_log2f
252 define void @log2_f32(ptr nocapture %varray) {
256 for.body: ; preds = %for.body, %entry
257 %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
258 %tmp = trunc i64 %indvars.iv to i32
259 %conv = sitofp i32 %tmp to float
260 %call = tail call fast float @__log2f_finite(float %conv)
261 %arrayidx = getelementptr inbounds float, ptr %varray, i64 %indvars.iv
262 store float %call, ptr %arrayidx, align 4
263 %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
264 %exitcond = icmp eq i64 %indvars.iv.next, 1000
265 br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !81
267 for.end: ; preds = %for.body
; Loop metadata: force vectorization at width 4. Fixed a copy-paste bug:
; the branch referenced log_f32's !21, leaving !81-!83 unused, and !81's
; first operand was !21 instead of the required self-reference.
271 !81 = distinct !{!81, !82, !83}
272 !82 = !{!"llvm.loop.vectorize.width", i32 4}
273 !83 = !{!"llvm.loop.vectorize.enable", i1 true}
; log2 (double): __log2_finite must be vectorized to amd_vrd4_log2
; (VF=4 forced by loop metadata !91).
276 declare double @__log2_finite(double) #0
278 ; CHECK-LABEL: @log2_f64
279 ; CHECK: <4 x double> @amd_vrd4_log2
281 define void @log2_f64(ptr nocapture %varray) {
285 for.body: ; preds = %for.body, %entry
286 %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
287 %tmp = trunc i64 %indvars.iv to i32
288 %conv = sitofp i32 %tmp to double
289 %call = tail call fast double @__log2_finite(double %conv)
290 %arrayidx = getelementptr inbounds double, ptr %varray, i64 %indvars.iv
291 store double %call, ptr %arrayidx, align 4
292 %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
293 %exitcond = icmp eq i64 %indvars.iv.next, 1000
294 br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !91
296 for.end: ; preds = %for.body
; Loop metadata: force vectorization at width 4. Fixed a copy-paste bug:
; the branch referenced log_f64's !31, leaving !91-!93 unused, and !91's
; first operand was !31 instead of the required self-reference.
300 !91 = distinct !{!91, !92, !93}
301 !92 = !{!"llvm.loop.vectorize.width", i32 4}
302 !93 = !{!"llvm.loop.vectorize.enable", i1 true}
; log10f: __log10f_finite must be vectorized to amd_vrs4_log10f
; (VF=4 forced by loop metadata !101).
304 declare float @__log10f_finite(float) #0
306 ; CHECK-LABEL: @log10_f32
307 ; CHECK: <4 x float> @amd_vrs4_log10f
309 define void @log10_f32(ptr nocapture %varray) {
313 for.body: ; preds = %for.body, %entry
314 %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
315 %tmp = trunc i64 %indvars.iv to i32
316 %conv = sitofp i32 %tmp to float
317 %call = tail call fast float @__log10f_finite(float %conv)
318 %arrayidx = getelementptr inbounds float, ptr %varray, i64 %indvars.iv
319 store float %call, ptr %arrayidx, align 4
320 %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
321 %exitcond = icmp eq i64 %indvars.iv.next, 1000
322 br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !101
324 for.end: ; preds = %for.body
; Loop metadata: force vectorization at width 4. Fixed a copy-paste bug:
; the branch referenced log_f32's !21, leaving !101-!103 unused, and !101's
; first operand was !21 instead of the required self-reference.
328 !101 = distinct !{!101, !102, !103}
329 !102 = !{!"llvm.loop.vectorize.width", i32 4}
330 !103 = !{!"llvm.loop.vectorize.enable", i1 true}