; RUN: opt -vector-library=SVML -passes=inject-tli-mappings,loop-vectorize -force-vector-width=4 -force-vector-interleave=1 -mattr=avx -S < %s | FileCheck %s
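;
; Check that scalar calls to libm math routines and to the corresponding LLVM
; intrinsics (sin, cos, tan, pow, exp, log, log2, log10, sqrt, exp2) are
; vectorized into calls to the matching SVML vector functions
; (e.g. @__svml_sin4, @__svml_powf4) at VF=4 with -vector-library=SVML.
;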
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"

declare double @sin(double) #0
declare float @sinf(float) #0
declare double @llvm.sin.f64(double) #0
declare float @llvm.sin.f32(float) #0

declare double @cos(double) #0
declare float @cosf(float) #0
declare double @llvm.cos.f64(double) #0
declare float @llvm.cos.f32(float) #0

declare double @pow(double, double) #0
declare float @powf(float, float) #0
declare double @llvm.pow.f64(double, double) #0
declare float @llvm.pow.f32(float, float) #0

declare double @exp(double) #0
declare float @expf(float) #0
declare double @llvm.exp.f64(double) #0
declare float @llvm.exp.f32(float) #0

declare double @log(double) #0
declare float @logf(float) #0
declare double @llvm.log.f64(double) #0
declare float @llvm.log.f32(float) #0

declare double @log2(double) #0
declare float @log2f(float) #0
declare double @llvm.log2.f64(double) #0
declare float @llvm.log2.f32(float) #0

declare double @log10(double) #0
declare float @log10f(float) #0
declare double @llvm.log10.f64(double) #0
declare float @llvm.log10.f32(float) #0

declare double @sqrt(double) #0
declare float @sqrtf(float) #0

declare double @exp2(double) #0
declare float @exp2f(float) #0
declare double @llvm.exp2.f64(double) #0
declare float @llvm.exp2.f32(float) #0

define void @sin_f64(ptr nocapture %varray) {
; CHECK-LABEL: @sin_f64(
; CHECK: [[TMP5:%.*]] = call <4 x double> @__svml_sin4(<4 x double> [[TMP4:%.*]])
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %tmp = trunc i64 %iv to i32
  %conv = sitofp i32 %tmp to double
  %call = tail call double @sin(double %conv)
  %arrayidx = getelementptr inbounds double, ptr %varray, i64 %iv
  store double %call, ptr %arrayidx, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

define void @sin_f32(ptr nocapture %varray) {
; CHECK-LABEL: @sin_f32(
; CHECK: [[TMP5:%.*]] = call <4 x float> @__svml_sinf4(<4 x float> [[TMP4:%.*]])
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %tmp = trunc i64 %iv to i32
  %conv = sitofp i32 %tmp to float
  %call = tail call float @sinf(float %conv)
  %arrayidx = getelementptr inbounds float, ptr %varray, i64 %iv
  store float %call, ptr %arrayidx, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

define void @sin_f64_intrinsic(ptr nocapture %varray) {
; CHECK-LABEL: @sin_f64_intrinsic(
; CHECK: [[TMP5:%.*]] = call <4 x double> @__svml_sin4(<4 x double> [[TMP4:%.*]])
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %tmp = trunc i64 %iv to i32
  %conv = sitofp i32 %tmp to double
  %call = tail call double @llvm.sin.f64(double %conv)
  %arrayidx = getelementptr inbounds double, ptr %varray, i64 %iv
  store double %call, ptr %arrayidx, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

define void @sin_f32_intrinsic(ptr nocapture %varray) {
; CHECK-LABEL: @sin_f32_intrinsic(
; CHECK: [[TMP5:%.*]] = call <4 x float> @__svml_sinf4(<4 x float> [[TMP4:%.*]])
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %tmp = trunc i64 %iv to i32
  %conv = sitofp i32 %tmp to float
  %call = tail call float @llvm.sin.f32(float %conv)
  %arrayidx = getelementptr inbounds float, ptr %varray, i64 %iv
  store float %call, ptr %arrayidx, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

define void @cos_f64(ptr nocapture %varray) {
; CHECK-LABEL: @cos_f64(
; CHECK: [[TMP5:%.*]] = call <4 x double> @__svml_cos4(<4 x double> [[TMP4:%.*]])
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %tmp = trunc i64 %iv to i32
  %conv = sitofp i32 %tmp to double
  %call = tail call double @cos(double %conv)
  %arrayidx = getelementptr inbounds double, ptr %varray, i64 %iv
  store double %call, ptr %arrayidx, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

define void @cos_f32(ptr nocapture %varray) {
; CHECK-LABEL: @cos_f32(
; CHECK: [[TMP5:%.*]] = call <4 x float> @__svml_cosf4(<4 x float> [[TMP4:%.*]])
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %tmp = trunc i64 %iv to i32
  %conv = sitofp i32 %tmp to float
  %call = tail call float @cosf(float %conv)
  %arrayidx = getelementptr inbounds float, ptr %varray, i64 %iv
  store float %call, ptr %arrayidx, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

define void @cos_f64_intrinsic(ptr nocapture %varray) {
; CHECK-LABEL: @cos_f64_intrinsic(
; CHECK: [[TMP5:%.*]] = call <4 x double> @__svml_cos4(<4 x double> [[TMP4:%.*]])
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %tmp = trunc i64 %iv to i32
  %conv = sitofp i32 %tmp to double
  %call = tail call double @llvm.cos.f64(double %conv)
  %arrayidx = getelementptr inbounds double, ptr %varray, i64 %iv
  store double %call, ptr %arrayidx, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

define void @cos_f32_intrinsic(ptr nocapture %varray) {
; CHECK-LABEL: @cos_f32_intrinsic(
; CHECK: [[TMP5:%.*]] = call <4 x float> @__svml_cosf4(<4 x float> [[TMP4:%.*]])
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %tmp = trunc i64 %iv to i32
  %conv = sitofp i32 %tmp to float
  %call = tail call float @llvm.cos.f32(float %conv)
  %arrayidx = getelementptr inbounds float, ptr %varray, i64 %iv
  store float %call, ptr %arrayidx, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

define void @tan_f64_intrinsic(ptr nocapture %varray) {
; CHECK-LABEL: @tan_f64_intrinsic(
; CHECK: [[TMP5:%.*]] = call <4 x double> @__svml_tan4(<4 x double> [[TMP4:%.*]])
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %tmp = trunc i64 %iv to i32
  %conv = sitofp i32 %tmp to double
  %call = tail call double @llvm.tan.f64(double %conv)
  %arrayidx = getelementptr inbounds double, ptr %varray, i64 %iv
  store double %call, ptr %arrayidx, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

define void @tan_f32_intrinsic(ptr nocapture %varray) {
; CHECK-LABEL: @tan_f32_intrinsic(
; CHECK: [[TMP5:%.*]] = call <4 x float> @__svml_tanf4(<4 x float> [[TMP4:%.*]])
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %tmp = trunc i64 %iv to i32
  %conv = sitofp i32 %tmp to float
  %call = tail call float @llvm.tan.f32(float %conv)
  %arrayidx = getelementptr inbounds float, ptr %varray, i64 %iv
  store float %call, ptr %arrayidx, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

define void @pow_f64(ptr nocapture %varray, ptr nocapture readonly %exp) {
; CHECK-LABEL: @pow_f64(
; CHECK: [[TMP8:%.*]] = call <4 x double> @__svml_pow4(<4 x double> [[TMP4:%.*]], <4 x double> [[WIDE_LOAD:%.*]])
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %tmp = trunc i64 %iv to i32
  %conv = sitofp i32 %tmp to double
  %arrayidx = getelementptr inbounds double, ptr %exp, i64 %iv
  %tmp1 = load double, ptr %arrayidx, align 4
  %tmp2 = tail call double @pow(double %conv, double %tmp1)
  %arrayidx2 = getelementptr inbounds double, ptr %varray, i64 %iv
  store double %tmp2, ptr %arrayidx2, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

define void @pow_f64_intrinsic(ptr nocapture %varray, ptr nocapture readonly %exp) {
; CHECK-LABEL: @pow_f64_intrinsic(
; CHECK: [[TMP8:%.*]] = call <4 x double> @__svml_pow4(<4 x double> [[TMP4:%.*]], <4 x double> [[WIDE_LOAD:%.*]])
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %tmp = trunc i64 %iv to i32
  %conv = sitofp i32 %tmp to double
  %arrayidx = getelementptr inbounds double, ptr %exp, i64 %iv
  %tmp1 = load double, ptr %arrayidx, align 4
  %tmp2 = tail call double @llvm.pow.f64(double %conv, double %tmp1)
  %arrayidx2 = getelementptr inbounds double, ptr %varray, i64 %iv
  store double %tmp2, ptr %arrayidx2, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

define void @pow_f32(ptr nocapture %varray, ptr nocapture readonly %exp) {
; CHECK-LABEL: @pow_f32(
; CHECK: [[TMP8:%.*]] = call <4 x float> @__svml_powf4(<4 x float> [[TMP4:%.*]], <4 x float> [[WIDE_LOAD:%.*]])
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %tmp = trunc i64 %iv to i32
  %conv = sitofp i32 %tmp to float
  %arrayidx = getelementptr inbounds float, ptr %exp, i64 %iv
  %tmp1 = load float, ptr %arrayidx, align 4
  %tmp2 = tail call float @powf(float %conv, float %tmp1)
  %arrayidx2 = getelementptr inbounds float, ptr %varray, i64 %iv
  store float %tmp2, ptr %arrayidx2, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

define void @pow_f32_intrinsic(ptr nocapture %varray, ptr nocapture readonly %exp) {
; CHECK-LABEL: @pow_f32_intrinsic(
; CHECK: [[TMP8:%.*]] = call <4 x float> @__svml_powf4(<4 x float> [[TMP4:%.*]], <4 x float> [[WIDE_LOAD:%.*]])
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %tmp = trunc i64 %iv to i32
  %conv = sitofp i32 %tmp to float
  %arrayidx = getelementptr inbounds float, ptr %exp, i64 %iv
  %tmp1 = load float, ptr %arrayidx, align 4
  %tmp2 = tail call float @llvm.pow.f32(float %conv, float %tmp1)
  %arrayidx2 = getelementptr inbounds float, ptr %varray, i64 %iv
  store float %tmp2, ptr %arrayidx2, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

define void @exp_f64(ptr nocapture %varray) {
; CHECK-LABEL: @exp_f64(
; CHECK: [[TMP5:%.*]] = call <4 x double> @__svml_exp4(<4 x double> [[TMP4:%.*]])
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %tmp = trunc i64 %iv to i32
  %conv = sitofp i32 %tmp to double
  %call = tail call double @exp(double %conv)
  %arrayidx = getelementptr inbounds double, ptr %varray, i64 %iv
  store double %call, ptr %arrayidx, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

define void @exp_f32(ptr nocapture %varray) {
; CHECK-LABEL: @exp_f32(
; CHECK: [[TMP5:%.*]] = call <4 x float> @__svml_expf4(<4 x float> [[TMP4:%.*]])
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %tmp = trunc i64 %iv to i32
  %conv = sitofp i32 %tmp to float
  %call = tail call float @expf(float %conv)
  %arrayidx = getelementptr inbounds float, ptr %varray, i64 %iv
  store float %call, ptr %arrayidx, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

define void @exp_f64_intrinsic(ptr nocapture %varray) {
; CHECK-LABEL: @exp_f64_intrinsic(
; CHECK: [[TMP5:%.*]] = call <4 x double> @__svml_exp4(<4 x double> [[TMP4:%.*]])
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %tmp = trunc i64 %iv to i32
  %conv = sitofp i32 %tmp to double
  %call = tail call double @llvm.exp.f64(double %conv)
  %arrayidx = getelementptr inbounds double, ptr %varray, i64 %iv
  store double %call, ptr %arrayidx, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

define void @exp_f32_intrinsic(ptr nocapture %varray) {
; CHECK-LABEL: @exp_f32_intrinsic(
; CHECK: [[TMP5:%.*]] = call <4 x float> @__svml_expf4(<4 x float> [[TMP4:%.*]])
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %tmp = trunc i64 %iv to i32
  %conv = sitofp i32 %tmp to float
  %call = tail call float @llvm.exp.f32(float %conv)
  %arrayidx = getelementptr inbounds float, ptr %varray, i64 %iv
  store float %call, ptr %arrayidx, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

define void @log_f64(ptr nocapture %varray) {
; CHECK-LABEL: @log_f64(
; CHECK: [[TMP5:%.*]] = call <4 x double> @__svml_log4(<4 x double> [[TMP4:%.*]])
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %tmp = trunc i64 %iv to i32
  %conv = sitofp i32 %tmp to double
  %call = tail call double @log(double %conv)
  %arrayidx = getelementptr inbounds double, ptr %varray, i64 %iv
  store double %call, ptr %arrayidx, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

define void @log_f32(ptr nocapture %varray) {
; CHECK-LABEL: @log_f32(
; CHECK: [[TMP5:%.*]] = call <4 x float> @__svml_logf4(<4 x float> [[TMP4:%.*]])
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %tmp = trunc i64 %iv to i32
  %conv = sitofp i32 %tmp to float
  %call = tail call float @logf(float %conv)
  %arrayidx = getelementptr inbounds float, ptr %varray, i64 %iv
  store float %call, ptr %arrayidx, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

define void @log_f64_intrinsic(ptr nocapture %varray) {
; CHECK-LABEL: @log_f64_intrinsic(
; CHECK: [[TMP5:%.*]] = call <4 x double> @__svml_log4(<4 x double> [[TMP4:%.*]])
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %tmp = trunc i64 %iv to i32
  %conv = sitofp i32 %tmp to double
  %call = tail call double @llvm.log.f64(double %conv)
  %arrayidx = getelementptr inbounds double, ptr %varray, i64 %iv
  store double %call, ptr %arrayidx, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

define void @log_f32_intrinsic(ptr nocapture %varray) {
; CHECK-LABEL: @log_f32_intrinsic(
; CHECK: [[TMP5:%.*]] = call <4 x float> @__svml_logf4(<4 x float> [[TMP4:%.*]])
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %tmp = trunc i64 %iv to i32
  %conv = sitofp i32 %tmp to float
  %call = tail call float @llvm.log.f32(float %conv)
  %arrayidx = getelementptr inbounds float, ptr %varray, i64 %iv
  store float %call, ptr %arrayidx, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

define void @log2_f64(ptr nocapture %varray) {
; CHECK-LABEL: @log2_f64(
; CHECK: [[TMP5:%.*]] = call <4 x double> @__svml_log24(<4 x double> [[TMP4:%.*]])
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %tmp = trunc i64 %iv to i32
  %conv = sitofp i32 %tmp to double
  %call = tail call double @log2(double %conv)
  %arrayidx = getelementptr inbounds double, ptr %varray, i64 %iv
  store double %call, ptr %arrayidx, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

define void @log2_f32(ptr nocapture %varray) {
; CHECK-LABEL: @log2_f32(
; CHECK: [[TMP5:%.*]] = call <4 x float> @__svml_log2f4(<4 x float> [[TMP4:%.*]])
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %tmp = trunc i64 %iv to i32
  %conv = sitofp i32 %tmp to float
  %call = tail call float @log2f(float %conv)
  %arrayidx = getelementptr inbounds float, ptr %varray, i64 %iv
  store float %call, ptr %arrayidx, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

define void @log2_f64_intrinsic(ptr nocapture %varray) {
; CHECK-LABEL: @log2_f64_intrinsic(
; CHECK: [[TMP5:%.*]] = call <4 x double> @__svml_log24(<4 x double> [[TMP4:%.*]])
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %tmp = trunc i64 %iv to i32
  %conv = sitofp i32 %tmp to double
  %call = tail call double @llvm.log2.f64(double %conv)
  %arrayidx = getelementptr inbounds double, ptr %varray, i64 %iv
  store double %call, ptr %arrayidx, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

define void @log2_f32_intrinsic(ptr nocapture %varray) {
; CHECK-LABEL: @log2_f32_intrinsic(
; CHECK: [[TMP5:%.*]] = call <4 x float> @__svml_log2f4(<4 x float> [[TMP4:%.*]])
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %tmp = trunc i64 %iv to i32
  %conv = sitofp i32 %tmp to float
  %call = tail call float @llvm.log2.f32(float %conv)
  %arrayidx = getelementptr inbounds float, ptr %varray, i64 %iv
  store float %call, ptr %arrayidx, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

define void @log10_f64(ptr nocapture %varray) {
; CHECK-LABEL: @log10_f64(
; CHECK: [[TMP5:%.*]] = call <4 x double> @__svml_log104(<4 x double> [[TMP4:%.*]])
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %tmp = trunc i64 %iv to i32
  %conv = sitofp i32 %tmp to double
  %call = tail call double @log10(double %conv)
  %arrayidx = getelementptr inbounds double, ptr %varray, i64 %iv
  store double %call, ptr %arrayidx, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

define void @log10_f32(ptr nocapture %varray) {
; CHECK-LABEL: @log10_f32(
; CHECK: [[TMP5:%.*]] = call <4 x float> @__svml_log10f4(<4 x float> [[TMP4:%.*]])
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %tmp = trunc i64 %iv to i32
  %conv = sitofp i32 %tmp to float
  %call = tail call float @log10f(float %conv)
  %arrayidx = getelementptr inbounds float, ptr %varray, i64 %iv
  store float %call, ptr %arrayidx, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

define void @log10_f64_intrinsic(ptr nocapture %varray) {
; CHECK-LABEL: @log10_f64_intrinsic(
; CHECK: [[TMP5:%.*]] = call <4 x double> @__svml_log104(<4 x double> [[TMP4:%.*]])
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %tmp = trunc i64 %iv to i32
  %conv = sitofp i32 %tmp to double
  %call = tail call double @llvm.log10.f64(double %conv)
  %arrayidx = getelementptr inbounds double, ptr %varray, i64 %iv
  store double %call, ptr %arrayidx, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

define void @log10_f32_intrinsic(ptr nocapture %varray) {
; CHECK-LABEL: @log10_f32_intrinsic(
; CHECK: [[TMP5:%.*]] = call <4 x float> @__svml_log10f4(<4 x float> [[TMP4:%.*]])
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %tmp = trunc i64 %iv to i32
  %conv = sitofp i32 %tmp to float
  %call = tail call float @llvm.log10.f32(float %conv)
  %arrayidx = getelementptr inbounds float, ptr %varray, i64 %iv
  store float %call, ptr %arrayidx, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

define void @sqrt_f64(ptr nocapture %varray) {
; CHECK-LABEL: @sqrt_f64(
; CHECK: [[TMP5:%.*]] = call <4 x double> @__svml_sqrt4(<4 x double> [[TMP4:%.*]])
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %tmp = trunc i64 %iv to i32
  %conv = sitofp i32 %tmp to double
  %call = tail call double @sqrt(double %conv)
  %arrayidx = getelementptr inbounds double, ptr %varray, i64 %iv
  store double %call, ptr %arrayidx, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

define void @sqrt_f32(ptr nocapture %varray) {
; CHECK-LABEL: @sqrt_f32(
; CHECK: [[TMP5:%.*]] = call <4 x float> @__svml_sqrtf4(<4 x float> [[TMP4:%.*]])
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %tmp = trunc i64 %iv to i32
  %conv = sitofp i32 %tmp to float
  %call = tail call float @sqrtf(float %conv)
  %arrayidx = getelementptr inbounds float, ptr %varray, i64 %iv
  store float %call, ptr %arrayidx, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

define void @exp2_f64(ptr nocapture %varray) {
; CHECK-LABEL: @exp2_f64(
; CHECK: [[TMP5:%.*]] = call <4 x double> @__svml_exp24(<4 x double> [[TMP4:%.*]])
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %tmp = trunc i64 %iv to i32
  %conv = sitofp i32 %tmp to double
  %call = tail call double @exp2(double %conv)
  %arrayidx = getelementptr inbounds double, ptr %varray, i64 %iv
  store double %call, ptr %arrayidx, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

define void @exp2_f32(ptr nocapture %varray) {
; CHECK-LABEL: @exp2_f32(
; CHECK: [[TMP5:%.*]] = call <4 x float> @__svml_exp2f4(<4 x float> [[TMP4:%.*]])
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %tmp = trunc i64 %iv to i32
  %conv = sitofp i32 %tmp to float
  %call = tail call float @exp2f(float %conv)
  %arrayidx = getelementptr inbounds float, ptr %varray, i64 %iv
  store float %call, ptr %arrayidx, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

define void @exp2_f64_intrinsic(ptr nocapture %varray) {
; CHECK-LABEL: @exp2_f64_intrinsic(
; CHECK: [[TMP5:%.*]] = call <4 x double> @__svml_exp24(<4 x double> [[TMP4:%.*]])
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %tmp = trunc i64 %iv to i32
  %conv = sitofp i32 %tmp to double
  %call = tail call double @llvm.exp2.f64(double %conv)
  %arrayidx = getelementptr inbounds double, ptr %varray, i64 %iv
  store double %call, ptr %arrayidx, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

define void @exp2_f32_intrinsic(ptr nocapture %varray) {
; CHECK-LABEL: @exp2_f32_intrinsic(
; CHECK: [[TMP5:%.*]] = call <4 x float> @__svml_exp2f4(<4 x float> [[TMP4:%.*]])
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %tmp = trunc i64 %iv to i32
  %conv = sitofp i32 %tmp to float
  %call = tail call float @llvm.exp2.f32(float %conv)
  %arrayidx = getelementptr inbounds float, ptr %varray, i64 %iv
  store float %call, ptr %arrayidx, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

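; The math routines and intrinsics are declared nounwind readnone (#0), which
; lets the vectorizer treat the calls as side-effect free when widening them.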
attributes #0 = { nounwind readnone }