1 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2 ; RUN: opt < %s -passes=slp-vectorizer,dce -slp-threshold=-999 -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
4 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
5 target triple = "x86_64-apple-macosx10.8.0"
; External libm-style declarations used by the tests below. The first five
; match the real libm prototypes; @round deliberately uses a non-standard
; (i64 -> i64) signature — presumably so it cannot be recognized as the libm
; round and must stay a scalar call (see @round_custom) — TODO confirm intent.
7 declare double @sin(double) nounwind willreturn
8 declare double @cos(double) nounwind willreturn
9 declare double @pow(double, double) nounwind willreturn
10 declare double @exp2(double) nounwind willreturn
11 declare double @sqrt(double) nounwind willreturn
12 declare i64 @round(i64) nounwind willreturn
; Two adjacent scalar calls to libm sin (marked readnone at the call sites)
; should be merged into a single @llvm.sin.v2f64 intrinsic call, with the
; paired loads/stores widened to <2 x double>.
15 define void @sin_libm(ptr %a, ptr %b) {
16 ; CHECK-LABEL: @sin_libm(
17 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x double>, ptr [[A:%.*]], align 8
18 ; CHECK-NEXT: [[TMP3:%.*]] = call <2 x double> @llvm.sin.v2f64(<2 x double> [[TMP2]])
19 ; CHECK-NEXT: store <2 x double> [[TMP3]], ptr [[B:%.*]], align 8
20 ; CHECK-NEXT: ret void
22 %a0 = load double, ptr %a, align 8
23 %idx1 = getelementptr inbounds double, ptr %a, i64 1
24 %a1 = load double, ptr %idx1, align 8
25 %sin1 = tail call double @sin(double %a0) nounwind readnone
26 %sin2 = tail call double @sin(double %a1) nounwind readnone
27 store double %sin1, ptr %b, align 8
28 %idx2 = getelementptr inbounds double, ptr %b, i64 1
29 store double %sin2, ptr %idx2, align 8
; Same pattern as @sin_libm, for cos: two readnone scalar cos calls on
; adjacent elements must become one @llvm.cos.v2f64 call.
33 define void @cos_libm(ptr %a, ptr %b) {
34 ; CHECK-LABEL: @cos_libm(
35 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x double>, ptr [[A:%.*]], align 8
36 ; CHECK-NEXT: [[TMP3:%.*]] = call <2 x double> @llvm.cos.v2f64(<2 x double> [[TMP2]])
37 ; CHECK-NEXT: store <2 x double> [[TMP3]], ptr [[B:%.*]], align 8
38 ; CHECK-NEXT: ret void
40 %a0 = load double, ptr %a, align 8
41 %idx1 = getelementptr inbounds double, ptr %a, i64 1
42 %a1 = load double, ptr %idx1, align 8
43 %cos1 = tail call double @cos(double %a0) nounwind readnone
44 %cos2 = tail call double @cos(double %a1) nounwind readnone
45 store double %cos1, ptr %b, align 8
46 %idx2 = getelementptr inbounds double, ptr %b, i64 1
47 store double %cos2, ptr %idx2, align 8
; Two-operand case: pow(x, x) on adjacent elements. Both operand vectors of
; the resulting @llvm.pow.v2f64 come from the same widened load.
51 define void @pow_libm(ptr %a, ptr %b) {
52 ; CHECK-LABEL: @pow_libm(
53 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x double>, ptr [[A:%.*]], align 8
54 ; CHECK-NEXT: [[TMP3:%.*]] = call <2 x double> @llvm.pow.v2f64(<2 x double> [[TMP2]], <2 x double> [[TMP2]])
55 ; CHECK-NEXT: store <2 x double> [[TMP3]], ptr [[B:%.*]], align 8
56 ; CHECK-NEXT: ret void
58 %a0 = load double, ptr %a, align 8
59 %idx1 = getelementptr inbounds double, ptr %a, i64 1
60 %a1 = load double, ptr %idx1, align 8
61 %pow1 = tail call double @pow(double %a0, double %a0) nounwind readnone
62 %pow2 = tail call double @pow(double %a1, double %a1) nounwind readnone
63 store double %pow1, ptr %b, align 8
64 %idx2 = getelementptr inbounds double, ptr %b, i64 1
65 store double %pow2, ptr %idx2, align 8
; NOTE(review): despite the function name, this exercises exp2 — the calls
; and the expected intrinsic are both exp2, so the coverage is exp2, not exp.
; Two readnone scalar exp2 calls must become one @llvm.exp2.v2f64 call.
69 define void @exp_libm(ptr %a, ptr %b) {
70 ; CHECK-LABEL: @exp_libm(
71 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x double>, ptr [[A:%.*]], align 8
72 ; CHECK-NEXT: [[TMP3:%.*]] = call <2 x double> @llvm.exp2.v2f64(<2 x double> [[TMP2]])
73 ; CHECK-NEXT: store <2 x double> [[TMP3]], ptr [[B:%.*]], align 8
74 ; CHECK-NEXT: ret void
76 %a0 = load double, ptr %a, align 8
77 %idx1 = getelementptr inbounds double, ptr %a, i64 1
78 %a1 = load double, ptr %idx1, align 8
79 %exp1 = tail call double @exp2(double %a0) nounwind readnone
80 %exp2 = tail call double @exp2(double %a1) nounwind readnone
81 store double %exp1, ptr %b, align 8
82 %idx2 = getelementptr inbounds double, ptr %b, i64 1
83 store double %exp2, ptr %idx2, align 8
87 ; No fast-math-flags are required to convert sqrt library calls to an intrinsic.
88 ; We just need to know that errno is not set (readnone).
; Positive sqrt case: the call sites are readnone (no errno write), so the
; pair of scalar sqrt calls must be vectorized into @llvm.sqrt.v2f64 even
; without any fast-math flags.
90 define void @sqrt_libm_no_errno(ptr %a, ptr %b) {
91 ; CHECK-LABEL: @sqrt_libm_no_errno(
92 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x double>, ptr [[A:%.*]], align 8
93 ; CHECK-NEXT: [[TMP3:%.*]] = call <2 x double> @llvm.sqrt.v2f64(<2 x double> [[TMP2]])
94 ; CHECK-NEXT: store <2 x double> [[TMP3]], ptr [[B:%.*]], align 8
95 ; CHECK-NEXT: ret void
97 %a0 = load double, ptr %a, align 8
98 %idx1 = getelementptr inbounds double, ptr %a, i64 1
99 %a1 = load double, ptr %idx1, align 8
100 %sqrt1 = tail call double @sqrt(double %a0) nounwind readnone
101 %sqrt2 = tail call double @sqrt(double %a1) nounwind readnone
102 store double %sqrt1, ptr %b, align 8
103 %idx2 = getelementptr inbounds double, ptr %b, i64 1
104 store double %sqrt2, ptr %idx2, align 8
108 ; The sqrt intrinsic does not set errno, but a non-constant sqrt call might, so this can't vectorize.
109 ; The nnan on the call does not matter because there's no guarantee in the C standard that a negative
110 ; input would result in a nan output ("On a domain error, the function returns an
111 ; implementation-defined value.")
; Negative sqrt case: without readnone the libm call may set errno, so the
; CHECK lines require the two scalar @sqrt calls (and their loads/stores) to
; survive unvectorized; nnan alone must not enable the transform.
113 define void @sqrt_libm_errno(ptr %a, ptr %b) {
114 ; CHECK-LABEL: @sqrt_libm_errno(
115 ; CHECK-NEXT: [[A0:%.*]] = load double, ptr [[A:%.*]], align 8
116 ; CHECK-NEXT: [[IDX1:%.*]] = getelementptr inbounds double, ptr [[A]], i64 1
117 ; CHECK-NEXT: [[A1:%.*]] = load double, ptr [[IDX1]], align 8
118 ; CHECK-NEXT: [[SQRT1:%.*]] = tail call nnan double @sqrt(double [[A0]]) #[[ATTR3:[0-9]+]]
119 ; CHECK-NEXT: [[SQRT2:%.*]] = tail call nnan double @sqrt(double [[A1]]) #[[ATTR3]]
120 ; CHECK-NEXT: store double [[SQRT1]], ptr [[B:%.*]], align 8
121 ; CHECK-NEXT: [[IDX2:%.*]] = getelementptr inbounds double, ptr [[B]], i64 1
122 ; CHECK-NEXT: store double [[SQRT2]], ptr [[IDX2]], align 8
123 ; CHECK-NEXT: ret void
125 %a0 = load double, ptr %a, align 8
126 %idx1 = getelementptr inbounds double, ptr %a, i64 1
127 %a1 = load double, ptr %idx1, align 8
128 %sqrt1 = tail call nnan double @sqrt(double %a0) nounwind
129 %sqrt2 = tail call nnan double @sqrt(double %a1) nounwind
130 store double %sqrt1, ptr %b, align 8
131 %idx2 = getelementptr inbounds double, ptr %b, i64 1
132 store double %sqrt2, ptr %idx2, align 8
; Negative case: @round here is declared with a custom (i64 -> i64) signature
; that does not match the libm prototype, so the CHECK lines require both
; scalar calls to remain — no @llvm.round intrinsic may be formed.
137 define void @round_custom(ptr %a, ptr %b) {
138 ; CHECK-LABEL: @round_custom(
139 ; CHECK-NEXT: [[A0:%.*]] = load i64, ptr [[A:%.*]], align 8
140 ; CHECK-NEXT: [[IDX1:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 1
141 ; CHECK-NEXT: [[A1:%.*]] = load i64, ptr [[IDX1]], align 8
142 ; CHECK-NEXT: [[ROUND1:%.*]] = tail call i64 @round(i64 [[A0]]) #[[ATTR4:[0-9]+]]
143 ; CHECK-NEXT: [[ROUND2:%.*]] = tail call i64 @round(i64 [[A1]]) #[[ATTR4]]
144 ; CHECK-NEXT: store i64 [[ROUND1]], ptr [[B:%.*]], align 8
145 ; CHECK-NEXT: [[IDX2:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 1
146 ; CHECK-NEXT: store i64 [[ROUND2]], ptr [[IDX2]], align 8
147 ; CHECK-NEXT: ret void
149 %a0 = load i64, ptr %a, align 8
150 %idx1 = getelementptr inbounds i64, ptr %a, i64 1
151 %a1 = load i64, ptr %idx1, align 8
152 %round1 = tail call i64 @round(i64 %a0) nounwind readnone
153 %round2 = tail call i64 @round(i64 %a1) nounwind readnone
154 store i64 %round1, ptr %b, align 8
155 %idx2 = getelementptr inbounds i64, ptr %b, i64 1
156 store i64 %round2, ptr %idx2, align 8