1 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2 ; RUN: opt -passes='default<O3>' -S %s | FileCheck %s
4 target triple = "arm64-apple-darwin"
6 ; Make sure we can vectorize a loop that uses a function to clamp a double to
7 ; be between a given minimum and maximum value.
; clamp(v): return v limited to the range [0.0, 6.0], deliberately written
; in unoptimized (alloca/load/store) form so the O3 pipeline must inline
; and simplify it into selects before @loop can be vectorized.
; NOTE(review): this view of the file appears to have lines elided (the
; 'entry:' label, the 'br label %return' terminators after each store to
; %retval, the final 'ret double %3', and the closing brace) — confirm
; against the original test file before editing.
9 define internal double @clamp(double %v) {
11   %retval = alloca double, align 8
12   %v.addr = alloca double, align 8
13   store double %v, double* %v.addr, align 8
14   %0 = load double, double* %v.addr, align 8
; if (v < 0.0) result = 0.0
15   %cmp = fcmp olt double %0, 0.000000e+00
16   br i1 %cmp, label %if.then, label %if.end
18 if.then: ; preds = %entry
19   store double 0.000000e+00, double* %retval, align 8
; else if (v > 6.0) result = 6.0
22 if.end: ; preds = %entry
23   %1 = load double, double* %v.addr, align 8
24   %cmp1 = fcmp ogt double %1, 6.000000e+00
25   br i1 %cmp1, label %if.then2, label %if.end3
27 if.then2: ; preds = %if.end
28   store double 6.000000e+00, double* %retval, align 8
; else result = v
31 if.end3: ; preds = %if.end
32   %2 = load double, double* %v.addr, align 8
33   store double %2, double* %retval, align 8
; merge point: reload the chosen result from the %retval slot
36 return: ; preds = %if.end3, %if.then2, %if.then
37   %3 = load double, double* %retval, align 8
; loop(X, Y): X[i] = clamp(Y[i]) for i in [0, 20000). The CHECK lines
; (autogenerated by update_test_checks.py) pin the expected O3 output:
; a runtime overlap check between X and Y, a 2x-interleaved <2 x double>
; vector body built from two selects per lane group, and a scalar
; fallback loop with clamp fully inlined as two scalar selects.
41 define void @loop(double* %X, double* %Y) {
; --- expected entry block: SCEV-based runtime alias check X vs Y ---
44 ; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr double, double* [[X:%.*]], i64 20000
45 ; CHECK-NEXT: [[SCEVGEP9:%.*]] = getelementptr double, double* [[Y:%.*]], i64 20000
46 ; CHECK-NEXT: [[BOUND0:%.*]] = icmp ugt double* [[SCEVGEP9]], [[X]]
47 ; CHECK-NEXT: [[BOUND1:%.*]] = icmp ugt double* [[SCEVGEP]], [[Y]]
48 ; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
49 ; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[FOR_BODY:%.*]], label [[VECTOR_BODY:%.*]]
; --- expected vector body: two <2 x double> lane groups per iteration,
; clamp lowered to fcmp olt/ogt + two selects per group ---
51 ; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ], [ 0, [[ENTRY:%.*]] ]
52 ; CHECK-NEXT: [[TMP0:%.*]] = zext i32 [[INDEX]] to i64
53 ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds double, double* [[Y]], i64 [[TMP0]]
54 ; CHECK-NEXT: [[TMP2:%.*]] = bitcast double* [[TMP1]] to <2 x double>*
55 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x double>, <2 x double>* [[TMP2]], align 8, !alias.scope !0
56 ; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds double, double* [[TMP1]], i64 2
57 ; CHECK-NEXT: [[TMP4:%.*]] = bitcast double* [[TMP3]] to <2 x double>*
58 ; CHECK-NEXT: [[WIDE_LOAD11:%.*]] = load <2 x double>, <2 x double>* [[TMP4]], align 8, !alias.scope !0
59 ; CHECK-NEXT: [[TMP5:%.*]] = fcmp olt <2 x double> [[WIDE_LOAD]], zeroinitializer
60 ; CHECK-NEXT: [[TMP6:%.*]] = fcmp olt <2 x double> [[WIDE_LOAD11]], zeroinitializer
61 ; CHECK-NEXT: [[TMP7:%.*]] = fcmp ogt <2 x double> [[WIDE_LOAD]], <double 6.000000e+00, double 6.000000e+00>
62 ; CHECK-NEXT: [[TMP8:%.*]] = fcmp ogt <2 x double> [[WIDE_LOAD11]], <double 6.000000e+00, double 6.000000e+00>
63 ; CHECK-NEXT: [[TMP9:%.*]] = select <2 x i1> [[TMP7]], <2 x double> <double 6.000000e+00, double 6.000000e+00>, <2 x double> [[WIDE_LOAD]]
64 ; CHECK-NEXT: [[TMP10:%.*]] = select <2 x i1> [[TMP8]], <2 x double> <double 6.000000e+00, double 6.000000e+00>, <2 x double> [[WIDE_LOAD11]]
65 ; CHECK-NEXT: [[TMP11:%.*]] = select <2 x i1> [[TMP5]], <2 x double> zeroinitializer, <2 x double> [[TMP9]]
66 ; CHECK-NEXT: [[TMP12:%.*]] = select <2 x i1> [[TMP6]], <2 x double> zeroinitializer, <2 x double> [[TMP10]]
67 ; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds double, double* [[X]], i64 [[TMP0]]
68 ; CHECK-NEXT: [[TMP14:%.*]] = bitcast double* [[TMP13]] to <2 x double>*
69 ; CHECK-NEXT: store <2 x double> [[TMP11]], <2 x double>* [[TMP14]], align 8, !alias.scope !3, !noalias !0
70 ; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds double, double* [[TMP13]], i64 2
71 ; CHECK-NEXT: [[TMP16:%.*]] = bitcast double* [[TMP15]] to <2 x double>*
72 ; CHECK-NEXT: store <2 x double> [[TMP12]], <2 x double>* [[TMP16]], align 8, !alias.scope !3, !noalias !0
73 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
74 ; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i32 [[INDEX_NEXT]], 20000
75 ; CHECK-NEXT: br i1 [[TMP17]], label [[FOR_COND_CLEANUP:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
76 ; CHECK: for.cond.cleanup:
77 ; CHECK-NEXT: ret void
; --- expected scalar fallback loop (taken when X and Y may overlap):
; clamp inlined to two scalar selects ---
79 ; CHECK-NEXT: [[I_05:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY]] ]
80 ; CHECK-NEXT: [[IDXPROM:%.*]] = zext i32 [[I_05]] to i64
81 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[Y]], i64 [[IDXPROM]]
82 ; CHECK-NEXT: [[TMP18:%.*]] = load double, double* [[ARRAYIDX]], align 8
83 ; CHECK-NEXT: [[CMP_I:%.*]] = fcmp olt double [[TMP18]], 0.000000e+00
84 ; CHECK-NEXT: [[CMP1_I:%.*]] = fcmp ogt double [[TMP18]], 6.000000e+00
85 ; CHECK-NEXT: [[DOTV_I:%.*]] = select i1 [[CMP1_I]], double 6.000000e+00, double [[TMP18]]
86 ; CHECK-NEXT: [[RETVAL_0_I:%.*]] = select i1 [[CMP_I]], double 0.000000e+00, double [[DOTV_I]]
87 ; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds double, double* [[X]], i64 [[IDXPROM]]
88 ; CHECK-NEXT: store double [[RETVAL_0_I]], double* [[ARRAYIDX2]], align 8
89 ; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_05]], 1
90 ; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[I_05]], 19999
91 ; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_CLEANUP]], !llvm.loop [[LOOP7:![0-9]+]]
; --- input IR: unoptimized for-loop storing clamp(Y[i]) into X[i] ---
; NOTE(review): interior lines appear elided in this view (the 'entry:'
; label, 'br label %for.cond' terminators, the '%inc = add ...' at
; original line 130, 'ret void', and the closing brace) — confirm
; against the original test file before editing.
94   %X.addr = alloca double*, align 8
95   %Y.addr = alloca double*, align 8
96   %i = alloca i32, align 4
97   store double* %X, double** %X.addr, align 8
98   store double* %Y, double** %Y.addr, align 8
99   %0 = bitcast i32* %i to i8*
100   call void @llvm.lifetime.start.p0i8(i64 4, i8* %0) #2
101   store i32 0, i32* %i, align 4
; loop condition: i < 20000 (unsigned)
104 for.cond: ; preds = %for.inc, %entry
105   %1 = load i32, i32* %i, align 4
106   %cmp = icmp ult i32 %1, 20000
107   br i1 %cmp, label %for.body, label %for.cond.cleanup
108 
109 for.cond.cleanup: ; preds = %for.cond
110   %2 = bitcast i32* %i to i8*
111   call void @llvm.lifetime.end.p0i8(i64 4, i8* %2) #2
; loop body: X[i] = clamp(Y[i])
114 for.body: ; preds = %for.cond
115   %3 = load double*, double** %Y.addr, align 8
116   %4 = load i32, i32* %i, align 4
117   %idxprom = zext i32 %4 to i64
118   %arrayidx = getelementptr inbounds double, double* %3, i64 %idxprom
119   %5 = load double, double* %arrayidx, align 8
120   %call = call double @clamp(double %5)
121   %6 = load double*, double** %X.addr, align 8
122   %7 = load i32, i32* %i, align 4
123   %idxprom1 = zext i32 %7 to i64
124   %arrayidx2 = getelementptr inbounds double, double* %6, i64 %idxprom1
125   store double %call, double* %arrayidx2, align 8
; loop increment: i++ (the add producing %inc is elided in this view)
128 for.inc: ; preds = %for.body
129   %8 = load i32, i32* %i, align 4
131   store i32 %inc, i32* %i, align 4
132 
134 for.end: ; preds = %for.cond.cleanup
138 ; Test that requires sinking/hoisting of instructions for vectorization.
; loop2(A, B, C, x): for each i in [0, 10000):
;   B[i] = A[i] * x,            if C[i] == 20
;   B[i] = A[i] * x + B[i],     otherwise
; Vectorizing this requires hoisting the A-load/fmul out of the 'then'
; and 'else' blocks and sinking the B-store into the latch, so the
; CHECK lines verify a <4 x float> body using a select (PREDPHI) rather
; than control flow, guarded by runtime alias checks among A, B and C.
140 define void @loop2(float* %A, float* %B, i32* %C, float %x) {
141 ; CHECK-LABEL: @loop2(
; --- expected entry block: pairwise runtime alias checks (B vs C, B vs A) ---
143 ; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr float, float* [[B:%.*]], i64 10000
144 ; CHECK-NEXT: [[SCEVGEP6:%.*]] = getelementptr i32, i32* [[C:%.*]], i64 10000
145 ; CHECK-NEXT: [[SCEVGEP9:%.*]] = getelementptr float, float* [[A:%.*]], i64 10000
146 ; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[SCEVGEP6]] to float*
147 ; CHECK-NEXT: [[BOUND0:%.*]] = icmp ugt float* [[TMP0]], [[B]]
148 ; CHECK-NEXT: [[TMP1:%.*]] = bitcast float* [[SCEVGEP]] to i32*
149 ; CHECK-NEXT: [[BOUND1:%.*]] = icmp ugt i32* [[TMP1]], [[C]]
150 ; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
151 ; CHECK-NEXT: [[BOUND011:%.*]] = icmp ugt float* [[SCEVGEP9]], [[B]]
152 ; CHECK-NEXT: [[BOUND112:%.*]] = icmp ugt float* [[SCEVGEP]], [[A]]
153 ; CHECK-NEXT: [[FOUND_CONFLICT13:%.*]] = and i1 [[BOUND011]], [[BOUND112]]
154 ; CHECK-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[FOUND_CONFLICT]], [[FOUND_CONFLICT13]]
155 ; CHECK-NEXT: br i1 [[CONFLICT_RDX]], label [[LOOP_BODY:%.*]], label [[VECTOR_PH:%.*]]
; --- expected vector preheader: broadcast scalar x to all 4 lanes ---
157 ; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[X:%.*]], i32 0
158 ; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x float> [[BROADCAST_SPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer
159 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
160 ; CHECK: vector.body:
; --- expected vector body: branch flattened into a lane-wise select ---
161 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
162 ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, i32* [[C]], i64 [[INDEX]]
163 ; CHECK-NEXT: [[TMP3:%.*]] = bitcast i32* [[TMP2]] to <4 x i32>*
164 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, <4 x i32>* [[TMP3]], align 4, !alias.scope !8
165 ; CHECK-NEXT: [[TMP4:%.*]] = icmp eq <4 x i32> [[WIDE_LOAD]], <i32 20, i32 20, i32 20, i32 20>
166 ; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds float, float* [[A]], i64 [[INDEX]]
167 ; CHECK-NEXT: [[TMP6:%.*]] = bitcast float* [[TMP5]] to <4 x float>*
168 ; CHECK-NEXT: [[WIDE_LOAD14:%.*]] = load <4 x float>, <4 x float>* [[TMP6]], align 4, !alias.scope !11
169 ; CHECK-NEXT: [[TMP7:%.*]] = fmul <4 x float> [[WIDE_LOAD14]], [[BROADCAST_SPLAT]]
170 ; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, float* [[B]], i64 [[INDEX]]
171 ; CHECK-NEXT: [[TMP9:%.*]] = bitcast float* [[TMP8]] to <4 x float>*
172 ; CHECK-NEXT: [[WIDE_LOAD15:%.*]] = load <4 x float>, <4 x float>* [[TMP9]], align 4, !alias.scope !13, !noalias !15
173 ; CHECK-NEXT: [[TMP10:%.*]] = fadd <4 x float> [[TMP7]], [[WIDE_LOAD15]]
174 ; CHECK-NEXT: [[PREDPHI:%.*]] = select <4 x i1> [[TMP4]], <4 x float> [[TMP7]], <4 x float> [[TMP10]]
175 ; CHECK-NEXT: [[TMP11:%.*]] = bitcast float* [[TMP8]] to <4 x float>*
176 ; CHECK-NEXT: store <4 x float> [[PREDPHI]], <4 x float>* [[TMP11]], align 4, !alias.scope !13, !noalias !15
177 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
178 ; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 10000
179 ; CHECK-NEXT: br i1 [[TMP12]], label [[EXIT:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; --- expected scalar fallback: load/fmul hoisted above the branch,
; store sunk into the latch behind the ADD_SINK phi ---
181 ; CHECK-NEXT: [[IV1:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ], [ 0, [[ENTRY:%.*]] ]
182 ; CHECK-NEXT: [[C_GEP:%.*]] = getelementptr inbounds i32, i32* [[C]], i64 [[IV1]]
183 ; CHECK-NEXT: [[C_LV:%.*]] = load i32, i32* [[C_GEP]], align 4
184 ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[C_LV]], 20
185 ; CHECK-NEXT: [[A_GEP_0:%.*]] = getelementptr inbounds float, float* [[A]], i64 [[IV1]]
186 ; CHECK-NEXT: [[A_LV_0:%.*]] = load float, float* [[A_GEP_0]], align 4
187 ; CHECK-NEXT: [[MUL2_I81_I:%.*]] = fmul float [[A_LV_0]], [[X]]
188 ; CHECK-NEXT: [[B_GEP_0:%.*]] = getelementptr inbounds float, float* [[B]], i64 [[IV1]]
189 ; CHECK-NEXT: br i1 [[CMP]], label [[LOOP_LATCH]], label [[ELSE:%.*]]
191 ; CHECK-NEXT: [[B_LV:%.*]] = load float, float* [[B_GEP_0]], align 4
192 ; CHECK-NEXT: [[ADD:%.*]] = fadd float [[MUL2_I81_I]], [[B_LV]]
193 ; CHECK-NEXT: br label [[LOOP_LATCH]]
195 ; CHECK-NEXT: [[ADD_SINK:%.*]] = phi float [ [[ADD]], [[ELSE]] ], [ [[MUL2_I81_I]], [[LOOP_BODY]] ]
196 ; CHECK-NEXT: store float [[ADD_SINK]], float* [[B_GEP_0]], align 4
197 ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1
198 ; CHECK-NEXT: [[CMP_0:%.*]] = icmp ult i64 [[IV1]], 9999
199 ; CHECK-NEXT: br i1 [[CMP_0]], label [[LOOP_BODY]], label [[EXIT]], !llvm.loop [[LOOP17:![0-9]+]]
201 ; CHECK-NEXT: ret void
; --- input IR: diamond-shaped loop body with per-arm loads and stores ---
; NOTE(review): the block labels ('entry:', 'loop.header:', 'loop.body:',
; 'then:', 'else:', 'loop.latch:', 'exit:'), several 'br' terminators,
; 'ret void', and the closing brace appear elided in this view — confirm
; against the original test file before editing.
204   br label %loop.header
; loop.header: iterate iv over [0, 10000)
207   %iv = phi i64 [ %iv.next, %loop.latch ], [ 0, %entry ]
208   %cmp.0 = icmp ult i64 %iv, 10000
209   br i1 %cmp.0, label %loop.body, label %exit
; loop.body: dispatch on C[iv] == 20
212   %C.gep = getelementptr inbounds i32, i32* %C, i64 %iv
213   %C.lv = load i32, i32* %C.gep
214   %cmp = icmp eq i32 %C.lv, 20
215   br i1 %cmp, label %then, label %else
; then-arm: B[iv] = A[iv] * x
218   %A.gep.0 = getelementptr inbounds float, float* %A, i64 %iv
219   %A.lv.0 = load float, float* %A.gep.0, align 4
220   %mul2.i81.i = fmul float %A.lv.0, %x
221   %B.gep.0 = getelementptr inbounds float, float* %B, i64 %iv
222   store float %mul2.i81.i, float* %B.gep.0, align 4
; else-arm: B[iv] = A[iv] * x + B[iv]
226   %A.gep.1 = getelementptr inbounds float, float* %A, i64 %iv
227   %A.lv.1 = load float, float* %A.gep.1, align 4
228   %mul2 = fmul float %A.lv.1, %x
229   %B.gep.1 = getelementptr inbounds float, float* %B, i64 %iv
230   %B.lv = load float, float* %B.gep.1, align 4
231   %add = fadd float %mul2, %B.lv
232   store float %add, float* %B.gep.1, align 4
; loop.latch: advance the induction variable
236   %iv.next = add nuw nsw i64 %iv, 1
237   br label %loop.header
; Stack-slot lifetime intrinsics referenced by @loop for its %i alloca.
243 declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture)
245 declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture)