; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; REQUIRES: asserts
; RUN: opt < %s -S -debug -passes=loop-idiom 2>&1 | FileCheck %s
; The C code to generate this testcase:
; void test(int *ar, int n, int m)
; {
;   long i;
;   for (i=0; i<n; ++i) {
;     int *arr = ar + i * m;
;     memset(arr, 0, i + m * sizeof(int));
;   }
; }

; Check on debug outputs...
; CHECK: loop-idiom Scanning: F[MemsetSize_LoopVariant] Countable Loop %for.body
; CHECK-NEXT: memset size is non-constant
; CHECK-NEXT: memset size is not a loop-invariant, abort
; CHECK: loop-idiom Scanning: F[MemsetSize_Stride_Mismatch] Countable Loop %for.body
; CHECK-NEXT: memset size is non-constant
; CHECK-NEXT: MemsetSizeSCEV: (4 * (sext i32 %m to i64))<nsw>
; CHECK-NEXT: PositiveStrideSCEV: (4 + (4 * (sext i32 %m to i64))<nsw>)<nsw>
; CHECK-NEXT: SCEV don't match, abort
; CHECK: loop-idiom Scanning: F[NonZeroAddressSpace] Countable Loop %for.cond1.preheader
; CHECK-NEXT: memset size is non-constant
; CHECK-NEXT: pointer is not in address space zero, abort
; CHECK: loop-idiom Scanning: F[NonAffinePointer] Countable Loop %for.body
; CHECK-NEXT: Pointer is not affine, abort
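
; Negative case: the memset length (i + m * sizeof(int)) depends on the
; induction variable i, so it is not loop-invariant and the runtime memset
; idiom is rejected ("memset size is not a loop-invariant, abort" above).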
define void @MemsetSize_LoopVariant(i32* %ar, i32 %n, i32 %m) {
; CHECK-LABEL: @MemsetSize_LoopVariant(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CONV:%.*]] = sext i32 [[N:%.*]] to i64
; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i64 0, [[CONV]]
; CHECK-NEXT:    br i1 [[CMP1]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END:%.*]]
; CHECK:       for.body.lr.ph:
; CHECK-NEXT:    [[CONV1:%.*]] = sext i32 [[M:%.*]] to i64
; CHECK-NEXT:    [[CONV2:%.*]] = sext i32 [[M]] to i64
; CHECK-NEXT:    [[MUL3:%.*]] = mul i64 [[CONV2]], 4
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[I_02:%.*]] = phi i64 [ 0, [[FOR_BODY_LR_PH]] ], [ [[INC:%.*]], [[FOR_INC:%.*]] ]
; CHECK-NEXT:    [[MUL:%.*]] = mul nsw i64 [[I_02]], [[CONV1]]
; CHECK-NEXT:    [[ADD_PTR:%.*]] = getelementptr inbounds i32, i32* [[AR:%.*]], i64 [[MUL]]
; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i32* [[ADD_PTR]] to i8*
; CHECK-NEXT:    [[ADD:%.*]] = add nsw i64 [[I_02]], [[MUL3]]
; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 4 [[TMP0]], i8 0, i64 [[ADD]], i1 false)
; CHECK-NEXT:    br label [[FOR_INC]]
; CHECK:       for.inc:
; CHECK-NEXT:    [[INC]] = add nuw nsw i64 [[I_02]], 1
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i64 [[INC]], [[CONV]]
; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_FOR_END_CRIT_EDGE:%.*]]
; CHECK:       for.cond.for.end_crit_edge:
; CHECK-NEXT:    br label [[FOR_END]]
; CHECK:       for.end:
; CHECK-NEXT:    ret void
;
entry:
  %conv = sext i32 %n to i64
  %cmp1 = icmp slt i64 0, %conv
  br i1 %cmp1, label %for.body.lr.ph, label %for.end

for.body.lr.ph:                                   ; preds = %entry
  %conv1 = sext i32 %m to i64
  %conv2 = sext i32 %m to i64
  %mul3 = mul i64 %conv2, 4
  br label %for.body

for.body:                                         ; preds = %for.body.lr.ph, %for.inc
  %i.02 = phi i64 [ 0, %for.body.lr.ph ], [ %inc, %for.inc ]
  %mul = mul nsw i64 %i.02, %conv1
  %add.ptr = getelementptr inbounds i32, i32* %ar, i64 %mul
  %0 = bitcast i32* %add.ptr to i8*
  %add = add nsw i64 %i.02, %mul3
  call void @llvm.memset.p0i8.i64(i8* align 4 %0, i8 0, i64 %add, i1 false)
  br label %for.inc

for.inc:                                          ; preds = %for.body
  %inc = add nuw nsw i64 %i.02, 1
  %cmp = icmp slt i64 %inc, %conv
  br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge

for.cond.for.end_crit_edge:                       ; preds = %for.inc
  br label %for.end

for.end:                                          ; preds = %for.cond.for.end_crit_edge, %entry
  ret void
}
; void test(int *ar, int n, int m)
; {
;   long i;
;   for (i=0; i<n; ++i) {
;     int *arr = ar + i + i * m;
;     memset(arr, 0, m * sizeof(int));
;   }
; }
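; Negative case: the destination pointer advances by (m + 1) ints per
; iteration (ar + i + i*m), i.e. a byte stride of 4 + 4*m, while the memset
; length is only 4*m bytes, so MemsetSizeSCEV and PositiveStrideSCEV differ
; and the idiom is rejected ("SCEV don't match, abort" above).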
define void @MemsetSize_Stride_Mismatch(i32* %ar, i32 %n, i32 %m) {
; CHECK-LABEL: @MemsetSize_Stride_Mismatch(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CONV:%.*]] = sext i32 [[N:%.*]] to i64
; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i64 0, [[CONV]]
; CHECK-NEXT:    br i1 [[CMP1]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END:%.*]]
; CHECK:       for.body.lr.ph:
; CHECK-NEXT:    [[CONV1:%.*]] = sext i32 [[M:%.*]] to i64
; CHECK-NEXT:    [[CONV3:%.*]] = sext i32 [[M]] to i64
; CHECK-NEXT:    [[MUL4:%.*]] = mul i64 [[CONV3]], 4
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[I_02:%.*]] = phi i64 [ 0, [[FOR_BODY_LR_PH]] ], [ [[INC:%.*]], [[FOR_INC:%.*]] ]
; CHECK-NEXT:    [[ADD_PTR:%.*]] = getelementptr inbounds i32, i32* [[AR:%.*]], i64 [[I_02]]
; CHECK-NEXT:    [[MUL:%.*]] = mul nsw i64 [[I_02]], [[CONV1]]
; CHECK-NEXT:    [[ADD_PTR2:%.*]] = getelementptr inbounds i32, i32* [[ADD_PTR]], i64 [[MUL]]
; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i32* [[ADD_PTR2]] to i8*
; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 4 [[TMP0]], i8 0, i64 [[MUL4]], i1 false)
; CHECK-NEXT:    br label [[FOR_INC]]
; CHECK:       for.inc:
; CHECK-NEXT:    [[INC]] = add nuw nsw i64 [[I_02]], 1
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i64 [[INC]], [[CONV]]
; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_FOR_END_CRIT_EDGE:%.*]]
; CHECK:       for.cond.for.end_crit_edge:
; CHECK-NEXT:    br label [[FOR_END]]
; CHECK:       for.end:
; CHECK-NEXT:    ret void
;
entry:
  %conv = sext i32 %n to i64
  %cmp1 = icmp slt i64 0, %conv
  br i1 %cmp1, label %for.body.lr.ph, label %for.end

for.body.lr.ph:                                   ; preds = %entry
  %conv1 = sext i32 %m to i64
  %conv3 = sext i32 %m to i64
  %mul4 = mul i64 %conv3, 4
  br label %for.body

for.body:                                         ; preds = %for.body.lr.ph, %for.inc
  %i.02 = phi i64 [ 0, %for.body.lr.ph ], [ %inc, %for.inc ]
  %add.ptr = getelementptr inbounds i32, i32* %ar, i64 %i.02
  %mul = mul nsw i64 %i.02, %conv1
  %add.ptr2 = getelementptr inbounds i32, i32* %add.ptr, i64 %mul
  %0 = bitcast i32* %add.ptr2 to i8*
  call void @llvm.memset.p0i8.i64(i8* align 4 %0, i8 0, i64 %mul4, i1 false)
  br label %for.inc

for.inc:                                          ; preds = %for.body
  %inc = add nuw nsw i64 %i.02, 1
  %cmp = icmp slt i64 %inc, %conv
  br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge

for.cond.for.end_crit_edge:                       ; preds = %for.inc
  br label %for.end

for.end:                                          ; preds = %for.cond.for.end_crit_edge, %entry
  ret void
}
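
; Negative case: the same row-wise memset pattern, but the destination pointer
; is in addrspace(2); the runtime-size memset path only handles pointers in
; address space zero, so the idiom is rejected ("pointer is not in address
; space zero, abort" above).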
define void @NonZeroAddressSpace(i32 addrspace(2)* nocapture %ar, i64 %n, i64 %m) {
; CHECK-LABEL: @NonZeroAddressSpace(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = shl nuw i64 [[M:%.*]], 2
; CHECK-NEXT:    br label [[FOR_COND1_PREHEADER:%.*]]
; CHECK:       for.cond1.preheader:
; CHECK-NEXT:    [[I_017:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INC5:%.*]], [[FOR_INC4:%.*]] ]
; CHECK-NEXT:    [[TMP1:%.*]] = mul i64 [[M]], [[I_017]]
; CHECK-NEXT:    [[SCEVGEP:%.*]] = getelementptr i32, i32 addrspace(2)* [[AR:%.*]], i64 [[TMP1]]
; CHECK-NEXT:    [[SCEVGEP1:%.*]] = bitcast i32 addrspace(2)* [[SCEVGEP]] to i8 addrspace(2)*
; CHECK-NEXT:    [[MUL:%.*]] = mul nsw i64 [[I_017]], [[M]]
; CHECK-NEXT:    call void @llvm.memset.p2i8.i64(i8 addrspace(2)* align 4 [[SCEVGEP1]], i8 0, i64 [[TMP0]], i1 false)
; CHECK-NEXT:    br label [[FOR_INC4]]
; CHECK:       for.inc4:
; CHECK-NEXT:    [[INC5]] = add nuw nsw i64 [[I_017]], 1
; CHECK-NEXT:    [[EXITCOND18_NOT:%.*]] = icmp eq i64 [[INC5]], [[N:%.*]]
; CHECK-NEXT:    br i1 [[EXITCOND18_NOT]], label [[FOR_END6:%.*]], label [[FOR_COND1_PREHEADER]]
; CHECK:       for.end6:
; CHECK-NEXT:    ret void
;
entry:
  %0 = shl nuw i64 %m, 2
  br label %for.cond1.preheader

for.cond1.preheader:                              ; preds = %for.inc4, %entry
  %i.017 = phi i64 [ 0, %entry ], [ %inc5, %for.inc4 ]
  %1 = mul i64 %m, %i.017
  %scevgep = getelementptr i32, i32 addrspace(2)* %ar, i64 %1
  %scevgep1 = bitcast i32 addrspace(2)* %scevgep to i8 addrspace(2)*
  %mul = mul nsw i64 %i.017, %m
  call void @llvm.memset.p2i8.i64(i8 addrspace(2)* align 4 %scevgep1, i8 0, i64 %0, i1 false)
  br label %for.inc4

for.inc4:                                         ; preds = %for.cond1.preheader
  %inc5 = add nuw nsw i64 %i.017, 1
  %exitcond18.not = icmp eq i64 %inc5, %n
  br i1 %exitcond18.not, label %for.end6, label %for.cond1.preheader

for.end6:                                         ; preds = %for.inc4
  ret void
}
; void test(int *ar, int n, int m)
; {
;   long i;
;   for (i=0; i<n; ++i) {
;     int *arr = ar + i * m;
;     memset(arr, 0, m * sizeof(int));
;     ar += i;
;   }
; }
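; Negative case: the base pointer itself is advanced inside the loop
; (ar += i), so the memset destination is not an affine expression of the
; induction variable and the idiom is rejected ("Pointer is not affine,
; abort" above).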
define void @NonAffinePointer(i32* %ar, i32 %n, i32 %m) {
; CHECK-LABEL: @NonAffinePointer(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CONV:%.*]] = sext i32 [[N:%.*]] to i64
; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i64 0, [[CONV]]
; CHECK-NEXT:    br i1 [[CMP1]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END:%.*]]
; CHECK:       for.body.lr.ph:
; CHECK-NEXT:    [[CONV1:%.*]] = sext i32 [[M:%.*]] to i64
; CHECK-NEXT:    [[CONV2:%.*]] = sext i32 [[M]] to i64
; CHECK-NEXT:    [[MUL3:%.*]] = mul i64 [[CONV2]], 4
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[AR_ADDR_03:%.*]] = phi i32* [ [[AR:%.*]], [[FOR_BODY_LR_PH]] ], [ [[ADD_PTR4:%.*]], [[FOR_INC:%.*]] ]
; CHECK-NEXT:    [[I_02:%.*]] = phi i64 [ 0, [[FOR_BODY_LR_PH]] ], [ [[INC:%.*]], [[FOR_INC]] ]
; CHECK-NEXT:    [[MUL:%.*]] = mul nsw i64 [[I_02]], [[CONV1]]
; CHECK-NEXT:    [[ADD_PTR:%.*]] = getelementptr inbounds i32, i32* [[AR_ADDR_03]], i64 [[MUL]]
; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i32* [[ADD_PTR]] to i8*
; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 4 [[TMP0]], i8 0, i64 [[MUL3]], i1 false)
; CHECK-NEXT:    [[ADD_PTR4]] = getelementptr inbounds i32, i32* [[AR_ADDR_03]], i64 [[I_02]]
; CHECK-NEXT:    br label [[FOR_INC]]
; CHECK:       for.inc:
; CHECK-NEXT:    [[INC]] = add nuw nsw i64 [[I_02]], 1
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i64 [[INC]], [[CONV]]
; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_FOR_END_CRIT_EDGE:%.*]]
; CHECK:       for.cond.for.end_crit_edge:
; CHECK-NEXT:    br label [[FOR_END]]
; CHECK:       for.end:
; CHECK-NEXT:    ret void
;
entry:
  %conv = sext i32 %n to i64
  %cmp1 = icmp slt i64 0, %conv
  br i1 %cmp1, label %for.body.lr.ph, label %for.end

for.body.lr.ph:                                   ; preds = %entry
  %conv1 = sext i32 %m to i64
  %conv2 = sext i32 %m to i64
  %mul3 = mul i64 %conv2, 4
  br label %for.body

for.body:                                         ; preds = %for.body.lr.ph, %for.inc
  %ar.addr.03 = phi i32* [ %ar, %for.body.lr.ph ], [ %add.ptr4, %for.inc ]
  %i.02 = phi i64 [ 0, %for.body.lr.ph ], [ %inc, %for.inc ]
  %mul = mul nsw i64 %i.02, %conv1
  %add.ptr = getelementptr inbounds i32, i32* %ar.addr.03, i64 %mul
  %0 = bitcast i32* %add.ptr to i8*
  call void @llvm.memset.p0i8.i64(i8* align 4 %0, i8 0, i64 %mul3, i1 false)
  %add.ptr4 = getelementptr inbounds i32, i32* %ar.addr.03, i64 %i.02
  br label %for.inc

for.inc:                                          ; preds = %for.body
  %inc = add nuw nsw i64 %i.02, 1
  %cmp = icmp slt i64 %inc, %conv
  br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge

for.cond.for.end_crit_edge:                       ; preds = %for.inc
  br label %for.end

for.end:                                          ; preds = %for.cond.for.end_crit_edge, %entry
  ret void
}
declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1 immarg)
declare void @llvm.memset.p2i8.i64(i8 addrspace(2)* nocapture writeonly, i8, i64, i1 immarg)