; RUN: opt -objc-arc -S < %s | FileCheck %s

%struct.__objcFastEnumerationState = type { i64, i8**, i64*, [5 x i64] }

@"\01L_OBJC_METH_VAR_NAME_" = internal global [43 x i8] c"countByEnumeratingWithState:objects:count:\00", section "__TEXT,__objc_methname,cstring_literals", align 1
@"\01L_OBJC_SELECTOR_REFERENCES_" = internal global i8* getelementptr inbounds ([43 x i8], [43 x i8]* @"\01L_OBJC_METH_VAR_NAME_", i64 0, i64 0), section "__DATA, __objc_selrefs, literal_pointers, no_dead_strip"
@g = common global i8* null, align 8
@"\01L_OBJC_IMAGE_INFO" = internal constant [2 x i32] [i32 0, i32 16], section "__DATA, __objc_imageinfo, regular, no_dead_strip"

declare void @callee()
declare i8* @returner()
declare i8* @llvm.objc.retainAutoreleasedReturnValue(i8*)
declare i8* @llvm.objc.retain(i8*)
declare void @llvm.objc.enumerationMutation(i8*)
declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
declare i8* @llvm.objc.msgSend(i8*, i8*, ...) nonlazybind
declare void @use(i8*)
declare void @llvm.objc.release(i8*)
declare i8* @def()

declare void @__crasher_block_invoke(i8* nocapture)
declare i8* @llvm.objc.retainBlock(i8*)
declare void @__crasher_block_invoke1(i8* nocapture)

!0 = !{}
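
; The tests below all have the shape Clang gives an Objective-C
; fast-enumeration loop (for..in): the collection is retained once for the
; enumeration, nested inside an outer retain, and the loop calls
; countByEnumeratingWithState:objects:count: in batches. Schematically
; (a sketch for orientation only, not matched by FileCheck):
;
;   %outer = call i8* @llvm.objc.retain(i8* %collection)
;   %inner = call i8* @llvm.objc.retain(i8* %outer)
;   ; ... enumeration loop, all uses go through %inner ...
;   call void @llvm.objc.release(i8* %inner)
;   call void @llvm.objc.release(i8* %outer)
;
; When nothing between the inner retain and the inner release can release
; or mutate the collection, the inner pair is redundant and should be
; deleted.
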
; Delete a nested retain+release pair.

; CHECK-LABEL: define void @test0(
; CHECK: call i8* @llvm.objc.retain
; CHECK-NOT: @llvm.objc.retain

define void @test0(i8* %a) nounwind {
entry:
  %state.ptr = alloca %struct.__objcFastEnumerationState, align 8
  %items.ptr = alloca [16 x i8*], align 8
  %0 = call i8* @llvm.objc.retain(i8* %a) nounwind
  %tmp = bitcast %struct.__objcFastEnumerationState* %state.ptr to i8*
  call void @llvm.memset.p0i8.i64(i8* align 8 %tmp, i8 0, i64 64, i1 false)
  %1 = call i8* @llvm.objc.retain(i8* %0) nounwind
  %tmp2 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
  %call = call i64 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %1, i8* %tmp2, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
  %iszero = icmp eq i64 %call, 0
  br i1 %iszero, label %forcoll.empty, label %forcoll.loopinit

forcoll.loopinit:
  %mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
  %mutationsptr = load i64*, i64** %mutationsptr.ptr, align 8
  %forcoll.initial-mutations = load i64, i64* %mutationsptr, align 8
  %stateitems.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 1
  br label %forcoll.loopbody.outer

forcoll.loopbody.outer:
  %forcoll.count.ph = phi i64 [ %call, %forcoll.loopinit ], [ %call6, %forcoll.refetch ]
  %tmp7 = icmp ugt i64 %forcoll.count.ph, 1
  %umax = select i1 %tmp7, i64 %forcoll.count.ph, i64 1
  br label %forcoll.loopbody

forcoll.loopbody:
  %forcoll.index = phi i64 [ 0, %forcoll.loopbody.outer ], [ %4, %forcoll.notmutated ]
  %mutationsptr3 = load i64*, i64** %mutationsptr.ptr, align 8
  %statemutations = load i64, i64* %mutationsptr3, align 8
  %2 = icmp eq i64 %statemutations, %forcoll.initial-mutations
  br i1 %2, label %forcoll.notmutated, label %forcoll.mutated

forcoll.mutated:
  call void @llvm.objc.enumerationMutation(i8* %1)
  br label %forcoll.notmutated

forcoll.notmutated:
  %stateitems = load i8**, i8*** %stateitems.ptr, align 8
  %currentitem.ptr = getelementptr i8*, i8** %stateitems, i64 %forcoll.index
  %3 = load i8*, i8** %currentitem.ptr, align 8
  call void @use(i8* %3)
  %4 = add i64 %forcoll.index, 1
  %exitcond = icmp eq i64 %4, %umax
  br i1 %exitcond, label %forcoll.refetch, label %forcoll.loopbody

forcoll.refetch:
  %tmp5 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
  %call6 = call i64 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %1, i8* %tmp5, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
  %5 = icmp eq i64 %call6, 0
  br i1 %5, label %forcoll.empty, label %forcoll.loopbody.outer

forcoll.empty:
  call void @llvm.objc.release(i8* %1) nounwind
  call void @llvm.objc.release(i8* %0) nounwind, !clang.imprecise_release !0
  ret void
}

; Delete a nested retain+release pair.

; CHECK-LABEL: define void @test2(
; CHECK: call i8* @llvm.objc.retainAutoreleasedReturnValue
; CHECK-NOT: @llvm.objc.retain

define void @test2() nounwind {
entry:
  %state.ptr = alloca %struct.__objcFastEnumerationState, align 8
  %items.ptr = alloca [16 x i8*], align 8
  %call = call i8* @returner()
  %0 = call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %call) nounwind
  %tmp = bitcast %struct.__objcFastEnumerationState* %state.ptr to i8*
  call void @llvm.memset.p0i8.i64(i8* align 8 %tmp, i8 0, i64 64, i1 false)
  %1 = call i8* @llvm.objc.retain(i8* %0) nounwind
  %tmp2 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
  %call3 = call i64 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %1, i8* %tmp2, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
  %iszero = icmp eq i64 %call3, 0
  br i1 %iszero, label %forcoll.empty, label %forcoll.loopinit

forcoll.loopinit:
  %mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
  %mutationsptr = load i64*, i64** %mutationsptr.ptr, align 8
  %forcoll.initial-mutations = load i64, i64* %mutationsptr, align 8
  %stateitems.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 1
  br label %forcoll.loopbody.outer

forcoll.loopbody.outer:
  %forcoll.count.ph = phi i64 [ %call3, %forcoll.loopinit ], [ %call7, %forcoll.refetch ]
  %tmp8 = icmp ugt i64 %forcoll.count.ph, 1
  %umax = select i1 %tmp8, i64 %forcoll.count.ph, i64 1
  br label %forcoll.loopbody

forcoll.loopbody:
  %forcoll.index = phi i64 [ 0, %forcoll.loopbody.outer ], [ %4, %forcoll.notmutated ]
  %mutationsptr4 = load i64*, i64** %mutationsptr.ptr, align 8
  %statemutations = load i64, i64* %mutationsptr4, align 8
  %2 = icmp eq i64 %statemutations, %forcoll.initial-mutations
  br i1 %2, label %forcoll.notmutated, label %forcoll.mutated

forcoll.mutated:
  call void @llvm.objc.enumerationMutation(i8* %1)
  br label %forcoll.notmutated

forcoll.notmutated:
  %stateitems = load i8**, i8*** %stateitems.ptr, align 8
  %currentitem.ptr = getelementptr i8*, i8** %stateitems, i64 %forcoll.index
  %3 = load i8*, i8** %currentitem.ptr, align 8
  call void @use(i8* %3)
  %4 = add i64 %forcoll.index, 1
  %exitcond = icmp eq i64 %4, %umax
  br i1 %exitcond, label %forcoll.refetch, label %forcoll.loopbody

forcoll.refetch:
  %tmp6 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
  %call7 = call i64 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %1, i8* %tmp6, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
  %5 = icmp eq i64 %call7, 0
  br i1 %5, label %forcoll.empty, label %forcoll.loopbody.outer

forcoll.empty:
  call void @llvm.objc.release(i8* %1) nounwind
  call void @llvm.objc.release(i8* %0) nounwind, !clang.imprecise_release !0
  ret void
}

; Delete a nested retain+release pair.

; CHECK-LABEL: define void @test4(
; CHECK: call i8* @llvm.objc.retain
; CHECK-NOT: @llvm.objc.retain

define void @test4() nounwind {
entry:
  %state.ptr = alloca %struct.__objcFastEnumerationState, align 8
  %items.ptr = alloca [16 x i8*], align 8
  %tmp = load i8*, i8** @g, align 8
  %0 = call i8* @llvm.objc.retain(i8* %tmp) nounwind
  %tmp2 = bitcast %struct.__objcFastEnumerationState* %state.ptr to i8*
  call void @llvm.memset.p0i8.i64(i8* align 8 %tmp2, i8 0, i64 64, i1 false)
  %1 = call i8* @llvm.objc.retain(i8* %0) nounwind
  %tmp4 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
  %call = call i64 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %1, i8* %tmp4, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
  %iszero = icmp eq i64 %call, 0
  br i1 %iszero, label %forcoll.empty, label %forcoll.loopinit

forcoll.loopinit:
  %mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
  %mutationsptr = load i64*, i64** %mutationsptr.ptr, align 8
  %forcoll.initial-mutations = load i64, i64* %mutationsptr, align 8
  %stateitems.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 1
  br label %forcoll.loopbody.outer

forcoll.loopbody.outer:
  %forcoll.count.ph = phi i64 [ %call, %forcoll.loopinit ], [ %call8, %forcoll.refetch ]
  %tmp9 = icmp ugt i64 %forcoll.count.ph, 1
  %umax = select i1 %tmp9, i64 %forcoll.count.ph, i64 1
  br label %forcoll.loopbody

forcoll.loopbody:
  %forcoll.index = phi i64 [ 0, %forcoll.loopbody.outer ], [ %4, %forcoll.notmutated ]
  %mutationsptr5 = load i64*, i64** %mutationsptr.ptr, align 8
  %statemutations = load i64, i64* %mutationsptr5, align 8
  %2 = icmp eq i64 %statemutations, %forcoll.initial-mutations
  br i1 %2, label %forcoll.notmutated, label %forcoll.mutated

forcoll.mutated:
  call void @llvm.objc.enumerationMutation(i8* %1)
  br label %forcoll.notmutated

forcoll.notmutated:
  %stateitems = load i8**, i8*** %stateitems.ptr, align 8
  %currentitem.ptr = getelementptr i8*, i8** %stateitems, i64 %forcoll.index
  %3 = load i8*, i8** %currentitem.ptr, align 8
  call void @use(i8* %3)
  %4 = add i64 %forcoll.index, 1
  %exitcond = icmp eq i64 %4, %umax
  br i1 %exitcond, label %forcoll.refetch, label %forcoll.loopbody

forcoll.refetch:
  %tmp7 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
  %call8 = call i64 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %1, i8* %tmp7, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
  %5 = icmp eq i64 %call8, 0
  br i1 %5, label %forcoll.empty, label %forcoll.loopbody.outer

forcoll.empty:
  call void @llvm.objc.release(i8* %1) nounwind
  call void @llvm.objc.release(i8* %0) nounwind, !clang.imprecise_release !0
  ret void
}

; Delete a nested retain+release pair.

; CHECK-LABEL: define void @test5(
; CHECK: call i8* @llvm.objc.retainAutoreleasedReturnValue
; CHECK-NOT: @llvm.objc.retain

define void @test5() nounwind {
entry:
  %state.ptr = alloca %struct.__objcFastEnumerationState, align 8
  %items.ptr = alloca [16 x i8*], align 8
  %call = call i8* @returner()
  %0 = call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %call) nounwind
  %tmp = bitcast %struct.__objcFastEnumerationState* %state.ptr to i8*
  call void @llvm.memset.p0i8.i64(i8* align 8 %tmp, i8 0, i64 64, i1 false)
  %1 = call i8* @llvm.objc.retain(i8* %0) nounwind
  %tmp2 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
  %call3 = call i64 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %1, i8* %tmp2, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
  %iszero = icmp eq i64 %call3, 0
  br i1 %iszero, label %forcoll.empty, label %forcoll.loopinit

forcoll.loopinit:
  %mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
  %mutationsptr = load i64*, i64** %mutationsptr.ptr, align 8
  %forcoll.initial-mutations = load i64, i64* %mutationsptr, align 8
  %stateitems.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 1
  br label %forcoll.loopbody.outer

forcoll.loopbody.outer:
  %forcoll.count.ph = phi i64 [ %call3, %forcoll.loopinit ], [ %call7, %forcoll.refetch ]
  %tmp8 = icmp ugt i64 %forcoll.count.ph, 1
  %umax = select i1 %tmp8, i64 %forcoll.count.ph, i64 1
  br label %forcoll.loopbody

forcoll.loopbody:
  %forcoll.index = phi i64 [ 0, %forcoll.loopbody.outer ], [ %4, %forcoll.notmutated ]
  %mutationsptr4 = load i64*, i64** %mutationsptr.ptr, align 8
  %statemutations = load i64, i64* %mutationsptr4, align 8
  %2 = icmp eq i64 %statemutations, %forcoll.initial-mutations
  br i1 %2, label %forcoll.notmutated, label %forcoll.mutated

forcoll.mutated:
  call void @llvm.objc.enumerationMutation(i8* %1)
  br label %forcoll.notmutated

forcoll.notmutated:
  %stateitems = load i8**, i8*** %stateitems.ptr, align 8
  %currentitem.ptr = getelementptr i8*, i8** %stateitems, i64 %forcoll.index
  %3 = load i8*, i8** %currentitem.ptr, align 8
  call void @use(i8* %3)
  %4 = add i64 %forcoll.index, 1
  %exitcond = icmp eq i64 %4, %umax
  br i1 %exitcond, label %forcoll.refetch, label %forcoll.loopbody

forcoll.refetch:
  %tmp6 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
  %call7 = call i64 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %1, i8* %tmp6, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
  %5 = icmp eq i64 %call7, 0
  br i1 %5, label %forcoll.empty, label %forcoll.loopbody.outer

forcoll.empty:
  call void @llvm.objc.release(i8* %1) nounwind
  call void @llvm.objc.release(i8* %0) nounwind, !clang.imprecise_release !0
  ret void
}

; We handle this now due to the fact that a release just needs a post dominating
; use.
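;
; Concretely, @test6 is @test5 with an extra call to @callee() between the
; two releases in forcoll.empty; that interleaved call no longer prevents
; the nested retain+release pair from being deleted.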

; CHECK-LABEL: define void @test6(
; CHECK: call i8* @llvm.objc.retainAutoreleasedReturnValue
; CHECK-NOT: @llvm.objc.retain

define void @test6() nounwind {
entry:
  %state.ptr = alloca %struct.__objcFastEnumerationState, align 8
  %items.ptr = alloca [16 x i8*], align 8
  %call = call i8* @returner()
  %0 = call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %call) nounwind
  %tmp = bitcast %struct.__objcFastEnumerationState* %state.ptr to i8*
  call void @llvm.memset.p0i8.i64(i8* align 8 %tmp, i8 0, i64 64, i1 false)
  %1 = call i8* @llvm.objc.retain(i8* %0) nounwind
  %tmp2 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
  %call3 = call i64 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %1, i8* %tmp2, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
  %iszero = icmp eq i64 %call3, 0
  br i1 %iszero, label %forcoll.empty, label %forcoll.loopinit

forcoll.loopinit:
  %mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
  %mutationsptr = load i64*, i64** %mutationsptr.ptr, align 8
  %forcoll.initial-mutations = load i64, i64* %mutationsptr, align 8
  %stateitems.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 1
  br label %forcoll.loopbody.outer

forcoll.loopbody.outer:
  %forcoll.count.ph = phi i64 [ %call3, %forcoll.loopinit ], [ %call7, %forcoll.refetch ]
  %tmp8 = icmp ugt i64 %forcoll.count.ph, 1
  %umax = select i1 %tmp8, i64 %forcoll.count.ph, i64 1
  br label %forcoll.loopbody

forcoll.loopbody:
  %forcoll.index = phi i64 [ 0, %forcoll.loopbody.outer ], [ %4, %forcoll.notmutated ]
  %mutationsptr4 = load i64*, i64** %mutationsptr.ptr, align 8
  %statemutations = load i64, i64* %mutationsptr4, align 8
  %2 = icmp eq i64 %statemutations, %forcoll.initial-mutations
  br i1 %2, label %forcoll.notmutated, label %forcoll.mutated

forcoll.mutated:
  call void @llvm.objc.enumerationMutation(i8* %1)
  br label %forcoll.notmutated

forcoll.notmutated:
  %stateitems = load i8**, i8*** %stateitems.ptr, align 8
  %currentitem.ptr = getelementptr i8*, i8** %stateitems, i64 %forcoll.index
  %3 = load i8*, i8** %currentitem.ptr, align 8
  call void @use(i8* %3)
  %4 = add i64 %forcoll.index, 1
  %exitcond = icmp eq i64 %4, %umax
  br i1 %exitcond, label %forcoll.refetch, label %forcoll.loopbody

forcoll.refetch:
  %tmp6 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
  %call7 = call i64 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %1, i8* %tmp6, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
  %5 = icmp eq i64 %call7, 0
  br i1 %5, label %forcoll.empty, label %forcoll.loopbody.outer

forcoll.empty:
  call void @llvm.objc.release(i8* %1) nounwind
  call void @callee()
  call void @llvm.objc.release(i8* %0) nounwind, !clang.imprecise_release !0
  ret void
}

; TODO: Delete a nested retain+release pair.
; The optimizer currently can't do this, because it isn't sophisticated enough
; in reasoning about nesting.
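;
; Here the extra @callee() calls sit inside the nested region: one between
; the outer retainAutoreleasedReturnValue and the inner retain, and one
; between the two releases, so pairing them up requires reasoning about the
; nesting itself.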

; CHECK-LABEL: define void @test7(
; CHECK: call i8* @llvm.objc.retainAutoreleasedReturnValue
; CHECK: @llvm.objc.retain

define void @test7() nounwind {
entry:
  %state.ptr = alloca %struct.__objcFastEnumerationState, align 8
  %items.ptr = alloca [16 x i8*], align 8
  %call = call i8* @returner()
  %0 = call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %call) nounwind
  call void @callee()
  %tmp = bitcast %struct.__objcFastEnumerationState* %state.ptr to i8*
  call void @llvm.memset.p0i8.i64(i8* align 8 %tmp, i8 0, i64 64, i1 false)
  %1 = call i8* @llvm.objc.retain(i8* %0) nounwind
  %tmp2 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
  %call3 = call i64 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %1, i8* %tmp2, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
  %iszero = icmp eq i64 %call3, 0
  br i1 %iszero, label %forcoll.empty, label %forcoll.loopinit

forcoll.loopinit:
  %mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
  %mutationsptr = load i64*, i64** %mutationsptr.ptr, align 8
  %forcoll.initial-mutations = load i64, i64* %mutationsptr, align 8
  %stateitems.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 1
  br label %forcoll.loopbody.outer

forcoll.loopbody.outer:
  %forcoll.count.ph = phi i64 [ %call3, %forcoll.loopinit ], [ %call7, %forcoll.refetch ]
  %tmp8 = icmp ugt i64 %forcoll.count.ph, 1
  %umax = select i1 %tmp8, i64 %forcoll.count.ph, i64 1
  br label %forcoll.loopbody

forcoll.loopbody:
  %forcoll.index = phi i64 [ 0, %forcoll.loopbody.outer ], [ %4, %forcoll.notmutated ]
  %mutationsptr4 = load i64*, i64** %mutationsptr.ptr, align 8
  %statemutations = load i64, i64* %mutationsptr4, align 8
  %2 = icmp eq i64 %statemutations, %forcoll.initial-mutations
  br i1 %2, label %forcoll.notmutated, label %forcoll.mutated

forcoll.mutated:
  call void @llvm.objc.enumerationMutation(i8* %1)
  br label %forcoll.notmutated

forcoll.notmutated:
  %stateitems = load i8**, i8*** %stateitems.ptr, align 8
  %currentitem.ptr = getelementptr i8*, i8** %stateitems, i64 %forcoll.index
  %3 = load i8*, i8** %currentitem.ptr, align 8
  call void @use(i8* %3)
  %4 = add i64 %forcoll.index, 1
  %exitcond = icmp eq i64 %4, %umax
  br i1 %exitcond, label %forcoll.refetch, label %forcoll.loopbody

forcoll.refetch:
  %tmp6 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
  %call7 = call i64 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %1, i8* %tmp6, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
  %5 = icmp eq i64 %call7, 0
  br i1 %5, label %forcoll.empty, label %forcoll.loopbody.outer

forcoll.empty:
  call void @llvm.objc.release(i8* %1) nounwind
  call void @callee()
  call void @llvm.objc.release(i8* %0) nounwind, !clang.imprecise_release !0
  ret void
}

; Delete a nested retain+release pair.

; CHECK-LABEL: define void @test8(
; CHECK: call i8* @llvm.objc.retainAutoreleasedReturnValue
; CHECK-NOT: @llvm.objc.retain
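;
; This variant uses each element conditionally: a null check branches to
; if.then (which calls @callee()) before rejoining at forcoll.next, and the
; nested pair is still deleted.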

define void @test8() nounwind {
entry:
  %state.ptr = alloca %struct.__objcFastEnumerationState, align 8
  %items.ptr = alloca [16 x i8*], align 8
  %call = call i8* @returner()
  %0 = call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %call) nounwind
  %tmp = bitcast %struct.__objcFastEnumerationState* %state.ptr to i8*
  call void @llvm.memset.p0i8.i64(i8* align 8 %tmp, i8 0, i64 64, i1 false)
  %1 = call i8* @llvm.objc.retain(i8* %0) nounwind
  %tmp2 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
  %call3 = call i64 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %1, i8* %tmp2, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
  %iszero = icmp eq i64 %call3, 0
  br i1 %iszero, label %forcoll.empty, label %forcoll.loopinit

forcoll.loopinit:
  %mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
  %mutationsptr = load i64*, i64** %mutationsptr.ptr, align 8
  %forcoll.initial-mutations = load i64, i64* %mutationsptr, align 8
  %stateitems.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 1
  br label %forcoll.loopbody.outer

forcoll.loopbody.outer:
  %forcoll.count.ph = phi i64 [ %call3, %forcoll.loopinit ], [ %call7, %forcoll.refetch ]
  %tmp8 = icmp ugt i64 %forcoll.count.ph, 1
  %umax = select i1 %tmp8, i64 %forcoll.count.ph, i64 1
  br label %forcoll.loopbody

forcoll.loopbody:
  %forcoll.index = phi i64 [ 0, %forcoll.loopbody.outer ], [ %4, %forcoll.next ]
  %mutationsptr4 = load i64*, i64** %mutationsptr.ptr, align 8
  %statemutations = load i64, i64* %mutationsptr4, align 8
  %2 = icmp eq i64 %statemutations, %forcoll.initial-mutations
  br i1 %2, label %forcoll.notmutated, label %forcoll.mutated

forcoll.mutated:
  call void @llvm.objc.enumerationMutation(i8* %1)
  br label %forcoll.notmutated

forcoll.notmutated:
  %stateitems = load i8**, i8*** %stateitems.ptr, align 8
  %currentitem.ptr = getelementptr i8*, i8** %stateitems, i64 %forcoll.index
  %3 = load i8*, i8** %currentitem.ptr, align 8
  %tobool = icmp eq i8* %3, null
  br i1 %tobool, label %forcoll.next, label %if.then

if.then:
  call void @callee()
  br label %forcoll.next

forcoll.next:
  %4 = add i64 %forcoll.index, 1
  %exitcond = icmp eq i64 %4, %umax
  br i1 %exitcond, label %forcoll.refetch, label %forcoll.loopbody

forcoll.refetch:
  %tmp6 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
  %call7 = call i64 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %1, i8* %tmp6, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
  %5 = icmp eq i64 %call7, 0
  br i1 %5, label %forcoll.empty, label %forcoll.loopbody.outer

forcoll.empty:
  call void @llvm.objc.release(i8* %1) nounwind
  call void @llvm.objc.release(i8* %0) nounwind, !clang.imprecise_release !0
  ret void
}

; TODO: Delete a nested retain+release pair.
; The optimizer currently can't do this, because of a split loop backedge.
; See test9b for the same testcase without a split backedge.
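;
; "Split backedge" means the backedge does not go directly from
; forcoll.notmutated to forcoll.loopbody; it is routed through
; forcoll.notmutated.forcoll.loopbody_crit_edge, which performs the index
; increment.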

; CHECK-LABEL: define void @test9(
; CHECK: call i8* @llvm.objc.retainAutoreleasedReturnValue
; CHECK: call i8* @llvm.objc.retainAutoreleasedReturnValue
; CHECK: call i8* @llvm.objc.retain

define void @test9() nounwind {
entry:
  %state.ptr = alloca %struct.__objcFastEnumerationState, align 8
  %items.ptr = alloca [16 x i8*], align 8
  %call = call i8* @returner()
  %0 = call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %call) nounwind
  %call1 = call i8* @returner()
  %1 = call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %call1) nounwind
  %tmp = bitcast %struct.__objcFastEnumerationState* %state.ptr to i8*
  call void @llvm.memset.p0i8.i64(i8* align 8 %tmp, i8 0, i64 64, i1 false)
  %2 = call i8* @llvm.objc.retain(i8* %0) nounwind
  %tmp3 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
  %call4 = call i64 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %2, i8* %tmp3, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
  %iszero = icmp eq i64 %call4, 0
  br i1 %iszero, label %forcoll.empty, label %forcoll.loopinit

forcoll.loopinit:
  %mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
  %mutationsptr = load i64*, i64** %mutationsptr.ptr, align 8
  %forcoll.initial-mutations = load i64, i64* %mutationsptr, align 8
  br label %forcoll.loopbody.outer

forcoll.loopbody.outer:
  %forcoll.count.ph = phi i64 [ %call4, %forcoll.loopinit ], [ %call7, %forcoll.refetch ]
  %tmp9 = icmp ugt i64 %forcoll.count.ph, 1
  %umax = select i1 %tmp9, i64 %forcoll.count.ph, i64 1
  br label %forcoll.loopbody

forcoll.loopbody:
  %forcoll.index = phi i64 [ %phitmp, %forcoll.notmutated.forcoll.loopbody_crit_edge ], [ 1, %forcoll.loopbody.outer ]
  %mutationsptr5 = load i64*, i64** %mutationsptr.ptr, align 8
  %statemutations = load i64, i64* %mutationsptr5, align 8
  %3 = icmp eq i64 %statemutations, %forcoll.initial-mutations
  br i1 %3, label %forcoll.notmutated, label %forcoll.mutated

forcoll.mutated:
  call void @llvm.objc.enumerationMutation(i8* %2)
  br label %forcoll.notmutated

forcoll.notmutated:
  %exitcond = icmp eq i64 %forcoll.index, %umax
  br i1 %exitcond, label %forcoll.refetch, label %forcoll.notmutated.forcoll.loopbody_crit_edge

forcoll.notmutated.forcoll.loopbody_crit_edge:
  %phitmp = add i64 %forcoll.index, 1
  br label %forcoll.loopbody

forcoll.refetch:
  %tmp6 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
  %call7 = call i64 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %2, i8* %tmp6, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
  %4 = icmp eq i64 %call7, 0
  br i1 %4, label %forcoll.empty, label %forcoll.loopbody.outer

forcoll.empty:
  call void @llvm.objc.release(i8* %2) nounwind
  call void @llvm.objc.release(i8* %1) nounwind, !clang.imprecise_release !0
  call void @llvm.objc.release(i8* %0) nounwind, !clang.imprecise_release !0
  ret void
}

; Like test9, but without a split backedge. TODO: optimize this.
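;
; Here the increment and exit test are folded back into forcoll.notmutated,
; so the backedge is a single edge, yet the inner retain is still not
; deleted.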

; CHECK-LABEL: define void @test9b(
; CHECK: call i8* @llvm.objc.retainAutoreleasedReturnValue
; CHECK: call i8* @llvm.objc.retainAutoreleasedReturnValue
; CHECK: @llvm.objc.retain

define void @test9b() nounwind {
entry:
  %state.ptr = alloca %struct.__objcFastEnumerationState, align 8
  %items.ptr = alloca [16 x i8*], align 8
  %call = call i8* @returner()
  %0 = call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %call) nounwind
  %call1 = call i8* @returner()
  %1 = call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %call1) nounwind
  %tmp = bitcast %struct.__objcFastEnumerationState* %state.ptr to i8*
  call void @llvm.memset.p0i8.i64(i8* align 8 %tmp, i8 0, i64 64, i1 false)
  %2 = call i8* @llvm.objc.retain(i8* %0) nounwind
  %tmp3 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
  %call4 = call i64 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %2, i8* %tmp3, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
  %iszero = icmp eq i64 %call4, 0
  br i1 %iszero, label %forcoll.empty, label %forcoll.loopinit

forcoll.loopinit:
  %mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
  %mutationsptr = load i64*, i64** %mutationsptr.ptr, align 8
  %forcoll.initial-mutations = load i64, i64* %mutationsptr, align 8
  br label %forcoll.loopbody.outer

forcoll.loopbody.outer:
  %forcoll.count.ph = phi i64 [ %call4, %forcoll.loopinit ], [ %call7, %forcoll.refetch ]
  %tmp9 = icmp ugt i64 %forcoll.count.ph, 1
  %umax = select i1 %tmp9, i64 %forcoll.count.ph, i64 1
  br label %forcoll.loopbody

forcoll.loopbody:
  %forcoll.index = phi i64 [ %phitmp, %forcoll.notmutated ], [ 0, %forcoll.loopbody.outer ]
  %mutationsptr5 = load i64*, i64** %mutationsptr.ptr, align 8
  %statemutations = load i64, i64* %mutationsptr5, align 8
  %3 = icmp eq i64 %statemutations, %forcoll.initial-mutations
  br i1 %3, label %forcoll.notmutated, label %forcoll.mutated

forcoll.mutated:
  call void @llvm.objc.enumerationMutation(i8* %2)
  br label %forcoll.notmutated

forcoll.notmutated:
  %phitmp = add i64 %forcoll.index, 1
  %exitcond = icmp eq i64 %phitmp, %umax
  br i1 %exitcond, label %forcoll.refetch, label %forcoll.loopbody

forcoll.refetch:
  %tmp6 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
  %call7 = call i64 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %2, i8* %tmp6, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
  %4 = icmp eq i64 %call7, 0
  br i1 %4, label %forcoll.empty, label %forcoll.loopbody.outer

forcoll.empty:
  call void @llvm.objc.release(i8* %2) nounwind
  call void @llvm.objc.release(i8* %1) nounwind, !clang.imprecise_release !0
  call void @llvm.objc.release(i8* %0) nounwind, !clang.imprecise_release !0
  ret void
}

; TODO: Delete a nested retain+release pair.
; The optimizer currently can't do this, because of a split loop backedge.
; See test10b for the same testcase without a split backedge.
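;
; @test10 is @test9 plus a call to @callee() in the entry block, after the
; second retainAutoreleasedReturnValue.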

; CHECK-LABEL: define void @test10(
; CHECK: call i8* @llvm.objc.retainAutoreleasedReturnValue
; CHECK: call i8* @llvm.objc.retainAutoreleasedReturnValue
; CHECK: call i8* @llvm.objc.retain

define void @test10() nounwind {
entry:
  %state.ptr = alloca %struct.__objcFastEnumerationState, align 8
  %items.ptr = alloca [16 x i8*], align 8
  %call = call i8* @returner()
  %0 = call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %call) nounwind
  %call1 = call i8* @returner()
  %1 = call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %call1) nounwind
  call void @callee()
  %tmp = bitcast %struct.__objcFastEnumerationState* %state.ptr to i8*
  call void @llvm.memset.p0i8.i64(i8* align 8 %tmp, i8 0, i64 64, i1 false)
  %2 = call i8* @llvm.objc.retain(i8* %0) nounwind
  %tmp3 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
  %call4 = call i64 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %2, i8* %tmp3, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
  %iszero = icmp eq i64 %call4, 0
  br i1 %iszero, label %forcoll.empty, label %forcoll.loopinit

forcoll.loopinit:
  %mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
  %mutationsptr = load i64*, i64** %mutationsptr.ptr, align 8
  %forcoll.initial-mutations = load i64, i64* %mutationsptr, align 8
  br label %forcoll.loopbody.outer

forcoll.loopbody.outer:
  %forcoll.count.ph = phi i64 [ %call4, %forcoll.loopinit ], [ %call7, %forcoll.refetch ]
  %tmp9 = icmp ugt i64 %forcoll.count.ph, 1
  %umax = select i1 %tmp9, i64 %forcoll.count.ph, i64 1
  br label %forcoll.loopbody

forcoll.loopbody:
  %forcoll.index = phi i64 [ %phitmp, %forcoll.notmutated.forcoll.loopbody_crit_edge ], [ 1, %forcoll.loopbody.outer ]
  %mutationsptr5 = load i64*, i64** %mutationsptr.ptr, align 8
  %statemutations = load i64, i64* %mutationsptr5, align 8
  %3 = icmp eq i64 %statemutations, %forcoll.initial-mutations
  br i1 %3, label %forcoll.notmutated, label %forcoll.mutated

forcoll.mutated:
  call void @llvm.objc.enumerationMutation(i8* %2)
  br label %forcoll.notmutated

forcoll.notmutated:
  %exitcond = icmp eq i64 %forcoll.index, %umax
  br i1 %exitcond, label %forcoll.refetch, label %forcoll.notmutated.forcoll.loopbody_crit_edge

forcoll.notmutated.forcoll.loopbody_crit_edge:
  %phitmp = add i64 %forcoll.index, 1
  br label %forcoll.loopbody

forcoll.refetch:
  %tmp6 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
  %call7 = call i64 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %2, i8* %tmp6, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
  %4 = icmp eq i64 %call7, 0
  br i1 %4, label %forcoll.empty, label %forcoll.loopbody.outer

forcoll.empty:
  call void @llvm.objc.release(i8* %2) nounwind
  call void @llvm.objc.release(i8* %1) nounwind, !clang.imprecise_release !0
  call void @llvm.objc.release(i8* %0) nounwind, !clang.imprecise_release !0
  ret void
}

; Like test10, but without a split backedge. TODO: optimize this.

; CHECK-LABEL: define void @test10b(
; CHECK: call i8* @llvm.objc.retainAutoreleasedReturnValue
; CHECK: call i8* @llvm.objc.retainAutoreleasedReturnValue
; CHECK: @llvm.objc.retain

define void @test10b() nounwind {
entry:
  %state.ptr = alloca %struct.__objcFastEnumerationState, align 8
  %items.ptr = alloca [16 x i8*], align 8
  %call = call i8* @returner()
  %0 = call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %call) nounwind
  %call1 = call i8* @returner()
  %1 = call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %call1) nounwind
  call void @callee()
  %tmp = bitcast %struct.__objcFastEnumerationState* %state.ptr to i8*
  call void @llvm.memset.p0i8.i64(i8* align 8 %tmp, i8 0, i64 64, i1 false)
  %2 = call i8* @llvm.objc.retain(i8* %0) nounwind
  %tmp3 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
  %call4 = call i64 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %2, i8* %tmp3, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
  %iszero = icmp eq i64 %call4, 0
  br i1 %iszero, label %forcoll.empty, label %forcoll.loopinit

forcoll.loopinit:
  %mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
  %mutationsptr = load i64*, i64** %mutationsptr.ptr, align 8
  %forcoll.initial-mutations = load i64, i64* %mutationsptr, align 8
  br label %forcoll.loopbody.outer

forcoll.loopbody.outer:
  %forcoll.count.ph = phi i64 [ %call4, %forcoll.loopinit ], [ %call7, %forcoll.refetch ]
  %tmp9 = icmp ugt i64 %forcoll.count.ph, 1
  %umax = select i1 %tmp9, i64 %forcoll.count.ph, i64 1
  br label %forcoll.loopbody

forcoll.loopbody:
  %forcoll.index = phi i64 [ %phitmp, %forcoll.notmutated ], [ 0, %forcoll.loopbody.outer ]
  %mutationsptr5 = load i64*, i64** %mutationsptr.ptr, align 8
  %statemutations = load i64, i64* %mutationsptr5, align 8
  %3 = icmp eq i64 %statemutations, %forcoll.initial-mutations
  br i1 %3, label %forcoll.notmutated, label %forcoll.mutated

forcoll.mutated:
  call void @llvm.objc.enumerationMutation(i8* %2)
  br label %forcoll.notmutated

forcoll.notmutated:
  %phitmp = add i64 %forcoll.index, 1
  %exitcond = icmp eq i64 %phitmp, %umax
  br i1 %exitcond, label %forcoll.refetch, label %forcoll.loopbody

forcoll.refetch:
  %tmp6 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
  %call7 = call i64 bitcast (i8* (i8*, i8*, ...)* @llvm.objc.msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %2, i8* %tmp6, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
  %4 = icmp eq i64 %call7, 0
  br i1 %4, label %forcoll.empty, label %forcoll.loopbody.outer

forcoll.empty:
  call void @llvm.objc.release(i8* %2) nounwind
  call void @llvm.objc.release(i8* %1) nounwind, !clang.imprecise_release !0
  call void @llvm.objc.release(i8* %0) nounwind, !clang.imprecise_release !0
  ret void
}

; Pointers to strong pointers can obscure provenance relationships. Be conservative
; in the face of escaping pointers. rdar://12150909.
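;
; In @test11 the retained object is stored into the captured-variable slot
; of two stack blocks (%foo and %foo10 below), so it escapes through memory,
; and the second block even stores the unretained %call. Both retains and
; the final release must therefore stay.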

%struct.__block_d = type { i64, i64 }

@_NSConcreteStackBlock = external global i8*
@__block_d_tmp = external hidden constant { i64, i64, i8*, i8*, i8*, i8* }
@__block_d_tmp5 = external hidden constant { i64, i64, i8*, i8*, i8*, i8* }

; CHECK-LABEL: define void @test11(
; CHECK: tail call i8* @llvm.objc.retain(i8* %call) [[NUW:#[0-9]+]]
; CHECK: tail call i8* @llvm.objc.retain(i8* %call) [[NUW]]
; CHECK: call void @llvm.objc.release(i8* %call) [[NUW]], !clang.imprecise_release !0

define void @test11() {
entry:
  %block = alloca <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>, align 8
  %block9 = alloca <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>, align 8
  %call = call i8* @def(), !clang.arc.no_objc_arc_exceptions !0
  %foo = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block, i64 0, i32 5
  %block.isa = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block, i64 0, i32 0
  store i8* bitcast (i8** @_NSConcreteStackBlock to i8*), i8** %block.isa, align 8
  %block.flags = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block, i64 0, i32 1
  store i32 1107296256, i32* %block.flags, align 8
  %block.reserved = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block, i64 0, i32 2
  store i32 0, i32* %block.reserved, align 4
  %block.invoke = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block, i64 0, i32 3
  store i8* bitcast (void (i8*)* @__crasher_block_invoke to i8*), i8** %block.invoke, align 8
  %block.d = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block, i64 0, i32 4
  store %struct.__block_d* bitcast ({ i64, i64, i8*, i8*, i8*, i8* }* @__block_d_tmp to %struct.__block_d*), %struct.__block_d** %block.d, align 8
  %foo2 = tail call i8* @llvm.objc.retain(i8* %call) nounwind
  store i8* %foo2, i8** %foo, align 8
  %foo4 = bitcast <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block to i8*
  %foo5 = call i8* @llvm.objc.retainBlock(i8* %foo4) nounwind
  call void @use(i8* %foo5), !clang.arc.no_objc_arc_exceptions !0
  call void @llvm.objc.release(i8* %foo5) nounwind
  %strongdestroy = load i8*, i8** %foo, align 8
  call void @llvm.objc.release(i8* %strongdestroy) nounwind, !clang.imprecise_release !0
  %foo10 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block9, i64 0, i32 5
  %block.isa11 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block9, i64 0, i32 0
  store i8* bitcast (i8** @_NSConcreteStackBlock to i8*), i8** %block.isa11, align 8
  %block.flags12 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block9, i64 0, i32 1
  store i32 1107296256, i32* %block.flags12, align 8
  %block.reserved13 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block9, i64 0, i32 2
  store i32 0, i32* %block.reserved13, align 4
  %block.invoke14 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block9, i64 0, i32 3
  store i8* bitcast (void (i8*)* @__crasher_block_invoke1 to i8*), i8** %block.invoke14, align 8
  %block.d15 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block9, i64 0, i32 4
  store %struct.__block_d* bitcast ({ i64, i64, i8*, i8*, i8*, i8* }* @__block_d_tmp5 to %struct.__block_d*), %struct.__block_d** %block.d15, align 8
  %foo18 = call i8* @llvm.objc.retain(i8* %call) nounwind
  store i8* %call, i8** %foo10, align 8
  %foo20 = bitcast <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block9 to i8*
  %foo21 = call i8* @llvm.objc.retainBlock(i8* %foo20) nounwind
  call void @use(i8* %foo21), !clang.arc.no_objc_arc_exceptions !0
  call void @llvm.objc.release(i8* %foo21) nounwind
  %strongdestroy25 = load i8*, i8** %foo10, align 8
  call void @llvm.objc.release(i8* %strongdestroy25) nounwind, !clang.imprecise_release !0
  call void @llvm.objc.release(i8* %call) nounwind, !clang.imprecise_release !0
  ret void
}

; CHECK: attributes [[NUW]] = { nounwind }
; CHECK: attributes #1 = { argmemonly nounwind }
; CHECK: attributes #2 = { nonlazybind }