1 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
2 ; RUN: opt < %s -passes=newgvn -S | FileCheck %s
; Minimal C++-style class: a single vptr field (see the vtable loads below).
4 %struct.A = type { ptr }
; Itanium-ABI-shaped vtable for A: [offset-to-top (null), RTTI ptr, A::foo slot].
; available_externally lets the devirtualization tests read the @_ZN1A3fooEv slot.
5 @_ZTV1A = available_externally unnamed_addr constant [3 x ptr] [ptr null, ptr @_ZTI1A, ptr @_ZN1A3fooEv], align 8
6 @_ZTI1A = external constant ptr
; Opaque external global used by @testGlobal and as a loop bound in @handling_loops.
8 @unknownPtr = external global i8
11 ; CHECK-LABEL: define i8 @simple() {
13 ; CHECK-NEXT: [[PTR:%.*]] = alloca i8, align 1
14 ; CHECK-NEXT: store i8 42, ptr [[PTR]], align 1, !invariant.group [[META0:![0-9]+]]
15 ; CHECK-NEXT: call void @foo(ptr [[PTR]])
16 ; CHECK-NEXT: ret i8 42
20 store i8 42, ptr %ptr, !invariant.group !0
21 call void @foo(ptr %ptr)
23 %a = load i8, ptr %ptr, !invariant.group !0
24 %b = load i8, ptr %ptr, !invariant.group !0
25 %c = load i8, ptr %ptr, !invariant.group !0
29 define i8 @optimizable1() {
30 ; CHECK-LABEL: define i8 @optimizable1() {
32 ; CHECK-NEXT: [[PTR:%.*]] = alloca i8, align 1
33 ; CHECK-NEXT: store i8 42, ptr [[PTR]], align 1, !invariant.group [[META0]]
34 ; CHECK-NEXT: [[PTR2:%.*]] = call ptr @llvm.launder.invariant.group.p0(ptr [[PTR]])
35 ; CHECK-NEXT: call void @foo(ptr [[PTR2]])
36 ; CHECK-NEXT: ret i8 42
40 store i8 42, ptr %ptr, !invariant.group !0
41 %ptr2 = call ptr @llvm.launder.invariant.group.p0(ptr %ptr)
42 %a = load i8, ptr %ptr, !invariant.group !0
44 call void @foo(ptr %ptr2); call to use %ptr2
48 define i8 @optimizable2() {
49 ; CHECK-LABEL: define i8 @optimizable2() {
51 ; CHECK-NEXT: [[PTR:%.*]] = alloca i8, align 1
52 ; CHECK-NEXT: store i8 42, ptr [[PTR]], align 1, !invariant.group [[META0]]
53 ; CHECK-NEXT: call void @foo(ptr [[PTR]])
54 ; CHECK-NEXT: store i8 13, ptr [[PTR]], align 1
55 ; CHECK-NEXT: call void @bar(i8 13)
56 ; CHECK-NEXT: call void @foo(ptr [[PTR]])
57 ; CHECK-NEXT: ret i8 42
61 store i8 42, ptr %ptr, !invariant.group !0
62 call void @foo(ptr %ptr)
64 store i8 13, ptr %ptr ; can't use this store with invariant.group
65 %a = load i8, ptr %ptr
66 call void @bar(i8 %a) ; call to use %a
68 call void @foo(ptr %ptr)
69 %b = load i8, ptr %ptr, !invariant.group !0
74 define i1 @proveEqualityForStrip(ptr %a) {
75 ; CHECK-LABEL: define i1 @proveEqualityForStrip(
76 ; CHECK-SAME: ptr [[A:%.*]]) {
77 ; CHECK-NEXT: ret i1 true
79 %b1 = call ptr @llvm.strip.invariant.group.p0(ptr %a)
80 %b2 = call ptr @llvm.strip.invariant.group.p0(ptr %a)
81 %r = icmp eq ptr %b1, %b2
85 define i8 @unoptimizable1() {
86 ; CHECK-LABEL: define i8 @unoptimizable1() {
88 ; CHECK-NEXT: [[PTR:%.*]] = alloca i8, align 1
89 ; CHECK-NEXT: store i8 42, ptr [[PTR]], align 1
90 ; CHECK-NEXT: call void @foo(ptr [[PTR]])
91 ; CHECK-NEXT: [[A:%.*]] = load i8, ptr [[PTR]], align 1, !invariant.group [[META0]]
92 ; CHECK-NEXT: ret i8 [[A]]
97 call void @foo(ptr %ptr)
98 %a = load i8, ptr %ptr, !invariant.group !0
102 ; NewGVN doesn't support assumes.
103 define void @indirectLoads() {
104 ; CHECK-LABEL: define void @indirectLoads() {
106 ; CHECK-NEXT: [[A:%.*]] = alloca ptr, align 8
107 ; CHECK-NEXT: [[CALL:%.*]] = call ptr @getPointer(ptr null)
108 ; CHECK-NEXT: call void @_ZN1AC1Ev(ptr [[CALL]])
109 ; CHECK-NEXT: [[VTABLE:%.*]] = load ptr, ptr [[CALL]], align 8, !invariant.group [[META0]]
110 ; CHECK-NEXT: [[CMP_VTABLES:%.*]] = icmp eq ptr [[VTABLE]], getelementptr inbounds ([3 x ptr], ptr @_ZTV1A, i64 0, i64 2)
111 ; CHECK-NEXT: call void @llvm.assume(i1 [[CMP_VTABLES]])
112 ; CHECK-NEXT: store ptr [[CALL]], ptr [[A]], align 8
113 ; CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[VTABLE]], align 8
114 ; CHECK-NEXT: call void [[TMP0]](ptr [[CALL]])
115 ; CHECK-NEXT: [[VTABLE2:%.*]] = load ptr, ptr [[CALL]], align 8, !invariant.group [[META0]]
116 ; CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[VTABLE2]], align 8
117 ; CHECK-NEXT: call void [[TMP1]](ptr [[CALL]])
118 ; CHECK-NEXT: [[VTABLE4:%.*]] = load ptr, ptr [[CALL]], align 8, !invariant.group [[META0]]
119 ; CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[VTABLE4]], align 8
120 ; CHECK-NEXT: call void [[TMP2]](ptr [[CALL]])
121 ; CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[VTABLE]], align 8
122 ; CHECK-NEXT: call void [[TMP3]](ptr [[CALL]])
123 ; CHECK-NEXT: ret void
126 %a = alloca ptr, align 8
128 %call = call ptr @getPointer(ptr null)
129 call void @_ZN1AC1Ev(ptr %call)
131 %vtable = load ptr, ptr %call, align 8, !invariant.group !0
132 %cmp.vtables = icmp eq ptr %vtable, getelementptr inbounds ([3 x ptr], ptr @_ZTV1A, i64 0, i64 2)
133 call void @llvm.assume(i1 %cmp.vtables)
135 store ptr %call, ptr %a, align 8
136 %0 = load ptr, ptr %a, align 8
138 ; FIXME: call void @_ZN1A3fooEv(
139 %vtable1 = load ptr, ptr %0, align 8, !invariant.group !0
140 %1 = load ptr, ptr %vtable1, align 8
142 %2 = load ptr, ptr %a, align 8
144 ; FIXME: call void @_ZN1A3fooEv(
145 %vtable2 = load ptr, ptr %2, align 8, !invariant.group !0
146 %3 = load ptr, ptr %vtable2, align 8
149 %4 = load ptr, ptr %a, align 8
151 %vtable4 = load ptr, ptr %4, align 8, !invariant.group !0
152 %5 = load ptr, ptr %vtable4, align 8
153 ; FIXME: call void @_ZN1A3fooEv(
156 %vtable5 = load ptr, ptr %call, align 8, !invariant.group !0
157 %6 = load ptr, ptr %vtable5, align 8
158 ; FIXME: call void @_ZN1A3fooEv(
164 ; NewGVN won't CSE loads with different pointee types.
165 define void @combiningBitCastWithLoad() {
166 ; CHECK-LABEL: define void @combiningBitCastWithLoad() {
168 ; CHECK-NEXT: [[A:%.*]] = alloca ptr, align 8
169 ; CHECK-NEXT: [[CALL:%.*]] = call ptr @getPointer(ptr null)
170 ; CHECK-NEXT: call void @_ZN1AC1Ev(ptr [[CALL]])
171 ; CHECK-NEXT: [[VTABLE:%.*]] = load ptr, ptr [[CALL]], align 8, !invariant.group [[META0]]
172 ; CHECK-NEXT: store ptr [[CALL]], ptr [[A]], align 8
173 ; CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[VTABLE]], align 8
174 ; CHECK-NEXT: call void [[TMP0]](ptr [[CALL]])
175 ; CHECK-NEXT: ret void
178 %a = alloca ptr, align 8
180 %call = call ptr @getPointer(ptr null)
181 call void @_ZN1AC1Ev(ptr %call)
183 %vtable = load ptr, ptr %call, align 8, !invariant.group !0
184 %cmp.vtables = icmp eq ptr %vtable, getelementptr inbounds ([3 x ptr], ptr @_ZTV1A, i64 0, i64 2)
186 store ptr %call, ptr %a, align 8
187 ; FIXME-NOT: !invariant.group
188 %0 = load ptr, ptr %a, align 8
190 %vtable1 = load ptr, ptr %0, align 8, !invariant.group !0
191 %1 = load ptr, ptr %vtable1, align 8
197 define void @loadCombine() {
198 ; CHECK-LABEL: define void @loadCombine() {
200 ; CHECK-NEXT: [[PTR:%.*]] = alloca i8, align 1
201 ; CHECK-NEXT: store i8 42, ptr [[PTR]], align 1
202 ; CHECK-NEXT: call void @foo(ptr [[PTR]])
203 ; CHECK-NEXT: [[A:%.*]] = load i8, ptr [[PTR]], align 1, !invariant.group [[META0]]
204 ; CHECK-NEXT: call void @bar(i8 [[A]])
205 ; CHECK-NEXT: call void @bar(i8 [[A]])
206 ; CHECK-NEXT: ret void
210 store i8 42, ptr %ptr
211 call void @foo(ptr %ptr)
212 %a = load i8, ptr %ptr, !invariant.group !0
213 %b = load i8, ptr %ptr, !invariant.group !0
214 call void @bar(i8 %a)
215 call void @bar(i8 %b)
219 define void @loadCombine1() {
220 ; CHECK-LABEL: define void @loadCombine1() {
222 ; CHECK-NEXT: [[PTR:%.*]] = alloca i8, align 1
223 ; CHECK-NEXT: store i8 42, ptr [[PTR]], align 1
224 ; CHECK-NEXT: call void @foo(ptr [[PTR]])
225 ; CHECK-NEXT: [[C:%.*]] = load i8, ptr [[PTR]], align 1, !invariant.group [[META0]]
226 ; CHECK-NEXT: call void @bar(i8 [[C]])
227 ; CHECK-NEXT: call void @bar(i8 [[C]])
228 ; CHECK-NEXT: ret void
232 store i8 42, ptr %ptr
233 call void @foo(ptr %ptr)
234 %c = load i8, ptr %ptr
235 %d = load i8, ptr %ptr, !invariant.group !0
236 call void @bar(i8 %c)
237 call void @bar(i8 %d)
241 define void @loadCombine2() {
242 ; CHECK-LABEL: define void @loadCombine2() {
244 ; CHECK-NEXT: [[PTR:%.*]] = alloca i8, align 1
245 ; CHECK-NEXT: store i8 42, ptr [[PTR]], align 1
246 ; CHECK-NEXT: call void @foo(ptr [[PTR]])
247 ; CHECK-NEXT: [[E:%.*]] = load i8, ptr [[PTR]], align 1, !invariant.group [[META0]]
248 ; CHECK-NEXT: call void @bar(i8 [[E]])
249 ; CHECK-NEXT: call void @bar(i8 [[E]])
250 ; CHECK-NEXT: ret void
254 store i8 42, ptr %ptr
255 call void @foo(ptr %ptr)
256 %e = load i8, ptr %ptr, !invariant.group !0
257 %f = load i8, ptr %ptr
258 call void @bar(i8 %e)
259 call void @bar(i8 %f)
263 define void @loadCombine3() {
264 ; CHECK-LABEL: define void @loadCombine3() {
266 ; CHECK-NEXT: [[PTR:%.*]] = alloca i8, align 1
267 ; CHECK-NEXT: store i8 42, ptr [[PTR]], align 1
268 ; CHECK-NEXT: call void @foo(ptr [[PTR]])
269 ; CHECK-NEXT: [[E:%.*]] = load i8, ptr [[PTR]], align 1, !invariant.group [[META0]]
270 ; CHECK-NEXT: call void @bar(i8 [[E]])
271 ; CHECK-NEXT: call void @bar(i8 [[E]])
272 ; CHECK-NEXT: ret void
276 store i8 42, ptr %ptr
277 call void @foo(ptr %ptr)
278 %e = load i8, ptr %ptr, !invariant.group !0
279 %f = load i8, ptr %ptr, !invariant.group !0
280 call void @bar(i8 %e)
281 call void @bar(i8 %f)
285 define i8 @unoptimizable2() {
286 ; CHECK-LABEL: define i8 @unoptimizable2() {
288 ; CHECK-NEXT: [[PTR:%.*]] = alloca i8, align 1
289 ; CHECK-NEXT: store i8 42, ptr [[PTR]], align 1
290 ; CHECK-NEXT: call void @foo(ptr [[PTR]])
291 ; CHECK-NEXT: [[A:%.*]] = load i8, ptr [[PTR]], align 1
292 ; CHECK-NEXT: call void @foo(ptr [[PTR]])
293 ; CHECK-NEXT: ret i8 [[A]]
297 store i8 42, ptr %ptr
298 call void @foo(ptr %ptr)
299 %a = load i8, ptr %ptr
300 call void @foo(ptr %ptr)
301 %b = load i8, ptr %ptr, !invariant.group !0
306 define i8 @unoptimizable3() {
307 ; CHECK-LABEL: define i8 @unoptimizable3() {
309 ; CHECK-NEXT: [[PTR:%.*]] = alloca i8, align 1
310 ; CHECK-NEXT: store i8 42, ptr [[PTR]], align 1, !invariant.group [[META0]]
311 ; CHECK-NEXT: [[PTR2:%.*]] = call ptr @getPointer(ptr [[PTR]])
312 ; CHECK-NEXT: [[A:%.*]] = load i8, ptr [[PTR2]], align 1, !invariant.group [[META0]]
313 ; CHECK-NEXT: ret i8 [[A]]
317 store i8 42, ptr %ptr, !invariant.group !0
318 %ptr2 = call ptr @getPointer(ptr %ptr)
319 %a = load i8, ptr %ptr2, !invariant.group !0
324 ; FIXME: NewGVN conservatively treats the launder intrinsic as a clobber here,
; so the stored value is not forwarded to the load even though no
; !invariant.group metadata is involved.
325 define i8 @optimizable4() {
326 ; CHECK-LABEL: define i8 @optimizable4() {
328 ; CHECK-NEXT: [[PTR:%.*]] = alloca i8, align 1
329 ; CHECK-NEXT: store i8 42, ptr [[PTR]], align 1
330 ; CHECK-NEXT: [[PTR2:%.*]] = call ptr @llvm.launder.invariant.group.p0(ptr [[PTR]])
331 ; CHECK-NEXT: [[A:%.*]] = load i8, ptr [[PTR2]], align 1
332 ; CHECK-NEXT: ret i8 [[A]]
336 store i8 42, ptr %ptr
337 %ptr2 = call ptr @llvm.launder.invariant.group.p0(ptr %ptr)
339 %a = load i8, ptr %ptr2
345 define i8 @volatile1() {
346 ; CHECK-LABEL: define i8 @volatile1() {
348 ; CHECK-NEXT: [[PTR:%.*]] = alloca i8, align 1
349 ; CHECK-NEXT: store i8 42, ptr [[PTR]], align 1, !invariant.group [[META0]]
350 ; CHECK-NEXT: call void @foo(ptr [[PTR]])
351 ; CHECK-NEXT: [[B:%.*]] = load volatile i8, ptr [[PTR]], align 1
352 ; CHECK-NEXT: call void @bar(i8 [[B]])
353 ; CHECK-NEXT: [[C:%.*]] = load volatile i8, ptr [[PTR]], align 1, !invariant.group [[META0]]
354 ; CHECK-NEXT: call void @bar(i8 [[C]])
355 ; CHECK-NEXT: ret i8 42
359 store i8 42, ptr %ptr, !invariant.group !0
360 call void @foo(ptr %ptr)
361 %a = load i8, ptr %ptr, !invariant.group !0
362 %b = load volatile i8, ptr %ptr
363 call void @bar(i8 %b)
365 %c = load volatile i8, ptr %ptr, !invariant.group !0
366 ; We might be able to optimize this, but nobody cares
367 call void @bar(i8 %c)
371 define i8 @volatile2() {
372 ; CHECK-LABEL: define i8 @volatile2() {
374 ; CHECK-NEXT: [[PTR:%.*]] = alloca i8, align 1
375 ; CHECK-NEXT: store i8 42, ptr [[PTR]], align 1, !invariant.group [[META0]]
376 ; CHECK-NEXT: call void @foo(ptr [[PTR]])
377 ; CHECK-NEXT: [[B:%.*]] = load volatile i8, ptr [[PTR]], align 1
378 ; CHECK-NEXT: call void @bar(i8 [[B]])
379 ; CHECK-NEXT: [[C:%.*]] = load volatile i8, ptr [[PTR]], align 1, !invariant.group [[META0]]
380 ; CHECK-NEXT: call void @bar(i8 [[C]])
381 ; CHECK-NEXT: ret i8 42
385 store i8 42, ptr %ptr, !invariant.group !0
386 call void @foo(ptr %ptr)
387 %a = load i8, ptr %ptr, !invariant.group !0
388 %b = load volatile i8, ptr %ptr
389 call void @bar(i8 %b)
391 %c = load volatile i8, ptr %ptr, !invariant.group !0
392 ; We might be able to optimize this, but nobody cares
393 call void @bar(i8 %c)
398 ; CHECK-LABEL: define void @fun() {
400 ; CHECK-NEXT: [[PTR:%.*]] = alloca i8, align 1
401 ; CHECK-NEXT: store i8 42, ptr [[PTR]], align 1, !invariant.group [[META0]]
402 ; CHECK-NEXT: call void @foo(ptr [[PTR]])
403 ; CHECK-NEXT: call void @bar(i8 42)
404 ; CHECK-NEXT: ret void
408 store i8 42, ptr %ptr, !invariant.group !0
409 call void @foo(ptr %ptr)
411 %a = load i8, ptr %ptr, !invariant.group !0 ; Can assume that value under %ptr didn't change
412 call void @bar(i8 %a)
417 ; FIXME: NewGVN doesn't run instsimplify on a load from a vtable definition?
418 ; This test checks if invariant.group understands gep with zeros
419 define void @testGEP0() {
420 ; CHECK-LABEL: define void @testGEP0() {
421 ; CHECK-NEXT: [[A:%.*]] = alloca [[STRUCT_A:%.*]], align 8
422 ; CHECK-NEXT: store ptr getelementptr inbounds ([3 x ptr], ptr @_ZTV1A, i64 0, i64 2), ptr [[A]], align 8, !invariant.group [[META0]]
423 ; CHECK-NEXT: call void @_ZN1A3fooEv(ptr nonnull dereferenceable(8) [[A]])
424 ; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr @unknownPtr, align 4
425 ; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i8 [[TMP1]], 0
426 ; CHECK-NEXT: br i1 [[TMP2]], label [[_Z1GR1A_EXIT:%.*]], label [[TMP3:%.*]]
428 ; CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr getelementptr inbounds ([3 x ptr], ptr @_ZTV1A, i64 0, i64 2), align 8
429 ; CHECK-NEXT: call void [[TMP4]](ptr nonnull [[A]])
430 ; CHECK-NEXT: br label [[_Z1GR1A_EXIT]]
431 ; CHECK: _Z1gR1A.exit:
432 ; CHECK-NEXT: ret void
434 %a = alloca %struct.A, align 8
435 store ptr getelementptr inbounds ([3 x ptr], ptr @_ZTV1A, i64 0, i64 2), ptr %a, align 8, !invariant.group !0
436 call void @_ZN1A3fooEv(ptr nonnull dereferenceable(8) %a) ; This call may change vptr
437 %1 = load i8, ptr @unknownPtr, align 4
438 %2 = icmp eq i8 %1, 0
439 br i1 %2, label %_Z1gR1A.exit, label %3
441 ; This should be devirtualized by invariant.group
442 %4 = load ptr, ptr %a, align 8, !invariant.group !0
443 %5 = load ptr, ptr %4, align 8
444 ; FIXME: call void @_ZN1A3fooEv(ptr nonnull %a)
445 call void %5(ptr nonnull %a)
446 br label %_Z1gR1A.exit
448 _Z1gR1A.exit: ; preds = %0, %3
452 ; Check if no optimizations are performed with global pointers.
453 ; FIXME: we could do the optimizations if we checked whether the dependency
454 ; comes from the same function.
455 define void @testGlobal() {
456 ; CHECK-LABEL: define void @testGlobal() {
457 ; CHECK-NEXT: [[A:%.*]] = load i8, ptr @unknownPtr, align 1, !invariant.group [[META0]]
458 ; CHECK-NEXT: call void @foo2(ptr @unknownPtr, i8 [[A]])
459 ; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr @unknownPtr, align 1, !invariant.group [[META0]]
460 ; CHECK-NEXT: call void @bar(i8 [[TMP1]])
461 ; CHECK-NEXT: call void @fooBit(ptr @unknownPtr, i1 true)
462 ; CHECK-NEXT: [[TMP2:%.*]] = load i1, ptr @unknownPtr, align 1, !invariant.group [[META0]]
463 ; CHECK-NEXT: call void @fooBit(ptr @unknownPtr, i1 [[TMP2]])
464 ; CHECK-NEXT: [[TMP3:%.*]] = load i1, ptr @unknownPtr, align 1, !invariant.group [[META0]]
465 ; CHECK-NEXT: call void @fooBit(ptr @unknownPtr, i1 [[TMP3]])
466 ; CHECK-NEXT: ret void
468 %a = load i8, ptr @unknownPtr, !invariant.group !0
469 call void @foo2(ptr @unknownPtr, i8 %a)
470 %1 = load i8, ptr @unknownPtr, !invariant.group !0
471 call void @bar(i8 %1)
473 call void @fooBit(ptr @unknownPtr, i1 1)
474 ; NOTE: a regex was historically needed here because of bitcast
; canonicalization; with opaque pointers no bitcasts remain.
475 %2 = load i1, ptr @unknownPtr, !invariant.group !0
476 call void @fooBit(ptr @unknownPtr, i1 %2)
477 %3 = load i1, ptr @unknownPtr, !invariant.group !0
478 call void @fooBit(ptr @unknownPtr, i1 %3)
482 ; Might be similar to above where NewGVN doesn't handle loads of different types from the same location.
483 ; Not super important anyway.
484 define void @testTrunc() {
485 ; CHECK-LABEL: define void @testTrunc() {
486 ; CHECK-NEXT: [[A:%.*]] = alloca i8, align 1
487 ; CHECK-NEXT: call void @foo(ptr [[A]])
488 ; CHECK-NEXT: [[B:%.*]] = load i8, ptr [[A]], align 1, !invariant.group [[META0]]
489 ; CHECK-NEXT: call void @foo2(ptr [[A]], i8 [[B]])
490 ; CHECK-NEXT: call void @bar(i8 [[B]])
491 ; CHECK-NEXT: call void @fooBit(ptr [[A]], i1 true)
492 ; CHECK-NEXT: [[TMP1:%.*]] = load i1, ptr [[A]], align 1, !invariant.group [[META0]]
493 ; CHECK-NEXT: call void @fooBit(ptr [[A]], i1 [[TMP1]])
494 ; CHECK-NEXT: call void @fooBit(ptr [[A]], i1 [[TMP1]])
495 ; CHECK-NEXT: ret void
498 call void @foo(ptr %a)
499 %b = load i8, ptr %a, !invariant.group !0
500 call void @foo2(ptr %a, i8 %b)
502 %1 = load i8, ptr %a, !invariant.group !0
503 call void @bar(i8 %1)
505 call void @fooBit(ptr %a, i1 1)
506 ; FIXME: %1 = trunc i8 %b to i1
507 %2 = load i1, ptr %a, !invariant.group !0
508 ; FIXME-NEXT: call void @fooBit(ptr %a, i1 %1)
509 call void @fooBit(ptr %a, i1 %2)
510 %3 = load i1, ptr %a, !invariant.group !0
511 ; FIXME-NEXT: call void @fooBit(ptr %a, i1 %1)
512 call void @fooBit(ptr %a, i1 %3)
516 ; See comment in @testGEP0 on what NewGVN is lacking.
517 define void @handling_loops() {
518 ; CHECK-LABEL: define void @handling_loops() {
519 ; CHECK-NEXT: [[A:%.*]] = alloca [[STRUCT_A:%.*]], align 8
520 ; CHECK-NEXT: store ptr getelementptr inbounds ([3 x ptr], ptr @_ZTV1A, i64 0, i64 2), ptr [[A]], align 8, !invariant.group [[META0]]
521 ; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr @unknownPtr, align 4
522 ; CHECK-NEXT: [[TMP2:%.*]] = icmp sgt i8 [[TMP1]], 0
523 ; CHECK-NEXT: br i1 [[TMP2]], label [[DOTLR_PH_I:%.*]], label [[_Z2G2R1A_EXIT:%.*]]
525 ; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i8 [[TMP1]], 1
526 ; CHECK-NEXT: br i1 [[TMP3]], label [[DOT_CRIT_EDGE_PREHEADER:%.*]], label [[_Z2G2R1A_EXIT]]
527 ; CHECK: ._crit_edge.preheader:
528 ; CHECK-NEXT: br label [[DOT_CRIT_EDGE:%.*]]
529 ; CHECK: ._crit_edge:
530 ; CHECK-NEXT: [[TMP4:%.*]] = phi i8 [ [[TMP6:%.*]], [[DOT_CRIT_EDGE]] ], [ 1, [[DOT_CRIT_EDGE_PREHEADER]] ]
531 ; CHECK-NEXT: [[TMP5:%.*]] = load ptr, ptr getelementptr inbounds ([3 x ptr], ptr @_ZTV1A, i64 0, i64 2), align 8
532 ; CHECK-NEXT: call void [[TMP5]](ptr nonnull [[A]])
533 ; CHECK-NEXT: [[TMP6]] = add nuw nsw i8 [[TMP4]], 1
534 ; CHECK-NEXT: [[TMP7:%.*]] = load i8, ptr @unknownPtr, align 4
535 ; CHECK-NEXT: [[TMP8:%.*]] = icmp slt i8 [[TMP6]], [[TMP7]]
536 ; CHECK-NEXT: br i1 [[TMP8]], label [[DOT_CRIT_EDGE]], label [[_Z2G2R1A_EXIT_LOOPEXIT:%.*]]
537 ; CHECK: _Z2g2R1A.exit.loopexit:
538 ; CHECK-NEXT: br label [[_Z2G2R1A_EXIT]]
539 ; CHECK: _Z2g2R1A.exit:
540 ; CHECK-NEXT: ret void
542 %a = alloca %struct.A, align 8
543 store ptr getelementptr inbounds ([3 x ptr], ptr @_ZTV1A, i64 0, i64 2), ptr %a, align 8, !invariant.group !0
544 %1 = load i8, ptr @unknownPtr, align 4
545 %2 = icmp sgt i8 %1, 0
546 br i1 %2, label %.lr.ph.i, label %_Z2g2R1A.exit
548 .lr.ph.i: ; preds = %0
549 %3 = load i8, ptr @unknownPtr, align 4
550 %4 = icmp sgt i8 %3, 1
551 br i1 %4, label %._crit_edge.preheader, label %_Z2g2R1A.exit
553 ._crit_edge.preheader: ; preds = %.lr.ph.i
554 br label %._crit_edge
556 ._crit_edge: ; preds = %._crit_edge.preheader, %._crit_edge
557 %5 = phi i8 [ %7, %._crit_edge ], [ 1, %._crit_edge.preheader ]
558 %.pre = load ptr, ptr %a, align 8, !invariant.group !0
559 %6 = load ptr, ptr %.pre, align 8
560 ; FIXME: call void @_ZN1A3fooEv(ptr nonnull %a)
561 call void %6(ptr nonnull %a) #3
562 ; FIXME-NOT: call void %
563 %7 = add nuw nsw i8 %5, 1
564 %8 = load i8, ptr @unknownPtr, align 4
565 %9 = icmp slt i8 %7, %8
566 br i1 %9, label %._crit_edge, label %_Z2g2R1A.exit.loopexit
568 _Z2g2R1A.exit.loopexit: ; preds = %._crit_edge
569 br label %_Z2g2R1A.exit
571 _Z2g2R1A.exit: ; preds = %_Z2g2R1A.exit.loopexit, %.lr.ph.i, %0
; Opaque external helpers: their bodies are unknown, so calls to them act as
; potential clobbers of memory in the tests above (except where
; !invariant.group permits forwarding across them).
576 declare void @foo(ptr)
577 declare void @foo2(ptr, i8)
578 declare void @bar(i8)
579 declare ptr @getPointer(ptr)
580 declare void @_ZN1A3fooEv(ptr)
581 declare void @_ZN1AC1Ev(ptr)
582 declare void @fooBit(ptr, i1)
; Intrinsics that produce a pointer not covered by (launder) or stripped of
; (strip) the !invariant.group assumption on the original pointer.
584 declare ptr @llvm.launder.invariant.group.p0(ptr)
585 declare ptr @llvm.strip.invariant.group.p0(ptr)
587 ; Function Attrs: nounwind
588 declare void @llvm.assume(i1 %cmp.vtables) #0
591 attributes #0 = { nounwind }
594 ; CHECK: [[META0]] = !{}