; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -loop-idiom -S %s | FileCheck %s
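
; reuse_cast_1: the stores of 0.0 in %loop.1 are promoted to a memset in
; %loop.1.preheader, with the i8* cast of the stored-to pointer placed in
; %loop.0 (behavior as captured by the autogenerated CHECK lines below).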
define void @reuse_cast_1(float** %ptr, i1 %c) {
; CHECK-LABEL: @reuse_cast_1(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br label [[LOOP_0:%.*]]
; CHECK:       loop.0:
; CHECK-NEXT:    [[TMP:%.*]] = load float*, float** [[PTR:%.*]], align 8
; CHECK-NEXT:    [[TMP2:%.*]] = bitcast float* [[TMP]] to i8*
; CHECK-NEXT:    br i1 [[C:%.*]], label [[LOOP_2_PREHEADER:%.*]], label [[LOOP_1_PREHEADER:%.*]]
; CHECK:       loop.1.preheader:
; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 4 [[TMP2]], i8 0, i64 400, i1 false)
; CHECK-NEXT:    br label [[LOOP_1:%.*]]
; CHECK:       loop.2.preheader:
; CHECK-NEXT:    br label [[LOOP_2:%.*]]
; CHECK:       loop.1:
; CHECK-NEXT:    [[IV_1:%.*]] = phi i64 [ [[IV_1_NEXT:%.*]], [[LOOP_1]] ], [ 0, [[LOOP_1_PREHEADER]] ]
; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds float, float* [[TMP]], i64 [[IV_1]]
; CHECK-NEXT:    [[IV_1_NEXT]] = add nuw nsw i64 [[IV_1]], 1
; CHECK-NEXT:    [[C_1:%.*]] = icmp ne i64 [[IV_1_NEXT]], 100
; CHECK-NEXT:    br i1 [[C_1]], label [[LOOP_1]], label [[LOOP_0_LATCH_LOOPEXIT1:%.*]]
; CHECK:       loop.2:
; CHECK-NEXT:    [[IV_2:%.*]] = phi i64 [ [[IV_2_NEXT:%.*]], [[LOOP_2]] ], [ 0, [[LOOP_2_PREHEADER]] ]
; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds float, float* [[TMP]], i64 [[IV_2]]
; CHECK-NEXT:    store float 0.000000e+00, float* [[TMP10]], align 4
; CHECK-NEXT:    [[TMP11:%.*]] = load float*, float** [[PTR]], align 8
; CHECK-NEXT:    [[TMP12:%.*]] = getelementptr inbounds float, float* [[TMP11]], i64 [[IV_2]]
; CHECK-NEXT:    [[TMP13:%.*]] = load float, float* [[TMP12]], align 4
; CHECK-NEXT:    [[IV_2_NEXT]] = add nuw nsw i64 [[IV_2]], 1
; CHECK-NEXT:    [[C_2:%.*]] = icmp ne i64 [[IV_2_NEXT]], 100
; CHECK-NEXT:    br i1 [[C_2]], label [[LOOP_2]], label [[LOOP_0_LATCH_LOOPEXIT:%.*]]
; CHECK:       loop.0.latch.loopexit:
; CHECK-NEXT:    br label [[LOOP_0_LATCH:%.*]]
; CHECK:       loop.0.latch.loopexit1:
; CHECK-NEXT:    br label [[LOOP_0_LATCH]]
; CHECK:       loop.0.latch:
; CHECK-NEXT:    [[C_0:%.*]] = call i1 @cond()
; CHECK-NEXT:    br i1 [[C_0]], label [[LOOP_0]], label [[END:%.*]]
; CHECK:       end:
; CHECK-NEXT:    ret void
;
entry:
  br label %loop.0

loop.0:                                           ; preds = %loop.0.latch, %entry
  %tmp = load float*, float** %ptr, align 8
  br i1 %c, label %loop.2, label %loop.1

loop.1:                                           ; preds = %loop.1, %loop.0
  %iv.1 = phi i64 [ %iv.1.next, %loop.1 ], [ 0, %loop.0 ]
  %tmp4 = getelementptr inbounds float, float* %tmp, i64 %iv.1
  store float 0.000000e+00, float* %tmp4, align 4
  %iv.1.next = add nuw nsw i64 %iv.1, 1
  %c.1 = icmp ne i64 %iv.1.next, 100
  br i1 %c.1, label %loop.1, label %loop.0.latch
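
; The stores in %loop.2 below are left in place (no memset is emitted for this
; loop in the CHECK lines above), presumably because the loads of %ptr and of
; the indexed element inside the loop may alias the stored locations.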
loop.2:                                           ; preds = %loop.2, %loop.0
  %iv.2 = phi i64 [ %iv.2.next, %loop.2 ], [ 0, %loop.0 ]
  %tmp10 = getelementptr inbounds float, float* %tmp, i64 %iv.2
  store float 0.000000e+00, float* %tmp10, align 4
  %tmp11 = load float*, float** %ptr, align 8
  %tmp12 = getelementptr inbounds float, float* %tmp11, i64 %iv.2
  %tmp13 = load float, float* %tmp12, align 4
  %iv.2.next = add nuw nsw i64 %iv.2, 1
  %c.2 = icmp ne i64 %iv.2.next, 100
  br i1 %c.2, label %loop.2, label %loop.0.latch

loop.0.latch:                                     ; preds = %loop.2, %loop.1
  %c.0 = call i1 @cond()
  br i1 %c.0, label %loop.0, label %end

end:                                              ; preds = %loop.0.latch
  ret void
}

declare i1 @cond()

declare void @use.i8(i8*)

declare void @use.i1(i1)
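
; reuse_cast_2: the copy loop is promoted to a memcpy in the entry block. The
; existing i8* cast of %stack (%cast.to.reuse) stays and keeps feeding
; @use.i8, while the expanded memcpy arguments get their own casts of %stack
; and %ptr.1.start (behavior as captured by the autogenerated CHECK lines
; below).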
define void @reuse_cast_2(i32 %x, i32* %ptr.1.start) {
; CHECK-LABEL: @reuse_cast_2(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[PTR_1_START2:%.*]] = bitcast i32* [[PTR_1_START:%.*]] to i8*
; CHECK-NEXT:    [[STACK:%.*]] = alloca [2 x i32], align 4
; CHECK-NEXT:    [[STACK1:%.*]] = bitcast [2 x i32]* [[STACK]] to i8*
; CHECK-NEXT:    [[C_0:%.*]] = icmp sgt i32 [[X:%.*]], 0
; CHECK-NEXT:    [[CAST_TO_REUSE:%.*]] = bitcast [2 x i32]* [[STACK]] to i8*
; CHECK-NEXT:    [[PTR_2_START:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[STACK]], i64 0, i64 0
; CHECK-NEXT:    call void @use.i8(i8* [[CAST_TO_REUSE]])
; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[STACK1]], i8* align 4 [[PTR_1_START2]], i64 8, i1 false)
; CHECK-NEXT:    br label [[LOOP:%.*]]
; CHECK:       loop:
; CHECK-NEXT:    [[IV:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[PTR_1:%.*]] = phi i32* [ [[PTR_1_START]], [[ENTRY]] ], [ [[PTR_1_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[PTR_2:%.*]] = phi i32* [ [[PTR_2_START]], [[ENTRY]] ], [ [[PTR_2_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[PTR_1_NEXT]] = getelementptr inbounds i32, i32* [[PTR_1]], i64 1
; CHECK-NEXT:    [[LV:%.*]] = load i32, i32* [[PTR_1]], align 4
; CHECK-NEXT:    [[PTR_2_NEXT]] = getelementptr inbounds i32, i32* [[PTR_2]], i64 1
; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1
; CHECK-NEXT:    [[C_1:%.*]] = icmp eq i32 [[IV]], 0
; CHECK-NEXT:    br i1 [[C_1]], label [[LOOP]], label [[EXIT:%.*]]
; CHECK:       exit:
; CHECK-NEXT:    call void @use.i1(i1 [[C_0]])
; CHECK-NEXT:    ret void
;
entry:
  %stack = alloca [2 x i32], align 4
  %c.0 = icmp sgt i32 %x, 0
  %cast.to.reuse = bitcast [2 x i32]* %stack to i8*
  %ptr.2.start = getelementptr inbounds [2 x i32], [2 x i32]* %stack, i64 0, i64 0
  call void @use.i8(i8* %cast.to.reuse)
  br label %loop

loop:                                             ; preds = %loop, %entry
  %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
  %ptr.1 = phi i32* [ %ptr.1.start, %entry ], [ %ptr.1.next, %loop ]
  %ptr.2 = phi i32* [ %ptr.2.start, %entry ], [ %ptr.2.next, %loop ]
  %ptr.1.next = getelementptr inbounds i32, i32* %ptr.1, i64 1
  %lv = load i32, i32* %ptr.1, align 4
  %ptr.2.next = getelementptr inbounds i32, i32* %ptr.2, i64 1
  store i32 %lv, i32* %ptr.2, align 4
  %iv.next = add nuw nsw i32 %iv, 1
  %c.1 = icmp eq i32 %iv, 0
  br i1 %c.1, label %loop, label %exit

exit:                                             ; preds = %loop
  call void @use.i1(i1 %c.0)
  ret void
}