; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt -p loop-vectorize -S %s | FileCheck %s

target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"

; Test case for https://github.com/llvm/llvm-project/issues/106248.
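; The live-in feeding the reduction is expected to be folded via SCEV:
; zext(i16 -6) = 65530 and 65530 + (-65528) = 2, so the vector loop below is
; expected to multiply by splat (i64 2) rather than by a live-in value.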
define i64 @test_foldable_live_in_via_scev() {
; CHECK-LABEL: define i64 @test_foldable_live_in_via_scev() {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[CONV:%.*]] = zext i16 -6 to i64
; CHECK-NEXT: [[ADD:%.*]] = add nsw i64 [[CONV]], -65528
; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ splat (i64 1), %[[VECTOR_PH]] ], [ [[TMP0:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <2 x i64> [ splat (i64 1), %[[VECTOR_PH]] ], [ [[TMP1:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0]] = mul <2 x i64> [[VEC_PHI]], splat (i64 2)
; CHECK-NEXT: [[TMP1]] = mul <2 x i64> [[VEC_PHI1]], splat (i64 2)
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i32 [[INDEX_NEXT]], 96
; CHECK-NEXT: br i1 [[TMP2]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[BIN_RDX:%.*]] = mul <2 x i64> [[TMP1]], [[TMP0]]
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vector.reduce.mul.v2i64(<2 x i64> [[BIN_RDX]])
; CHECK-NEXT: br i1 false, label %[[EXIT:.*]], label %[[SCALAR_PH]]
; CHECK: [[SCALAR_PH]]:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 97, %[[MIDDLE_BLOCK]] ], [ 1, %[[ENTRY]] ]
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i64 [ [[TMP3]], %[[MIDDLE_BLOCK]] ], [ 1, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[RED:%.*]] = phi i64 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[MUL:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[MUL]] = mul nsw i64 [[RED]], [[ADD]]
; CHECK-NEXT: [[IV_NEXT]] = add nsw i32 [[IV]], 1
; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV_NEXT]], 100
; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: [[RET:%.*]] = phi i64 [ [[MUL]], %[[LOOP]] ], [ [[TMP3]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i64 [[RET]]
;
entry:
  %conv = zext i16 -6 to i64
  %add = add nsw i64 %conv, -65528
  br label %loop

loop:
  %iv = phi i32 [ 1, %entry ], [ %iv.next, %loop ]
  %red = phi i64 [ 1, %entry ], [ %mul, %loop ]
  %mul = mul nsw i64 %red, %add
  %iv.next = add nsw i32 %iv, 1
  %ec = icmp eq i32 %iv.next, 100
  br i1 %ec, label %exit, label %loop

exit:
  %ret = phi i64 [ %mul, %loop ]
  ret i64 %ret
}

; Test case for https://github.com/llvm/llvm-project/issues/109528.
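; %ext.0 = sext(i8 0) = 0, so the second operand of the lshr is expected to be
; folded to zero via SCEV; the vector body below checks lshr by zeroinitializer.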
define i64 @second_lshr_operand_zero_via_scev() {
; CHECK-LABEL: define i64 @second_lshr_operand_zero_via_scev() {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[EXT_0:%.*]] = sext i8 0 to i32
; CHECK-NEXT: br i1 true, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <2 x i64> [ <i64 0, i64 1>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP10:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <2 x i64> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP11:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND2:%.*]] = phi <2 x i32> [ <i32 0, i32 1>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT3:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[STEP_ADD:%.*]] = add <2 x i64> [[VEC_IND]], splat (i64 2)
; CHECK-NEXT: [[STEP_ADD4:%.*]] = add <2 x i32> [[VEC_IND2]], splat (i32 2)
; CHECK-NEXT: [[TMP0:%.*]] = icmp eq <2 x i64> [[VEC_IND]], zeroinitializer
; CHECK-NEXT: [[TMP1:%.*]] = icmp eq <2 x i64> [[STEP_ADD]], zeroinitializer
; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i64> [[VEC_IND]], zeroinitializer
; CHECK-NEXT: [[TMP3:%.*]] = and <2 x i64> [[STEP_ADD]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = lshr <2 x i32> [[VEC_IND2]], zeroinitializer
; CHECK-NEXT: [[TMP5:%.*]] = lshr <2 x i32> [[STEP_ADD4]], zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = zext <2 x i32> [[TMP4]] to <2 x i64>
; CHECK-NEXT: [[TMP7:%.*]] = zext <2 x i32> [[TMP5]] to <2 x i64>
; CHECK-NEXT: [[TMP8:%.*]] = select <2 x i1> [[TMP0]], <2 x i64> [[TMP2]], <2 x i64> [[TMP6]]
; CHECK-NEXT: [[TMP9:%.*]] = select <2 x i1> [[TMP1]], <2 x i64> [[TMP3]], <2 x i64> [[TMP7]]
; CHECK-NEXT: [[TMP10]] = or <2 x i64> [[TMP8]], [[VEC_PHI]]
; CHECK-NEXT: [[TMP11]] = or <2 x i64> [[TMP9]], [[VEC_PHI1]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[STEP_ADD]], splat (i64 2)
; CHECK-NEXT: [[VEC_IND_NEXT3]] = add <2 x i32> [[STEP_ADD4]], splat (i32 2)
; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 0
; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[BIN_RDX:%.*]] = or <2 x i64> [[TMP11]], [[TMP10]]
; CHECK-NEXT: [[TMP13:%.*]] = call i64 @llvm.vector.reduce.or.v2i64(<2 x i64> [[BIN_RDX]])
; CHECK-NEXT: br i1 true, label %[[EXIT:.*]], label %[[SCALAR_PH]]
; CHECK: [[SCALAR_PH]]:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i64 [ [[TMP13]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOPS:.*]]
; CHECK: [[LOOPS]]:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOPS]] ]
; CHECK-NEXT: [[RED:%.*]] = phi i64 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], %[[LOOPS]] ]
; CHECK-NEXT: [[C:%.*]] = icmp eq i64 [[IV]], 0
; CHECK-NEXT: [[AND:%.*]] = and i64 [[IV]], 0
; CHECK-NEXT: [[TMP14:%.*]] = trunc i64 [[IV]] to i32
; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[TMP14]], [[EXT_0]]
; CHECK-NEXT: [[CONV_1:%.*]] = zext i32 [[SHR]] to i64
; CHECK-NEXT: [[RED_NEXT_V:%.*]] = select i1 [[C]], i64 [[AND]], i64 [[CONV_1]]
; CHECK-NEXT: [[RED_NEXT]] = or i64 [[RED_NEXT_V]], [[RED]]
; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 0
; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOPS]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: [[RES:%.*]] = phi i64 [ [[RED_NEXT]], %[[LOOPS]] ], [ [[TMP13]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i64 [[RES]]
;
entry:
  %ext.0 = sext i8 0 to i32
  br label %loops

loops:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loops ]
  %red = phi i64 [ 0, %entry ], [ %red.next, %loops ]
  %c = icmp eq i64 %iv, 0
  %and = and i64 %iv, 0
  %0 = trunc i64 %iv to i32
  %shr = lshr i32 %0, %ext.0
  %conv.1 = zext i32 %shr to i64
  %red.next.v = select i1 %c, i64 %and, i64 %conv.1
  %red.next = or i64 %red.next.v, %red
  %iv.next = add i64 %iv, 1
  %ec = icmp eq i64 %iv.next, 0
  br i1 %ec, label %exit, label %loops

exit:
  %res = phi i64 [ %red.next, %loops ]
  ret i64 %res
}

;.
; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]}
; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META2]], [[META1]]}
;.