; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -passes=loop-vectorize -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue < %s | FileCheck %s
; RUN: opt -S -passes=loop-vectorize < %s | FileCheck %s
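; The -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue switch in the first
; RUN line asks the vectorizer to prefer tail folding by masking and to fall back to a
; scalar epilogue when the loop cannot be predicated.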

; These tests should produce the same result with the default options and when tail
; folding is preferred, because the vectorizer can't fold the tail by masking (due to
; an outside user of %incdec.ptr in %end) and should fall back to a scalar epilogue.
;
; The first test (@basic_loop) simply relies on the command-line switches.
; The second test (@metadata) specifies its tail-folding preference via metadata.
; Both tests should always generate a scalar epilogue.
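;
; For reference, the loop being vectorized is roughly the following C (a sketch only,
; reconstructed from the IR below; the dead load of *pos in the IR header is omitted):
;
;   void basic_loop(char *ptr, int size, char **pos) {
;     char *buff = ptr;
;     do {
;       buff[0] = buff[1]; // copy each byte from buff+1 back to buff
;       ++buff;
;     } while (--size != 0);
;     *pos = buff; // live-out pointer: the outside use that prevents tail folding
;   }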

target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"

define void @basic_loop(ptr nocapture readonly %ptr, i32 %size, ptr %pos) {
; CHECK-LABEL: @basic_loop(
; CHECK-NEXT: header:
; CHECK-NEXT: [[PTR0:%.*]] = load ptr, ptr [[POS:%.*]], align 4
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[SIZE:%.*]], 4
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[SIZE]], 4
; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 [[SIZE]], [[N_MOD_VF]]
; CHECK-NEXT: [[IND_END:%.*]] = sub i32 [[SIZE]], [[N_VEC]]
; CHECK-NEXT: [[IND_END1:%.*]] = getelementptr i8, ptr [[PTR:%.*]], i32 [[N_VEC]]
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[INDEX]], 0
; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[PTR]], i32 [[TMP0]]
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[NEXT_GEP]], i32 1
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 0
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP2]], align 1
; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 0
; CHECK-NEXT: store <4 x i8> [[WIDE_LOAD]], ptr [[TMP3]], align 1
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[SIZE]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[END:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[SIZE]], [[HEADER:%.*]] ]
; CHECK-NEXT: [[BC_RESUME_VAL2:%.*]] = phi ptr [ [[IND_END1]], [[MIDDLE_BLOCK]] ], [ [[PTR]], [[HEADER]] ]
; CHECK-NEXT: br label [[BODY:%.*]]
; CHECK: body:
; CHECK-NEXT: [[DEC66:%.*]] = phi i32 [ [[DEC:%.*]], [[BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[BUFF:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[BODY]] ], [ [[BC_RESUME_VAL2]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, ptr [[BUFF]], i32 1
; CHECK-NEXT: [[DEC]] = add nsw i32 [[DEC66]], -1
; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[INCDEC_PTR]], align 1
; CHECK-NEXT: store i8 [[TMP5]], ptr [[BUFF]], align 1
; CHECK-NEXT: [[TOBOOL11:%.*]] = icmp eq i32 [[DEC]], 0
; CHECK-NEXT: br i1 [[TOBOOL11]], label [[END]], label [[BODY]], !llvm.loop [[LOOP2:![0-9]+]]
; CHECK: end:
; CHECK-NEXT: [[INCDEC_PTR_LCSSA:%.*]] = phi ptr [ [[INCDEC_PTR]], [[BODY]] ], [ [[IND_END1]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: store ptr [[INCDEC_PTR_LCSSA]], ptr [[POS]], align 4
; CHECK-NEXT: ret void
;
header:
  %ptr0 = load ptr, ptr %pos, align 4
  br label %body

body:
  %dec66 = phi i32 [ %dec, %body ], [ %size, %header ]
  %buff = phi ptr [ %incdec.ptr, %body ], [ %ptr, %header ]
  %incdec.ptr = getelementptr inbounds i8, ptr %buff, i32 1
  %dec = add nsw i32 %dec66, -1
  %0 = load i8, ptr %incdec.ptr, align 1
  store i8 %0, ptr %buff, align 1
  %tobool11 = icmp eq i32 %dec, 0
  br i1 %tobool11, label %end, label %body

end:
  store ptr %incdec.ptr, ptr %pos, align 4
  ret void
}

define void @metadata(ptr nocapture readonly %ptr, i32 %size, ptr %pos) {
; CHECK-LABEL: @metadata(
; CHECK-NEXT: header:
; CHECK-NEXT: [[PTR0:%.*]] = load ptr, ptr [[POS:%.*]], align 4
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[SIZE:%.*]], 4
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[SIZE]], 4
; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 [[SIZE]], [[N_MOD_VF]]
; CHECK-NEXT: [[IND_END:%.*]] = sub i32 [[SIZE]], [[N_VEC]]
; CHECK-NEXT: [[IND_END1:%.*]] = getelementptr i8, ptr [[PTR:%.*]], i32 [[N_VEC]]
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[INDEX]], 0
; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[PTR]], i32 [[TMP0]]
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[NEXT_GEP]], i32 1
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 0
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP2]], align 1
; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 0
; CHECK-NEXT: store <4 x i8> [[WIDE_LOAD]], ptr [[TMP3]], align 1
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[SIZE]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[END:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[SIZE]], [[HEADER:%.*]] ]
; CHECK-NEXT: [[BC_RESUME_VAL2:%.*]] = phi ptr [ [[IND_END1]], [[MIDDLE_BLOCK]] ], [ [[PTR]], [[HEADER]] ]
; CHECK-NEXT: br label [[BODY:%.*]]
; CHECK: body:
; CHECK-NEXT: [[DEC66:%.*]] = phi i32 [ [[DEC:%.*]], [[BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[BUFF:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[BODY]] ], [ [[BC_RESUME_VAL2]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, ptr [[BUFF]], i32 1
; CHECK-NEXT: [[DEC]] = add nsw i32 [[DEC66]], -1
; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[INCDEC_PTR]], align 1
; CHECK-NEXT: store i8 [[TMP5]], ptr [[BUFF]], align 1
; CHECK-NEXT: [[TOBOOL11:%.*]] = icmp eq i32 [[DEC]], 0
; CHECK-NEXT: br i1 [[TOBOOL11]], label [[END]], label [[BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: end:
; CHECK-NEXT: [[INCDEC_PTR_LCSSA:%.*]] = phi ptr [ [[INCDEC_PTR]], [[BODY]] ], [ [[IND_END1]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: store ptr [[INCDEC_PTR_LCSSA]], ptr [[POS]], align 4
; CHECK-NEXT: ret void
;
header:
  %ptr0 = load ptr, ptr %pos, align 4
  br label %body

body:
  %dec66 = phi i32 [ %dec, %body ], [ %size, %header ]
  %buff = phi ptr [ %incdec.ptr, %body ], [ %ptr, %header ]
  %incdec.ptr = getelementptr inbounds i8, ptr %buff, i32 1
  %dec = add nsw i32 %dec66, -1
  %0 = load i8, ptr %incdec.ptr, align 1
  store i8 %0, ptr %buff, align 1
  %tobool11 = icmp eq i32 %dec, 0
  br i1 %tobool11, label %end, label %body, !llvm.loop !1

end:
  store ptr %incdec.ptr, ptr %pos, align 4
  ret void
}

!1 = distinct !{!1, !2, !3}
!2 = !{!"llvm.loop.vectorize.predicate.enable", i1 true}
!3 = !{!"llvm.loop.vectorize.enable", i1 true}
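; Loop metadata used by @metadata: !1 is the loop's self-referencing loop ID;
; !2 (llvm.loop.vectorize.predicate.enable) requests tail folding by masking, and
; !3 (llvm.loop.vectorize.enable) enables vectorization of the loop.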