; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes='loop-vectorize,instcombine' -sve-tail-folding-insn-threshold=0 -sve-tail-folding=all -S < %s | FileCheck %s

target triple = "aarch64"

; Test that this uses the 'DataAndControlFlow' style of tail folding,
; where it performs the loop-indvar increment before the active.lane.mask
; and uses %N (zero-extended to i64) directly for the trip count.
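;
; A minimal sketch (comments only, not part of the FileCheck assertions) of
; the vector loop this style produces, assuming a VF of <vscale x 4>; the
; names are illustrative:
;
;   vector.body:
;     %index.next = add i64 %index, %vf                  ; increment first
;     %mask.next  = @llvm.get.active.lane.mask(%index.next, %wide.trip.count)
;     ; ... the mask is taken on the post-increment index, against the
;     ; unmodified trip count (the zext of %N).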
define void @cannot_overflow_i32_induction_var(ptr noalias %dst, ptr readonly %src, i32 %N) #0 {
; CHECK-LABEL: @cannot_overflow_i32_induction_var(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CMP6_NOT:%.*]] = icmp eq i32 [[N:%.*]], 0
; CHECK-NEXT:    br i1 [[CMP6_NOT]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY_PREHEADER:%.*]]
; CHECK:       for.body.preheader:
; CHECK-NEXT:    [[WIDE_TRIP_COUNT:%.*]] = zext i32 [[N]] to i64
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 2
; CHECK-NEXT:    [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 [[WIDE_TRIP_COUNT]])
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 4 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[SRC:%.*]], i64 [[INDEX]]
; CHECK-NEXT:    [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[TMP0]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x i32> poison)
; CHECK-NEXT:    [[TMP1:%.*]] = add nsw <vscale x 4 x i32> [[WIDE_MASKED_LOAD]], splat (i32 42)
; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[DST:%.*]], i64 [[INDEX]]
; CHECK-NEXT:    call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP1]], ptr [[TMP2]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]])
; CHECK-NEXT:    [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP4]]
; CHECK-NEXT:    [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX_NEXT]], i64 [[WIDE_TRIP_COUNT]])
; CHECK-NEXT:    [[TMP5:%.*]] = extractelement <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i64 0
; CHECK-NEXT:    br i1 [[TMP5]], label [[VECTOR_BODY]], label [[MIDDLE_BLOCK:%.*]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    br i1 true, label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    br i1 poison, label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK:       for.cond.cleanup.loopexit:
; CHECK-NEXT:    br label [[FOR_COND_CLEANUP]]
; CHECK:       for.cond.cleanup:
; CHECK-NEXT:    ret void
;
entry:
  %cmp6.not = icmp eq i32 %N, 0
  br i1 %cmp6.not, label %for.cond.cleanup, label %for.body.preheader

for.body.preheader:
  %wide.trip.count = zext i32 %N to i64
  br label %for.body
for.body:
  %indvars.iv = phi i64 [ 0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
  %arrayidx = getelementptr inbounds i32, ptr %src, i64 %indvars.iv
  %0 = load i32, ptr %arrayidx, align 4
  %add = add nsw i32 %0, 42
  %arrayidx2 = getelementptr inbounds i32, ptr %dst, i64 %indvars.iv
  store i32 %add, ptr %arrayidx2, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond.not = icmp eq i64 %indvars.iv.next, %wide.trip.count
  br i1 %exitcond.not, label %for.cond.cleanup, label %for.body

for.cond.cleanup:
  ret void
}

; Test that this uses the 'DataAndControlFlowWithoutRuntimeCheck' style of
; tail folding, where it uses an updated trip count and does the loop-indvar
; increment after the active.lane.mask.
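;
; A minimal sketch (comments only, not part of the FileCheck assertions) of
; the vector loop this style produces, assuming a VF of <vscale x 4>; the
; names are illustrative:
;
;   vector.ph:
;     %adjusted.tc = @llvm.usub.sat(%N, %vf)   ; trip count minus one VF, clamped at 0
;   vector.body:
;     %index.next = add i64 %index, %vf
;     %mask.next  = @llvm.get.active.lane.mask(%index, %adjusted.tc)
;     ; ... the mask is taken on the pre-increment %index, so the value feeding
;     ; the lane mask can never overflow.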
define void @can_overflow_i64_induction_var(ptr noalias %dst, ptr readonly %src, i64 %N) #0 {
; CHECK-LABEL: @can_overflow_i64_induction_var(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CMP6_NOT:%.*]] = icmp eq i64 [[N:%.*]], 0
; CHECK-NEXT:    br i1 [[CMP6_NOT]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY_PREHEADER:%.*]]
; CHECK:       for.body.preheader:
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP7:%.*]] = shl nuw nsw i64 [[TMP6]], 2
; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[N]], i64 [[TMP1]])
; CHECK-NEXT:    [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 [[N]])
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 4 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[SRC:%.*]], i64 [[INDEX]]
; CHECK-NEXT:    [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[TMP3]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x i32> poison)
; CHECK-NEXT:    [[TMP4:%.*]] = add nsw <vscale x 4 x i32> [[WIDE_MASKED_LOAD]], splat (i32 42)
; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[DST:%.*]], i64 [[INDEX]]
; CHECK-NEXT:    call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP4]], ptr [[TMP5]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]])
; CHECK-NEXT:    [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP7]]
; CHECK-NEXT:    [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP2]])
; CHECK-NEXT:    [[TMP8:%.*]] = extractelement <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i64 0
; CHECK-NEXT:    br i1 [[TMP8]], label [[VECTOR_BODY]], label [[MIDDLE_BLOCK:%.*]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    br i1 true, label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    br i1 poison, label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK:       for.cond.cleanup.loopexit:
; CHECK-NEXT:    br label [[FOR_COND_CLEANUP]]
; CHECK:       for.cond.cleanup:
; CHECK-NEXT:    ret void
;
entry:
  %cmp6.not = icmp eq i64 %N, 0
  br i1 %cmp6.not, label %for.cond.cleanup, label %for.body.preheader

for.body.preheader:
  br label %for.body
for.body:
  %indvars.iv = phi i64 [ 0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
  %arrayidx = getelementptr inbounds i32, ptr %src, i64 %indvars.iv
  %0 = load i32, ptr %arrayidx, align 4
  %add = add nsw i32 %0, 42
  %arrayidx2 = getelementptr inbounds i32, ptr %dst, i64 %indvars.iv
  store i32 %add, ptr %arrayidx2, align 4
  %indvars.iv.next = add nuw i64 %indvars.iv, 1
  %exitcond.not = icmp eq i64 %indvars.iv.next, %N
  br i1 %exitcond.not, label %for.cond.cleanup, label %for.body

for.cond.cleanup:
  ret void
}

attributes #0 = { vscale_range(1,16) "target-features"="+sve" }