1 ; RUN: opt -passes=loop-vectorize -S < %s | FileCheck %s
3 target triple = "aarch64-unknown-linux-gnu"
; Test: a loop with a constant trip count of 7 over i64 elements should be
; vectorized with scalable vectors. The CHECK lines below match a
; <vscale x 2 x i64> vector body in which both loads and the store are
; predicated by an active lane mask (tail folding), and the loop exits when
; lane 0 of the negated next-iteration mask is true.
; NOTE(review): the function's entry block, final `ret void`, and closing `}`
; are not visible in this chunk — do not edit the body shape without the full file.
5 define void @trip7_i64(ptr noalias nocapture noundef %dst, ptr noalias nocapture noundef readonly %src) #0 {
6 ; CHECK-LABEL: @trip7_i64(
; First vscale/mul pair: minimum-iteration / runtime checks emitted by the vectorizer.
7 ; CHECK: = call i64 @llvm.vscale.i64()
8 ; CHECK-NEXT: = mul i64
; Second vscale/mul pair: the step (VF) actually used in the vector body; captured for reuse below.
9 ; CHECK: [[VSCALE:%.*]] = call i64 @llvm.vscale.i64()
10 ; CHECK-NEXT: [[VF:%.*]] = mul i64 [[VSCALE]], 2
11 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %vector.ph ], [ [[INDEX_NEXT:%.*]], %vector.body ]
12 ; CHECK: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 2 x i1> [ {{%.*}}, %vector.ph ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %vector.body ]
; Both loads and the store must be predicated by the same active lane mask.
13 ; CHECK: {{%.*}} = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr {{%.*}}, i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x i64> poison)
14 ; CHECK: {{%.*}} = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr {{%.*}}, i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x i64> poison)
15 ; CHECK: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> {{%.*}}, ptr {{%.*}}, i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]])
16 ; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[VF]]
; The next-iteration mask is computed against the trip count (7); the loop
; terminates when lane 0 of its negation becomes true.
17 ; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX_NEXT]], i64 7)
18 ; CHECK-NEXT: [[ACTIVE_LANE_MASK_NOT:%.*]] = xor <vscale x 2 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
19 ; CHECK-NEXT: [[COND:%.*]] = extractelement <vscale x 2 x i1> [[ACTIVE_LANE_MASK_NOT]], i32 0
20 ; CHECK-NEXT: br i1 [[COND]], label %middle.block, label %vector.body
; Scalar source loop: dst[i] += src[i] * 2, for i in [0, 7).
26 for.body: ; preds = %entry, %for.body
27 %i.06 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
28 %arrayidx = getelementptr inbounds i64, ptr %src, i64 %i.06
29 %0 = load i64, ptr %arrayidx, align 8
30 %mul = shl nsw i64 %0, 1
31 %arrayidx1 = getelementptr inbounds i64, ptr %dst, i64 %i.06
32 %1 = load i64, ptr %arrayidx1, align 8
33 %add = add nsw i64 %1, %mul
34 store i64 %add, ptr %arrayidx1, align 8
35 %inc = add nuw nsw i64 %i.06, 1
36 %exitcond.not = icmp eq i64 %inc, 7
37 br i1 %exitcond.not, label %for.end, label %for.body
39 for.end: ; preds = %for.body
; Test: a loop with a constant trip count of 5 over i8 elements. The
; CHECK/CHECK-NEXT lines below match the scalar loop verbatim — i.e. the
; vectorizer is expected to leave this loop unvectorized (no vector body,
; no masked intrinsics are checked for here).
; NOTE(review): the entry block, the `%mul` definition in the source loop,
; the final `ret void`, and the closing `}` are not visible in this chunk.
43 define void @trip5_i8(ptr noalias nocapture noundef %dst, ptr noalias nocapture noundef readonly %src) #0 {
44 ; CHECK-LABEL: @trip5_i8(
46 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; The checked output is the scalar loop: dst[i] = dst[i] + (src[i] << 1) over i8.
48 ; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
49 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[SRC:%.*]], i64 [[I_08]]
50 ; CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
51 ; CHECK-NEXT: [[MUL:%.*]] = shl i8 [[TMP0]], 1
52 ; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[DST:%.*]], i64 [[I_08]]
53 ; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
54 ; CHECK-NEXT: [[ADD:%.*]] = add i8 [[MUL]], [[TMP1]]
55 ; CHECK-NEXT: store i8 [[ADD]], ptr [[ARRAYIDX1]], align 1
56 ; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_08]], 1
57 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], 5
58 ; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]]
60 ; CHECK-NEXT: ret void
; Scalar source loop (trip count 5); %mul is defined on a line not shown in this chunk.
65 for.body: ; preds = %entry, %for.body
66 %i.08 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
67 %arrayidx = getelementptr inbounds i8, ptr %src, i64 %i.08
68 %0 = load i8, ptr %arrayidx, align 1
70 %arrayidx1 = getelementptr inbounds i8, ptr %dst, i64 %i.08
71 %1 = load i8, ptr %arrayidx1, align 1
72 %add = add i8 %mul, %1
73 store i8 %add, ptr %arrayidx1, align 1
74 %inc = add nuw nsw i64 %i.08, 1
75 %exitcond.not = icmp eq i64 %inc, 5
76 br i1 %exitcond.not, label %for.end, label %for.body
78 for.end: ; preds = %for.body
; #0 enables SVE and bounds vscale to [1, 16], making scalable vectorization legal.
82 attributes #0 = { vscale_range(1,16) "target-features"="+sve" }