; Tail-folding test for low-trip-count loops vectorized for scalable vectors.
; RUN: opt -passes=loop-vectorize -S < %s | FileCheck %s

target triple = "aarch64-unknown-linux-gnu"
; Trip count 7 (not a multiple of any plausible VF): the vectorizer should
; fold the tail using a scalable active-lane-mask predicate on the loads,
; the store, and the loop exit condition.
define void @trip7_i64(ptr noalias nocapture noundef %dst, ptr noalias nocapture noundef readonly %src) #0 {
; CHECK-LABEL: @trip7_i64(
; CHECK:         = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    = mul i64
; CHECK:         = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    = mul i64
; CHECK:         [[VSCALE:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[VF:%.*]] = mul i64 [[VSCALE]], 2
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %vector.ph ], [ [[INDEX_NEXT:%.*]], %vector.body ]
; CHECK:         [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 2 x i1> [ {{%.*}}, %vector.ph ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %vector.body ]
; CHECK:         {{%.*}} = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr {{%.*}}, i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x i64> poison)
; CHECK:         {{%.*}} = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr {{%.*}}, i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x i64> poison)
; CHECK:         call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> {{%.*}}, ptr {{%.*}}, i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]])
; CHECK-NEXT:    [[INDEX_NEXT]] = add i64 [[INDEX]], [[VF]]
; CHECK-NEXT:    [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX_NEXT]], i64 7)
; CHECK-NEXT:    [[ACTIVE_LANE_MASK_NOT:%.*]] = xor <vscale x 2 x i1> [[ACTIVE_LANE_MASK_NEXT]], shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i64 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer)
; CHECK-NEXT:    [[COND:%.*]] = extractelement <vscale x 2 x i1> [[ACTIVE_LANE_MASK_NOT]], i32 0
; CHECK-NEXT:    br i1 [[COND]], label %middle.block, label %vector.body
;
entry:
  br label %for.body

for.body:                                         ; preds = %entry, %for.body
  %i.06 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
  %arrayidx = getelementptr inbounds i64, ptr %src, i64 %i.06
  %0 = load i64, ptr %arrayidx, align 8
  %mul = shl nsw i64 %0, 1                        ; dst[i] += src[i] * 2
  %arrayidx1 = getelementptr inbounds i64, ptr %dst, i64 %i.06
  %1 = load i64, ptr %arrayidx1, align 8
  %add = add nsw i64 %1, %mul
  store i64 %add, ptr %arrayidx1, align 8
  %inc = add nuw nsw i64 %i.06, 1
  %exitcond.not = icmp eq i64 %inc, 7
  br i1 %exitcond.not, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}
; Trip count 5 with i8 elements: too few iterations to be worth vectorizing,
; so the CHECK lines pin the scalar loop surviving the pass unchanged.
define void @trip5_i8(ptr noalias nocapture noundef %dst, ptr noalias nocapture noundef readonly %src) #0 {
; CHECK-LABEL: @trip5_i8(
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK-NEXT:    [[I_08:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[SRC:%.*]], i64 [[I_08]]
; CHECK-NEXT:    [[TMP0:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT:    [[MUL:%.*]] = shl i8 [[TMP0]], 1
; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[DST:%.*]], i64 [[I_08]]
; CHECK-NEXT:    [[TMP1:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
; CHECK-NEXT:    [[ADD:%.*]] = add i8 [[MUL]], [[TMP1]]
; CHECK-NEXT:    store i8 [[ADD]], ptr [[ARRAYIDX1]], align 1
; CHECK-NEXT:    [[INC]] = add nuw nsw i64 [[I_08]], 1
; CHECK-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], 5
; CHECK-NEXT:    br i1 [[EXITCOND_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]]
; CHECK-NEXT:    ret void
;
entry:
  br label %for.body

for.body:                                         ; preds = %entry, %for.body
  %i.08 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
  %arrayidx = getelementptr inbounds i8, ptr %src, i64 %i.08
  %0 = load i8, ptr %arrayidx, align 1
  %mul = shl i8 %0, 1                             ; restored: %mul was used below but its definition was dropped; pinned by the CHECK line for [[MUL]]
  %arrayidx1 = getelementptr inbounds i8, ptr %dst, i64 %i.08
  %1 = load i8, ptr %arrayidx1, align 1
  %add = add i8 %mul, %1
  store i8 %add, ptr %arrayidx1, align 1
  %inc = add nuw nsw i64 %i.08, 1
  %exitcond.not = icmp eq i64 %inc, 5
  br i1 %exitcond.not, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}
; vscale_range(1,16) + SVE enables scalable vectorization for both functions above.
attributes #0 = { vscale_range(1,16) "target-features"="+sve" }