; RUN: opt -passes=loop-vectorize -scalable-vectorization=off -force-vector-width=4 -prefer-predicate-over-epilogue=predicate-dont-vectorize -S < %s | FileCheck %s

; NOTE: These tests aren't really target-specific, but it's convenient to target AArch64
; so that TTI.isLegalMaskedLoad can return true.

target triple = "aarch64-linux-gnu"

; The original loop had an unconditional uniform load. Let's make sure
; we don't artificially create new predicated blocks for the load.
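;
; A minimal C equivalent of the scalar loop below, reconstructed here for
; readability; the C names and types are assumptions, not the original source,
; and it matches the IR only for n > 0 (the IR loop body always runs at least
; once):
;
;   void uniform_load(int *restrict dst, const int *restrict src, long n) {
;     for (long i = 0; i < n; i++)
;       dst[i] = *src;   /* unconditional load from a loop-invariant address */
;   }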
define void @uniform_load(ptr noalias %dst, ptr noalias readonly %src, i64 %n) #0 {
; CHECK-LABEL: @uniform_load(
; CHECK: vector.ph:
; CHECK:      [[N_MINUS_VF:%.*]] = sub i64 %n, [[VSCALE_X_VF:.*]]
; CHECK:      [[CMP:%.*]] = icmp ugt i64 %n, [[VSCALE_X_VF]]
; CHECK:      [[N2:%.*]] = select i1 [[CMP]], i64 [[N_MINUS_VF]], i64 0
; CHECK:      [[INIT_ACTIVE_LANE_MASK:%.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 0, i64 %n)
; CHECK: vector.body:
; CHECK-NEXT: [[IDX:%.*]] = phi i64 [ 0, %vector.ph ], [ [[IDX_NEXT:%.*]], %vector.body ]
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <4 x i1> [ [[INIT_ACTIVE_LANE_MASK]], %vector.ph ], [ [[NEXT_ACTIVE_LANE_MASK:%.*]], %vector.body ]
; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[IDX]], 0
; CHECK-NEXT: [[LOAD_VAL:%.*]] = load i32, ptr %src, align 4
; CHECK-NOT:  load i32, ptr %src, align 4
; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x i32> poison, i32 [[LOAD_VAL]], i64 0
; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <4 x i32> [[TMP4]], <4 x i32> poison, <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr %dst, i64 [[TMP3]]
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[TMP6]], i32 0
; CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[TMP5]], ptr [[TMP7]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK]])
; CHECK-NEXT: [[IDX_NEXT]] = add i64 [[IDX]], 4
; CHECK-NEXT: [[NEXT_ACTIVE_LANE_MASK]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 [[IDX]], i64 [[N2]])
; CHECK-NEXT: [[NOT_ACTIVE_LANE_MASK:%.*]] = xor <4 x i1> [[NEXT_ACTIVE_LANE_MASK]], <i1 true, i1 true, i1 true, i1 true>
; CHECK-NEXT: [[FIRST_LANE_SET:%.*]] = extractelement <4 x i1> [[NOT_ACTIVE_LANE_MASK]], i32 0
; CHECK-NEXT: br i1 [[FIRST_LANE_SET]], label %middle.block, label %vector.body
entry:
  br label %for.body

for.body:                                         ; preds = %entry, %for.body
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %val = load i32, ptr %src, align 4
  %arrayidx = getelementptr inbounds i32, ptr %dst, i64 %indvars.iv
  store i32 %val, ptr %arrayidx, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond.not = icmp eq i64 %indvars.iv.next, %n
  br i1 %exitcond.not, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}

; The original loop had a conditional uniform load. In this case we really do
; need to perform the load conditionally, so we end up using a gather instead.
; However, we at least ensure the mask is the logical AND of the loop predicate
; and the original condition.
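;
; A minimal C equivalent of the scalar loop below, again a reconstruction with
; assumed names and types rather than the original source, valid for n > 0:
;
;   void cond_uniform_load(int *dst, const int *src, const int *cond, long n) {
;     for (long i = 0; i < n; i++)
;       dst[i] = cond[i] ? *src : 0;   /* uniform load guarded by cond[i] */
;   }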
define void @cond_uniform_load(ptr nocapture %dst, ptr nocapture readonly %src, ptr nocapture readonly %cond, i64 %n) #0 {
; CHECK-LABEL: @cond_uniform_load(
; CHECK: vector.ph:
; CHECK:      [[INIT_ACTIVE_LANE_MASK:%.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 0, i64 %n)
; CHECK:      [[TMP1:%.*]] = insertelement <4 x ptr> poison, ptr %src, i64 0
; CHECK-NEXT: [[SRC_SPLAT:%.*]] = shufflevector <4 x ptr> [[TMP1]], <4 x ptr> poison, <4 x i32> zeroinitializer
; CHECK: vector.body:
; CHECK-NEXT: [[IDX:%.*]] = phi i64 [ 0, %vector.ph ], [ [[IDX_NEXT:%.*]], %vector.body ]
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <4 x i1> [ [[INIT_ACTIVE_LANE_MASK]], %vector.ph ], [ [[NEXT_ACTIVE_LANE_MASK:%.*]], %vector.body ]
; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[IDX]], 0
; CHECK:      [[COND_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr {{%.*}}, i32 4, <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> poison)
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq <4 x i32> [[COND_LOAD]], zeroinitializer
; CHECK-NEXT: [[TMP5:%.*]] = xor <4 x i1> [[TMP4]], <i1 true, i1 true, i1 true, i1 true>
; CHECK-NEXT: [[MASK:%.*]] = select <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i1> [[TMP5]], <4 x i1> zeroinitializer
; CHECK-NEXT: call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> [[SRC_SPLAT]], i32 4, <4 x i1> [[MASK]], <4 x i32> poison)
entry:
  br label %for.body

for.body:                                         ; preds = %entry, %if.end
  %index = phi i64 [ %index.next, %if.end ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds i32, ptr %cond, i64 %index
  %0 = load i32, ptr %arrayidx, align 4
  %tobool.not = icmp eq i32 %0, 0
  br i1 %tobool.not, label %if.end, label %if.then

if.then:                                          ; preds = %for.body
  %1 = load i32, ptr %src, align 4
  br label %if.end

if.end:                                           ; preds = %if.then, %for.body
  %val.0 = phi i32 [ %1, %if.then ], [ 0, %for.body ]
  %arrayidx1 = getelementptr inbounds i32, ptr %dst, i64 %index
  store i32 %val.0, ptr %arrayidx1, align 4
  %index.next = add nuw i64 %index, 1
  %exitcond.not = icmp eq i64 %index.next, %n
  br i1 %exitcond.not, label %for.end, label %for.body

for.end:                                          ; preds = %if.end
  ret void
}

attributes #0 = { "target-features"="+neon,+sve,+v8.1a" vscale_range(2, 0) }