; RUN: opt -loop-vectorize -dce -instcombine -mtriple aarch64-linux-gnu -mattr=+sve -S %s -scalable-vectorization=on -o - | FileCheck %s
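
; The loop below only updates a[i] when b[i] > 0.0, so the vectorizer must
; predicate the accesses to %a: it should emit a masked load and a masked
; store of <vscale x 4 x float> rather than unconditional wide operations.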
define void @mloadstore_f32(float* noalias nocapture %a, float* noalias nocapture readonly %b, i64 %n) {
; CHECK-LABEL: @mloadstore_f32
; CHECK: vector.body:
; CHECK:       %[[LOAD1:.*]] = load <vscale x 4 x float>, <vscale x 4 x float>*
; CHECK-NEXT:  %[[MASK:.*]] = fcmp ogt <vscale x 4 x float> %[[LOAD1]],
; CHECK-NEXT:  %[[GEPA:.*]] = getelementptr inbounds float, float* %a,
; CHECK-NEXT:  %[[MLOAD_PTRS:.*]] = bitcast float* %[[GEPA]] to <vscale x 4 x float>*
; CHECK-NEXT:  %[[LOAD2:.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0nxv4f32(<vscale x 4 x float>* %[[MLOAD_PTRS]], i32 4, <vscale x 4 x i1> %[[MASK]]
; CHECK-NEXT:  %[[FADD:.*]] = fadd <vscale x 4 x float> %[[LOAD1]], %[[LOAD2]]
; CHECK-NEXT:  %[[MSTORE_PTRS:.*]] = bitcast float* %[[GEPA]] to <vscale x 4 x float>*
; CHECK-NEXT:  call void @llvm.masked.store.nxv4f32.p0nxv4f32(<vscale x 4 x float> %[[FADD]], <vscale x 4 x float>* %[[MSTORE_PTRS]], i32 4, <vscale x 4 x i1> %[[MASK]])
entry:
  br label %for.body

for.body:                                         ; preds = %entry, %for.inc
  %i.011 = phi i64 [ %inc, %for.inc ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds float, float* %b, i64 %i.011
  %0 = load float, float* %arrayidx, align 4
  %cmp1 = fcmp ogt float %0, 0.000000e+00
  br i1 %cmp1, label %if.then, label %for.inc

if.then:                                          ; preds = %for.body
  %arrayidx3 = getelementptr inbounds float, float* %a, i64 %i.011
  %1 = load float, float* %arrayidx3, align 4
  %add = fadd float %0, %1
  store float %add, float* %arrayidx3, align 4
  br label %for.inc

for.inc:                                          ; preds = %for.body, %if.then
  %inc = add nuw nsw i64 %i.011, 1
  %exitcond.not = icmp eq i64 %inc, %n
  br i1 %exitcond.not, label %exit, label %for.body, !llvm.loop !0

exit:                                             ; preds = %for.inc
  ret void
}
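
; The same pattern with integers: a[i] += b[i] only when b[i] != 0, so the
; vectorizer should emit a masked load and a masked store of <vscale x 4 x i32>.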
define void @mloadstore_i32(i32* noalias nocapture %a, i32* noalias nocapture readonly %b, i64 %n) {
; CHECK-LABEL: @mloadstore_i32
; CHECK: vector.body:
; CHECK:       %[[LOAD1:.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>*
; CHECK-NEXT:  %[[MASK:.*]] = icmp ne <vscale x 4 x i32> %[[LOAD1]],
; CHECK-NEXT:  %[[GEPA:.*]] = getelementptr inbounds i32, i32* %a,
; CHECK-NEXT:  %[[MLOAD_PTRS:.*]] = bitcast i32* %[[GEPA]] to <vscale x 4 x i32>*
; CHECK-NEXT:  %[[LOAD2:.*]] = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0nxv4i32(<vscale x 4 x i32>* %[[MLOAD_PTRS]], i32 4, <vscale x 4 x i1> %[[MASK]]
; CHECK-NEXT:  %[[ADD:.*]] = add <vscale x 4 x i32> %[[LOAD1]], %[[LOAD2]]
; CHECK-NEXT:  %[[MSTORE_PTRS:.*]] = bitcast i32* %[[GEPA]] to <vscale x 4 x i32>*
; CHECK-NEXT:  call void @llvm.masked.store.nxv4i32.p0nxv4i32(<vscale x 4 x i32> %[[ADD]], <vscale x 4 x i32>* %[[MSTORE_PTRS]], i32 4, <vscale x 4 x i1> %[[MASK]])
entry:
  br label %for.body

for.body:                                         ; preds = %entry, %for.inc
  %i.011 = phi i64 [ %inc, %for.inc ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds i32, i32* %b, i64 %i.011
  %0 = load i32, i32* %arrayidx, align 4
  %cmp1 = icmp ne i32 %0, 0
  br i1 %cmp1, label %if.then, label %for.inc

if.then:                                          ; preds = %for.body
  %arrayidx3 = getelementptr inbounds i32, i32* %a, i64 %i.011
  %1 = load i32, i32* %arrayidx3, align 4
  %add = add i32 %0, %1
  store i32 %add, i32* %arrayidx3, align 4
  br label %for.inc

for.inc:                                          ; preds = %for.body, %if.then
  %inc = add nuw nsw i64 %i.011, 1
  %exitcond.not = icmp eq i64 %inc, %n
  br i1 %exitcond.not, label %exit, label %for.body, !llvm.loop !0

exit:                                             ; preds = %for.inc
  ret void
}
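
; Loop hints shared by both loops: force vectorization with a scalable
; vectorization factor of 4 and an interleave count of 1.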
!0 = distinct !{!0, !1, !2, !3, !4, !5}
!1 = !{!"llvm.loop.mustprogress"}
!2 = !{!"llvm.loop.vectorize.width", i32 4}
!3 = !{!"llvm.loop.vectorize.scalable.enable", i1 true}
!4 = !{!"llvm.loop.interleave.count", i32 1}
!5 = !{!"llvm.loop.vectorize.enable", i1 true}