; Test that a reverse loop is vectorized with a fixed-size vector on a VLA (SVE) target.
; This is the C++ loop being vectorized in this file:
;   #pragma clang loop vectorize_width(8, fixed)
;   for (int i = N-1; i >= 0; --i)
;     a[i] = b[i] + 1.0;
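;
; A plausible reconstruction of the full C++ source for the two functions below,
; derived from the IR in this file (the source-level types and the loop body of the
; integer version are assumptions, not taken from the original test):
;
;   void vector_reverse_f64(long N, double *a, double *b) {
;     #pragma clang loop vectorize_width(8, fixed)
;     for (long i = N - 1; i >= 0; --i)
;       a[i] = b[i] + 1.0;
;   }
;
;   void vector_reverse_i64(long N, long *a, long *b) {
;     #pragma clang loop vectorize_width(8, fixed)
;     for (long i = N - 1; i >= 0; --i)
;       a[i] = b[i] + 1;
;   }
;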
; RUN: opt -passes=loop-vectorize,dce -mtriple aarch64-linux-gnu -S \
; RUN:   -prefer-predicate-over-epilogue=scalar-epilogue < %s | FileCheck %s

define void @vector_reverse_f64(i64 %N, ptr %a, ptr %b) #0 {
; CHECK-LABEL: vector_reverse_f64
; CHECK-LABEL: vector.body
; CHECK: %[[GEP:.*]] = getelementptr inbounds double, ptr %{{.*}}, i32 0
; CHECK-NEXT: %[[GEP1:.*]] = getelementptr inbounds double, ptr %[[GEP]], i32 -7
; CHECK-NEXT: %[[WIDE:.*]] = load <8 x double>, ptr %[[GEP1]], align 8
; CHECK-NEXT: %[[REVERSE:.*]] = shufflevector <8 x double> %[[WIDE]], <8 x double> poison, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: %[[FADD:.*]] = fadd <8 x double> %[[REVERSE]]
; CHECK-NEXT: %[[GEP2:.*]] = getelementptr inbounds double, ptr {{.*}}, i64 {{.*}}
; CHECK-NEXT: %[[GEP3:.*]] = getelementptr inbounds double, ptr %[[GEP2]], i32 0
; CHECK-NEXT: %[[GEP4:.*]] = getelementptr inbounds double, ptr %[[GEP3]], i32 -7
; CHECK-NEXT: %[[REVERSE6:.*]] = shufflevector <8 x double> %[[FADD]], <8 x double> poison, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: store <8 x double> %[[REVERSE6]], ptr %[[GEP4]], align 8
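;
; The checks above verify the reversed-access lowering: the address is stepped back
; seven elements (i32 -7) so a contiguous <8 x double> can be loaded, a shufflevector
; with mask <7,6,5,4,3,2,1,0> restores the descending element order, and the store
; side mirrors the same step-back-and-reverse sequence.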
entry:
  %cmp7 = icmp sgt i64 %N, 0
  br i1 %cmp7, label %for.body, label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %for.body, %entry
  ret void

for.body:                                         ; preds = %entry, %for.body
  %i.08.in = phi i64 [ %i.08, %for.body ], [ %N, %entry ]
  %i.08 = add nsw i64 %i.08.in, -1
  %arrayidx = getelementptr inbounds double, ptr %b, i64 %i.08
  %0 = load double, ptr %arrayidx, align 8
  %add = fadd double %0, 1.000000e+00
  %arrayidx1 = getelementptr inbounds double, ptr %a, i64 %i.08
  store double %add, ptr %arrayidx1, align 8
  %cmp = icmp sgt i64 %i.08.in, 1
  br i1 %cmp, label %for.body, label %for.cond.cleanup, !llvm.loop !0
}

define void @vector_reverse_i64(i64 %N, ptr %a, ptr %b) #0 {
; CHECK-LABEL: vector_reverse_i64
; CHECK-LABEL: vector.body
; CHECK: %[[GEP:.*]] = getelementptr inbounds i64, ptr %{{.*}}, i32 0
; CHECK-NEXT: %[[GEP1:.*]] = getelementptr inbounds i64, ptr %[[GEP]], i32 -7
; CHECK-NEXT: %[[WIDE:.*]] = load <8 x i64>, ptr %[[GEP1]], align 8
; CHECK-NEXT: %[[REVERSE:.*]] = shufflevector <8 x i64> %[[WIDE]], <8 x i64> poison, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: %[[ADD:.*]] = add <8 x i64> %[[REVERSE]]
; CHECK-NEXT: %[[GEP2:.*]] = getelementptr inbounds i64, ptr {{.*}}, i64 {{.*}}
; CHECK-NEXT: %[[GEP3:.*]] = getelementptr inbounds i64, ptr %[[GEP2]], i32 0
; CHECK-NEXT: %[[GEP4:.*]] = getelementptr inbounds i64, ptr %[[GEP3]], i32 -7
; CHECK-NEXT: %[[REVERSE6:.*]] = shufflevector <8 x i64> %[[ADD]], <8 x i64> poison, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: store <8 x i64> %[[REVERSE6]], ptr %[[GEP4]], align 8
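;
; Same step-back-and-reverse pattern as the f64 version above, with an integer add
; instead of an fadd.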
entry:
  %cmp8 = icmp sgt i64 %N, 0
  br i1 %cmp8, label %for.body, label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %for.body, %entry
  ret void

for.body:                                         ; preds = %entry, %for.body
  %i.09.in = phi i64 [ %i.09, %for.body ], [ %N, %entry ]
  %i.09 = add nsw i64 %i.09.in, -1
  %arrayidx = getelementptr inbounds i64, ptr %b, i64 %i.09
  %0 = load i64, ptr %arrayidx, align 8
  %add = add i64 %0, 1
  %arrayidx2 = getelementptr inbounds i64, ptr %a, i64 %i.09
  store i64 %add, ptr %arrayidx2, align 8
  %cmp = icmp sgt i64 %i.09.in, 1
  br i1 %cmp, label %for.body, label %for.cond.cleanup, !llvm.loop !0
}

attributes #0 = { "target-cpu"="generic" "target-features"="+neon,+sve" }

!0 = distinct !{!0, !1, !2, !3, !4}
!1 = !{!"llvm.loop.mustprogress"}
!2 = !{!"llvm.loop.vectorize.width", i32 8}
!3 = !{!"llvm.loop.vectorize.scalable.enable", i1 false}
!4 = !{!"llvm.loop.vectorize.enable", i1 true}
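;
; The loop metadata mirrors the pragma in the header comment: !2 requests a
; vectorization factor of 8, !3 disables scalable (VLA) vectorization so a
; fixed-width <8 x ...> vector is used even though the target has +sve, and
; !4 forces vectorization of the loop.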