; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt -p loop-vectorize -S %s | FileCheck %s

target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n32:64-S128"
target triple = "riscv64-unknown-linux-gnu"

; Make sure we do not pick <vscale x 1 x i64> as VF for a loop with a
; first-order recurrence.
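; The recurrence is carried by %for, which holds the value loaded by %l in
; the previous iteration. The checks below show the loop vectorized with a
; fixed VF of 4 and an interleave count of 2 (two <4 x i64> loads per
; iteration, induction step of 8) instead of <vscale x 1 x i64>.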
define i64 @pr97452_scalable_vf1_for(ptr %src) #0 {
; CHECK-LABEL: define i64 @pr97452_scalable_vf1_for(
; CHECK-SAME: ptr [[SRC:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VECTOR_RECUR:%.*]] = phi <4 x i64> [ <i64 poison, i64 poison, i64 poison, i64 0>, %[[VECTOR_PH]] ], [ [[WIDE_LOAD1:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i64, ptr [[SRC]], i64 [[TMP0]]
; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[SRC]], i64 [[TMP1]]
; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i64, ptr [[TMP2]], i32 0
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[TMP2]], i32 4
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP4]], align 8
; CHECK-NEXT: [[WIDE_LOAD1]] = load <4 x i64>, ptr [[TMP5]], align 8
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16
; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x i64> [[WIDE_LOAD1]], i32 3
; CHECK-NEXT: [[VECTOR_RECUR_EXTRACT_FOR_PHI:%.*]] = extractelement <4 x i64> [[WIDE_LOAD1]], i32 2
; CHECK-NEXT: br i1 false, label %[[EXIT:.*]], label %[[SCALAR_PH]]
; CHECK: [[SCALAR_PH]]:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 16, %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i64 [ [[VECTOR_RECUR_EXTRACT]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK-NEXT: [[SCALAR_RECUR:%.*]] = phi i64 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[L:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i64, ptr [[SRC]], i64 [[IV]]
; CHECK-NEXT: [[L]] = load i64, ptr [[GEP]], align 8
; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], 22
; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: [[RES:%.*]] = phi i64 [ [[SCALAR_RECUR]], %[[LOOP]] ], [ [[VECTOR_RECUR_EXTRACT_FOR_PHI]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i64 [[RES]]
;
entry:
  br label %loop

loop:
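  ; %for carries the value loaded by %l in the previous iteration; this is
  ; the first-order recurrence the test is about.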
  %for = phi i64 [ 0, %entry ], [ %l, %loop ]
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %iv.next = add i64 %iv, 1
  %gep = getelementptr inbounds i64, ptr %src, i64 %iv
  %l = load i64, ptr %gep, align 8
  %ec = icmp eq i64 %iv, 22
  br i1 %ec, label %exit, label %loop

exit:
  %res = phi i64 [ %for, %loop ]
  ret i64 %res
}

attributes #0 = { "target-features"="+64bit,+v,+zvl128b,+zvl256b" }
;.
; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}