; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt -passes=loop-vectorize -mcpu=neoverse-v1 -force-vector-interleave=2 -force-vector-width=1 -S %s | FileCheck %s

target triple = "aarch64-unknown-linux-gnu"
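
; Regression test, presumably for https://github.com/llvm/llvm-project/issues/70988
; (going by the function name; the issue link is an assumption): with the
; tail-folded loop forced to interleave by 2 at VF 1, each scalarised lane keeps
; its own active-lane-mask i1, and the smax reduction must be blended per lane
; with selects before the two partial results are combined in middle.block.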
define i32 @pr70988(ptr %src, i32 %n) {
; CHECK-LABEL: define i32 @pr70988(
; CHECK-SAME: ptr [[SRC:%.*]], i32 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[N]], 15
; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.umax.i32(i32 [[TMP1]], i32 1)
; CHECK-NEXT:    [[UMAX:%.*]] = zext i32 [[TMP2]] to i64
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[N_RND_UP:%.*]] = add i64 [[UMAX]], 1
; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 2
; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT:    [[ACTIVE_LANE_MASK_ENTRY:%.*]] = icmp ult i64 0, [[UMAX]]
; CHECK-NEXT:    [[ACTIVE_LANE_MASK_ENTRY1:%.*]] = icmp ult i64 1, [[UMAX]]
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_LOAD_CONTINUE5:%.*]] ]
; CHECK-NEXT:    [[ACTIVE_LANE_MASK:%.*]] = phi i1 [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[PRED_LOAD_CONTINUE5]] ]
; CHECK-NEXT:    [[ACTIVE_LANE_MASK2:%.*]] = phi i1 [ [[ACTIVE_LANE_MASK_ENTRY1]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT7:%.*]], [[PRED_LOAD_CONTINUE5]] ]
; CHECK-NEXT:    [[VEC_PHI:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[TMP17:%.*]], [[PRED_LOAD_CONTINUE5]] ]
; CHECK-NEXT:    [[VEC_PHI3:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[TMP18:%.*]], [[PRED_LOAD_CONTINUE5]] ]
; CHECK-NEXT:    br i1 [[ACTIVE_LANE_MASK]], label [[PRED_LOAD_IF:%.*]], label [[PRED_LOAD_CONTINUE:%.*]]
; CHECK:       pred.load.if:
; CHECK-NEXT:    [[TMP3:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr i32, ptr [[SRC]], i64 [[TMP3]]
; CHECK-NEXT:    [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8
; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
; CHECK-NEXT:    br label [[PRED_LOAD_CONTINUE]]
; CHECK:       pred.load.continue:
; CHECK-NEXT:    [[TMP8:%.*]] = phi i32 [ poison, [[VECTOR_BODY]] ], [ [[TMP6]], [[PRED_LOAD_IF]] ]
; CHECK-NEXT:    br i1 [[ACTIVE_LANE_MASK2]], label [[PRED_LOAD_IF4:%.*]], label [[PRED_LOAD_CONTINUE5]]
; CHECK:       pred.load.if4:
; CHECK-NEXT:    [[TMP9:%.*]] = add i64 [[INDEX]], 1
; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr i32, ptr [[SRC]], i64 [[TMP9]]
; CHECK-NEXT:    [[TMP11:%.*]] = load ptr, ptr [[TMP10]], align 8
; CHECK-NEXT:    [[TMP12:%.*]] = load i32, ptr [[TMP11]], align 4
; CHECK-NEXT:    br label [[PRED_LOAD_CONTINUE5]]
; CHECK:       pred.load.continue5:
; CHECK-NEXT:    [[TMP14:%.*]] = phi i32 [ poison, [[PRED_LOAD_CONTINUE]] ], [ [[TMP12]], [[PRED_LOAD_IF4]] ]
; CHECK-NEXT:    [[TMP15:%.*]] = tail call i32 @llvm.smax.i32(i32 [[TMP8]], i32 [[VEC_PHI]])
; CHECK-NEXT:    [[TMP16:%.*]] = tail call i32 @llvm.smax.i32(i32 [[TMP14]], i32 [[VEC_PHI3]])
; CHECK-NEXT:    [[TMP17]] = select i1 [[ACTIVE_LANE_MASK]], i32 [[TMP15]], i32 [[VEC_PHI]]
; CHECK-NEXT:    [[TMP18]] = select i1 [[ACTIVE_LANE_MASK2]], i32 [[TMP16]], i32 [[VEC_PHI3]]
; CHECK-NEXT:    [[INDEX_NEXT]] = add i64 [[INDEX]], 2
; CHECK-NEXT:    [[TMP19:%.*]] = add i64 [[INDEX_NEXT]], 1
; CHECK-NEXT:    [[ACTIVE_LANE_MASK_NEXT]] = icmp ult i64 [[INDEX_NEXT]], [[UMAX]]
; CHECK-NEXT:    [[ACTIVE_LANE_MASK_NEXT7]] = icmp ult i64 [[TMP19]], [[UMAX]]
; CHECK-NEXT:    [[TMP20:%.*]] = xor i1 [[ACTIVE_LANE_MASK_NEXT]], true
; CHECK-NEXT:    br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[RDX_MINMAX:%.*]] = call i32 @llvm.smax.i32(i32 [[TMP17]], i32 [[TMP18]])
; CHECK-NEXT:    br i1 true, label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT:    [[BC_MERGE_RDX:%.*]] = phi i32 [ [[RDX_MINMAX]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
; CHECK-NEXT:    br label [[LOOP:%.*]]
; CHECK:       loop:
; CHECK-NEXT:    [[INDUC:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDUC_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[MAX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[TMP24:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[GEP:%.*]] = getelementptr i32, ptr [[SRC]], i64 [[INDUC]]
; CHECK-NEXT:    [[TMP22:%.*]] = load ptr, ptr [[GEP]], align 8
; CHECK-NEXT:    [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4
; CHECK-NEXT:    [[TMP24]] = tail call i32 @llvm.smax.i32(i32 [[TMP23]], i32 [[MAX]])
; CHECK-NEXT:    [[INDUC_NEXT]] = add nuw nsw i64 [[INDUC]], 1
; CHECK-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDUC_NEXT]], [[UMAX]]
; CHECK-NEXT:    br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK:       exit:
; CHECK-NEXT:    [[RES:%.*]] = phi i32 [ [[TMP24]], [[LOOP]] ], [ [[RDX_MINMAX]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    ret i32 [[RES]]
;
entry:
  %0 = and i32 %n, 15
  %1 = call i32 @llvm.umax.i32(i32 %0, i32 1)
  %umax = zext i32 %1 to i64
  br label %loop

loop:
  %induc = phi i64 [ 0, %entry ], [ %induc.next, %loop ]
  %max = phi i32 [ 0, %entry ], [ %4, %loop ]
  %gep = getelementptr i32, ptr %src, i64 %induc
  %2 = load ptr, ptr %gep
  %3 = load i32, ptr %2
  %4 = tail call i32 @llvm.smax.i32(i32 %3, i32 %max)
  %induc.next = add nuw nsw i64 %induc, 1
  %exitcond.not = icmp eq i64 %induc.next, %umax
  br i1 %exitcond.not, label %exit, label %loop

exit:
  %res = phi i32 [ %4, %loop ]
  ret i32 %res
}
declare i32 @llvm.smax.i32(i32, i32)
declare i32 @llvm.umax.i32(i32, i32)
;.
; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]]}