; RUN: llc -march=hexagon -enable-pipeliner < %s
; Test that causes an assert when the phi reuse code does not set
; PhiOp2 correctly for use in the next stage. This occurs when the
; number of stages is two or more.
; Function Attrs: nounwind
; Pipelined loop kernel: each b2 iteration loads one HVX vector, combines it
; with values carried from the previous two iterations (%v4/%v5 phis), and
; stores two vectors. The two-deep phi chain (%v5 <- %v4 <- %v10) is what
; forces the pipeliner to generate phis spanning two stages, exercising the
; PhiOp2 reuse path. The undef operands are intentional: only the loop
; structure matters for the regression, not the computed values.
define void @f0(ptr noalias nocapture %a0) #0 {
b0:
  br i1 undef, label %b1, label %b3

b1:                                               ; preds = %b0
  br label %b2

b2:                                               ; preds = %b2, %b1
  %v1 = phi i32 [ 0, %b1 ], [ %v15, %b2 ]
  %v2 = phi ptr [ %a0, %b1 ], [ %v14, %b2 ]
  %v3 = phi ptr [ undef, %b1 ], [ %v6, %b2 ]
  %v4 = phi <16 x i32> [ undef, %b1 ], [ %v10, %b2 ]
  %v5 = phi <16 x i32> [ undef, %b1 ], [ %v4, %b2 ]
  %v6 = getelementptr inbounds <16 x i32>, ptr %v3, i32 1
  %v7 = load <16 x i32>, ptr %v3, align 64
  %v8 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> undef, <16 x i32> %v7)
  %v9 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v4, <16 x i32> %v5, i32 62)
  %v10 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> %v8, <16 x i32> undef)
  %v11 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v10, <16 x i32> %v4, i32 2)
  %v12 = tail call <16 x i32> @llvm.hexagon.V6.vabsdiffh(<16 x i32> %v9, <16 x i32> %v11)
  %v13 = getelementptr inbounds <16 x i32>, ptr %v2, i32 1
  store <16 x i32> %v12, ptr %v2, align 64
  %v14 = getelementptr inbounds <16 x i32>, ptr %v2, i32 2
  store <16 x i32> zeroinitializer, ptr %v13, align 64
  %v15 = add nsw i32 %v1, 1
  %v16 = icmp slt i32 %v15, undef
  br i1 %v16, label %b2, label %b3

b3:                                               ; preds = %b2, %b0
  ret void
}
; Function Attrs: nounwind readnone
declare <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32>, <16 x i32>) #1
; Function Attrs: nounwind readnone
declare <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32>, <16 x i32>, i32) #1
; Function Attrs: nounwind readnone
declare <16 x i32> @llvm.hexagon.V6.vabsdiffh(<16 x i32>, <16 x i32>) #1
attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }
attributes #1 = { nounwind readnone }