1 ; RUN: llc -march=hexagon < %s -pipeliner-experimental-cg=true | FileCheck %s
3 ; Test that the pipeliner generates correct code when attempting to reuse
4 ; an existing phi. This test case contains a phi that references another
5 ; phi (the value from the previous iteration), and when there is a use that
6 ; is scheduled in a later iteration. When this occurs, the pipeliner was
7 ; using a value from the wrong iteration.
9 ; CHECK: loop0(.LBB0_[[LOOP:.]],
10 ; CHECK: .LBB0_[[LOOP]]:
11 ; CHECK: vlalign([[VREG1:v([0-9]+)]],[[VREG2:v([0-9]+)]],#2)
12 ; CHECK: [[VREG2]]:{{[0-9]+}} = vcombine([[VREG1]],v{{[0-9]+}})
13 ; CHECK: }{{[ \t]*}}:endloop0
15 ; Function Attrs: nounwind
16 define void @f0(i32 %a0, i32 %a1, i8* %a2, <16 x i32>* %a3) #0 {
; b0 (entry): precompute base pointers for five input rows of %a2 (at byte
; offsets %v1, %v2, %a0, %v0 = 2*%a0, and 0), each then advanced by 64 bytes
; (one <16 x i32> HVX vector; the target uses +hvx-length64b per attrs #0).
; NOTE(review): the definitions of %v1/%v2 and this block's terminator are on
; lines elided from this excerpt — confirm against the full file.
18 %v0 = shl nsw i32 %a0, 1
21 %v3 = getelementptr inbounds i8, i8* %a2, i32 %v1
22 %v4 = getelementptr inbounds i8, i8* %a2, i32 %v2
23 %v5 = getelementptr inbounds i8, i8* %a2, i32 %a0
24 %v6 = getelementptr inbounds i8, i8* %a2, i32 %v0
25 %v7 = getelementptr inbounds i8, i8* %v6, i32 64
26 %v8 = bitcast i8* %v7 to <16 x i32>*
27 %v9 = getelementptr inbounds i8, i8* %v5, i32 64
28 %v10 = bitcast i8* %v9 to <16 x i32>*
29 %v11 = getelementptr inbounds i8, i8* %a2, i32 64
30 %v12 = bitcast i8* %v11 to <16 x i32>*
31 %v13 = getelementptr inbounds i8, i8* %v4, i32 64
32 %v14 = bitcast i8* %v13 to <16 x i32>*
33 %v15 = getelementptr inbounds i8, i8* %v3, i32 64
34 %v16 = bitcast i8* %v15 to <16 x i32>*
; b1: the pipelined loop kernel. All six pointer phis advance by one vector
; per iteration. %v23 is last iteration's accumulator %v39, and %v24 is last
; iteration's %v23 (i.e. %v39 from two iterations back) — the phi that
; references another phi, which is the pattern this test exercises.
37 b1: ; preds = %b1, %b0
38 %v17 = phi <16 x i32>* [ %v59, %b1 ], [ %a3, %b0 ]
39 %v18 = phi <16 x i32>* [ %v34, %b1 ], [ %v8, %b0 ]
40 %v19 = phi <16 x i32>* [ %v32, %b1 ], [ %v10, %b0 ]
41 %v20 = phi <16 x i32>* [ %v30, %b1 ], [ %v12, %b0 ]
42 %v21 = phi <16 x i32>* [ %v28, %b1 ], [ %v14, %b0 ]
43 %v22 = phi <16 x i32>* [ %v26, %b1 ], [ %v16, %b0 ]
44 %v23 = phi <32 x i32> [ %v39, %b1 ], [ undef, %b0 ]
45 %v24 = phi <32 x i32> [ %v23, %b1 ], [ undef, %b0 ]
46 %v25 = phi i32 [ %v60, %b1 ], [ %a1, %b0 ]
; Load one vector from each of the five input rows and bump each pointer.
47 %v26 = getelementptr inbounds <16 x i32>, <16 x i32>* %v22, i32 1
48 %v27 = load <16 x i32>, <16 x i32>* %v22, align 64
49 %v28 = getelementptr inbounds <16 x i32>, <16 x i32>* %v21, i32 1
50 %v29 = load <16 x i32>, <16 x i32>* %v21, align 64
51 %v30 = getelementptr inbounds <16 x i32>, <16 x i32>* %v20, i32 1
52 %v31 = load <16 x i32>, <16 x i32>* %v20, align 64
53 %v32 = getelementptr inbounds <16 x i32>, <16 x i32>* %v19, i32 1
54 %v33 = load <16 x i32>, <16 x i32>* %v19, align 64
55 %v34 = getelementptr inbounds <16 x i32>, <16 x i32>* %v18, i32 1
56 %v35 = load <16 x i32>, <16 x i32>* %v18, align 64
; Accumulate this iteration's vertical sum %v39 from the five row vectors.
57 %v36 = tail call <32 x i32> @llvm.hexagon.V6.vaddubh(<16 x i32> %v27, <16 x i32> %v35) #2
58 %v37 = tail call <32 x i32> @llvm.hexagon.V6.vmpybus.acc(<32 x i32> %v36, <16 x i32> %v31, i32 101058054) #2
59 %v38 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v33, <16 x i32> %v29) #2
60 %v39 = tail call <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32> %v37, <32 x i32> %v38, i32 67372036) #2
; Horizontal pass: align the one- and two-iteration-old accumulators
; (%v23/%v24) against each other and against the new %v39. The vlalignbi
; on phi-carried values followed by uses of %v39 (scheduled into a later
; pipeline stage) is what provoked the wrong-iteration phi reuse; the CHECK
; lines above pin vlalign's output register feeding the vcombine.
61 %v40 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v23) #2
62 %v41 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v24) #2
63 %v42 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32> %v40, <16 x i32> %v41, i32 2) #2
64 %v43 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v23) #2
65 %v44 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v24) #2
66 %v45 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32> %v43, <16 x i32> %v44, i32 2) #2
67 %v46 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v39) #2
68 %v47 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v46, <16 x i32> %v40, i32 2) #2
69 %v48 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v39) #2
70 %v49 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v48, <16 x i32> %v43, i32 2) #2
71 %v50 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> %v45, <16 x i32> %v43) #2
72 %v51 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> %v40, <16 x i32> %v47) #2
73 %v52 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> %v42, <16 x i32> %v47) #2
74 %v53 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb.acc(<16 x i32> %v52, <16 x i32> %v40, i32 101058054) #2
75 %v54 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb.acc(<16 x i32> %v53, <16 x i32> %v50, i32 67372036) #2
76 %v55 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> %v45, <16 x i32> %v49) #2
77 %v56 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb.acc(<16 x i32> %v55, <16 x i32> %v43, i32 101058054) #2
78 %v57 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb.acc(<16 x i32> %v56, <16 x i32> %v51, i32 67372036) #2
; Pack the two halves and store one output vector; advance the output ptr.
79 %v58 = tail call <16 x i32> @llvm.hexagon.V6.vshuffob(<16 x i32> %v57, <16 x i32> %v54) #2
80 %v59 = getelementptr inbounds <16 x i32>, <16 x i32>* %v17, i32 1
81 store <16 x i32> %v58, <16 x i32>* %v17, align 64
; Trip count: %v25 starts at %a1 and steps by -64; loop while %v25 > 128.
82 %v60 = add nsw i32 %v25, -64
83 %v61 = icmp sgt i32 %v25, 128
84 br i1 %v61, label %b1, label %b2
; NOTE(review): block %b2 and the function's closing brace are on lines
; elided from this excerpt.
; Hexagon HVX (V6, 64-byte vector mode) intrinsic declarations used by @f0.
90 ; Function Attrs: nounwind readnone
91 declare <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32>, <16 x i32>) #1
93 declare <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32>, <32 x i32>, i32) #1
94 declare <16 x i32> @llvm.hexagon.V6.hi(<32 x i32>) #1
95 declare <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32>, <16 x i32>, i32) #1
96 declare <16 x i32> @llvm.hexagon.V6.lo(<32 x i32>) #1
97 declare <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32>, <16 x i32>, i32) #1
98 declare <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32>, <16 x i32>) #1
99 declare <32 x i32> @llvm.hexagon.V6.vaddubh(<16 x i32>, <16 x i32>) #1
100 declare <32 x i32> @llvm.hexagon.V6.vmpybus.acc(<32 x i32>, <16 x i32>, i32) #1
101 declare <16 x i32> @llvm.hexagon.V6.vmpyiwb.acc(<16 x i32>, <16 x i32>, i32) #1
102 declare <16 x i32> @llvm.hexagon.V6.vshuffob(<16 x i32>, <16 x i32>) #1
; Target: Hexagon V65 with 64-byte HVX vectors (matches the <16 x i32> type).
104 attributes #0 = { nounwind "target-cpu"="hexagonv65" "target-features"="+hvxv65,+hvx-length64b" }
105 attributes #1 = { nounwind readnone }
106 attributes #2 = { nounwind }