1 ; RUN: llc -march=hexagon -disable-hsdr < %s | FileCheck %s
3 ; Check that the vandqrt.acc instruction and its predecessor are scheduled in consecutive packets.
4 ; CHECK: or(q{{[0-3]+}},q{{[0-3]+}})
7 ; CHECK: |= vand(q{{[0-3]+}},r{{[0-9]+}})
; Target: Hexagon DSP, Linux, no explicit vendor.
10 target triple = "hexagon-unknown-linux-gnu"
12 ; Function Attrs: nounwind
; f0: 64-byte-mode HVX kernel whose inner loop builds a compare predicate
; with pred.or and folds it into a vector accumulator via vandqrt.acc --
; the instruction pairing the CHECK lines at the top of this file verify.
;
; NOTE(review): this excerpt is missing several original lines (the gaps in
; the leaked line numbering): the definitions of %v0, %v2, %v4, %v7, %v8,
; %v17 and %v44, the b0/b1/b3/b5/b7 block labels, and the closing
; "ret void"/"}" are all absent, so the text below is not self-contained,
; parseable IR as shown. Comments below describe only what is visible.
13 define void @f0(i8* noalias nocapture readonly %a0, i32 %a1, i32 %a2, i32 %a3, i32* noalias nocapture %a4, i32 %a5) #0 {
; --- entry block (its b0: label falls in a numbering gap) ---
; Builds splat constants and a byte mask from the low 9 bits of %v5;
; %v19 is later used in b8 to mask the final vector at %a4.
16 %v1 = bitcast i32* %a4 to <16 x i32>*
18 %v3 = add i32 %v2, %a1
20 %v5 = add i32 %v3, %v4
21 %v6 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 -1)
24 %v9 = and i32 %v5, 511
25 %v10 = icmp eq i32 %v9, 0
26 %v11 = shl i32 -1, %v8
27 %v12 = select i1 %v10, i32 0, i32 %v11
28 %v13 = tail call i32 @llvm.hexagon.S2.vsplatrb(i32 %v12)
29 %v14 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 %v13)
30 %v15 = tail call <16 x i32> @llvm.hexagon.V6.vnot(<16 x i32> %v14)
; %v19 = vandqrt.acc(%v15, %v16, %v18): one of the two accumulating
; vand-with-predicate operations this test was written around.
31 %v16 = tail call <512 x i1> @llvm.hexagon.V6.pred.scalar2(i32 %v5)
33 %v18 = tail call i32 @llvm.hexagon.S2.vsplatrb(i32 %v17)
34 %v19 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt.acc(<16 x i32> %v15, <512 x i1> %v16, i32 %v18)
35 %v20 = tail call i32 @llvm.hexagon.S2.vsplatrb(i32 %a3)
36 %v21 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 %v20)
; Skip the loop nest entirely when the element count %v5 is not positive.
37 %v22 = icmp sgt i32 %v5, 0
38 br i1 %v22, label %b1, label %b8
; --- b1: outer-loop preheader (its label line is missing here) ---
; Forms vector pointers into %a0 at offsets %a5, %a5 +/- 64 and
; %a5 +/- %v0 (%v0 defined on a missing line), plus the outer-loop
; bookkeeping values %v46..%v56 (end pointer into %a4).
41 %v23 = getelementptr inbounds i8, i8* %a0, i32 %a5
42 %v24 = bitcast i8* %v23 to <16 x i32>*
43 %v25 = load <16 x i32>, <16 x i32>* %v24, align 64, !tbaa !0
44 %v26 = add i32 %a5, 64
45 %v27 = getelementptr inbounds i8, i8* %a0, i32 %v26
46 %v28 = bitcast i8* %v27 to <16 x i32>*
47 %v29 = add i32 %a5, -64
48 %v30 = getelementptr inbounds i8, i8* %a0, i32 %v29
49 %v31 = bitcast i8* %v30 to <16 x i32>*
50 %v32 = load <16 x i32>, <16 x i32>* %v31, align 64, !tbaa !0
51 %v33 = tail call <512 x i1> @llvm.hexagon.V6.pred.scalar2(i32 %a5)
; 16843009 = 0x01010101 (same byte splatted into each word lane).
52 %v34 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<512 x i1> %v33, i32 16843009)
53 %v35 = tail call <16 x i32> @llvm.hexagon.V6.vnot(<16 x i32> %v34)
54 %v36 = add i32 %v0, %a5
55 %v37 = getelementptr inbounds i8, i8* %a0, i32 %v36
56 %v38 = bitcast i8* %v37 to <16 x i32>*
57 %v39 = sub i32 %a5, %v0
58 %v40 = getelementptr inbounds i8, i8* %a0, i32 %v39
59 %v41 = bitcast i8* %v40 to <16 x i32>*
60 %v42 = tail call <16 x i32> @llvm.hexagon.V6.vd0()
61 %v43 = add i32 %v4, %a1
63 %v45 = sub i32 %v43, %v44
64 %v46 = xor i32 %v45, -1
65 %v47 = icmp sgt i32 %v46, -513
66 %v48 = select i1 %v47, i32 %v46, i32 -513
67 %v49 = add i32 %v48, %a1
68 %v50 = add i32 %v49, %v4
69 %v51 = add i32 %v50, 512
70 %v52 = sub i32 %v51, %v44
; %v53 = number of full 512-element chunks; %v56 = pointer one vector
; past the last full output vector in %a4 (used by b7/b8 below).
71 %v53 = lshr i32 %v52, 9
72 %v54 = mul nuw nsw i32 %v53, 16
73 %v55 = add nuw nsw i32 %v54, 16
74 %v56 = getelementptr i32, i32* %a4, i32 %v55
; --- b2: outer loop header, one iteration per 512-element chunk ---
; %v69 = min(%v58, %v63): elements to process this chunk.
77 b2: ; preds = %b6, %b1
78 %v57 = phi i32 [ %v46, %b1 ], [ %v125, %b6 ]
79 %v58 = phi i32 [ %v5, %b1 ], [ %v123, %b6 ]
80 %v59 = phi <16 x i32>* [ %v1, %b1 ], [ %v122, %b6 ]
81 %v60 = phi <16 x i32>* [ %v38, %b1 ], [ %v114, %b6 ]
82 %v61 = phi <16 x i32>* [ %v41, %b1 ], [ %v115, %b6 ]
83 %v62 = phi <16 x i32>* [ %v28, %b1 ], [ %v116, %b6 ]
84 %v63 = phi i32 [ 512, %b1 ], [ %v69, %b6 ]
; -2139062144 = 0x80808080: initial rotating bit-mask for vandqrt.acc.
85 %v64 = phi i32 [ -2139062144, %b1 ], [ %v117, %b6 ]
86 %v65 = phi <16 x i32> [ %v32, %b1 ], [ %v118, %b6 ]
87 %v66 = phi <16 x i32> [ %v25, %b1 ], [ %v119, %b6 ]
88 %v67 = phi <16 x i32> [ %v35, %b1 ], [ %v6, %b6 ]
89 %v68 = icmp slt i32 %v58, %v63
90 %v69 = select i1 %v68, i32 %v58, i32 %v63
91 %v70 = icmp sgt i32 %v69, 0
92 br i1 %v70, label %b3, label %b6
; --- b3: inner-loop preheader (its label line is missing here) ---
; %v78 = inner trip count minus one, clamped; used by b5 to advance the
; pointers past the vectors consumed by the inner loop.
95 %v71 = xor i32 %v63, -1
96 %v72 = icmp sgt i32 %v57, %v71
97 %v73 = select i1 %v72, i32 %v57, i32 %v71
98 %v74 = icmp sgt i32 %v73, -65
99 %v75 = add i32 %v73, 63
100 %v76 = select i1 %v74, i32 %v75, i32 -2
101 %v77 = sub i32 %v76, %v73
102 %v78 = lshr i32 %v77, 6
; --- b4: inner loop, one 64-byte vector per iteration ---
; Loads the vectors from the two row pointers (%v88, %v90), forms the
; byte-shifted neighbors of the current vector (%v91, %v94), compares
; unsigned min/max of those against %v85 -/+ %v21, ORs the two compare
; predicates (%v105), and accumulates the result into %v86 with
; vandqrt.acc under the rotating mask %v83 -- the pred.or/vandqrt.acc
; pairing the CHECK lines target.
105 b4: ; preds = %b4, %b3
106 %v79 = phi i32 [ %v69, %b3 ], [ %v108, %b4 ]
107 %v80 = phi <16 x i32>* [ %v60, %b3 ], [ %v89, %b4 ]
108 %v81 = phi <16 x i32>* [ %v61, %b3 ], [ %v87, %b4 ]
109 %v82 = phi <16 x i32>* [ %v62, %b3 ], [ %v92, %b4 ]
110 %v83 = phi i32 [ %v64, %b3 ], [ %v106, %b4 ]
111 %v84 = phi <16 x i32> [ %v65, %b3 ], [ %v85, %b4 ]
112 %v85 = phi <16 x i32> [ %v66, %b3 ], [ %v93, %b4 ]
113 %v86 = phi <16 x i32> [ %v42, %b3 ], [ %v107, %b4 ]
114 %v87 = getelementptr inbounds <16 x i32>, <16 x i32>* %v81, i32 1
115 %v88 = load <16 x i32>, <16 x i32>* %v81, align 64, !tbaa !0
116 %v89 = getelementptr inbounds <16 x i32>, <16 x i32>* %v80, i32 1
117 %v90 = load <16 x i32>, <16 x i32>* %v80, align 64, !tbaa !0
118 %v91 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32> %v85, <16 x i32> %v84, i32 3)
119 %v92 = getelementptr inbounds <16 x i32>, <16 x i32>* %v82, i32 1
120 %v93 = load <16 x i32>, <16 x i32>* %v82, align 64, !tbaa !0
121 %v94 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v93, <16 x i32> %v85, i32 3)
122 %v95 = tail call <16 x i32> @llvm.hexagon.V6.vsububsat(<16 x i32> %v85, <16 x i32> %v21)
123 %v96 = tail call <16 x i32> @llvm.hexagon.V6.vaddubsat(<16 x i32> %v85, <16 x i32> %v21)
124 %v97 = tail call <16 x i32> @llvm.hexagon.V6.vmaxub(<16 x i32> %v88, <16 x i32> %v90)
125 %v98 = tail call <16 x i32> @llvm.hexagon.V6.vminub(<16 x i32> %v88, <16 x i32> %v90)
126 %v99 = tail call <16 x i32> @llvm.hexagon.V6.vmaxub(<16 x i32> %v94, <16 x i32> %v91)
127 %v100 = tail call <16 x i32> @llvm.hexagon.V6.vminub(<16 x i32> %v94, <16 x i32> %v91)
128 %v101 = tail call <16 x i32> @llvm.hexagon.V6.vminub(<16 x i32> %v97, <16 x i32> %v99)
129 %v102 = tail call <16 x i32> @llvm.hexagon.V6.vmaxub(<16 x i32> %v98, <16 x i32> %v100)
130 %v103 = tail call <512 x i1> @llvm.hexagon.V6.vgtub(<16 x i32> %v101, <16 x i32> %v96)
131 %v104 = tail call <512 x i1> @llvm.hexagon.V6.vgtub(<16 x i32> %v95, <16 x i32> %v102)
132 %v105 = tail call <512 x i1> @llvm.hexagon.V6.pred.or(<512 x i1> %v103, <512 x i1> %v104)
133 %v106 = tail call i32 @llvm.hexagon.S6.rol.i.r(i32 %v83, i32 1)
134 %v107 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt.acc(<16 x i32> %v86, <512 x i1> %v105, i32 %v106)
135 %v108 = add nsw i32 %v79, -64
136 %v109 = icmp sgt i32 %v79, 64
137 br i1 %v109, label %b4, label %b5
; --- b5: inner-loop exit (its label line is missing here) ---
; Advance the three row pointers by the %v110 vectors just consumed.
140 %v110 = add nuw nsw i32 %v78, 1
141 %v111 = getelementptr <16 x i32>, <16 x i32>* %v62, i32 %v110
142 %v112 = getelementptr <16 x i32>, <16 x i32>* %v60, i32 %v110
143 %v113 = getelementptr <16 x i32>, <16 x i32>* %v61, i32 %v110
; --- b6: outer-loop latch ---
; Mask the accumulated result with %v67 and store one output vector,
; then either loop back to b2 or fall through to b7.
146 b6: ; preds = %b5, %b2
147 %v114 = phi <16 x i32>* [ %v112, %b5 ], [ %v60, %b2 ]
148 %v115 = phi <16 x i32>* [ %v113, %b5 ], [ %v61, %b2 ]
149 %v116 = phi <16 x i32>* [ %v111, %b5 ], [ %v62, %b2 ]
150 %v117 = phi i32 [ %v106, %b5 ], [ %v64, %b2 ]
151 %v118 = phi <16 x i32> [ %v85, %b5 ], [ %v65, %b2 ]
152 %v119 = phi <16 x i32> [ %v93, %b5 ], [ %v66, %b2 ]
153 %v120 = phi <16 x i32> [ %v107, %b5 ], [ %v42, %b2 ]
154 %v121 = tail call <16 x i32> @llvm.hexagon.V6.vand(<16 x i32> %v120, <16 x i32> %v67)
155 %v122 = getelementptr inbounds <16 x i32>, <16 x i32>* %v59, i32 1
156 store <16 x i32> %v121, <16 x i32>* %v59, align 64, !tbaa !0
157 %v123 = add nsw i32 %v58, -512
158 %v124 = icmp sgt i32 %v58, 512
159 %v125 = add i32 %v57, 512
160 br i1 %v124, label %b2, label %b7
; --- b7: loop exit (its label line is missing here) ---
163 %v126 = bitcast i32* %v56 to <16 x i32>*
; --- b8: epilogue ---
; Re-read the last vector written (or the first, when the loop was
; skipped) and AND it with the tail mask %v19 computed in the entry block.
; The excerpt ends here; the original "ret void" and "}" are missing.
166 b8: ; preds = %b7, %b0
167 %v127 = phi <16 x i32>* [ %v126, %b7 ], [ %v1, %b0 ]
168 %v128 = getelementptr inbounds <16 x i32>, <16 x i32>* %v127, i32 -1
169 %v129 = load <16 x i32>, <16 x i32>* %v128, align 64, !tbaa !0
170 %v130 = tail call <16 x i32> @llvm.hexagon.V6.vand(<16 x i32> %v129, <16 x i32> %v19)
171 store <16 x i32> %v130, <16 x i32>* %v128, align 64, !tbaa !0
; Declarations of the Hexagon HVX intrinsics used by @f0 above.
; "V6" intrinsics operate on 64-byte HVX vectors (<16 x i32>) and
; HVX predicate registers (<512 x i1>); "S2"/"S6" are scalar-core ops.
; Semantics per the Qualcomm Hexagon HVX intrinsics reference.
; Broadcast a 32-bit scalar into every word lane.
175 ; Function Attrs: nounwind readnone
176 declare <16 x i32> @llvm.hexagon.V6.lvsplatw(i32) #1
; Bitwise NOT of a vector.
178 ; Function Attrs: nounwind readnone
179 declare <16 x i32> @llvm.hexagon.V6.vnot(<16 x i32>) #1
; vandqrt: expand a predicate to a byte mask ANDed with a splatted scalar.
181 ; Function Attrs: nounwind readnone
182 declare <16 x i32> @llvm.hexagon.V6.vandqrt(<512 x i1>, i32) #1
; Build a predicate from a scalar lane count.
184 ; Function Attrs: nounwind readnone
185 declare <512 x i1> @llvm.hexagon.V6.pred.scalar2(i32) #1
; Splat the low byte of a scalar across all four bytes of a word.
187 ; Function Attrs: nounwind readnone
188 declare i32 @llvm.hexagon.S2.vsplatrb(i32) #1
; Accumulating form of vandqrt (the "|= vand(q,r)" the CHECK line matches).
190 ; Function Attrs: nounwind readnone
191 declare <16 x i32> @llvm.hexagon.V6.vandqrt.acc(<16 x i32>, <512 x i1>, i32) #1
; All-zero vector.
193 ; Function Attrs: nounwind readnone
194 declare <16 x i32> @llvm.hexagon.V6.vd0() #1
; Byte-granular align/lalign of a vector pair by an immediate amount.
196 ; Function Attrs: nounwind readnone
197 declare <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32>, <16 x i32>, i32) #1
199 ; Function Attrs: nounwind readnone
200 declare <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32>, <16 x i32>, i32) #1
; Saturating unsigned byte subtract / add.
202 ; Function Attrs: nounwind readnone
203 declare <16 x i32> @llvm.hexagon.V6.vsububsat(<16 x i32>, <16 x i32>) #1
205 ; Function Attrs: nounwind readnone
206 declare <16 x i32> @llvm.hexagon.V6.vaddubsat(<16 x i32>, <16 x i32>) #1
; Per-byte unsigned max / min.
208 ; Function Attrs: nounwind readnone
209 declare <16 x i32> @llvm.hexagon.V6.vmaxub(<16 x i32>, <16 x i32>) #1
211 ; Function Attrs: nounwind readnone
212 declare <16 x i32> @llvm.hexagon.V6.vminub(<16 x i32>, <16 x i32>) #1
; Per-byte unsigned greater-than, producing a predicate.
214 ; Function Attrs: nounwind readnone
215 declare <512 x i1> @llvm.hexagon.V6.vgtub(<16 x i32>, <16 x i32>) #1
; OR of two predicate registers (the "or(q,q)" the CHECK line matches).
217 ; Function Attrs: nounwind readnone
218 declare <512 x i1> @llvm.hexagon.V6.pred.or(<512 x i1>, <512 x i1>) #1
; Rotate-left of a scalar by an immediate.
220 ; Function Attrs: nounwind readnone
221 declare i32 @llvm.hexagon.S6.rol.i.r(i32, i32) #1
; Plain vector AND.
223 ; Function Attrs: nounwind readnone
224 declare <16 x i32> @llvm.hexagon.V6.vand(<16 x i32>, <16 x i32>) #1
; #0: @f0 is compiled for hexagonv60 with 64-byte HVX enabled -- the
; vector length the <16 x i32>/<512 x i1> types above depend on.
226 attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }
227 attributes #1 = { nounwind readnone }
; Standard C/C++ TBAA chain: all loads/stores alias as "omnipotent char".
229 !0 = !{!1, !1, i64 0}
230 !1 = !{!"omnipotent char", !2, i64 0}
231 !2 = !{!"Simple C/C++ TBAA"}