1 ; RUN: llc -march=hexagon -O2 -enable-pipeliner -disable-block-placement=0 < %s | FileCheck %s
3 ; For the Phis generated in the epilog, test that we generate the correct
4 ; names for the values coming from the prolog stages. The test below
5 ; checks that the value loaded in the first prolog block gets propagated
6 ; through the first epilog to the use after the loop.
8 ; CHECK: if ({{.*}}) jump
9 ; CHECK: [[VREG:v([0-9]+)]]{{.*}} = {{.*}}vmem(r{{[0-9]+}}++#1)
10 ; CHECK: if ({{.*}}) {{jump|jump:nt|jump:t}} [[EPLOG1:(.*)]]
11 ; CHECK: if ({{.*}}) {{jump|jump:nt|jump:t}} [[EPLOG:(.*)]]
13 ; CHECK: [[VREG1:v([0-9]+)]] = [[VREG]]
15 ; CHECK: [[VREG2:v[0-9]+]] = [[VREG1]]
16 ; CHECK: = vlalign([[VREG1]],[[VREG2]],#1)
18 ; Function Attrs: nounwind
19 define void @f0(ptr noalias nocapture readonly %a0, i32 %a1, i32 %a2, ptr noalias nocapture readonly %a3, i32 %a4, ptr noalias nocapture %a5, i32 %a6) #0 {
; Entry-block interior (the b0: label and %v0 are elided from this chunk).
; Row pointers into the source: %v4 = base + 1 row, %v7 = base + 2 rows;
; %v1 offsets by %v0, which is defined above this chunk — presumably the
; row above the base (-%a1); confirm against the full file.
22 %v1 = getelementptr inbounds i8, ptr %a0, i32 %v0
23 %v4 = getelementptr inbounds i8, ptr %a0, i32 %a1
24 %v6 = mul nsw i32 %a1, 2
25 %v7 = getelementptr inbounds i8, ptr %a0, i32 %v6
26 %v10 = getelementptr inbounds i8, ptr %a5, i32 %a6
; Zero vector plus the first 64-byte load from each of the four rows;
; these seed the b2/b4 phis as the "previous iteration" values that
; vlalign needs on the first pass.
27 %v12 = tail call <16 x i32> @llvm.hexagon.V6.vd0()
28 %v13 = load <16 x i32>, ptr %v1, align 64, !tbaa !0
29 %v14 = load <16 x i32>, ptr %a0, align 64, !tbaa !0
30 %v15 = load <16 x i32>, ptr %v4, align 64, !tbaa !0
31 %v16 = load <16 x i32>, ptr %v7, align 64, !tbaa !0
; Pack pairs of i8 filter taps from %a3 into 32-bit words with the byte
; layout hi:lo:hi:lo, as consumed by vdmpybus/vmpabus below.
; %v28 = a3[1]:a3[0]:a3[1]:a3[0]
32 %v17 = load i8, ptr %a3, align 1, !tbaa !0
33 %v18 = getelementptr inbounds i8, ptr %a3, i32 1
34 %v19 = load i8, ptr %v18, align 1, !tbaa !0
35 %v20 = zext i8 %v19 to i64
36 %v21 = shl nuw nsw i64 %v20, 24
37 %v22 = zext i8 %v17 to i64
38 %v23 = shl nuw nsw i64 %v22, 16
39 %v24 = shl nuw nsw i64 %v20, 8
40 %v25 = or i64 %v22, %v23
41 %v26 = or i64 %v21, %v25
42 %v27 = or i64 %v24, %v26
43 %v28 = trunc i64 %v27 to i32
; %v41 = a3[4]:a3[3]:a3[4]:a3[3]
44 %v29 = getelementptr inbounds i8, ptr %a3, i32 3
45 %v30 = load i8, ptr %v29, align 1, !tbaa !0
46 %v31 = getelementptr inbounds i8, ptr %a3, i32 4
47 %v32 = load i8, ptr %v31, align 1, !tbaa !0
48 %v33 = zext i8 %v32 to i64
49 %v34 = shl nuw nsw i64 %v33, 24
50 %v35 = zext i8 %v30 to i64
51 %v36 = shl nuw nsw i64 %v35, 16
52 %v37 = shl nuw nsw i64 %v33, 8
53 %v38 = or i64 %v35, %v36
54 %v39 = or i64 %v34, %v38
55 %v40 = or i64 %v37, %v39
56 %v41 = trunc i64 %v40 to i32
; %v54 = a3[7]:a3[6]:a3[7]:a3[6]
57 %v42 = getelementptr inbounds i8, ptr %a3, i32 6
58 %v43 = load i8, ptr %v42, align 1, !tbaa !0
59 %v44 = getelementptr inbounds i8, ptr %a3, i32 7
60 %v45 = load i8, ptr %v44, align 1, !tbaa !0
61 %v46 = zext i8 %v45 to i64
62 %v47 = shl nuw nsw i64 %v46, 24
63 %v48 = zext i8 %v43 to i64
64 %v49 = shl nuw nsw i64 %v48, 16
65 %v50 = shl nuw nsw i64 %v46, 8
66 %v51 = or i64 %v48, %v49
67 %v52 = or i64 %v47, %v51
68 %v53 = or i64 %v50, %v52
69 %v54 = trunc i64 %v53 to i32
; %v67 = a3[2]:a3[5]:a3[2]:a3[5]
70 %v55 = getelementptr inbounds i8, ptr %a3, i32 5
71 %v56 = load i8, ptr %v55, align 1, !tbaa !0
72 %v57 = getelementptr inbounds i8, ptr %a3, i32 2
73 %v58 = load i8, ptr %v57, align 1, !tbaa !0
74 %v59 = zext i8 %v58 to i64
75 %v60 = shl nuw nsw i64 %v59, 24
76 %v61 = zext i8 %v56 to i64
77 %v62 = shl nuw nsw i64 %v61, 16
78 %v63 = shl nuw nsw i64 %v59, 8
79 %v64 = or i64 %v61, %v62
80 %v65 = or i64 %v60, %v64
81 %v66 = or i64 %v63, %v65
82 %v67 = trunc i64 %v66 to i32
; %v77 = a3[8] replicated into all four byte lanes.
83 %v68 = getelementptr inbounds i8, ptr %a3, i32 8
84 %v69 = load i8, ptr %v68, align 1, !tbaa !0
85 %v70 = zext i8 %v69 to i64
86 %v71 = shl nuw nsw i64 %v70, 24
87 %v72 = shl nuw nsw i64 %v70, 16
88 %v73 = shl nuw nsw i64 %v70, 8
89 %v74 = or i64 %v70, %v72
90 %v75 = or i64 %v71, %v74
91 %v76 = or i64 %v73, %v75
92 %v77 = trunc i64 %v76 to i32
; Take the pipelined loop only when more than 64 bytes remain; otherwise
; fall straight through to the single-shot tail in b4.
93 %v78 = icmp sgt i32 %a2, 64
94 br i1 %v78, label %b1, label %b4
; Loop-preheader interior (the b1: label is elided from this chunk).
; Advance each row pointer by 64 bytes: the loop body loads the *next*
; vector of each row while the phis still carry the current one.
; %v88 = %a0 + (64 - %a1), consistent with %v0 being -%a1 (row above) —
; TODO confirm against the elided entry-block lines.
97 %v79 = add i32 %v6, 64
98 %v80 = getelementptr inbounds i8, ptr %a0, i32 %v79
99 %v82 = add i32 %a1, 64
100 %v83 = getelementptr inbounds i8, ptr %a0, i32 %v82
101 %v85 = getelementptr inbounds i8, ptr %a0, i32 64
102 %v86 = bitcast ptr %v85 to ptr
103 %v87 = sub i32 64, %a1
104 %v88 = getelementptr inbounds i8, ptr %a0, i32 %v87
; Trip-count math: %v91 = number of full 64-byte iterations past the
; first; %v95/%v97 precompute the final output addresses that the b4
; epilog block receives through its phis.
105 %v90 = add i32 %a2, -65
106 %v91 = lshr i32 %v90, 6
107 %v92 = mul i32 %v91, 64
108 %v93 = add i32 %v92, %a6
109 %v94 = add i32 %v93, 64
110 %v95 = getelementptr i8, ptr %a5, i32 %v94
111 %v96 = add i32 %v92, 64
112 %v97 = getelementptr i8, ptr %a5, i32 %v96
; b2: main vector loop, 64 output bytes per iteration. This is the loop
; the software pipeliner transforms; the CHECK lines at the top verify
; the names of the prolog-stage values flowing into the epilog Phis.
; Each input row carries a pair of phis: the previous iteration's vector
; (%v99/%v101/%v103/%v105) and the current one (%v100/%v102/%v104/%v106),
; so vlalign/valign can build the one-byte-shifted neighbor vectors.
115 b2: ; preds = %b2, %b1
116 %v98 = phi i32 [ %a2, %b1 ], [ %v153, %b2 ]
117 %v99 = phi <16 x i32> [ %v12, %b1 ], [ %v100, %b2 ]
118 %v100 = phi <16 x i32> [ %v13, %b1 ], [ %v118, %b2 ]
119 %v101 = phi <16 x i32> [ %v12, %b1 ], [ %v102, %b2 ]
120 %v102 = phi <16 x i32> [ %v14, %b1 ], [ %v120, %b2 ]
121 %v103 = phi <16 x i32> [ %v12, %b1 ], [ %v104, %b2 ]
122 %v104 = phi <16 x i32> [ %v15, %b1 ], [ %v122, %b2 ]
123 %v105 = phi <16 x i32> [ %v12, %b1 ], [ %v106, %b2 ]
124 %v106 = phi <16 x i32> [ %v16, %b1 ], [ %v124, %b2 ]
125 %v107 = phi ptr [ %v88, %b1 ], [ %v117, %b2 ]
126 %v108 = phi ptr [ %v86, %b1 ], [ %v119, %b2 ]
127 %v109 = phi ptr [ %v83, %b1 ], [ %v121, %b2 ]
128 %v110 = phi ptr [ %v80, %b1 ], [ %v123, %b2 ]
129 %v111 = phi ptr [ %a5, %b1 ], [ %v148, %b2 ]
130 %v112 = phi ptr [ %v10, %b1 ], [ %v152, %b2 ]
; Shift each row left by one byte across the previous/current pair.
131 %v113 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32> %v100, <16 x i32> %v99, i32 1)
132 %v114 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32> %v102, <16 x i32> %v101, i32 1)
133 %v115 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32> %v104, <16 x i32> %v103, i32 1)
134 %v116 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32> %v106, <16 x i32> %v105, i32 1)
; Streaming loads of the next 64 bytes of each row (post-incremented
; pointers become next iteration's phi inputs).
135 %v117 = getelementptr inbounds <16 x i32>, ptr %v107, i32 1
136 %v118 = load <16 x i32>, ptr %v107, align 64, !tbaa !0
137 %v119 = getelementptr inbounds <16 x i32>, ptr %v108, i32 1
138 %v120 = load <16 x i32>, ptr %v108, align 64, !tbaa !0
139 %v121 = getelementptr inbounds <16 x i32>, ptr %v109, i32 1
140 %v122 = load <16 x i32>, ptr %v109, align 64, !tbaa !0
141 %v123 = getelementptr inbounds <16 x i32>, ptr %v110, i32 1
142 %v124 = load <16 x i32>, ptr %v110, align 64, !tbaa !0
; Shift each row right by one byte using the freshly loaded vector.
143 %v125 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v118, <16 x i32> %v100, i32 1)
144 %v126 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v120, <16 x i32> %v102, i32 1)
145 %v127 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v122, <16 x i32> %v104, i32 1)
146 %v128 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v124, <16 x i32> %v106, i32 1)
; Accumulate the filter using the packed coefficient words built in b0
; (%v28, %v41, %v54, %v67, %v77).
147 %v129 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v125, <16 x i32> %v113)
148 %v130 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v126, <16 x i32> %v114)
149 %v131 = tail call <32 x i32> @llvm.hexagon.V6.vdmpybus.dv(<32 x i32> %v129, i32 %v28)
150 %v132 = tail call <32 x i32> @llvm.hexagon.V6.vdmpybus.dv(<32 x i32> %v130, i32 %v28)
151 %v133 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v127, <16 x i32> %v115)
152 %v134 = tail call <32 x i32> @llvm.hexagon.V6.vdmpybus.dv.acc(<32 x i32> %v131, <32 x i32> %v130, i32 %v41)
153 %v135 = tail call <32 x i32> @llvm.hexagon.V6.vdmpybus.dv.acc(<32 x i32> %v132, <32 x i32> %v133, i32 %v41)
154 %v136 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v125, <16 x i32> %v126)
155 %v137 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v126, <16 x i32> %v127)
156 %v138 = tail call <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32> %v134, <32 x i32> %v136, i32 %v67)
157 %v139 = tail call <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32> %v135, <32 x i32> %v137, i32 %v67)
158 %v140 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v128, <16 x i32> %v116)
159 %v141 = tail call <32 x i32> @llvm.hexagon.V6.vdmpybus.dv.acc(<32 x i32> %v138, <32 x i32> %v133, i32 %v54)
160 %v142 = tail call <32 x i32> @llvm.hexagon.V6.vdmpybus.dv.acc(<32 x i32> %v139, <32 x i32> %v140, i32 %v54)
161 %v143 = tail call <32 x i32> @llvm.hexagon.V6.vmpybus.acc(<32 x i32> %v141, <16 x i32> %v127, i32 %v77)
162 %v144 = tail call <32 x i32> @llvm.hexagon.V6.vmpybus.acc(<32 x i32> %v142, <16 x i32> %v128, i32 %v77)
; Narrow each accumulator pair with saturation and store two 64-byte
; results per iteration (pointers post-incremented through the phis).
163 %v145 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v143)
164 %v146 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v143)
165 %v147 = tail call <16 x i32> @llvm.hexagon.V6.vasrhubsat(<16 x i32> %v145, <16 x i32> %v146, i32 %a4)
166 %v148 = getelementptr inbounds <16 x i32>, ptr %v111, i32 1
167 store <16 x i32> %v147, ptr %v111, align 64, !tbaa !0
168 %v149 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v144)
169 %v150 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v144)
170 %v151 = tail call <16 x i32> @llvm.hexagon.V6.vasrhubsat(<16 x i32> %v149, <16 x i32> %v150, i32 %a4)
171 %v152 = getelementptr inbounds <16 x i32>, ptr %v112, i32 1
172 store <16 x i32> %v151, ptr %v112, align 64, !tbaa !0
; 64 bytes consumed; iterate while more than 64 remain.
173 %v153 = add nsw i32 %v98, -64
174 %v154 = icmp sgt i32 %v153, 64
175 br i1 %v154, label %b2, label %b3
; b4: single tail iteration, reached either after the loop (through the
; b3 block, which is elided from this chunk) or directly from b0 when
; %a2 <= 64. The phis pick the loop's live-out vectors or the initial
; b0 loads — exactly the prolog/epilog value propagation the CHECK
; lines are testing. (This block's terminator is also elided here.)
180 b4: ; preds = %b3, %b0
181 %v157 = phi <16 x i32> [ %v100, %b3 ], [ %v12, %b0 ]
182 %v158 = phi <16 x i32> [ %v118, %b3 ], [ %v13, %b0 ]
183 %v159 = phi <16 x i32> [ %v102, %b3 ], [ %v12, %b0 ]
184 %v160 = phi <16 x i32> [ %v120, %b3 ], [ %v14, %b0 ]
185 %v161 = phi <16 x i32> [ %v104, %b3 ], [ %v12, %b0 ]
186 %v162 = phi <16 x i32> [ %v122, %b3 ], [ %v15, %b0 ]
187 %v163 = phi <16 x i32> [ %v106, %b3 ], [ %v12, %b0 ]
188 %v164 = phi <16 x i32> [ %v124, %b3 ], [ %v16, %b0 ]
189 %v165 = phi ptr [ %v97, %b3 ], [ %a5, %b0 ]
190 %v166 = phi ptr [ %v95, %b3 ], [ %v10, %b0 ]
; Same filter computation as b2, but with no further loads: valign here
; pairs each vector with itself instead of the next vector of the row.
191 %v167 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32> %v158, <16 x i32> %v157, i32 1)
192 %v168 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32> %v160, <16 x i32> %v159, i32 1)
193 %v169 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32> %v162, <16 x i32> %v161, i32 1)
194 %v170 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32> %v164, <16 x i32> %v163, i32 1)
195 %v171 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v158, <16 x i32> %v158, i32 1)
196 %v172 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v160, <16 x i32> %v160, i32 1)
197 %v173 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v162, <16 x i32> %v162, i32 1)
198 %v174 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v164, <16 x i32> %v164, i32 1)
199 %v175 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v171, <16 x i32> %v167)
200 %v176 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v172, <16 x i32> %v168)
201 %v177 = tail call <32 x i32> @llvm.hexagon.V6.vdmpybus.dv(<32 x i32> %v175, i32 %v28)
202 %v178 = tail call <32 x i32> @llvm.hexagon.V6.vdmpybus.dv(<32 x i32> %v176, i32 %v28)
203 %v179 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v173, <16 x i32> %v169)
204 %v180 = tail call <32 x i32> @llvm.hexagon.V6.vdmpybus.dv.acc(<32 x i32> %v177, <32 x i32> %v176, i32 %v41)
205 %v181 = tail call <32 x i32> @llvm.hexagon.V6.vdmpybus.dv.acc(<32 x i32> %v178, <32 x i32> %v179, i32 %v41)
206 %v182 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v171, <16 x i32> %v172)
207 %v183 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v172, <16 x i32> %v173)
208 %v184 = tail call <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32> %v180, <32 x i32> %v182, i32 %v67)
209 %v185 = tail call <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32> %v181, <32 x i32> %v183, i32 %v67)
210 %v186 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v174, <16 x i32> %v170)
211 %v187 = tail call <32 x i32> @llvm.hexagon.V6.vdmpybus.dv.acc(<32 x i32> %v184, <32 x i32> %v179, i32 %v54)
212 %v188 = tail call <32 x i32> @llvm.hexagon.V6.vdmpybus.dv.acc(<32 x i32> %v185, <32 x i32> %v186, i32 %v54)
213 %v189 = tail call <32 x i32> @llvm.hexagon.V6.vmpybus.acc(<32 x i32> %v187, <16 x i32> %v173, i32 %v77)
214 %v190 = tail call <32 x i32> @llvm.hexagon.V6.vmpybus.acc(<32 x i32> %v188, <16 x i32> %v174, i32 %v77)
; Saturate and store the final two 64-byte results at the precomputed
; end-of-output addresses.
215 %v191 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v189)
216 %v192 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v189)
217 %v193 = tail call <16 x i32> @llvm.hexagon.V6.vasrhubsat(<16 x i32> %v191, <16 x i32> %v192, i32 %a4)
218 store <16 x i32> %v193, ptr %v165, align 64, !tbaa !0
219 %v194 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v190)
220 %v195 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v190)
221 %v196 = tail call <16 x i32> @llvm.hexagon.V6.vasrhubsat(<16 x i32> %v194, <16 x i32> %v195, i32 %a4)
222 store <16 x i32> %v196, ptr %v166, align 64, !tbaa !0
226 ; Function Attrs: nounwind readnone
227 declare <16 x i32> @llvm.hexagon.V6.vd0() #1
229 ; Function Attrs: nounwind readnone
230 declare <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32>, <16 x i32>, i32) #1
232 ; Function Attrs: nounwind readnone
233 declare <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32>, <16 x i32>, i32) #1
235 ; Function Attrs: nounwind readnone
236 declare <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32>, <16 x i32>) #1
238 ; Function Attrs: nounwind readnone
239 declare <32 x i32> @llvm.hexagon.V6.vdmpybus.dv(<32 x i32>, i32) #1
241 ; Function Attrs: nounwind readnone
242 declare <32 x i32> @llvm.hexagon.V6.vdmpybus.dv.acc(<32 x i32>, <32 x i32>, i32) #1
244 ; Function Attrs: nounwind readnone
245 declare <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32>, <32 x i32>, i32) #1
247 ; Function Attrs: nounwind readnone
248 declare <32 x i32> @llvm.hexagon.V6.vmpybus.acc(<32 x i32>, <16 x i32>, i32) #1
250 ; Function Attrs: nounwind readnone
251 declare <16 x i32> @llvm.hexagon.V6.vasrhubsat(<16 x i32>, <16 x i32>, i32) #1
253 ; Function Attrs: nounwind readnone
254 declare <16 x i32> @llvm.hexagon.V6.hi(<32 x i32>) #1
256 ; Function Attrs: nounwind readnone
257 declare <16 x i32> @llvm.hexagon.V6.lo(<32 x i32>) #1
259 attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }
260 attributes #1 = { nounwind readnone }
262 !0 = !{!1, !1, i64 0}
263 !1 = !{!"omnipotent char", !2, i64 0}
264 !2 = !{!"Simple C/C++ TBAA"}