; RUN: llc -O3 -march=hexagon < %s | FileCheck %s
; CHECK: v{{[0-9]+}}.w = vadd
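
; This HVX kernel exercises 64-byte vector code generation for hexagonv60;
; the @llvm.hexagon.V6.vaddw calls below should select to the word-form HVX
; add checked for above.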

target triple = "hexagon"

; Function Attrs: nounwind readnone
declare <16 x i32> @llvm.hexagon.V6.hi(<32 x i32>) #0

; Function Attrs: nounwind readnone
declare <16 x i32> @llvm.hexagon.V6.vshuffb(<16 x i32>) #0

; Function Attrs: nounwind readnone
declare <32 x i32> @llvm.hexagon.V6.vmpyubv(<16 x i32>, <16 x i32>) #0

; Function Attrs: nounwind readnone
declare <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32>, <16 x i32>, i32) #0

; Function Attrs: nounwind readnone
declare <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32>, <16 x i32>) #0

; Function Attrs: nounwind
define void @f0(ptr noalias nocapture %a0, ptr noalias nocapture readonly %a1, i32 %a2, ptr noalias nocapture readonly %a3, i1 %cond) #1 {
b0:
  %v0 = add nsw i32 %a2, 63
  ; Assumed definition: the original %v1 was missing, so derive it from %v0
  ; (divide the padded count by the 64-byte HVX vector length).
  %v1 = ashr i32 %v0, 6
  %v4 = getelementptr inbounds i32, ptr %a1, i32 32
  %v6 = load <16 x i32>, ptr %v4, align 64, !tbaa !0
  %v7 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 32768)
  %v8 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 2147450879)
  %v9 = icmp sgt i32 %v1, 0
  br i1 %v9, label %b1, label %b4

b1:
  %v11 = load <16 x i32>, ptr %a1, align 64, !tbaa !0
  %v12 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v6, <16 x i32> %v11, i32 2)
  %v13 = getelementptr inbounds i32, ptr %a1, i32 48
  %v14 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> %v12, <16 x i32> undef)
  br i1 %cond, label %b2, label %b3

b2:
  %v16 = getelementptr inbounds <16 x i32>, ptr %v13, i32 1
  %v17 = load <16 x i32>, ptr %v16, align 64, !tbaa !0
  %v18 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v17, <16 x i32> %v6, i32 4)
  %v19 = load <16 x i32>, ptr %v13, align 64, !tbaa !0
  %v20 = getelementptr inbounds <16 x i32>, ptr %v13, i32 2
  %v21 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v18, <16 x i32> %v19)
  %v22 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v21, <16 x i32> %v14, i32 4)
  %v23 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v21, <16 x i32> %v14, i32 8)
  %v24 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v21, <16 x i32> %v14, i32 12)
  %v25 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v14, <16 x i32> %v22)
  %v26 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v25, <16 x i32> %v23)
  %v27 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v26, <16 x i32> %v24)
  %v28 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v19, <16 x i32> undef, i32 16)
  %v29 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v27, <16 x i32> %v11)
  %v30 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v27, <16 x i32> %v28)
  %v31 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwh.acc(<16 x i32> %v7, <16 x i32> %v29, i32 53019433)
  %v32 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwh.acc(<16 x i32> %v7, <16 x i32> %v30, i32 53019433)
  %v33 = load <16 x i32>, ptr %a3, align 64, !tbaa !0
  %v34 = tail call <16 x i32> @llvm.hexagon.V6.vshuffb(<16 x i32> %v33)
  %v35 = tail call <32 x i32> @llvm.hexagon.V6.vmpyubv(<16 x i32> %v34, <16 x i32> %v34)
  %v36 = tail call <16 x i32> @llvm.hexagon.V6.vshufoh(<16 x i32> %v32, <16 x i32> %v31)
  store <16 x i32> %v36, ptr %a0, align 64, !tbaa !0
  %v37 = getelementptr inbounds <16 x i32>, ptr %v13, i32 3
  %v38 = load <16 x i32>, ptr %v37, align 64, !tbaa !0
  %v39 = load <16 x i32>, ptr %v20, align 64, !tbaa !0
  %v40 = getelementptr inbounds <16 x i32>, ptr %v13, i32 4
  %v41 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> undef, <16 x i32> %v39)
  %v42 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v41, <16 x i32> %v21, i32 4)
  %v43 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v41, <16 x i32> %v21, i32 8)
  %v44 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v41, <16 x i32> %v21, i32 12)
  %v45 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v21, <16 x i32> %v42)
  %v46 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v45, <16 x i32> %v43)
  %v47 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v46, <16 x i32> %v44)
  %v48 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v47, <16 x i32> %v6)
  %v49 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v47, <16 x i32> undef)
  %v50 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwh.acc(<16 x i32> %v7, <16 x i32> %v48, i32 53019433)
  %v51 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwh.acc(<16 x i32> %v7, <16 x i32> %v49, i32 53019433)
  %v52 = tail call <16 x i32> @llvm.hexagon.V6.vshufoh(<16 x i32> %v51, <16 x i32> %v50)
  %v53 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v52, <16 x i32> undef, i32 56)
  %v54 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v35)
  %v55 = tail call <16 x i32> @llvm.hexagon.V6.vsubuhsat(<16 x i32> %v53, <16 x i32> %v54)
  %v56 = tail call <16 x i32> @llvm.hexagon.V6.vminuh(<16 x i32> %v55, <16 x i32> %v8)
  %v57 = getelementptr inbounds <16 x i32>, ptr %a0, i32 undef
  store <16 x i32> %v56, ptr %v57, align 64, !tbaa !0
  %v58 = getelementptr <16 x i32>, ptr %a0, i32 2
  %v59 = getelementptr inbounds <16 x i32>, ptr %v13, i32 5
  %v60 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> zeroinitializer, <16 x i32> %v38, i32 4)
  %v61 = load <16 x i32>, ptr %v40, align 64, !tbaa !0
  %v62 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v60, <16 x i32> %v61)
  %v63 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v62, <16 x i32> %v41, i32 4)
  %v64 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v62, <16 x i32> %v41, i32 8)
  %v65 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v62, <16 x i32> %v41, i32 12)
  %v66 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v41, <16 x i32> %v63)
  %v67 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v66, <16 x i32> %v64)
  %v68 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v67, <16 x i32> %v65)
  %v69 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v61, <16 x i32> %v39, i32 16)
  %v70 = getelementptr inbounds <16 x i32>, ptr %v13, i32 1
  %v71 = load <16 x i32>, ptr %v70, align 64, !tbaa !0
  %v72 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v68, <16 x i32> %v71)
  %v73 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v68, <16 x i32> %v69)
  %v74 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwh.acc(<16 x i32> %v7, <16 x i32> %v72, i32 53019433)
  %v75 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwh.acc(<16 x i32> %v7, <16 x i32> %v73, i32 53019433)
  %v76 = tail call <16 x i32> @llvm.hexagon.V6.vshufoh(<16 x i32> %v75, <16 x i32> %v74)
  store <16 x i32> %v76, ptr %v58, align 64, !tbaa !0
  %v77 = getelementptr inbounds <16 x i32>, ptr %v13, i32 7
  %v78 = load <16 x i32>, ptr %v77, align 64, !tbaa !0
  %v79 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> undef, <16 x i32> undef)
  %v80 = getelementptr <16 x i32>, ptr %a0, i32 4
  %v81 = getelementptr inbounds <16 x i32>, ptr %v13, i32 9
  %v82 = load <16 x i32>, ptr %v81, align 64, !tbaa !0
  %v83 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v82, <16 x i32> %v78, i32 4)
  %v84 = getelementptr inbounds <16 x i32>, ptr %v13, i32 10
  %v85 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v83, <16 x i32> undef)
  %v86 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v85, <16 x i32> %v79, i32 4)
  %v87 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v85, <16 x i32> %v79, i32 8)
  %v88 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v85, <16 x i32> %v79, i32 12)
  %v89 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v79, <16 x i32> %v86)
  %v90 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v89, <16 x i32> %v87)
  %v91 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v90, <16 x i32> %v88)
  %v92 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> undef, <16 x i32> undef, i32 16)
  %v93 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v91, <16 x i32> zeroinitializer)
  %v94 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v91, <16 x i32> %v92)
  %v95 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwh.acc(<16 x i32> %v7, <16 x i32> %v93, i32 53019433)
  %v96 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwh.acc(<16 x i32> %v7, <16 x i32> %v94, i32 53019433)
  %v97 = tail call <32 x i32> @llvm.hexagon.V6.vmpyubv(<16 x i32> undef, <16 x i32> undef)
  %v98 = tail call <16 x i32> @llvm.hexagon.V6.vshufoh(<16 x i32> %v96, <16 x i32> %v95)
  store <16 x i32> %v98, ptr %v80, align 64, !tbaa !0
  %v99 = getelementptr inbounds <16 x i32>, ptr %v13, i32 11
  %v100 = load <16 x i32>, ptr %v99, align 64, !tbaa !0
  %v101 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v100, <16 x i32> %v82, i32 4)
  %v102 = load <16 x i32>, ptr %v84, align 64, !tbaa !0
  %v103 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v101, <16 x i32> %v102)
  %v104 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v103, <16 x i32> %v85, i32 4)
  %v105 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v103, <16 x i32> %v85, i32 8)
  %v106 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v85, <16 x i32> %v104)
  %v107 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v106, <16 x i32> %v105)
  %v108 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v107, <16 x i32> undef)
  %v109 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v102, <16 x i32> undef, i32 16)
  %v110 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v108, <16 x i32> %v78)
  %v111 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v108, <16 x i32> %v109)
  %v112 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwh.acc(<16 x i32> %v7, <16 x i32> %v110, i32 53019433)
  %v113 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwh.acc(<16 x i32> %v7, <16 x i32> %v111, i32 53019433)
  %v114 = tail call <16 x i32> @llvm.hexagon.V6.vshufoh(<16 x i32> %v113, <16 x i32> %v112)
  %v115 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v114, <16 x i32> undef, i32 56)
  %v116 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v97)
  %v117 = tail call <16 x i32> @llvm.hexagon.V6.vsubuhsat(<16 x i32> %v115, <16 x i32> %v116)
  %v118 = tail call <16 x i32> @llvm.hexagon.V6.vminuh(<16 x i32> %v117, <16 x i32> %v8)
  %v119 = getelementptr inbounds <16 x i32>, ptr %a0, i32 undef
  store <16 x i32> %v118, ptr %v119, align 64, !tbaa !0
  %v120 = getelementptr <16 x i32>, ptr %a0, i32 6
  %v121 = tail call <16 x i32> @llvm.hexagon.V6.vshufoh(<16 x i32> undef, <16 x i32> undef)
  store <16 x i32> %v121, ptr %v120, align 64, !tbaa !0
  ; Assumed reconstruction: the original tail of the function was missing, so
  ; b2 branches to b3 and b3 to b4, which simply returns.
  br label %b3

b3:
  br label %b4

b4:
  ret void
}

; Function Attrs: nounwind readnone
declare <16 x i32> @llvm.hexagon.V6.lvsplatw(i32) #0

; Function Attrs: nounwind readnone
declare <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32>, <16 x i32>) #0

; Function Attrs: nounwind readnone
declare <16 x i32> @llvm.hexagon.V6.vmpyiwh.acc(<16 x i32>, <16 x i32>, i32) #0

; Function Attrs: nounwind readnone
declare <16 x i32> @llvm.hexagon.V6.vshufoh(<16 x i32>, <16 x i32>) #0

; Function Attrs: nounwind readnone
declare <16 x i32> @llvm.hexagon.V6.vsubuhsat(<16 x i32>, <16 x i32>) #0

; Function Attrs: nounwind readnone
declare <16 x i32> @llvm.hexagon.V6.vminuh(<16 x i32>, <16 x i32>) #0

attributes #0 = { nounwind readnone }
attributes #1 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }

!0 = !{!1, !1, i64 0}
!1 = !{!"omnipotent char", !2, i64 0}
!2 = !{!"Simple C/C++ TBAA"}