; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE2
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+ssse3 | FileCheck %s --check-prefix=SSSE3-SLOW
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+ssse3,fast-hops | FileCheck %s --check-prefix=SSSE3-FAST
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX1-SLOW
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx,fast-hops | FileCheck %s --check-prefix=AVX1-FAST
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX2

; PR37890 - subvector reduction followed by shuffle reduction
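;
; Each function reduces its input with log2(N) "halve and add" steps: the
; upper subvector is shuffled down and added to the lower half, and the
; result is read out of lane 0. All fadds are marked 'fast', so the backend
; may reassociate the reduction; the *-FAST runs also enable 'fast-hops',
; which should let the final in-register steps lower to haddps/haddpd.

; v4f32: one halving step (4 -> 2 lanes), then a final shuffle+add.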
define float @PR37890_v4f32(<4 x float> %a) {
; SSE2-LABEL: PR37890_v4f32:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movaps %xmm0, %xmm1
; SSE2-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE2-NEXT:    addps %xmm0, %xmm1
; SSE2-NEXT:    movaps %xmm1, %xmm0
; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[1,1]
; SSE2-NEXT:    addss %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSSE3-SLOW-LABEL: PR37890_v4f32:
; SSSE3-SLOW:       # %bb.0:
; SSSE3-SLOW-NEXT:    movaps %xmm0, %xmm1
; SSSE3-SLOW-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSSE3-SLOW-NEXT:    addps %xmm0, %xmm1
; SSSE3-SLOW-NEXT:    movshdup {{.*#+}} xmm0 = xmm1[1,1,3,3]
; SSSE3-SLOW-NEXT:    addss %xmm0, %xmm1
; SSSE3-SLOW-NEXT:    movaps %xmm1, %xmm0
; SSSE3-SLOW-NEXT:    retq
;
; SSSE3-FAST-LABEL: PR37890_v4f32:
; SSSE3-FAST:       # %bb.0:
; SSSE3-FAST-NEXT:    haddps %xmm0, %xmm0
; SSSE3-FAST-NEXT:    haddps %xmm0, %xmm0
; SSSE3-FAST-NEXT:    retq
;
; AVX1-SLOW-LABEL: PR37890_v4f32:
; AVX1-SLOW:       # %bb.0:
; AVX1-SLOW-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX1-SLOW-NEXT:    vaddps %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX1-SLOW-NEXT:    vaddss %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT:    retq
;
; AVX1-FAST-LABEL: PR37890_v4f32:
; AVX1-FAST:       # %bb.0:
; AVX1-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT:    retq
;
; AVX2-LABEL: PR37890_v4f32:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX2-NEXT:    vaddps %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX2-NEXT:    vaddss %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    retq
  %hi0 = shufflevector <4 x float> %a, <4 x float> undef, <2 x i32> <i32 2, i32 3>
  %lo0 = shufflevector <4 x float> %a, <4 x float> undef, <2 x i32> <i32 0, i32 1>
  %sum0 = fadd fast <2 x float> %lo0, %hi0
  %hi1 = shufflevector <2 x float> %sum0, <2 x float> undef, <2 x i32> <i32 1, i32 undef>
  %sum1 = fadd fast <2 x float> %sum0, %hi1
  %e = extractelement <2 x float> %sum1, i32 0
  ret float %e
}
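
; v4f64: one halving step (4 -> 2 lanes), then a final shuffle+add.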
define double @PR37890_v4f64(<4 x double> %a) {
; SSE2-LABEL: PR37890_v4f64:
; SSE2:       # %bb.0:
; SSE2-NEXT:    addpd %xmm1, %xmm0
; SSE2-NEXT:    movapd %xmm0, %xmm1
; SSE2-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE2-NEXT:    addsd %xmm0, %xmm1
; SSE2-NEXT:    movapd %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSSE3-SLOW-LABEL: PR37890_v4f64:
; SSSE3-SLOW:       # %bb.0:
; SSSE3-SLOW-NEXT:    addpd %xmm1, %xmm0
; SSSE3-SLOW-NEXT:    movapd %xmm0, %xmm1
; SSSE3-SLOW-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSSE3-SLOW-NEXT:    addsd %xmm0, %xmm1
; SSSE3-SLOW-NEXT:    movapd %xmm1, %xmm0
; SSSE3-SLOW-NEXT:    retq
;
; SSSE3-FAST-LABEL: PR37890_v4f64:
; SSSE3-FAST:       # %bb.0:
; SSSE3-FAST-NEXT:    addpd %xmm1, %xmm0
; SSSE3-FAST-NEXT:    haddpd %xmm0, %xmm0
; SSSE3-FAST-NEXT:    retq
;
; AVX1-SLOW-LABEL: PR37890_v4f64:
; AVX1-SLOW:       # %bb.0:
; AVX1-SLOW-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX1-SLOW-NEXT:    vaddpd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX1-SLOW-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT:    vzeroupper
; AVX1-SLOW-NEXT:    retq
;
; AVX1-FAST-LABEL: PR37890_v4f64:
; AVX1-FAST:       # %bb.0:
; AVX1-FAST-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX1-FAST-NEXT:    vhaddpd %xmm0, %xmm1, %xmm0
; AVX1-FAST-NEXT:    vhaddpd %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT:    vzeroupper
; AVX1-FAST-NEXT:    retq
;
; AVX2-LABEL: PR37890_v4f64:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX2-NEXT:    vaddpd %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX2-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
  %hi0 = shufflevector <4 x double> %a, <4 x double> undef, <2 x i32> <i32 2, i32 3>
  %lo0 = shufflevector <4 x double> %a, <4 x double> undef, <2 x i32> <i32 0, i32 1>
  %sum0 = fadd fast <2 x double> %lo0, %hi0
  %hi1 = shufflevector <2 x double> %sum0, <2 x double> undef, <2 x i32> <i32 1, i32 undef>
  %sum1 = fadd fast <2 x double> %sum0, %hi1
  %e = extractelement <2 x double> %sum1, i32 0
  ret double %e
}
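
; v8f32: two halving steps (8 -> 4 -> 2 lanes), then a final shuffle+add.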
define float @PR37890_v8f32(<8 x float> %a) {
; SSE2-LABEL: PR37890_v8f32:
; SSE2:       # %bb.0:
; SSE2-NEXT:    addps %xmm1, %xmm0
; SSE2-NEXT:    movaps %xmm0, %xmm1
; SSE2-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE2-NEXT:    addps %xmm0, %xmm1
; SSE2-NEXT:    movaps %xmm1, %xmm0
; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[1,1]
; SSE2-NEXT:    addss %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSSE3-SLOW-LABEL: PR37890_v8f32:
; SSSE3-SLOW:       # %bb.0:
; SSSE3-SLOW-NEXT:    addps %xmm1, %xmm0
; SSSE3-SLOW-NEXT:    movaps %xmm0, %xmm1
; SSSE3-SLOW-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSSE3-SLOW-NEXT:    addps %xmm0, %xmm1
; SSSE3-SLOW-NEXT:    movshdup {{.*#+}} xmm0 = xmm1[1,1,3,3]
; SSSE3-SLOW-NEXT:    addss %xmm0, %xmm1
; SSSE3-SLOW-NEXT:    movaps %xmm1, %xmm0
; SSSE3-SLOW-NEXT:    retq
;
; SSSE3-FAST-LABEL: PR37890_v8f32:
; SSSE3-FAST:       # %bb.0:
; SSSE3-FAST-NEXT:    addps %xmm1, %xmm0
; SSSE3-FAST-NEXT:    haddps %xmm0, %xmm0
; SSSE3-FAST-NEXT:    haddps %xmm0, %xmm0
; SSSE3-FAST-NEXT:    retq
;
; AVX1-SLOW-LABEL: PR37890_v8f32:
; AVX1-SLOW:       # %bb.0:
; AVX1-SLOW-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX1-SLOW-NEXT:    vaddps %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX1-SLOW-NEXT:    vaddps %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX1-SLOW-NEXT:    vaddss %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT:    vzeroupper
; AVX1-SLOW-NEXT:    retq
;
; AVX1-FAST-LABEL: PR37890_v8f32:
; AVX1-FAST:       # %bb.0:
; AVX1-FAST-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX1-FAST-NEXT:    vhaddps %xmm0, %xmm1, %xmm0
; AVX1-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT:    vzeroupper
; AVX1-FAST-NEXT:    retq
;
; AVX2-LABEL: PR37890_v8f32:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX2-NEXT:    vaddps %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX2-NEXT:    vaddps %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX2-NEXT:    vaddss %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
  %hi0 = shufflevector <8 x float> %a, <8 x float> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
  %lo0 = shufflevector <8 x float> %a, <8 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %sum0 = fadd fast <4 x float> %lo0, %hi0
  %hi1 = shufflevector <4 x float> %sum0, <4 x float> undef, <2 x i32> <i32 2, i32 3>
  %lo1 = shufflevector <4 x float> %sum0, <4 x float> undef, <2 x i32> <i32 0, i32 1>
  %sum1 = fadd fast <2 x float> %lo1, %hi1
  %hi2 = shufflevector <2 x float> %sum1, <2 x float> undef, <2 x i32> <i32 1, i32 undef>
  %sum2 = fadd fast <2 x float> %sum1, %hi2
  %e = extractelement <2 x float> %sum2, i32 0
  ret float %e
}
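
; v8f64: two halving steps (8 -> 4 -> 2 lanes), then a final shuffle+add.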
define double @PR37890_v8f64(<8 x double> %a) {
; SSE2-LABEL: PR37890_v8f64:
; SSE2:       # %bb.0:
; SSE2-NEXT:    addpd %xmm3, %xmm1
; SSE2-NEXT:    addpd %xmm2, %xmm1
; SSE2-NEXT:    addpd %xmm0, %xmm1
; SSE2-NEXT:    movapd %xmm1, %xmm0
; SSE2-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; SSE2-NEXT:    addsd %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSSE3-SLOW-LABEL: PR37890_v8f64:
; SSSE3-SLOW:       # %bb.0:
; SSSE3-SLOW-NEXT:    addpd %xmm3, %xmm1
; SSSE3-SLOW-NEXT:    addpd %xmm2, %xmm1
; SSSE3-SLOW-NEXT:    addpd %xmm0, %xmm1
; SSSE3-SLOW-NEXT:    movapd %xmm1, %xmm0
; SSSE3-SLOW-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; SSSE3-SLOW-NEXT:    addsd %xmm1, %xmm0
; SSSE3-SLOW-NEXT:    retq
;
; SSSE3-FAST-LABEL: PR37890_v8f64:
; SSSE3-FAST:       # %bb.0:
; SSSE3-FAST-NEXT:    addpd %xmm3, %xmm1
; SSSE3-FAST-NEXT:    addpd %xmm2, %xmm1
; SSSE3-FAST-NEXT:    addpd %xmm0, %xmm1
; SSSE3-FAST-NEXT:    haddpd %xmm1, %xmm1
; SSSE3-FAST-NEXT:    movapd %xmm1, %xmm0
; SSSE3-FAST-NEXT:    retq
;
; AVX1-SLOW-LABEL: PR37890_v8f64:
; AVX1-SLOW:       # %bb.0:
; AVX1-SLOW-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
; AVX1-SLOW-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX1-SLOW-NEXT:    vaddpd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX1-SLOW-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT:    vzeroupper
; AVX1-SLOW-NEXT:    retq
;
; AVX1-FAST-LABEL: PR37890_v8f64:
; AVX1-FAST:       # %bb.0:
; AVX1-FAST-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
; AVX1-FAST-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX1-FAST-NEXT:    vhaddpd %xmm0, %xmm1, %xmm0
; AVX1-FAST-NEXT:    vhaddpd %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT:    vzeroupper
; AVX1-FAST-NEXT:    retq
;
; AVX2-LABEL: PR37890_v8f64:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX2-NEXT:    vaddpd %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX2-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
  %hi0 = shufflevector <8 x double> %a, <8 x double> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
  %lo0 = shufflevector <8 x double> %a, <8 x double> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %sum0 = fadd fast <4 x double> %lo0, %hi0
  %hi1 = shufflevector <4 x double> %sum0, <4 x double> undef, <2 x i32> <i32 2, i32 3>
  %lo1 = shufflevector <4 x double> %sum0, <4 x double> undef, <2 x i32> <i32 0, i32 1>
  %sum1 = fadd fast <2 x double> %lo1, %hi1
  %hi2 = shufflevector <2 x double> %sum1, <2 x double> undef, <2 x i32> <i32 1, i32 undef>
  %sum2 = fadd fast <2 x double> %sum1, %hi2
  %e = extractelement <2 x double> %sum2, i32 0
  ret double %e
}
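
; v16f32: three halving steps (16 -> 8 -> 4 -> 2 lanes), then a final
; shuffle+add.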
define float @PR37890_v16f32(<16 x float> %a) {
; SSE2-LABEL: PR37890_v16f32:
; SSE2:       # %bb.0:
; SSE2-NEXT:    addps %xmm3, %xmm1
; SSE2-NEXT:    addps %xmm2, %xmm1
; SSE2-NEXT:    addps %xmm0, %xmm1
; SSE2-NEXT:    movaps %xmm1, %xmm2
; SSE2-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE2-NEXT:    addps %xmm1, %xmm2
; SSE2-NEXT:    movaps %xmm2, %xmm0
; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1],xmm2[1,1]
; SSE2-NEXT:    addss %xmm2, %xmm0
; SSE2-NEXT:    retq
;
; SSSE3-SLOW-LABEL: PR37890_v16f32:
; SSSE3-SLOW:       # %bb.0:
; SSSE3-SLOW-NEXT:    addps %xmm3, %xmm1
; SSSE3-SLOW-NEXT:    addps %xmm2, %xmm1
; SSSE3-SLOW-NEXT:    addps %xmm0, %xmm1
; SSSE3-SLOW-NEXT:    movaps %xmm1, %xmm2
; SSSE3-SLOW-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSSE3-SLOW-NEXT:    addps %xmm1, %xmm2
; SSSE3-SLOW-NEXT:    movshdup {{.*#+}} xmm0 = xmm2[1,1,3,3]
; SSSE3-SLOW-NEXT:    addss %xmm2, %xmm0
; SSSE3-SLOW-NEXT:    retq
;
; SSSE3-FAST-LABEL: PR37890_v16f32:
; SSSE3-FAST:       # %bb.0:
; SSSE3-FAST-NEXT:    addps %xmm3, %xmm1
; SSSE3-FAST-NEXT:    addps %xmm2, %xmm1
; SSSE3-FAST-NEXT:    addps %xmm0, %xmm1
; SSSE3-FAST-NEXT:    movaps %xmm1, %xmm0
; SSSE3-FAST-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; SSSE3-FAST-NEXT:    addps %xmm1, %xmm0
; SSSE3-FAST-NEXT:    haddps %xmm0, %xmm0
; SSSE3-FAST-NEXT:    retq
;
; AVX1-SLOW-LABEL: PR37890_v16f32:
; AVX1-SLOW:       # %bb.0:
; AVX1-SLOW-NEXT:    vaddps %ymm1, %ymm0, %ymm0
; AVX1-SLOW-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX1-SLOW-NEXT:    vaddps %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX1-SLOW-NEXT:    vaddps %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX1-SLOW-NEXT:    vaddss %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT:    vzeroupper
; AVX1-SLOW-NEXT:    retq
;
; AVX1-FAST-LABEL: PR37890_v16f32:
; AVX1-FAST:       # %bb.0:
; AVX1-FAST-NEXT:    vaddps %ymm1, %ymm0, %ymm0
; AVX1-FAST-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX1-FAST-NEXT:    vhaddps %xmm0, %xmm1, %xmm0
; AVX1-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT:    vzeroupper
; AVX1-FAST-NEXT:    retq
;
; AVX2-LABEL: PR37890_v16f32:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vaddps %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX2-NEXT:    vaddps %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX2-NEXT:    vaddps %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX2-NEXT:    vaddss %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
  %hi0 = shufflevector <16 x float> %a, <16 x float> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  %lo0 = shufflevector <16 x float> %a, <16 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  %sum0 = fadd fast <8 x float> %lo0, %hi0
  %hi1 = shufflevector <8 x float> %sum0, <8 x float> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
  %lo1 = shufflevector <8 x float> %sum0, <8 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %sum1 = fadd fast <4 x float> %lo1, %hi1
  %hi2 = shufflevector <4 x float> %sum1, <4 x float> undef, <2 x i32> <i32 2, i32 3>
  %lo2 = shufflevector <4 x float> %sum1, <4 x float> undef, <2 x i32> <i32 0, i32 1>
  %sum2 = fadd fast <2 x float> %lo2, %hi2
  %hi3 = shufflevector <2 x float> %sum2, <2 x float> undef, <2 x i32> <i32 1, i32 undef>
  %sum3 = fadd fast <2 x float> %sum2, %hi3
  %e = extractelement <2 x float> %sum3, i32 0
  ret float %e
}