; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v,+zvfh,+m,+zvl128b -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,V128,RV32-V128
; RUN: llc -mtriple=riscv64 -mattr=+v,+zvfh,+m,+zvl128b -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,V128,RV64-V128
; RUN: llc -mtriple=riscv32 -mattr=+v,+zvfh,+m,+zvl512b -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,V512,RV32-V512
; RUN: llc -mtriple=riscv64 -mattr=+v,+zvfh,+m,+zvl512b -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,V512,RV64-V512

; Test optimizing interleaves to widening arithmetic.
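;
; The trick being tested: vwaddu.vv computes zext(x) + zext(y) into 2*SEW-wide
; elements, then vwmaccu.vx with an all-ones scalar (unsigned value 2^SEW-1)
; adds (2^SEW-1) * zext(y). Each wide element ends up as
; zext(x) + 2^SEW * zext(y), i.e. the x element in its low half and the
; matching y element in its high half -- which is exactly the interleave once
; the result is reinterpreted at the original element width.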
define <4 x half> @interleave_v2f16(<2 x half> %x, <2 x half> %y) {
; CHECK-LABEL: interleave_v2f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT: vwaddu.vv v10, v8, v9
; CHECK-NEXT: li a0, -1
; CHECK-NEXT: vwmaccu.vx v10, a0, v9
; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
  %a = shufflevector <2 x half> %x, <2 x half> %y, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
  ret <4 x half> %a
}

; Vector order switched for coverage.
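; (Swapping which source supplies the even lanes simply commutes the
; vwaddu.vv operands and makes x the vwmaccu.vx multiplicand below.)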
define <4 x float> @interleave_v2f32(<2 x float> %x, <2 x float> %y) {
; CHECK-LABEL: interleave_v2f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT: vwaddu.vv v10, v9, v8
; CHECK-NEXT: li a0, -1
; CHECK-NEXT: vwmaccu.vx v10, a0, v8
; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
  %a = shufflevector <2 x float> %x, <2 x float> %y, <4 x i32> <i32 2, i32 0, i32 3, i32 1>
  ret <4 x float> %a
}

; One vXf64 test case to verify that we don't optimize it.
; FIXME: Is there better codegen we can do here?
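; (f64 elements cannot use the widening trick: it would need 2*SEW = 128-bit
; arithmetic, and ELEN is 64 here, so the shuffle falls back to a masked
; vrgatherei16 gather instead.)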
define <4 x double> @interleave_v2f64(<2 x double> %x, <2 x double> %y) {
; V128-LABEL: interleave_v2f64:
; V128: # %bb.0:
; V128-NEXT: vmv1r.v v12, v9
; V128-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; V128-NEXT: vid.v v9
; V128-NEXT: vmv.v.i v0, 10
; V128-NEXT: vsrl.vi v14, v9, 1
; V128-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; V128-NEXT: vrgatherei16.vv v10, v8, v14
; V128-NEXT: vrgatherei16.vv v10, v12, v14, v0.t
; V128-NEXT: vmv.v.v v8, v10
; V128-NEXT: ret
;
; RV32-V512-LABEL: interleave_v2f64:
; RV32-V512: # %bb.0:
; RV32-V512-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
; RV32-V512-NEXT: vid.v v10
; RV32-V512-NEXT: vsrl.vi v11, v10, 1
; RV32-V512-NEXT: vmv.v.i v0, 10
; RV32-V512-NEXT: vsetvli zero, zero, e64, m1, ta, mu
; RV32-V512-NEXT: vrgatherei16.vv v10, v8, v11
; RV32-V512-NEXT: vrgatherei16.vv v10, v9, v11, v0.t
; RV32-V512-NEXT: vmv.v.v v8, v10
; RV32-V512-NEXT: ret
;
; RV64-V512-LABEL: interleave_v2f64:
; RV64-V512: # %bb.0:
; RV64-V512-NEXT: vsetivli zero, 4, e64, m1, ta, mu
; RV64-V512-NEXT: vid.v v10
; RV64-V512-NEXT: vsrl.vi v11, v10, 1
; RV64-V512-NEXT: vmv.v.i v0, 10
; RV64-V512-NEXT: vrgather.vv v10, v8, v11
; RV64-V512-NEXT: vrgather.vv v10, v9, v11, v0.t
; RV64-V512-NEXT: vmv.v.v v8, v10
; RV64-V512-NEXT: ret
  %a = shufflevector <2 x double> %x, <2 x double> %y, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
  ret <4 x double> %a
}

; Undef elements for coverage
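; (Undef lanes in the mask are don't-care, so the pattern still matches and
; the same full-interleave sequence is emitted.)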
define <8 x half> @interleave_v4f16(<4 x half> %x, <4 x half> %y) {
; V128-LABEL: interleave_v4f16:
; V128: # %bb.0:
; V128-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; V128-NEXT: vwaddu.vv v10, v8, v9
; V128-NEXT: li a0, -1
; V128-NEXT: vwmaccu.vx v10, a0, v9
; V128-NEXT: vmv1r.v v8, v10
; V128-NEXT: ret
;
; V512-LABEL: interleave_v4f16:
; V512: # %bb.0:
; V512-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
; V512-NEXT: vwaddu.vv v10, v8, v9
; V512-NEXT: li a0, -1
; V512-NEXT: vwmaccu.vx v10, a0, v9
; V512-NEXT: vmv1r.v v8, v10
; V512-NEXT: ret
  %a = shufflevector <4 x half> %x, <4 x half> %y, <8 x i32> <i32 0, i32 4, i32 undef, i32 5, i32 2, i32 undef, i32 3, i32 7>
  ret <8 x half> %a
}

define <8 x float> @interleave_v4f32(<4 x float> %x, <4 x float> %y) {
; V128-LABEL: interleave_v4f32:
; V128: # %bb.0:
; V128-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; V128-NEXT: vwaddu.vv v10, v8, v9
; V128-NEXT: li a0, -1
; V128-NEXT: vwmaccu.vx v10, a0, v9
; V128-NEXT: vmv2r.v v8, v10
; V128-NEXT: ret
;
; V512-LABEL: interleave_v4f32:
; V512: # %bb.0:
; V512-NEXT: vsetivli zero, 4, e32, mf2, ta, ma
; V512-NEXT: vwaddu.vv v10, v8, v9
; V512-NEXT: li a0, -1
; V512-NEXT: vwmaccu.vx v10, a0, v9
; V512-NEXT: vmv1r.v v8, v10
; V512-NEXT: ret
  %a = shufflevector <4 x float> %x, <4 x float> %y, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
  ret <8 x float> %a
}

; Vector order switched for coverage.
define <16 x half> @interleave_v8f16(<8 x half> %x, <8 x half> %y) {
; V128-LABEL: interleave_v8f16:
; V128: # %bb.0:
; V128-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; V128-NEXT: vwaddu.vv v10, v9, v8
; V128-NEXT: li a0, -1
; V128-NEXT: vwmaccu.vx v10, a0, v8
; V128-NEXT: vmv2r.v v8, v10
; V128-NEXT: ret
;
; V512-LABEL: interleave_v8f16:
; V512: # %bb.0:
; V512-NEXT: vsetivli zero, 8, e16, mf4, ta, ma
; V512-NEXT: vwaddu.vv v10, v9, v8
; V512-NEXT: li a0, -1
; V512-NEXT: vwmaccu.vx v10, a0, v8
; V512-NEXT: vmv1r.v v8, v10
; V512-NEXT: ret
  %a = shufflevector <8 x half> %x, <8 x half> %y, <16 x i32> <i32 8, i32 0, i32 9, i32 1, i32 10, i32 2, i32 11, i32 3, i32 12, i32 4, i32 13, i32 5, i32 14, i32 6, i32 15, i32 7>
  ret <16 x half> %a
}

define <16 x float> @interleave_v8f32(<8 x float> %x, <8 x float> %y) {
; V128-LABEL: interleave_v8f32:
; V128: # %bb.0:
; V128-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; V128-NEXT: vwaddu.vv v12, v8, v10
; V128-NEXT: li a0, -1
; V128-NEXT: vwmaccu.vx v12, a0, v10
; V128-NEXT: vmv4r.v v8, v12
; V128-NEXT: ret
;
; V512-LABEL: interleave_v8f32:
; V512: # %bb.0:
; V512-NEXT: vsetivli zero, 8, e32, mf2, ta, ma
; V512-NEXT: vwaddu.vv v10, v8, v9
; V512-NEXT: li a0, -1
; V512-NEXT: vwmaccu.vx v10, a0, v9
; V512-NEXT: vmv1r.v v8, v10
; V512-NEXT: ret
  %a = shufflevector <8 x float> %x, <8 x float> %y, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
  ret <16 x float> %a
}

define <32 x half> @interleave_v16f16(<16 x half> %x, <16 x half> %y) {
; V128-LABEL: interleave_v16f16:
; V128: # %bb.0:
; V128-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; V128-NEXT: vwaddu.vv v12, v8, v10
; V128-NEXT: li a0, -1
; V128-NEXT: vwmaccu.vx v12, a0, v10
; V128-NEXT: vmv4r.v v8, v12
; V128-NEXT: ret
;
; V512-LABEL: interleave_v16f16:
; V512: # %bb.0:
; V512-NEXT: vsetivli zero, 16, e16, mf2, ta, ma
; V512-NEXT: vwaddu.vv v10, v8, v9
; V512-NEXT: li a0, -1
; V512-NEXT: vwmaccu.vx v10, a0, v9
; V512-NEXT: vmv1r.v v8, v10
; V512-NEXT: ret
  %a = shufflevector <16 x half> %x, <16 x half> %y, <32 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
  ret <32 x half> %a
}

define <32 x float> @interleave_v16f32(<16 x float> %x, <16 x float> %y) {
; V128-LABEL: interleave_v16f32:
; V128: # %bb.0:
; V128-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; V128-NEXT: vwaddu.vv v16, v8, v12
; V128-NEXT: li a0, -1
; V128-NEXT: vwmaccu.vx v16, a0, v12
; V128-NEXT: vmv8r.v v8, v16
; V128-NEXT: ret
;
; V512-LABEL: interleave_v16f32:
; V512: # %bb.0:
; V512-NEXT: vsetivli zero, 16, e32, m1, ta, ma
; V512-NEXT: vwaddu.vv v10, v8, v9
; V512-NEXT: li a0, -1
; V512-NEXT: vwmaccu.vx v10, a0, v9
; V512-NEXT: vmv2r.v v8, v10
; V512-NEXT: ret
  %a = shufflevector <16 x float> %x, <16 x float> %y, <32 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
  ret <32 x float> %a
}

define <64 x half> @interleave_v32f16(<32 x half> %x, <32 x half> %y) {
; V128-LABEL: interleave_v32f16:
; V128: # %bb.0:
; V128-NEXT: li a0, 32
; V128-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; V128-NEXT: vwaddu.vv v16, v8, v12
; V128-NEXT: li a0, -1
; V128-NEXT: vwmaccu.vx v16, a0, v12
; V128-NEXT: vmv8r.v v8, v16
; V128-NEXT: ret
;
; V512-LABEL: interleave_v32f16:
; V512: # %bb.0:
; V512-NEXT: li a0, 32
; V512-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; V512-NEXT: vwaddu.vv v10, v8, v9
; V512-NEXT: li a0, -1
; V512-NEXT: vwmaccu.vx v10, a0, v9
; V512-NEXT: vmv2r.v v8, v10
; V512-NEXT: ret
  %a = shufflevector <32 x half> %x, <32 x half> %y, <64 x i32> <i32 0, i32 32, i32 1, i32 33, i32 2, i32 34, i32 3, i32 35, i32 4, i32 36, i32 5, i32 37, i32 6, i32 38, i32 7, i32 39, i32 8, i32 40, i32 9, i32 41, i32 10, i32 42, i32 11, i32 43, i32 12, i32 44, i32 13, i32 45, i32 14, i32 46, i32 15, i32 47, i32 16, i32 48, i32 17, i32 49, i32 18, i32 50, i32 19, i32 51, i32 20, i32 52, i32 21, i32 53, i32 22, i32 54, i32 23, i32 55, i32 24, i32 56, i32 25, i32 57, i32 26, i32 58, i32 27, i32 59, i32 28, i32 60, i32 29, i32 61, i32 30, i32 62, i32 31, i32 63>
  ret <64 x half> %a
}

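; At LMUL=8 the two sources plus the widened result no longer fit in the
; vector register file, so the V128 lowering below splits the work: the low
; halves still go through vwaddu/vwmaccu, while the high halves are combined
; with vzext.vf2, a shift by 32, and a vmerge (mask 0xaaaaaaaa), spilling one
; source to the stack in between.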
define <64 x float> @interleave_v32f32(<32 x float> %x, <32 x float> %y) {
; V128-LABEL: interleave_v32f32:
; V128: # %bb.0:
; V128-NEXT: addi sp, sp, -16
; V128-NEXT: .cfi_def_cfa_offset 16
; V128-NEXT: csrr a0, vlenb
; V128-NEXT: slli a0, a0, 3
; V128-NEXT: sub sp, sp, a0
; V128-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; V128-NEXT: addi a0, sp, 16
; V128-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; V128-NEXT: vsetivli zero, 16, e32, m8, ta, ma
; V128-NEXT: vslidedown.vi v24, v16, 16
; V128-NEXT: li a0, 32
; V128-NEXT: vslidedown.vi v0, v8, 16
; V128-NEXT: lui a1, 699051
; V128-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; V128-NEXT: vzext.vf2 v8, v24
; V128-NEXT: vzext.vf2 v24, v0
; V128-NEXT: addi a1, a1, -1366
; V128-NEXT: vmv.s.x v0, a1
; V128-NEXT: vsll.vx v8, v8, a0
; V128-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; V128-NEXT: vmerge.vvm v24, v24, v8, v0
; V128-NEXT: addi a0, sp, 16
; V128-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; V128-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; V128-NEXT: vwaddu.vv v0, v8, v16
; V128-NEXT: li a0, -1
; V128-NEXT: vwmaccu.vx v0, a0, v16
; V128-NEXT: vmv8r.v v8, v0
; V128-NEXT: vmv8r.v v16, v24
; V128-NEXT: csrr a0, vlenb
; V128-NEXT: slli a0, a0, 3
; V128-NEXT: add sp, sp, a0
; V128-NEXT: .cfi_def_cfa sp, 16
; V128-NEXT: addi sp, sp, 16
; V128-NEXT: .cfi_def_cfa_offset 0
; V128-NEXT: ret
;
; V512-LABEL: interleave_v32f32:
; V512: # %bb.0:
; V512-NEXT: li a0, 32
; V512-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; V512-NEXT: vwaddu.vv v12, v8, v10
; V512-NEXT: li a0, -1
; V512-NEXT: vwmaccu.vx v12, a0, v10
; V512-NEXT: vmv4r.v v8, v12
; V512-NEXT: ret
  %a = shufflevector <32 x float> %x, <32 x float> %y, <64 x i32> <i32 0, i32 32, i32 1, i32 33, i32 2, i32 34, i32 3, i32 35, i32 4, i32 36, i32 5, i32 37, i32 6, i32 38, i32 7, i32 39, i32 8, i32 40, i32 9, i32 41, i32 10, i32 42, i32 11, i32 43, i32 12, i32 44, i32 13, i32 45, i32 14, i32 46, i32 15, i32 47, i32 16, i32 48, i32 17, i32 49, i32 18, i32 50, i32 19, i32 51, i32 20, i32 52, i32 21, i32 53, i32 22, i32 54, i32 23, i32 55, i32 24, i32 56, i32 25, i32 57, i32 26, i32 58, i32 27, i32 59, i32 28, i32 60, i32 29, i32 61, i32 30, i32 62, i32 31, i32 63>
  ret <64 x float> %a
}

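; A single-source ("unary") interleave extracts the high half with
; vslidedown.vi and then feeds both halves through the same widening
; arithmetic as the two-source cases above.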
define <4 x half> @unary_interleave_v4f16(<4 x half> %x) {
; V128-LABEL: unary_interleave_v4f16:
; V128: # %bb.0:
; V128-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; V128-NEXT: vslidedown.vi v10, v8, 2
; V128-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; V128-NEXT: vwaddu.vv v9, v8, v10
; V128-NEXT: li a0, -1
; V128-NEXT: vwmaccu.vx v9, a0, v10
; V128-NEXT: vmv1r.v v8, v9
; V128-NEXT: ret
;
; V512-LABEL: unary_interleave_v4f16:
; V512: # %bb.0:
; V512-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; V512-NEXT: vslidedown.vi v10, v8, 2
; V512-NEXT: vwaddu.vv v9, v8, v10
; V512-NEXT: li a0, -1
; V512-NEXT: vwmaccu.vx v9, a0, v10
; V512-NEXT: vmv1r.v v8, v9
; V512-NEXT: ret
  %a = shufflevector <4 x half> %x, <4 x half> poison, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
  ret <4 x half> %a
}

define <4 x float> @unary_interleave_v4f32(<4 x float> %x) {
; V128-LABEL: unary_interleave_v4f32:
; V128: # %bb.0:
; V128-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; V128-NEXT: vslidedown.vi v10, v8, 2
; V128-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; V128-NEXT: vwaddu.vv v9, v8, v10
; V128-NEXT: li a0, -1
; V128-NEXT: vwmaccu.vx v9, a0, v10
; V128-NEXT: vmv1r.v v8, v9
; V128-NEXT: ret
;
; V512-LABEL: unary_interleave_v4f32:
; V512: # %bb.0:
; V512-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; V512-NEXT: vslidedown.vi v10, v8, 2
; V512-NEXT: vwaddu.vv v9, v8, v10
; V512-NEXT: li a0, -1
; V512-NEXT: vwmaccu.vx v9, a0, v10
; V512-NEXT: vmv1r.v v8, v9
; V512-NEXT: ret
  %a = shufflevector <4 x float> %x, <4 x float> poison, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
  ret <4 x float> %a
}

; FIXME: Is there better codegen we can do here?
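; (The gather indices <0,2,1,3> are materialized as packed bytes: lui+addi
; builds 0x03010200 in a0, and vsext widens those i8 values to index-sized
; elements for the vrgather.)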
define <4 x double> @unary_interleave_v4f64(<4 x double> %x) {
; V128-LABEL: unary_interleave_v4f64:
; V128: # %bb.0:
; V128-NEXT: lui a0, 12304
; V128-NEXT: addi a0, a0, 512
; V128-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; V128-NEXT: vmv.s.x v10, a0
; V128-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; V128-NEXT: vsext.vf2 v12, v10
; V128-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; V128-NEXT: vrgatherei16.vv v10, v8, v12
; V128-NEXT: vmv.v.v v8, v10
; V128-NEXT: ret
;
; RV32-V512-LABEL: unary_interleave_v4f64:
; RV32-V512: # %bb.0:
; RV32-V512-NEXT: lui a0, 12304
; RV32-V512-NEXT: addi a0, a0, 512
; RV32-V512-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV32-V512-NEXT: vmv.s.x v9, a0
; RV32-V512-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
; RV32-V512-NEXT: vsext.vf2 v10, v9
; RV32-V512-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; RV32-V512-NEXT: vrgatherei16.vv v9, v8, v10
; RV32-V512-NEXT: vmv.v.v v8, v9
; RV32-V512-NEXT: ret
;
; RV64-V512-LABEL: unary_interleave_v4f64:
; RV64-V512: # %bb.0:
; RV64-V512-NEXT: lui a0, 12304
; RV64-V512-NEXT: addi a0, a0, 512
; RV64-V512-NEXT: vsetivli zero, 4, e64, m1, ta, ma
; RV64-V512-NEXT: vmv.s.x v9, a0
; RV64-V512-NEXT: vsext.vf8 v10, v9
; RV64-V512-NEXT: vrgather.vv v9, v8, v10
; RV64-V512-NEXT: vmv.v.v v8, v9
; RV64-V512-NEXT: ret
  %a = shufflevector <4 x double> %x, <4 x double> poison, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
  ret <4 x double> %a
}

define <8 x half> @unary_interleave_v8f16(<8 x half> %x) {
; V128-LABEL: unary_interleave_v8f16:
; V128: # %bb.0:
; V128-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; V128-NEXT: vslidedown.vi v10, v8, 4
; V128-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; V128-NEXT: vwaddu.vv v9, v8, v10
; V128-NEXT: li a0, -1
; V128-NEXT: vwmaccu.vx v9, a0, v10
; V128-NEXT: vmv1r.v v8, v9
; V128-NEXT: ret
;
; V512-LABEL: unary_interleave_v8f16:
; V512: # %bb.0:
; V512-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
; V512-NEXT: vslidedown.vi v10, v8, 4
; V512-NEXT: vwaddu.vv v9, v8, v10
; V512-NEXT: li a0, -1
; V512-NEXT: vwmaccu.vx v9, a0, v10
; V512-NEXT: vmv1r.v v8, v9
; V512-NEXT: ret
  %a = shufflevector <8 x half> %x, <8 x half> poison, <8 x i32> <i32 0, i32 4, i32 undef, i32 5, i32 2, i32 undef, i32 3, i32 7>
  ret <8 x half> %a
}

define <8 x float> @unary_interleave_v8f32(<8 x float> %x) {
; V128-LABEL: unary_interleave_v8f32:
; V128: # %bb.0:
; V128-NEXT: vsetivli zero, 4, e32, m2, ta, ma
; V128-NEXT: vslidedown.vi v12, v8, 4
; V128-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; V128-NEXT: vwaddu.vv v10, v12, v8
; V128-NEXT: li a0, -1
; V128-NEXT: vwmaccu.vx v10, a0, v8
; V128-NEXT: vmv2r.v v8, v10
; V128-NEXT: ret
;
; V512-LABEL: unary_interleave_v8f32:
; V512: # %bb.0:
; V512-NEXT: vsetivli zero, 4, e32, mf2, ta, ma
; V512-NEXT: vslidedown.vi v10, v8, 4
; V512-NEXT: vwaddu.vv v9, v10, v8
; V512-NEXT: li a0, -1
; V512-NEXT: vwmaccu.vx v9, a0, v8
; V512-NEXT: vmv1r.v v8, v9
; V512-NEXT: ret
  %a = shufflevector <8 x float> %x, <8 x float> poison, <8 x i32> <i32 4, i32 0, i32 undef, i32 1, i32 6, i32 undef, i32 7, i32 3>
  ret <8 x float> %a
}

;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: