; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-pc-linux -mattr=+sse2 | FileCheck %s --check-prefix=SSE --check-prefix=SSE2
; RUN: llc < %s -mtriple=x86_64-pc-linux -mattr=+sse4.2 | FileCheck %s --check-prefix=SSE --check-prefix=SSE42
; RUN: llc < %s -mtriple=x86_64-pc-linux -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-pc-linux -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2,AVX2-SLOW
; RUN: llc < %s -mtriple=x86_64-pc-linux -mattr=+avx2,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX2-FAST
; RUN: llc < %s -mtriple=x86_64-pc-linux -mattr=+xop | FileCheck %s --check-prefix=XOP
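
; Store a <3 x i64> <a0,b0,a1> built from two <2 x i64> inputs (mask <0,2,1>).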
define void @v3i64(<2 x i64> %a, <2 x i64> %b, <3 x i64>* %p) nounwind {
; SSE2-LABEL: v3i64:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE2-NEXT: movq %xmm2, 16(%rdi)
; SSE2-NEXT: movdqa %xmm0, (%rdi)
; SSE2-NEXT: retq
;
; SSE42-LABEL: v3i64:
; SSE42: # %bb.0:
; SSE42-NEXT: pextrq $1, %xmm0, 16(%rdi)
; SSE42-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE42-NEXT: movdqa %xmm0, (%rdi)
; SSE42-NEXT: retq
;
; AVX-LABEL: v3i64:
; AVX: # %bb.0:
; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm0[0],xmm1[0]
; AVX-NEXT: vpextrq $1, %xmm0, 16(%rdi)
; AVX-NEXT: vmovdqa %xmm1, (%rdi)
; AVX-NEXT: retq
;
; XOP-LABEL: v3i64:
; XOP: # %bb.0:
; XOP-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm0[0],xmm1[0]
; XOP-NEXT: vpextrq $1, %xmm0, 16(%rdi)
; XOP-NEXT: vmovdqa %xmm1, (%rdi)
; XOP-NEXT: retq
  %r = shufflevector <2 x i64> %a, <2 x i64> %b, <3 x i32> <i32 0, i32 2, i32 1>
  store <3 x i64> %r, <3 x i64>* %p
  ret void
}
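
; Same <0,2,1> shuffle as above for <3 x double>.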
define void @v3f64(<2 x double> %a, <2 x double> %b, <3 x double>* %p) nounwind {
; SSE-LABEL: v3f64:
; SSE: # %bb.0:
; SSE-NEXT: movhps %xmm0, 16(%rdi)
; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: movaps %xmm0, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: v3f64:
; AVX: # %bb.0:
; AVX-NEXT: vmovlhps {{.*#+}} xmm1 = xmm0[0],xmm1[0]
; AVX-NEXT: vmovhps %xmm0, 16(%rdi)
; AVX-NEXT: vmovaps %xmm1, (%rdi)
; AVX-NEXT: retq
;
; XOP-LABEL: v3f64:
; XOP: # %bb.0:
; XOP-NEXT: vmovlhps {{.*#+}} xmm1 = xmm0[0],xmm1[0]
; XOP-NEXT: vmovhps %xmm0, 16(%rdi)
; XOP-NEXT: vmovaps %xmm1, (%rdi)
; XOP-NEXT: retq
  %r = shufflevector <2 x double> %a, <2 x double> %b, <3 x i32> <i32 0, i32 2, i32 1>
  store <3 x double> %r, <3 x double>* %p
  ret void
}
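
; Same <0,2,1> shuffle for <3 x i32>, from narrow <2 x i32> inputs.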
define void @v3i32(<2 x i32> %a, <2 x i32> %b, <3 x i32>* %p) nounwind {
; SSE2-LABEL: v3i32:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: movd %xmm2, 8(%rdi)
; SSE2-NEXT: movq %xmm0, (%rdi)
; SSE2-NEXT: retq
;
; SSE42-LABEL: v3i32:
; SSE42: # %bb.0:
; SSE42-NEXT: extractps $1, %xmm0, 8(%rdi)
; SSE42-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE42-NEXT: movlps %xmm0, (%rdi)
; SSE42-NEXT: retq
;
; AVX-LABEL: v3i32:
; AVX: # %bb.0:
; AVX-NEXT: vunpcklps {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; AVX-NEXT: vextractps $1, %xmm0, 8(%rdi)
; AVX-NEXT: vmovlps %xmm1, (%rdi)
; AVX-NEXT: retq
;
; XOP-LABEL: v3i32:
; XOP: # %bb.0:
; XOP-NEXT: vunpcklps {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; XOP-NEXT: vextractps $1, %xmm0, 8(%rdi)
; XOP-NEXT: vmovlps %xmm1, (%rdi)
; XOP-NEXT: retq
  %r = shufflevector <2 x i32> %a, <2 x i32> %b, <3 x i32> <i32 0, i32 2, i32 1>
  store <3 x i32> %r, <3 x i32>* %p
  ret void
}
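
; <5 x i16> store of mask <0,5,1,6,3>: one qword store plus a trailing word.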
define void @v5i16(<4 x i16> %a, <4 x i16> %b, <5 x i16>* %p) nounwind {
; SSE2-LABEL: v5i16:
; SSE2: # %bb.0:
; SSE2-NEXT: psrlq $16, %xmm1
; SSE2-NEXT: pextrw $3, %xmm0, %eax
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE2-NEXT: movw %ax, 8(%rdi)
; SSE2-NEXT: movq %xmm0, (%rdi)
; SSE2-NEXT: retq
;
; SSE42-LABEL: v5i16:
; SSE42: # %bb.0:
; SSE42-NEXT: psrlq $16, %xmm1
; SSE42-NEXT: pextrw $3, %xmm0, 8(%rdi)
; SSE42-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE42-NEXT: movq %xmm0, (%rdi)
; SSE42-NEXT: retq
;
; AVX-LABEL: v5i16:
; AVX: # %bb.0:
; AVX-NEXT: vpsrlq $16, %xmm1, %xmm1
; AVX-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX-NEXT: vpextrw $3, %xmm0, 8(%rdi)
; AVX-NEXT: vmovq %xmm1, (%rdi)
; AVX-NEXT: retq
;
; XOP-LABEL: v5i16:
; XOP: # %bb.0:
; XOP-NEXT: vpsrlq $16, %xmm1, %xmm1
; XOP-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; XOP-NEXT: vpextrw $3, %xmm0, 8(%rdi)
; XOP-NEXT: vmovq %xmm1, (%rdi)
; XOP-NEXT: retq
  %r = shufflevector <4 x i16> %a, <4 x i16> %b, <5 x i32> <i32 0, i32 5, i32 1, i32 6, i32 3>
  store <5 x i16> %r, <5 x i16>* %p
  ret void
}
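
; Same <0,5,1,6,3> mask for <5 x i32>: a 16-byte store plus a trailing dword.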
define void @v5i32(<4 x i32> %a, <4 x i32> %b, <5 x i32>* %p) nounwind {
; SSE2-LABEL: v5i32:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[3,1,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: movd %xmm2, 16(%rdi)
; SSE2-NEXT: movdqa %xmm0, (%rdi)
; SSE2-NEXT: retq
;
; SSE42-LABEL: v5i32:
; SSE42: # %bb.0:
; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,2]
; SSE42-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero
; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; SSE42-NEXT: pextrd $3, %xmm0, 16(%rdi)
; SSE42-NEXT: movdqa %xmm2, (%rdi)
; SSE42-NEXT: retq
;
; AVX1-LABEL: v5i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,2,2]
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; AVX1-NEXT: vpextrd $3, %xmm0, 16(%rdi)
; AVX1-NEXT: vmovdqa %xmm1, (%rdi)
; AVX1-NEXT: retq
;
; AVX2-LABEL: v5i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,2,2]
; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero
; AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3]
; AVX2-NEXT: vpextrd $3, %xmm0, 16(%rdi)
; AVX2-NEXT: vmovdqa %xmm1, (%rdi)
; AVX2-NEXT: retq
;
; XOP-LABEL: v5i32:
; XOP: # %bb.0:
; XOP-NEXT: vpperm {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm1[4,5,6,7],xmm0[4,5,6,7],xmm1[8,9,10,11]
; XOP-NEXT: vpextrd $3, %xmm0, 16(%rdi)
; XOP-NEXT: vmovdqa %xmm1, (%rdi)
; XOP-NEXT: retq
  %r = shufflevector <4 x i32> %a, <4 x i32> %b, <5 x i32> <i32 0, i32 5, i32 1, i32 6, i32 3>
  store <5 x i32> %r, <5 x i32>* %p
  ret void
}
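
; Same <0,5,1,6,3> mask for <5 x float>.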
define void @v5f32(<4 x float> %a, <4 x float> %b, <5 x float>* %p) nounwind {
; SSE2-LABEL: v5f32:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps %xmm0, %xmm2
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm1[1,2]
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,1,3]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE2-NEXT: movss %xmm0, 16(%rdi)
; SSE2-NEXT: movaps %xmm2, (%rdi)
; SSE2-NEXT: retq
;
; SSE42-LABEL: v5f32:
; SSE42: # %bb.0:
; SSE42-NEXT: extractps $3, %xmm0, 16(%rdi)
; SSE42-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[1,2]
; SSE42-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
; SSE42-NEXT: movaps %xmm0, (%rdi)
; SSE42-NEXT: retq
;
; AVX-LABEL: v5f32:
; AVX: # %bb.0:
; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm0[0,1],xmm1[1,2]
; AVX-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[0,2,1,3]
; AVX-NEXT: vextractps $3, %xmm0, 16(%rdi)
; AVX-NEXT: vmovaps %xmm1, (%rdi)
; AVX-NEXT: retq
;
; XOP-LABEL: v5f32:
; XOP: # %bb.0:
; XOP-NEXT: vshufps {{.*#+}} xmm1 = xmm0[0,1],xmm1[1,2]
; XOP-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[0,2,1,3]
; XOP-NEXT: vextractps $3, %xmm0, 16(%rdi)
; XOP-NEXT: vmovaps %xmm1, (%rdi)
; XOP-NEXT: retq
  %r = shufflevector <4 x float> %a, <4 x float> %b, <5 x i32> <i32 0, i32 5, i32 1, i32 6, i32 3>
  store <5 x float> %r, <5 x float>* %p
  ret void
}
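
; <7 x i8> store of mask <0,6,3,6,1,7,4>: dword + word + byte stores.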
define void @v7i8(<4 x i8> %a, <4 x i8> %b, <7 x i8>* %p) nounwind {
; SSE2-LABEL: v7i8:
; SSE2: # %bb.0:
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,1,3,4,5,6,7]
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [0,255,0,255,0,255,255,255,255,255,255,255,255,255,255,255]
; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,2,3,0,4,5,6,7]
; SSE2-NEXT: pand %xmm2, %xmm1
; SSE2-NEXT: pandn %xmm0, %xmm2
; SSE2-NEXT: por %xmm1, %xmm2
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: movb %al, 6(%rdi)
; SSE2-NEXT: movd %xmm2, (%rdi)
; SSE2-NEXT: pextrw $2, %xmm2, %eax
; SSE2-NEXT: movw %ax, 4(%rdi)
; SSE2-NEXT: retq
;
; SSE42-LABEL: v7i8:
; SSE42: # %bb.0:
; SSE42-NEXT: pextrb $0, %xmm1, 6(%rdi)
; SSE42-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE42-NEXT: pshufb {{.*#+}} xmm1 = xmm1[1,4,7,4,3,6,0,u,u,u,u,u,u,u,u,u]
; SSE42-NEXT: pextrw $2, %xmm1, 4(%rdi)
; SSE42-NEXT: movd %xmm1, (%rdi)
; SSE42-NEXT: retq
;
; AVX-LABEL: v7i8:
; AVX: # %bb.0:
; AVX-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[1,4,7,4,3,6,0,u,u,u,u,u,u,u,u,u]
; AVX-NEXT: vpextrb $0, %xmm1, 6(%rdi)
; AVX-NEXT: vpextrw $2, %xmm0, 4(%rdi)
; AVX-NEXT: vmovd %xmm0, (%rdi)
; AVX-NEXT: retq
;
; XOP-LABEL: v7i8:
; XOP: # %bb.0:
; XOP-NEXT: vpperm {{.*#+}} xmm0 = xmm0[0],xmm1[2],xmm0[3],xmm1[2],xmm0[1],xmm1[3,0,u,u,u,u,u,u,u,u,u]
; XOP-NEXT: vpextrb $0, %xmm1, 6(%rdi)
; XOP-NEXT: vpextrw $2, %xmm0, 4(%rdi)
; XOP-NEXT: vmovd %xmm0, (%rdi)
; XOP-NEXT: retq
  %r = shufflevector <4 x i8> %a, <4 x i8> %b, <7 x i32> <i32 0, i32 6, i32 3, i32 6, i32 1, i32 7, i32 4>
  store <7 x i8> %r, <7 x i8>* %p
  ret void
}
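
; Same <0,6,3,6,1,7,4> mask for <7 x i16>: qword + dword + word stores.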
define void @v7i16(<4 x i16> %a, <4 x i16> %b, <7 x i16>* %p) nounwind {
; SSE2-LABEL: v7i16:
; SSE2: # %bb.0:
; SSE2-NEXT: movd %xmm1, %eax
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[0,1,0,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,2,3,2,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,5,6,4,7]
; SSE2-NEXT: movw %ax, 12(%rdi)
; SSE2-NEXT: movq %xmm0, (%rdi)
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
; SSE2-NEXT: movd %xmm0, 8(%rdi)
; SSE2-NEXT: retq
;
; SSE42-LABEL: v7i16:
; SSE42: # %bb.0:
; SSE42-NEXT: pextrw $0, %xmm1, 12(%rdi)
; SSE42-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSE42-NEXT: pshufb {{.*#+}} xmm1 = xmm1[2,3,8,9,14,15,8,9,6,7,12,13,0,1,14,15]
; SSE42-NEXT: pextrd $2, %xmm1, 8(%rdi)
; SSE42-NEXT: movq %xmm1, (%rdi)
; SSE42-NEXT: retq
;
; AVX-LABEL: v7i16:
; AVX: # %bb.0:
; AVX-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[2,3,8,9,14,15,8,9,6,7,12,13,0,1,14,15]
; AVX-NEXT: vpextrw $0, %xmm1, 12(%rdi)
; AVX-NEXT: vpextrd $2, %xmm0, 8(%rdi)
; AVX-NEXT: vmovq %xmm0, (%rdi)
; AVX-NEXT: retq
;
; XOP-LABEL: v7i16:
; XOP: # %bb.0:
; XOP-NEXT: vpperm {{.*#+}} xmm0 = xmm0[0,1],xmm1[4,5],xmm0[6,7],xmm1[4,5],xmm0[2,3],xmm1[6,7,0,1],xmm0[6,7]
; XOP-NEXT: vpextrw $0, %xmm1, 12(%rdi)
; XOP-NEXT: vpextrd $2, %xmm0, 8(%rdi)
; XOP-NEXT: vmovq %xmm0, (%rdi)
; XOP-NEXT: retq
  %r = shufflevector <4 x i16> %a, <4 x i16> %b, <7 x i32> <i32 0, i32 6, i32 3, i32 6, i32 1, i32 7, i32 4>
  store <7 x i16> %r, <7 x i16>* %p
  ret void
}
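
; Same <0,6,3,6,1,7,4> mask for <7 x i32>: 16-byte + qword + dword stores.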
define void @v7i32(<4 x i32> %a, <4 x i32> %b, <7 x i32>* %p) nounwind {
; SSE2-LABEL: v7i32:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,1,2,2]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[0,1,0,3]
; SSE2-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm2[2],xmm3[3],xmm2[3]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[3,1,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE2-NEXT: movd %xmm1, 24(%rdi)
; SSE2-NEXT: movq %xmm0, 16(%rdi)
; SSE2-NEXT: movdqa %xmm3, (%rdi)
; SSE2-NEXT: retq
;
; SSE42-LABEL: v7i32:
; SSE42: # %bb.0:
; SSE42-NEXT: movdqa %xmm0, %xmm2
; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5],xmm2[6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,3,2]
; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
; SSE42-NEXT: movd %xmm1, 24(%rdi)
; SSE42-NEXT: movq %xmm0, 16(%rdi)
; SSE42-NEXT: movdqa %xmm2, (%rdi)
; SSE42-NEXT: retq
;
; AVX-LABEL: v7i32:
; AVX: # %bb.0:
; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm0[0,1],xmm1[2],xmm0[3]
; AVX-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[0,2,3,2]
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[1,3,2,3]
; AVX-NEXT: vmovss %xmm1, 24(%rdi)
; AVX-NEXT: vmovlps %xmm0, 16(%rdi)
; AVX-NEXT: vmovaps %xmm2, (%rdi)
; AVX-NEXT: retq
;
; XOP-LABEL: v7i32:
; XOP: # %bb.0:
; XOP-NEXT: vblendps {{.*#+}} xmm2 = xmm0[0,1],xmm1[2],xmm0[3]
; XOP-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[0,2,3,2]
; XOP-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; XOP-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[1,3,2,3]
; XOP-NEXT: vmovss %xmm1, 24(%rdi)
; XOP-NEXT: vmovlps %xmm0, 16(%rdi)
; XOP-NEXT: vmovaps %xmm2, (%rdi)
; XOP-NEXT: retq
  %r = shufflevector <4 x i32> %a, <4 x i32> %b, <7 x i32> <i32 0, i32 6, i32 3, i32 6, i32 1, i32 7, i32 4>
  store <7 x i32> %r, <7 x i32>* %p
  ret void
}
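
; <12 x i8>: interleave the two halves of %a with the low half of %b at stride 3.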
define void @v12i8(<8 x i8> %a, <8 x i8> %b, <12 x i8>* %p) nounwind {
; SSE2-LABEL: v12i8:
; SSE2: # %bb.0:
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[3,1,2,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,3,1,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,4,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,1,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,6,4]
; SSE2-NEXT: packuswb %xmm2, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,255,0,255,255,0,255,255,0,255,255,0,255,255,255,255]
; SSE2-NEXT: pand %xmm2, %xmm0
; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,0,1,1,4,5,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,3]
; SSE2-NEXT: pandn %xmm1, %xmm2
; SSE2-NEXT: por %xmm0, %xmm2
; SSE2-NEXT: movq %xmm2, (%rdi)
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
; SSE2-NEXT: movd %xmm0, 8(%rdi)
; SSE2-NEXT: retq
;
; SSE42-LABEL: v12i8:
; SSE42: # %bb.0:
; SSE42-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE42-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,8,1,2,10,3,4,12,5,6,14,7,u,u,u,u]
; SSE42-NEXT: pextrd $2, %xmm0, 8(%rdi)
; SSE42-NEXT: movq %xmm0, (%rdi)
; SSE42-NEXT: retq
;
; AVX-LABEL: v12i8:
; AVX: # %bb.0:
; AVX-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,8,1,2,10,3,4,12,5,6,14,7,u,u,u,u]
; AVX-NEXT: vpextrd $2, %xmm0, 8(%rdi)
; AVX-NEXT: vmovq %xmm0, (%rdi)
; AVX-NEXT: retq
;
; XOP-LABEL: v12i8:
; XOP: # %bb.0:
; XOP-NEXT: vpperm {{.*#+}} xmm0 = xmm0[0,4],xmm1[0],xmm0[1,5],xmm1[1],xmm0[2,6],xmm1[2],xmm0[3,7],xmm1[3],xmm0[u,u,u,u]
; XOP-NEXT: vpextrd $2, %xmm0, 8(%rdi)
; XOP-NEXT: vmovq %xmm0, (%rdi)
; XOP-NEXT: retq
  %r = shufflevector <8 x i8> %a, <8 x i8> %b, <12 x i32> <i32 0, i32 4, i32 8, i32 1, i32 5, i32 9, i32 2, i32 6, i32 10, i32 3, i32 7, i32 11>
  store <12 x i8> %r, <12 x i8>* %p
  ret void
}
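
; Same stride-3 interleave for <12 x i16>.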
define void @v12i16(<8 x i16> %a, <8 x i16> %b, <12 x i16>* %p) nounwind {
; SSE2-LABEL: v12i16:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,0,0,3]
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [65535,65535,0,65535,65535,0,65535,65535]
; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm0[0,1,2,3,6,5,4,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,3,2,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,2,2,1,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,6,4]
; SSE2-NEXT: pand %xmm3, %xmm4
; SSE2-NEXT: pandn %xmm2, %xmm3
; SSE2-NEXT: por %xmm4, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [0,65535,65535,0,65535,65535,65535,65535]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,1,3,4,5,6,7]
; SSE2-NEXT: pand %xmm2, %xmm0
; SSE2-NEXT: pandn %xmm1, %xmm2
; SSE2-NEXT: por %xmm0, %xmm2
; SSE2-NEXT: movq %xmm2, 16(%rdi)
; SSE2-NEXT: movdqa %xmm3, (%rdi)
; SSE2-NEXT: retq
;
; SSE42-LABEL: v12i16:
; SSE42: # %bb.0:
; SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,2,3]
; SSE42-NEXT: pshufd {{.*#+}} xmm3 = xmm0[3,1,2,3]
; SSE42-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,3,1,3,4,5,6,7]
; SSE42-NEXT: pblendw {{.*#+}} xmm3 = xmm2[0],xmm3[1,2],xmm2[3],xmm3[4,5,6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,3]
; SSE42-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,8,9,8,9,2,3,10,11,10,11,4,5,12,13]
; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3,4],xmm1[5],xmm0[6,7]
; SSE42-NEXT: movdqa %xmm0, (%rdi)
; SSE42-NEXT: movq %xmm3, 16(%rdi)
; SSE42-NEXT: retq
;
; AVX1-LABEL: v12i16:
; AVX1: # %bb.0:
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,2,3]
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[3,1,2,3]
; AVX1-NEXT: vpshuflw {{.*#+}} xmm3 = xmm3[0,3,1,3,4,5,6,7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1,2],xmm2[3],xmm3[4,5,6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,3]
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,8,9,8,9,2,3,10,11,10,11,4,5,12,13]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3,4],xmm1[5],xmm0[6,7]
; AVX1-NEXT: vmovdqa %xmm0, (%rdi)
; AVX1-NEXT: vmovq %xmm2, 16(%rdi)
; AVX1-NEXT: retq
;
; AVX2-SLOW-LABEL: v12i16:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,2,3]
; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[3,1,2,3]
; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm3 = xmm3[0,3,1,3,4,5,6,7]
; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1,2],xmm2[3],xmm3[4,5,6,7]
; AVX2-SLOW-NEXT: vpbroadcastd %xmm1, %xmm1
; AVX2-SLOW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,8,9,8,9,2,3,10,11,10,11,4,5,12,13]
; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3,4],xmm1[5],xmm0[6,7]
; AVX2-SLOW-NEXT: vmovdqa %xmm0, (%rdi)
; AVX2-SLOW-NEXT: vmovq %xmm2, 16(%rdi)
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-LABEL: v12i16:
; AVX2-FAST: # %bb.0:
; AVX2-FAST-NEXT: vpbroadcastd %xmm1, %xmm2
; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[0,1,8,9,8,9,2,3,10,11,10,11,4,5,12,13]
; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1],xmm2[2],xmm3[3,4],xmm2[5],xmm3[6,7]
; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[12,13,6,7,14,15,6,7,8,9,10,11,12,13,14,15]
; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3],xmm0[4,5,6,7]
; AVX2-FAST-NEXT: vmovq %xmm0, 16(%rdi)
; AVX2-FAST-NEXT: vmovdqa %xmm2, (%rdi)
; AVX2-FAST-NEXT: retq
;
; XOP-LABEL: v12i16:
; XOP: # %bb.0:
; XOP-NEXT: vpperm {{.*#+}} xmm2 = xmm0[0,1,8,9],xmm1[0,1],xmm0[2,3,10,11],xmm1[2,3],xmm0[4,5,12,13]
; XOP-NEXT: vpperm {{.*#+}} xmm0 = xmm1[4,5],xmm0[6,7,14,15],xmm1[6,7],xmm0[8,9,10,11,12,13,14,15]
; XOP-NEXT: vmovq %xmm0, 16(%rdi)
; XOP-NEXT: vmovdqa %xmm2, (%rdi)
; XOP-NEXT: retq
  %r = shufflevector <8 x i16> %a, <8 x i16> %b, <12 x i32> <i32 0, i32 4, i32 8, i32 1, i32 5, i32 9, i32 2, i32 6, i32 10, i32 3, i32 7, i32 11>
  store <12 x i16> %r, <12 x i16>* %p
  ret void
}
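
; Same stride-3 interleave for <12 x i32> (48 bytes).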
define void @v12i32(<8 x i32> %a, <8 x i32> %b, <12 x i32>* %p) nounwind {
; SSE2-LABEL: v12i32:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps %xmm2, %xmm3
; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,0],xmm0[1,0]
; SSE2-NEXT: movaps %xmm0, %xmm4
; SSE2-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,1],xmm3[0,2]
; SSE2-NEXT: movaps %xmm0, %xmm3
; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0],xmm1[2,1]
; SSE2-NEXT: movaps %xmm2, %xmm5
; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,0],xmm1[1,0]
; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,0],xmm3[0,2]
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,2],xmm2[3,2]
; SSE2-NEXT: unpckhps {{.*#+}} xmm2 = xmm2[2],xmm0[2],xmm2[3],xmm0[3]
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm1[0,2]
; SSE2-NEXT: movaps %xmm2, 32(%rdi)
; SSE2-NEXT: movaps %xmm5, 16(%rdi)
; SSE2-NEXT: movaps %xmm4, (%rdi)
; SSE2-NEXT: retq
;
; SSE42-LABEL: v12i32:
; SSE42: # %bb.0:
; SSE42-NEXT: pshufd {{.*#+}} xmm3 = xmm1[0,0,1,1]
; SSE42-NEXT: pshufd {{.*#+}} xmm4 = xmm0[0,1,0,1]
; SSE42-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm3[2,3],xmm4[4,5,6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,1,0,1]
; SSE42-NEXT: pblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4,5],xmm4[6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm4 = xmm1[1,1,2,2]
; SSE42-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm2[2,3],xmm4[4,5,6,7]
; SSE42-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm0[4,5],xmm4[6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm0[2,3],xmm2[4,5,6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,2,3,3]
; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5],xmm2[6,7]
; SSE42-NEXT: movdqa %xmm0, 32(%rdi)
; SSE42-NEXT: movdqa %xmm4, 16(%rdi)
; SSE42-NEXT: movdqa %xmm3, (%rdi)
; SSE42-NEXT: retq
;
; AVX1-LABEL: v12i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm0[2,3,0,1]
; AVX1-NEXT: vmovsldup {{.*#+}} ymm2 = ymm2[0,0,2,2,4,4,6,6]
; AVX1-NEXT: vpermilps {{.*#+}} ymm3 = ymm0[0,u,u,1,5,u,u,6]
; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2,3,4,5],ymm2[6],ymm3[7]
; AVX1-NEXT: vpermilps {{.*#+}} xmm3 = xmm1[0,1,0,1]
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm3, %ymm3
; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,3],xmm3[3,3]
; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[2,3,2,3]
; AVX1-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3]
; AVX1-NEXT: vmovaps %xmm0, 32(%rdi)
; AVX1-NEXT: vmovaps %ymm2, (%rdi)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-SLOW-LABEL: v12i32:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vmovaps {{.*#+}} ymm2 = <0,4,u,1,5,u,2,6>
; AVX2-SLOW-NEXT: vpermps %ymm0, %ymm2, %ymm2
; AVX2-SLOW-NEXT: vbroadcastsd %xmm1, %ymm3
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7]
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,3],xmm3[3,3]
; AVX2-SLOW-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[2,3,2,3]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3]
; AVX2-SLOW-NEXT: vmovaps %xmm0, 32(%rdi)
; AVX2-SLOW-NEXT: vmovaps %ymm2, (%rdi)
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-LABEL: v12i32:
; AVX2-FAST: # %bb.0:
; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm2 = <0,4,u,1,5,u,2,6>
; AVX2-FAST-NEXT: vpermps %ymm0, %ymm2, %ymm2
; AVX2-FAST-NEXT: vbroadcastsd %xmm1, %ymm3
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7]
; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm3 = [3,3,7,7,7,7,6,7]
; AVX2-FAST-NEXT: vpermps %ymm0, %ymm3, %ymm0
; AVX2-FAST-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[2,3,2,3]
; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3]
; AVX2-FAST-NEXT: vmovaps %xmm0, 32(%rdi)
; AVX2-FAST-NEXT: vmovaps %ymm2, (%rdi)
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq
;
; XOP-LABEL: v12i32:
; XOP: # %bb.0:
; XOP-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm0[2,3,0,1]
; XOP-NEXT: vpermil2ps {{.*#+}} ymm2 = ymm0[0],ymm2[0],ymm0[u,1,5,u],ymm2[6],ymm0[6]
; XOP-NEXT: vpermilps {{.*#+}} xmm3 = xmm1[0,1,0,1]
; XOP-NEXT: vinsertf128 $1, %xmm3, %ymm3, %ymm3
; XOP-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7]
; XOP-NEXT: vextractf128 $1, %ymm0, %xmm3
; XOP-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,3],xmm3[3,3]
; XOP-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[2,3,2,3]
; XOP-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3]
; XOP-NEXT: vmovaps %xmm0, 32(%rdi)
; XOP-NEXT: vmovaps %ymm2, (%rdi)
; XOP-NEXT: vzeroupper
; XOP-NEXT: retq
  %r = shufflevector <8 x i32> %a, <8 x i32> %b, <12 x i32> <i32 0, i32 4, i32 8, i32 1, i32 5, i32 9, i32 2, i32 6, i32 10, i32 3, i32 7, i32 11>
  store <12 x i32> %r, <12 x i32>* %p
  ret void
}
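
; PR29025: interleave three <4 x i8> sources into a single <12 x i8> store.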
define void @pr29025(<4 x i8> %a, <4 x i8> %b, <4 x i8> %c, <12 x i8> *%p) nounwind {
; SSE2-LABEL: pr29025:
; SSE2: # %bb.0:
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,3,1,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,4,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,1,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,6,4]
; SSE2-NEXT: packuswb %xmm1, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [255,255,0,255,255,0,255,255,0,255,255,0,255,255,255,255]
; SSE2-NEXT: pand %xmm1, %xmm0
; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,0,1,1,4,5,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,1,3]
; SSE2-NEXT: pandn %xmm2, %xmm1
; SSE2-NEXT: por %xmm0, %xmm1
; SSE2-NEXT: movq %xmm1, (%rdi)
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
; SSE2-NEXT: movd %xmm0, 8(%rdi)
; SSE2-NEXT: retq
;
; SSE42-LABEL: pr29025:
; SSE42: # %bb.0:
; SSE42-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE42-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
; SSE42-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,8,1,2,10,3,4,12,5,6,14,7,u,u,u,u]
; SSE42-NEXT: pextrd $2, %xmm0, 8(%rdi)
; SSE42-NEXT: movq %xmm0, (%rdi)
; SSE42-NEXT: retq
;
; AVX-LABEL: pr29025:
; AVX: # %bb.0:
; AVX-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; AVX-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,8,1,2,10,3,4,12,5,6,14,7,u,u,u,u]
; AVX-NEXT: vpextrd $2, %xmm0, 8(%rdi)
; AVX-NEXT: vmovq %xmm0, (%rdi)
; AVX-NEXT: retq
;
; XOP-LABEL: pr29025:
; XOP: # %bb.0:
; XOP-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; XOP-NEXT: vpperm {{.*#+}} xmm0 = xmm0[0,4],xmm2[0],xmm0[1,5],xmm2[1],xmm0[2,6],xmm2[2],xmm0[3,7],xmm2[3],xmm0[u,u,u,u]
; XOP-NEXT: vpextrd $2, %xmm0, 8(%rdi)
; XOP-NEXT: vmovq %xmm0, (%rdi)
; XOP-NEXT: retq
  %s1 = shufflevector <4 x i8> %a, <4 x i8> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  %s2 = shufflevector <4 x i8> %c, <4 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
  %r = shufflevector <8 x i8> %s1, <8 x i8> %s2, <12 x i32> <i32 0, i32 4, i32 8, i32 1, i32 5, i32 9, i32 2, i32 6, i32 10, i32 3, i32 7, i32 11>
  store <12 x i8> %r, <12 x i8>* %p, align 1
  ret void
}
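
; Deinterleave a stride-3 <24 x i8> load into three <8 x i8> vectors.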
define void @interleave_24i8_out(<24 x i8>* %p, <8 x i8>* %q1, <8 x i8>* %q2, <8 x i8>* %q3) nounwind {
; SSE2-LABEL: interleave_24i8_out:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqu (%rdi), %xmm0
; SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,255,0,255,255,0,255,255,255,255,255,255,255,255,255,255]
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pand %xmm4, %xmm2
; SSE2-NEXT: pandn %xmm1, %xmm4
; SSE2-NEXT: por %xmm2, %xmm4
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3],xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [65535,0,65535,65535,0,65535,65535,0]
; SSE2-NEXT: pand %xmm5, %xmm4
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
; SSE2-NEXT: pandn %xmm3, %xmm5
; SSE2-NEXT: por %xmm4, %xmm5
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm5[0,2,1,3]
; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,7,6,5]
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,3,2,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,3,2,1,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,6,5,4,7]
; SSE2-NEXT: packuswb %xmm0, %xmm4
; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [0,255,255,0,255,255,0,255,255,255,255,255,255,255,255,255]
; SSE2-NEXT: movdqa %xmm0, %xmm6
; SSE2-NEXT: pand %xmm5, %xmm6
; SSE2-NEXT: pandn %xmm1, %xmm5
; SSE2-NEXT: por %xmm6, %xmm5
; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1],xmm5[2],xmm2[2],xmm5[3],xmm2[3],xmm5[4],xmm2[4],xmm5[5],xmm2[5],xmm5[6],xmm2[6],xmm5[7],xmm2[7]
; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [65535,65535,0,65535,65535,0,65535,65535]
; SSE2-NEXT: pand %xmm6, %xmm5
; SSE2-NEXT: pandn %xmm3, %xmm6
; SSE2-NEXT: por %xmm5, %xmm6
; SSE2-NEXT: pshuflw {{.*#+}} xmm5 = xmm6[2,1,0,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,6,5,4,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,3,2,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[1,2,3,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,6,7,4]
; SSE2-NEXT: packuswb %xmm0, %xmm5
; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [255,0,255,255,0,255,255,0,255,255,255,255,255,255,255,255]
; SSE2-NEXT: pand %xmm6, %xmm0
; SSE2-NEXT: pandn %xmm1, %xmm6
; SSE2-NEXT: por %xmm0, %xmm6
; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm2[0],xmm6[1],xmm2[1],xmm6[2],xmm2[2],xmm6[3],xmm2[3],xmm6[4],xmm2[4],xmm6[5],xmm2[5],xmm6[6],xmm2[6],xmm6[7],xmm2[7]
; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [0,65535,65535,0,65535,65535,0,65535]
; SSE2-NEXT: pand %xmm0, %xmm6
; SSE2-NEXT: pandn %xmm3, %xmm0
; SSE2-NEXT: por %xmm6, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,0]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,5]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,0]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,1,0,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,4,7]
; SSE2-NEXT: packuswb %xmm0, %xmm0
; SSE2-NEXT: movq %xmm4, (%rsi)
; SSE2-NEXT: movq %xmm5, (%rdx)
; SSE2-NEXT: movq %xmm0, (%rcx)
; SSE2-NEXT: retq
;
; SSE42-LABEL: interleave_24i8_out:
; SSE42: # %bb.0:
; SSE42-NEXT: movdqu (%rdi), %xmm0
; SSE42-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; SSE42-NEXT: movdqa %xmm1, %xmm2
; SSE42-NEXT: pshufb {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,xmm2[2,5,u,u,u,u,u,u,u,u]
; SSE42-NEXT: movdqa %xmm0, %xmm3
; SSE42-NEXT: pshufb {{.*#+}} xmm3 = xmm3[0,3,6,9,12,15],zero,zero,xmm3[u,u,u,u,u,u,u,u]
; SSE42-NEXT: por %xmm2, %xmm3
; SSE42-NEXT: movdqa %xmm1, %xmm2
; SSE42-NEXT: pshufb {{.*#+}} xmm2 = zero,zero,zero,zero,zero,xmm2[0,3,6,u,u,u,u,u,u,u,u]
; SSE42-NEXT: movdqa %xmm0, %xmm4
; SSE42-NEXT: pshufb {{.*#+}} xmm4 = xmm4[1,4,7,10,13],zero,zero,zero,xmm4[u,u,u,u,u,u,u,u]
; SSE42-NEXT: por %xmm2, %xmm4
; SSE42-NEXT: pshufb {{.*#+}} xmm1 = zero,zero,zero,zero,zero,xmm1[1,4,7,u,u,u,u,u,u,u,u]
; SSE42-NEXT: pshufb {{.*#+}} xmm0 = xmm0[2,5,8,11,14],zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
; SSE42-NEXT: por %xmm1, %xmm0
; SSE42-NEXT: movq %xmm3, (%rsi)
; SSE42-NEXT: movq %xmm4, (%rdx)
; SSE42-NEXT: movq %xmm0, (%rcx)
; SSE42-NEXT: retq
;
; AVX-LABEL: interleave_24i8_out:
; AVX: # %bb.0:
; AVX-NEXT: vmovdqu (%rdi), %xmm0
; AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,xmm1[2,5,u,u,u,u,u,u,u,u]
; AVX-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[0,3,6,9,12,15],zero,zero,xmm0[u,u,u,u,u,u,u,u]
; AVX-NEXT: vpor %xmm2, %xmm3, %xmm2
; AVX-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,zero,zero,zero,xmm1[0,3,6,u,u,u,u,u,u,u,u]
; AVX-NEXT: vpshufb {{.*#+}} xmm4 = xmm0[1,4,7,10,13],zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
; AVX-NEXT: vpor %xmm3, %xmm4, %xmm3
; AVX-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,zero,zero,zero,xmm1[1,4,7,u,u,u,u,u,u,u,u]
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[2,5,8,11,14],zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovq %xmm2, (%rsi)
; AVX-NEXT: vmovq %xmm3, (%rdx)
; AVX-NEXT: vmovq %xmm0, (%rcx)
; AVX-NEXT: retq
;
; XOP-LABEL: interleave_24i8_out:
; XOP: # %bb.0:
; XOP-NEXT: vmovdqu (%rdi), %xmm0
; XOP-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; XOP-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,xmm1[2,5,u,u,u,u,u,u,u,u]
; XOP-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[0,3,6,9,12,15],zero,zero,xmm0[u,u,u,u,u,u,u,u]
; XOP-NEXT: vpor %xmm2, %xmm3, %xmm2
; XOP-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,zero,zero,zero,xmm1[0,3,6,u,u,u,u,u,u,u,u]
; XOP-NEXT: vpshufb {{.*#+}} xmm4 = xmm0[1,4,7,10,13],zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
; XOP-NEXT: vpor %xmm3, %xmm4, %xmm3
; XOP-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,zero,zero,zero,xmm1[1,4,7,u,u,u,u,u,u,u,u]
; XOP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[2,5,8,11,14],zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
; XOP-NEXT: vpor %xmm1, %xmm0, %xmm0
; XOP-NEXT: vmovq %xmm2, (%rsi)
; XOP-NEXT: vmovq %xmm3, (%rdx)
; XOP-NEXT: vmovq %xmm0, (%rcx)
; XOP-NEXT: retq
  %wide.vec = load <24 x i8>, <24 x i8>* %p, align 4
  %s1 = shufflevector <24 x i8> %wide.vec, <24 x i8> undef, <8 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21>
  %s2 = shufflevector <24 x i8> %wide.vec, <24 x i8> undef, <8 x i32> <i32 1, i32 4, i32 7, i32 10, i32 13, i32 16, i32 19, i32 22>
  %s3 = shufflevector <24 x i8> %wide.vec, <24 x i8> undef, <8 x i32> <i32 2, i32 5, i32 8, i32 11, i32 14, i32 17, i32 20, i32 23>
  store <8 x i8> %s1, <8 x i8>* %q1, align 4
  store <8 x i8> %s2, <8 x i8>* %q2, align 4
  store <8 x i8> %s3, <8 x i8>* %q3, align 4
  ret void
}
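
; Interleave three <8 x i8> loads into one stride-3 <24 x i8> store.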
define void @interleave_24i8_in(<24 x i8>* %p, <8 x i8>* %q1, <8 x i8>* %q2, <8 x i8>* %q3) nounwind {
; SSE2-LABEL: interleave_24i8_in:
; SSE2: # %bb.0:
; SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; SSE2-NEXT: movq {{.*#+}} xmm2 = mem[0],zero
; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE2-NEXT: pxor %xmm3, %xmm3
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[1,1,2,2]
; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [65535,65535,0,65535,65535,0,65535,65535]
; SSE2-NEXT: pand %xmm5, %xmm4
; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm2[0,1,3,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,6,7]
; SSE2-NEXT: pandn %xmm3, %xmm5
; SSE2-NEXT: por %xmm4, %xmm5
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,2,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,2,2,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,5,4,5]
; SSE2-NEXT: packuswb %xmm5, %xmm3
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,255,0,255,255,0,255,255,0,255,255,0,255,255,0,255]
; SSE2-NEXT: pand %xmm4, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm0[0,1,0,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,0,0,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,6,6]
; SSE2-NEXT: pandn %xmm5, %xmm4
; SSE2-NEXT: por %xmm3, %xmm4
; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,1,2,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,1,1,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,6,7]
; SSE2-NEXT: packuswb %xmm0, %xmm1
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,0,255,255,0,255,255,0,255,255,255,255,255,255,255,255]
; SSE2-NEXT: pand %xmm2, %xmm1
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,1,3,3,4,5,6,7]
; SSE2-NEXT: pandn %xmm0, %xmm2
; SSE2-NEXT: por %xmm1, %xmm2
; SSE2-NEXT: movq %xmm2, 16(%rdi)
; SSE2-NEXT: movdqu %xmm4, (%rdi)
; SSE2-NEXT: retq
;
; SSE42-LABEL: interleave_24i8_in:
; SSE42: # %bb.0:
; SSE42-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE42-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; SSE42-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE42-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; SSE42-NEXT: movdqa %xmm0, %xmm2
; SSE42-NEXT: pshufb {{.*#+}} xmm2 = xmm2[0,8],zero,xmm2[1,9],zero,xmm2[2,10],zero,xmm2[3,11],zero,xmm2[4,12],zero,xmm2[5]
; SSE42-NEXT: movdqa %xmm1, %xmm3
; SSE42-NEXT: pshufb {{.*#+}} xmm3 = zero,zero,xmm3[0],zero,zero,xmm3[1],zero,zero,xmm3[2],zero,zero,xmm3[3],zero,zero,xmm3[4],zero
; SSE42-NEXT: por %xmm2, %xmm3
; SSE42-NEXT: pshufb {{.*#+}} xmm0 = xmm0[13],zero,xmm0[6,14],zero,xmm0[7,15],zero,xmm0[u,u,u,u,u,u,u,u]
; SSE42-NEXT: pshufb {{.*#+}} xmm1 = zero,xmm1[5],zero,zero,xmm1[6],zero,zero,xmm1[7,u,u,u,u,u,u,u,u]
; SSE42-NEXT: por %xmm0, %xmm1
; SSE42-NEXT: movq %xmm1, 16(%rdi)
; SSE42-NEXT: movdqu %xmm3, (%rdi)
; SSE42-NEXT: retq
;
; AVX-LABEL: interleave_24i8_in:
; AVX: # %bb.0:
; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[0,8],zero,xmm0[1,9],zero,xmm0[2,10],zero,xmm0[3,11],zero,xmm0[4,12],zero,xmm0[5]
; AVX-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,xmm1[0],zero,zero,xmm1[1],zero,zero,xmm1[2],zero,zero,xmm1[3],zero,zero,xmm1[4],zero
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[13],zero,xmm0[6,14],zero,xmm0[7,15],zero,xmm0[u,u,u,u,u,u,u,u]
; AVX-NEXT: vpshufb {{.*#+}} xmm1 = zero,xmm1[5],zero,zero,xmm1[6],zero,zero,xmm1[7,u,u,u,u,u,u,u,u]
; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovq %xmm0, 16(%rdi)
; AVX-NEXT: vmovdqu %xmm2, (%rdi)
; AVX-NEXT: retq
;
; XOP-LABEL: interleave_24i8_in:
; XOP: # %bb.0:
; XOP-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; XOP-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; XOP-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; XOP-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; XOP-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[0,8],zero,xmm0[1,9],zero,xmm0[2,10],zero,xmm0[3,11],zero,xmm0[4,12],zero,xmm0[5]
; XOP-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,xmm1[0],zero,zero,xmm1[1],zero,zero,xmm1[2],zero,zero,xmm1[3],zero,zero,xmm1[4],zero
; XOP-NEXT: vpor %xmm3, %xmm2, %xmm2
; XOP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[13],zero,xmm0[6,14],zero,xmm0[7,15],zero,xmm0[u,u,u,u,u,u,u,u]
; XOP-NEXT: vpshufb {{.*#+}} xmm1 = zero,xmm1[5],zero,zero,xmm1[6],zero,zero,xmm1[7,u,u,u,u,u,u,u,u]
; XOP-NEXT: vpor %xmm1, %xmm0, %xmm0
; XOP-NEXT: vmovq %xmm0, 16(%rdi)
; XOP-NEXT: vmovdqu %xmm2, (%rdi)
; XOP-NEXT: retq
  %s1 = load <8 x i8>, <8 x i8>* %q1, align 4
  %s2 = load <8 x i8>, <8 x i8>* %q2, align 4
  %s3 = load <8 x i8>, <8 x i8>* %q3, align 4
  %t1 = shufflevector <8 x i8> %s1, <8 x i8> %s2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  %t2 = shufflevector <8 x i8> %s3, <8 x i8> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %interleaved = shufflevector <16 x i8> %t1, <16 x i8> %t2, <24 x i32> <i32 0, i32 8, i32 16, i32 1, i32 9, i32 17, i32 2, i32 10, i32 18, i32 3, i32 11, i32 19, i32 4, i32 12, i32 20, i32 5, i32 13, i32 21, i32 6, i32 14, i32 22, i32 7, i32 15, i32 23>
  store <24 x i8> %interleaved, <24 x i8>* %p, align 4
  ret void
}
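
; Deinterleave a stride-3 <24 x i16> load into three <8 x i16> vectors.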
define void @interleave_24i16_out(<24 x i16>* %p, <8 x i16>* %q1, <8 x i16>* %q2, <8 x i16>* %q3) nounwind {
; SSE2-LABEL: interleave_24i16_out:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqu (%rdi), %xmm3
; SSE2-NEXT: movdqu 16(%rdi), %xmm2
; SSE2-NEXT: movdqu 32(%rdi), %xmm8
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [65535,0,65535,65535,0,65535,65535,0]
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: pand %xmm1, %xmm4
; SSE2-NEXT: pandn %xmm2, %xmm1
; SSE2-NEXT: por %xmm4, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,1,3]
; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,1,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,3,2,1,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm1[0,1,2,3,4,7,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm8[0,1,2,1]
; SSE2-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,5,6,5]
; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[3,0],xmm4[2,0]
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm5[2,0]
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [65535,65535,0,65535,65535,0,65535,65535]
; SSE2-NEXT: movdqa %xmm4, %xmm5
; SSE2-NEXT: pandn %xmm2, %xmm5
; SSE2-NEXT: movdqa %xmm3, %xmm6
; SSE2-NEXT: pand %xmm4, %xmm6
; SSE2-NEXT: por %xmm5, %xmm6
; SSE2-NEXT: pshuflw {{.*#+}} xmm5 = xmm6[2,1,2,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,5,4,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,3,2,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[1,2,3,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,6,7]
; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [65535,65535,65535,65535,65535,0,0,0]
; SSE2-NEXT: pand %xmm6, %xmm5
; SSE2-NEXT: pshuflw {{.*#+}} xmm7 = xmm8[0,3,2,3,4,5,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,1,0,3]
; SSE2-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,4,5,6]
; SSE2-NEXT: movdqa %xmm6, %xmm0
; SSE2-NEXT: pandn %xmm7, %xmm0
; SSE2-NEXT: por %xmm5, %xmm0
; SSE2-NEXT: pand %xmm4, %xmm2
; SSE2-NEXT: pandn %xmm3, %xmm4
; SSE2-NEXT: por %xmm2, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[3,1,2,0]
; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,6,5,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,1,0,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[2,1,0,3,4,5,6,7]
; SSE2-NEXT: pand %xmm6, %xmm2
; SSE2-NEXT: pshufhw {{.*#+}} xmm3 = xmm8[0,1,2,3,4,7,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,2,0]
; SSE2-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,7,4,5]
; SSE2-NEXT: pandn %xmm3, %xmm6
; SSE2-NEXT: por %xmm2, %xmm6
; SSE2-NEXT: movups %xmm1, (%rsi)
; SSE2-NEXT: movdqu %xmm0, (%rdx)
; SSE2-NEXT: movdqu %xmm6, (%rcx)
; SSE2-NEXT: retq
;
; SSE42-LABEL: interleave_24i16_out:
; SSE42: # %bb.0:
; SSE42-NEXT: movdqu (%rdi), %xmm0
; SSE42-NEXT: movdqu 16(%rdi), %xmm1
; SSE42-NEXT: movdqu 32(%rdi), %xmm2
; SSE42-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,1,2,1]
; SSE42-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,6,5]
; SSE42-NEXT: movdqa %xmm0, %xmm4
; SSE42-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0],xmm1[1],xmm4[2,3],xmm1[4],xmm4[5,6],xmm1[7]
; SSE42-NEXT: pshufb {{.*#+}} xmm4 = xmm4[0,1,6,7,12,13,2,3,8,9,14,15,12,13,14,15]
; SSE42-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5],xmm3[6,7]
; SSE42-NEXT: movdqa %xmm2, %xmm3
; SSE42-NEXT: pshufb {{.*#+}} xmm3 = xmm3[0,1,6,7,4,5,6,7,0,1,0,1,6,7,12,13]
; SSE42-NEXT: movdqa %xmm0, %xmm5
; SSE42-NEXT: pblendw {{.*#+}} xmm5 = xmm5[0,1],xmm1[2],xmm5[3,4],xmm1[5],xmm5[6,7]
; SSE42-NEXT: pshufb {{.*#+}} xmm5 = xmm5[2,3,8,9,14,15,4,5,10,11,10,11,8,9,14,15]
; SSE42-NEXT: pblendw {{.*#+}} xmm5 = xmm5[0,1,2,3,4],xmm3[5,6,7]
; SSE42-NEXT: pshufb {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,6,7,8,9,2,3,8,9,14,15]
; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm0[2],xmm1[3,4],xmm0[5],xmm1[6,7]
; SSE42-NEXT: pshufb {{.*#+}} xmm1 = xmm1[4,5,10,11,0,1,6,7,12,13,14,15,0,1,2,3]
; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm2[5,6,7]
; SSE42-NEXT: movdqu %xmm4, (%rsi)
; SSE42-NEXT: movdqu %xmm5, (%rdx)
; SSE42-NEXT: movdqu %xmm1, (%rcx)
; SSE42-NEXT: retq
;
; AVX1-LABEL: interleave_24i16_out:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqu (%rdi), %xmm0
; AVX1-NEXT: vmovdqu 16(%rdi), %xmm1
; AVX1-NEXT: vmovdqu 32(%rdi), %xmm2
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[0,1,2,1]
; AVX1-NEXT: vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,6,5]
; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm0[0],xmm1[1],xmm0[2,3],xmm1[4],xmm0[5,6],xmm1[7]
; AVX1-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[0,1,6,7,12,13,2,3,8,9,14,15,12,13,14,15]
; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3,4,5],xmm3[6,7]
; AVX1-NEXT: vpshufb {{.*#+}} xmm4 = xmm2[0,1,6,7,4,5,6,7,0,1,0,1,6,7,12,13]
; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm0[0,1],xmm1[2],xmm0[3,4],xmm1[5],xmm0[6,7]
; AVX1-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[2,3,8,9,14,15,4,5,10,11,u,u,u,u,u,u]
; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm5[0,1,2,3,4],xmm4[5,6,7]
; AVX1-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,6,7,8,9,2,3,8,9,14,15]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2],xmm1[3,4],xmm0[5],xmm1[6,7]
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[4,5,10,11,0,1,6,7,12,13,u,u,u,u,u,u]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm2[5,6,7]
; AVX1-NEXT: vmovdqu %xmm3, (%rsi)
; AVX1-NEXT: vmovdqu %xmm4, (%rdx)
; AVX1-NEXT: vmovdqu %xmm0, (%rcx)
; AVX1-NEXT: retq
;
; AVX2-LABEL: interleave_24i16_out:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqu (%rdi), %ymm0
; AVX2-NEXT: vmovdqu 32(%rdi), %xmm1
; AVX2-NEXT: vpblendw {{.*#+}} ymm2 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7,8,9],ymm1[10],ymm0[11,12],ymm1[13],ymm0[14,15]
; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3
; AVX2-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3],xmm3[4],xmm2[5,6],xmm3[7]
; AVX2-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[0,1,6,7,12,13,2,3,8,9,14,15,4,5,10,11]
; AVX2-NEXT: vpblendw {{.*#+}} ymm3 = ymm1[0],ymm0[1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7],ymm1[8],ymm0[9,10],ymm1[11],ymm0[12,13],ymm1[14],ymm0[15]
; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm4
; AVX2-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2],xmm3[3,4],xmm4[5],xmm3[6,7]
; AVX2-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[2,3,8,9,14,15,4,5,10,11,0,1,6,7,12,13]
; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7],ymm0[8],ymm1[9],ymm0[10,11],ymm1[12],ymm0[13,14],ymm1[15]
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3],xmm0[4,5],xmm1[6],xmm0[7]
; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[4,5,10,11,0,1,6,7,12,13,2,3,8,9,14,15]
; AVX2-NEXT: vmovdqu %xmm2, (%rsi)
; AVX2-NEXT: vmovdqu %xmm3, (%rdx)
; AVX2-NEXT: vmovdqu %xmm0, (%rcx)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; XOP-LABEL: interleave_24i16_out:
; XOP: # %bb.0:
; XOP-NEXT: vmovdqu (%rdi), %xmm0
; XOP-NEXT: vmovdqu 16(%rdi), %xmm1
; XOP-NEXT: vmovdqu 32(%rdi), %xmm2
; XOP-NEXT: vpblendw {{.*#+}} xmm3 = xmm0[0],xmm1[1],xmm0[2,3],xmm1[4],xmm0[5,6],xmm1[7]
; XOP-NEXT: vpperm {{.*#+}} xmm3 = xmm3[0,1,6,7,12,13,2,3,8,9,14,15],xmm2[4,5,10,11]
; XOP-NEXT: vpblendw {{.*#+}} xmm4 = xmm0[0,1],xmm1[2],xmm0[3,4],xmm1[5],xmm0[6,7]
; XOP-NEXT: vpperm {{.*#+}} xmm4 = xmm4[2,3,8,9,14,15,4,5,10,11],xmm2[0,1,6,7,12,13]
; XOP-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2],xmm1[3,4],xmm0[5],xmm1[6,7]
; XOP-NEXT: vpperm {{.*#+}} xmm0 = xmm0[4,5,10,11,0,1,6,7,12,13],xmm2[2,3,8,9,14,15]
; XOP-NEXT: vmovdqu %xmm3, (%rsi)
; XOP-NEXT: vmovdqu %xmm4, (%rdx)
; XOP-NEXT: vmovdqu %xmm0, (%rcx)
; XOP-NEXT: retq
  %wide.vec = load <24 x i16>, <24 x i16>* %p, align 4
  %s1 = shufflevector <24 x i16> %wide.vec, <24 x i16> undef, <8 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21>
  %s2 = shufflevector <24 x i16> %wide.vec, <24 x i16> undef, <8 x i32> <i32 1, i32 4, i32 7, i32 10, i32 13, i32 16, i32 19, i32 22>
  %s3 = shufflevector <24 x i16> %wide.vec, <24 x i16> undef, <8 x i32> <i32 2, i32 5, i32 8, i32 11, i32 14, i32 17, i32 20, i32 23>
  store <8 x i16> %s1, <8 x i16>* %q1, align 4
  store <8 x i16> %s2, <8 x i16>* %q2, align 4
  store <8 x i16> %s3, <8 x i16>* %q3, align 4
  ret void
}
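
; Interleave three <8 x i16> loads into one stride-3 <24 x i16> store.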
define void @interleave_24i16_in(<24 x i16>* %p, <8 x i16>* %q1, <8 x i16>* %q2, <8 x i16>* %q3) nounwind {
; SSE2-LABEL: interleave_24i16_in:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqu (%rsi), %xmm3
; SSE2-NEXT: movdqu (%rdx), %xmm2
; SSE2-NEXT: movdqu (%rcx), %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[0,0,0,3]
; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [65535,65535,0,65535,65535,0,65535,65535]
; SSE2-NEXT: movdqa %xmm0, %xmm5
; SSE2-NEXT: pandn %xmm4, %xmm5
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,3,3,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm3[1,1,2,2]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,2,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,2,2,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,5,4,5]
; SSE2-NEXT: pand %xmm0, %xmm3
; SSE2-NEXT: por %xmm5, %xmm3
; SSE2-NEXT: movdqa %xmm0, %xmm5
; SSE2-NEXT: pandn %xmm4, %xmm5
; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[0,1,3,3,4,5,6,7]
; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,1,2,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[2,3,2,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,5,6,7]
; SSE2-NEXT: pand %xmm0, %xmm2
; SSE2-NEXT: por %xmm5, %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [65535,0,65535,65535,0,65535,65535,0]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,2,2]
; SSE2-NEXT: pand %xmm5, %xmm1
; SSE2-NEXT: pandn %xmm6, %xmm5
; SSE2-NEXT: por %xmm1, %xmm5
; SSE2-NEXT: pand %xmm0, %xmm5
; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm4[0,1,2,3,4,4,6,7]
; SSE2-NEXT: pandn %xmm1, %xmm0
; SSE2-NEXT: por %xmm5, %xmm0
; SSE2-NEXT: movdqu %xmm0, 16(%rdi)
; SSE2-NEXT: movdqu %xmm2, 32(%rdi)
; SSE2-NEXT: movdqu %xmm3, (%rdi)
; SSE2-NEXT: retq
;
; SSE42-LABEL: interleave_24i16_in:
; SSE42: # %bb.0:
; SSE42-NEXT: movdqu (%rsi), %xmm0
; SSE42-NEXT: movdqu (%rdx), %xmm1
; SSE42-NEXT: movdqu (%rcx), %xmm2
; SSE42-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,2,2]
; SSE42-NEXT: pshufd {{.*#+}} xmm4 = xmm0[0,3,3,3]
; SSE42-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE42-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
; SSE42-NEXT: pshufd {{.*#+}} xmm5 = xmm2[0,0,0,3]
; SSE42-NEXT: pblendw {{.*#+}} xmm5 = xmm0[0,1],xmm5[2],xmm0[3,4],xmm5[5],xmm0[6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,2,2]
; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm3[1],xmm0[2,3],xmm3[4],xmm0[5,6],xmm3[7]
; SSE42-NEXT: pshuflw {{.*#+}} xmm3 = xmm1[0,1,3,3,4,5,6,7]
; SSE42-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,6,7]
; SSE42-NEXT: pblendw {{.*#+}} xmm3 = xmm0[0,1],xmm3[2],xmm0[3,4],xmm3[5],xmm0[6,7]
; SSE42-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
; SSE42-NEXT: pshufb {{.*#+}} xmm1 = xmm1[4,5,6,7,4,5,8,9,10,11,10,11,12,13,14,15]
; SSE42-NEXT: pblendw {{.*#+}} xmm4 = xmm1[0,1],xmm4[2],xmm1[3,4],xmm4[5],xmm1[6,7]
; SSE42-NEXT: movdqu %xmm4, 32(%rdi)
; SSE42-NEXT: movdqu %xmm3, 16(%rdi)
; SSE42-NEXT: movdqu %xmm5, (%rdi)
; SSE42-NEXT: retq
;
; AVX1-LABEL: interleave_24i16_in:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqu (%rsi), %xmm0
; AVX1-NEXT: vmovdqu (%rdx), %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,2,2]
; AVX1-NEXT: vpshuflw {{.*#+}} xmm3 = xmm1[0,1,3,3,4,5,6,7]
; AVX1-NEXT: vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,6,7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3,4],xmm3[5],xmm2[6,7]
; AVX1-NEXT: vmovdqu (%rcx), %xmm3
; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm3[1,1,2,2]
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm4[0],xmm2[1,2],xmm4[3],xmm2[4,5],xmm4[6],xmm2[7]
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm4 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; AVX1-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[4,5,u,u,10,11,8,9,u,u,14,15,12,13,u,u]
; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm3[2,2,3,3]
; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0],xmm5[1],xmm4[2,3],xmm5[4],xmm4[5,6],xmm5[7]
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,u,u,4,5,6,7,u,u,8,9,10,11]
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm3[0,0,0,3]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3,4],xmm1[5],xmm0[6,7]
; AVX1-NEXT: vmovdqu %xmm0, (%rdi)
; AVX1-NEXT: vmovdqu %xmm4, 32(%rdi)
; AVX1-NEXT: vmovdqu %xmm2, 16(%rdi)
; AVX1-NEXT: retq
;
; AVX2-LABEL: interleave_24i16_in:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqu (%rsi), %xmm0
; AVX2-NEXT: vmovdqu (%rdx), %xmm1
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm2
; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm3 = mem[0,1,0,1]
; AVX2-NEXT: vpshufb {{.*#+}} ymm4 = ymm2[0,1,2,3,6,7,2,3,8,9,8,9,4,5,6,7,16,17,18,19,22,23,18,19,24,25,24,25,20,21,22,23]
; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,0,1]
; AVX2-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,0,1,u,u,u,u,2,3,u,u,u,u,4,5,u,u,22,23,u,u,u,u,24,25,u,u,u,u,26,27]
; AVX2-NEXT: vpblendw {{.*#+}} ymm2 = ymm4[0],ymm2[1],ymm4[2,3],ymm2[4],ymm4[5,6],ymm2[7],ymm4[8],ymm2[9],ymm4[10,11],ymm2[12],ymm4[13,14],ymm2[15]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm4 = <u,0,0,u,1,1,u,2>
; AVX2-NEXT: vpermd %ymm3, %ymm4, %ymm4
; AVX2-NEXT: vmovdqa {{.*#+}} ymm5 = [255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255]
; AVX2-NEXT: vpblendvb %ymm5, %ymm2, %ymm4, %ymm2
; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[4,5,10,11,10,11,8,9,8,9,14,15,12,13,14,15]
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm3[2,2,3,3]
; AVX2-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3],xmm1[4],xmm0[5,6],xmm1[7]
; AVX2-NEXT: vmovdqu %xmm0, 32(%rdi)
; AVX2-NEXT: vmovdqu %ymm2, (%rdi)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; XOP-LABEL: interleave_24i16_in:
; XOP: # %bb.0:
; XOP-NEXT: vmovdqu (%rsi), %xmm0
; XOP-NEXT: vmovdqu (%rdx), %xmm1
; XOP-NEXT: vmovdqu (%rcx), %xmm2
; XOP-NEXT: vpperm {{.*#+}} xmm3 = xmm0[u,u,6,7],xmm1[6,7],xmm0[u,u,8,9],xmm1[8,9],xmm0[u,u,10,11]
; XOP-NEXT: vpshufd {{.*#+}} xmm4 = xmm2[1,1,2,2]
; XOP-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0],xmm3[1,2],xmm4[3],xmm3[4,5],xmm4[6],xmm3[7]
; XOP-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; XOP-NEXT: vpperm {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm2[0,1],xmm4[4,5,6,7],xmm2[2,3],xmm4[8,9,10,11]
; XOP-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
; XOP-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; XOP-NEXT: vpperm {{.*#+}} xmm0 = xmm0[4,5],xmm2[10,11],xmm0[10,11,8,9],xmm2[12,13],xmm0[14,15,12,13],xmm2[14,15]
; XOP-NEXT: vmovdqu %xmm0, 32(%rdi)
; XOP-NEXT: vmovups %ymm3, (%rdi)
; XOP-NEXT: vzeroupper
; XOP-NEXT: retq
  %s1 = load <8 x i16>, <8 x i16>* %q1, align 4
  %s2 = load <8 x i16>, <8 x i16>* %q2, align 4
  %s3 = load <8 x i16>, <8 x i16>* %q3, align 4
  %t1 = shufflevector <8 x i16> %s1, <8 x i16> %s2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  %t2 = shufflevector <8 x i16> %s3, <8 x i16> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %interleaved = shufflevector <16 x i16> %t1, <16 x i16> %t2, <24 x i32> <i32 0, i32 8, i32 16, i32 1, i32 9, i32 17, i32 2, i32 10, i32 18, i32 3, i32 11, i32 19, i32 4, i32 12, i32 20, i32 5, i32 13, i32 21, i32 6, i32 14, i32 22, i32 7, i32 15, i32 23>
  store <24 x i16> %interleaved, <24 x i16>* %p, align 4
  ret void
}
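
; Deinterleave a stride-3 <24 x i32> load into three <8 x i32> vectors.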
define void @interleave_24i32_out(<24 x i32>* %p, <8 x i32>* %q1, <8 x i32>* %q2, <8 x i32>* %q3) nounwind {
; SSE2-LABEL: interleave_24i32_out:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqu 64(%rdi), %xmm10
; SSE2-NEXT: movups 80(%rdi), %xmm8
; SSE2-NEXT: movdqu (%rdi), %xmm0
; SSE2-NEXT: movdqu 16(%rdi), %xmm11
; SSE2-NEXT: movups 32(%rdi), %xmm5
; SSE2-NEXT: movdqu 48(%rdi), %xmm9
; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm0[2,3,0,1]
; SSE2-NEXT: movaps %xmm5, %xmm7
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm11[1,1,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm3[0],xmm6[1],xmm3[1]
; SSE2-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,1],xmm5[0,3]
; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,0],xmm11[2,0]
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,3],xmm5[2,0]
; SSE2-NEXT: movaps %xmm8, %xmm5
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm10[1,1,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm9[2,3,0,1]
; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm8[0,3]
; SSE2-NEXT: shufps {{.*#+}} xmm8 = xmm8[1,0],xmm10[2,0]
; SSE2-NEXT: movdqa %xmm9, %xmm2
; SSE2-NEXT: shufps {{.*#+}} xmm9 = xmm9[0,3],xmm8[2,0]
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,0],xmm10[0,0]
; SSE2-NEXT: movaps %xmm2, %xmm4
; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm10[3,3]
; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,0],xmm4[2,0]
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm5[2,0]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm11[0,0]
; SSE2-NEXT: movaps %xmm0, %xmm4
; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm11[3,3]
; SSE2-NEXT: shufps {{.*#+}} xmm7 = xmm7[2,0],xmm4[2,0]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm7[2,0]
; SSE2-NEXT: movups %xmm9, 16(%rsi)
; SSE2-NEXT: movups %xmm3, (%rsi)
; SSE2-NEXT: movups %xmm2, 16(%rdx)
; SSE2-NEXT: movups %xmm0, (%rdx)
; SSE2-NEXT: movups %xmm1, 16(%rcx)
; SSE2-NEXT: movups %xmm6, (%rcx)
; SSE2-NEXT: retq
;
; SSE42-LABEL: interleave_24i32_out:
; SSE42: # %bb.0:
; SSE42-NEXT: movups 80(%rdi), %xmm8
; SSE42-NEXT: movdqu 64(%rdi), %xmm9
; SSE42-NEXT: movdqu (%rdi), %xmm4
; SSE42-NEXT: movdqu 16(%rdi), %xmm2
; SSE42-NEXT: movups 32(%rdi), %xmm10
; SSE42-NEXT: movdqu 48(%rdi), %xmm5
; SSE42-NEXT: movdqa %xmm2, %xmm6
; SSE42-NEXT: pblendw {{.*#+}} xmm6 = xmm6[0,1],xmm4[2,3],xmm6[4,5,6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm7 = xmm4[2,3,0,1]
; SSE42-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,3],xmm2[2,3]
; SSE42-NEXT: insertps {{.*#+}} xmm4 = xmm4[0,1,2],xmm10[1]
; SSE42-NEXT: movdqa %xmm9, %xmm1
; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm5[2,3],xmm1[4,5,6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm5[2,3,0,1]
; SSE42-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,3],xmm9[2,3]
; SSE42-NEXT: insertps {{.*#+}} xmm5 = xmm5[0,1,2],xmm8[1]
; SSE42-NEXT: pshufd {{.*#+}} xmm3 = xmm10[0,1,2,2]
; SSE42-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,0,3,3]
; SSE42-NEXT: pblendw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,5],xmm3[6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,0,3,3]
; SSE42-NEXT: pshufd {{.*#+}} xmm3 = xmm8[0,1,2,2]
; SSE42-NEXT: pblendw {{.*#+}} xmm3 = xmm1[0,1,2,3,4,5],xmm3[6,7]
; SSE42-NEXT: pblendw {{.*#+}} xmm7 = xmm7[0,1],xmm2[2,3],xmm7[4,5,6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm10[0,1,0,3]
; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm7[0,1,2,3],xmm1[4,5,6,7]
; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm9[2,3],xmm0[4,5,6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm8[0,1,0,3]
; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0,1,2,3],xmm2[4,5,6,7]
; SSE42-NEXT: movups %xmm5, 16(%rsi)
; SSE42-NEXT: movups %xmm4, (%rsi)
; SSE42-NEXT: movdqu %xmm3, 16(%rdx)
; SSE42-NEXT: movdqu %xmm6, (%rdx)
; SSE42-NEXT: movdqu %xmm2, 16(%rcx)
; SSE42-NEXT: movdqu %xmm1, (%rcx)
; SSE42-NEXT: retq
;
; AVX1-LABEL: interleave_24i32_out:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovups (%rdi), %ymm0
; AVX1-NEXT: vmovups 32(%rdi), %ymm1
; AVX1-NEXT: vmovups 80(%rdi), %xmm2
; AVX1-NEXT: vmovups 64(%rdi), %xmm3
; AVX1-NEXT: vinsertps {{.*#+}} xmm4 = zero,zero,xmm3[2],xmm2[1]
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm4
; AVX1-NEXT: vblendps {{.*#+}} ymm5 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm6
; AVX1-NEXT: vblendps {{.*#+}} xmm5 = xmm5[0,1],xmm6[2],xmm5[3]
; AVX1-NEXT: vpermilps {{.*#+}} xmm5 = xmm5[0,3,2,1]
; AVX1-NEXT: vpermilps {{.*#+}} xmm6 = xmm6[0,3,2,3]
; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm5, %ymm5
; AVX1-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1,2,3,4,5],ymm4[6,7]
; AVX1-NEXT: vblendps {{.*#+}} xmm5 = xmm3[0,1],xmm2[2],xmm3[3]
; AVX1-NEXT: vpermilps {{.*#+}} xmm5 = xmm5[0,0,3,2]
; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm5
; AVX1-NEXT: vblendps {{.*#+}} ymm6 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
; AVX1-NEXT: vextractf128 $1, %ymm6, %xmm7
; AVX1-NEXT: vblendps {{.*#+}} xmm6 = xmm7[0],xmm6[1,2],xmm7[3]
; AVX1-NEXT: vpermilps {{.*#+}} xmm6 = xmm6[1,0,3,2]
; AVX1-NEXT: vpermilps {{.*#+}} xmm7 = xmm7[1,1,3,3]
; AVX1-NEXT: vinsertf128 $1, %xmm7, %ymm6, %ymm6
; AVX1-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1,2,3,4],ymm5[5,6,7]
; AVX1-NEXT: vshufps {{.*#+}} xmm2 = xmm3[0,1],xmm2[0,3]
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,1,0,3]
; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[2,3,0,1]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5,6,7]
; AVX1-NEXT: vmovups %ymm4, (%rsi)
; AVX1-NEXT: vmovups %ymm5, (%rdx)
; AVX1-NEXT: vmovups %ymm0, (%rcx)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-SLOW-LABEL: interleave_24i32_out:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vmovups (%rdi), %ymm0
; AVX2-SLOW-NEXT: vmovups 32(%rdi), %ymm1
; AVX2-SLOW-NEXT: vmovups 64(%rdi), %ymm2
; AVX2-SLOW-NEXT: vbroadcastsd {{.*#+}} ymm3 = [21474836482,21474836482,21474836482,21474836482]
; AVX2-SLOW-NEXT: vpermps %ymm2, %ymm3, %ymm3
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
; AVX2-SLOW-NEXT: vmovaps {{.*#+}} ymm5 = <0,3,6,1,4,7,u,u>
; AVX2-SLOW-NEXT: vpermps %ymm4, %ymm5, %ymm4
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3,4,5],ymm3[6,7]
; AVX2-SLOW-NEXT: vbroadcastf128 {{.*#+}} ymm4 = [0,0,3,6,0,0,3,6]
; AVX2-SLOW-NEXT: # ymm4 = mem[0,1,0,1]
; AVX2-SLOW-NEXT: vpermps %ymm2, %ymm4, %ymm4
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
; AVX2-SLOW-NEXT: vmovaps {{.*#+}} ymm6 = <1,4,7,2,5,u,u,u>
; AVX2-SLOW-NEXT: vpermps %ymm5, %ymm6, %ymm5
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1,2,3,4],ymm4[5,6,7]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
; AVX2-SLOW-NEXT: vmovaps {{.*#+}} ymm1 = <2,5,0,3,6,u,u,u>
; AVX2-SLOW-NEXT: vpermps %ymm0, %ymm1, %ymm0
; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm1 = ymm2[0,1,0,3,4,5,4,7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
; AVX2-SLOW-NEXT: vmovups %ymm3, (%rsi)
; AVX2-SLOW-NEXT: vmovups %ymm4, (%rdx)
; AVX2-SLOW-NEXT: vmovups %ymm0, (%rcx)
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-LABEL: interleave_24i32_out:
; AVX2-FAST: # %bb.0:
; AVX2-FAST-NEXT: vmovups (%rdi), %ymm0
; AVX2-FAST-NEXT: vmovups 32(%rdi), %ymm1
; AVX2-FAST-NEXT: vmovups 64(%rdi), %ymm2
; AVX2-FAST-NEXT: vbroadcastsd {{.*#+}} ymm3 = [21474836482,21474836482,21474836482,21474836482]
; AVX2-FAST-NEXT: vpermps %ymm2, %ymm3, %ymm3
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm4 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm5 = <0,3,6,1,4,7,u,u>
; AVX2-FAST-NEXT: vpermps %ymm4, %ymm5, %ymm4
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3,4,5],ymm3[6,7]
; AVX2-FAST-NEXT: vbroadcastf128 {{.*#+}} ymm4 = [0,0,3,6,0,0,3,6]
; AVX2-FAST-NEXT: # ymm4 = mem[0,1,0,1]
; AVX2-FAST-NEXT: vpermps %ymm2, %ymm4, %ymm4
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm5 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm6 = <1,4,7,2,5,u,u,u>
; AVX2-FAST-NEXT: vpermps %ymm5, %ymm6, %ymm5
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1,2,3,4],ymm4[5,6,7]
; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm5 = [0,1,0,3,0,1,4,7]
; AVX2-FAST-NEXT: vpermps %ymm2, %ymm5, %ymm2
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm1 = <2,5,0,3,6,u,u,u>
; AVX2-FAST-NEXT: vpermps %ymm0, %ymm1, %ymm0
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5,6,7]
; AVX2-FAST-NEXT: vmovups %ymm3, (%rsi)
; AVX2-FAST-NEXT: vmovups %ymm4, (%rdx)
; AVX2-FAST-NEXT: vmovups %ymm0, (%rcx)
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq
;
; XOP-LABEL: interleave_24i32_out:
; XOP: # %bb.0:
; XOP-NEXT: vmovups (%rdi), %ymm0
; XOP-NEXT: vmovups 32(%rdi), %ymm1
; XOP-NEXT: vmovups 80(%rdi), %xmm2
; XOP-NEXT: vmovups 64(%rdi), %xmm3
; XOP-NEXT: vinsertps {{.*#+}} xmm4 = zero,zero,xmm3[2],xmm2[1]
; XOP-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm4
; XOP-NEXT: vblendps {{.*#+}} ymm5 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
; XOP-NEXT: vextractf128 $1, %ymm5, %xmm6
; XOP-NEXT: vblendps {{.*#+}} xmm5 = xmm5[0,1],xmm6[2],xmm5[3]
; XOP-NEXT: vpermilps {{.*#+}} xmm5 = xmm5[0,3,2,1]
; XOP-NEXT: vpermilps {{.*#+}} xmm6 = xmm6[0,3,2,3]
; XOP-NEXT: vinsertf128 $1, %xmm6, %ymm5, %ymm5
; XOP-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1,2,3,4,5],ymm4[6,7]
; XOP-NEXT: vblendps {{.*#+}} xmm5 = xmm3[0,1],xmm2[2],xmm3[3]
; XOP-NEXT: vpermilps {{.*#+}} xmm5 = xmm5[0,0,3,2]
; XOP-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm5
; XOP-NEXT: vblendps {{.*#+}} ymm6 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
; XOP-NEXT: vextractf128 $1, %ymm6, %xmm7
; XOP-NEXT: vblendps {{.*#+}} xmm6 = xmm7[0],xmm6[1,2],xmm7[3]
; XOP-NEXT: vpermilps {{.*#+}} xmm6 = xmm6[1,0,3,2]
; XOP-NEXT: vpermilps {{.*#+}} xmm7 = xmm7[1,1,3,3]
; XOP-NEXT: vinsertf128 $1, %xmm7, %ymm6, %ymm6
; XOP-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1,2,3,4],ymm5[5,6,7]
; XOP-NEXT: vshufps {{.*#+}} xmm2 = xmm3[0,1],xmm2[0,3]
; XOP-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
; XOP-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
; XOP-NEXT: vextractf128 $1, %ymm0, %xmm1
; XOP-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
; XOP-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,1,0,3]
; XOP-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[2,3,0,1]
; XOP-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; XOP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5,6,7]
; XOP-NEXT: vmovups %ymm4, (%rsi)
; XOP-NEXT: vmovups %ymm5, (%rdx)
; XOP-NEXT: vmovups %ymm0, (%rcx)
; XOP-NEXT: vzeroupper
; XOP-NEXT: retq
  %wide.vec = load <24 x i32>, <24 x i32>* %p, align 4
  %s1 = shufflevector <24 x i32> %wide.vec, <24 x i32> undef, <8 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21>
  %s2 = shufflevector <24 x i32> %wide.vec, <24 x i32> undef, <8 x i32> <i32 1, i32 4, i32 7, i32 10, i32 13, i32 16, i32 19, i32 22>
  %s3 = shufflevector <24 x i32> %wide.vec, <24 x i32> undef, <8 x i32> <i32 2, i32 5, i32 8, i32 11, i32 14, i32 17, i32 20, i32 23>
  store <8 x i32> %s1, <8 x i32>* %q1, align 4
  store <8 x i32> %s2, <8 x i32>* %q2, align 4
  store <8 x i32> %s3, <8 x i32>* %q3, align 4
  ret void
}

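; Interleave three <8 x i32> vectors from %q1/%q2/%q3 into one stride-3 <24 x i32> store.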
define void @interleave_24i32_in(<24 x i32>* %p, <8 x i32>* %q1, <8 x i32>* %q2, <8 x i32>* %q3) nounwind {
; SSE2-LABEL: interleave_24i32_in:
; SSE2: # %bb.0:
; SSE2-NEXT: movups (%rsi), %xmm1
; SSE2-NEXT: movups 16(%rsi), %xmm0
; SSE2-NEXT: movups (%rdx), %xmm8
; SSE2-NEXT: movups 16(%rdx), %xmm5
; SSE2-NEXT: movups (%rcx), %xmm3
; SSE2-NEXT: movups 16(%rcx), %xmm6
; SSE2-NEXT: movaps %xmm3, %xmm7
; SSE2-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,0],xmm1[1,0]
; SSE2-NEXT: movaps %xmm1, %xmm9
; SSE2-NEXT: unpcklps {{.*#+}} xmm9 = xmm9[0],xmm8[0],xmm9[1],xmm8[1]
; SSE2-NEXT: shufps {{.*#+}} xmm9 = xmm9[0,1],xmm7[0,2]
; SSE2-NEXT: movaps %xmm5, %xmm7
; SSE2-NEXT: shufps {{.*#+}} xmm7 = xmm7[3,2],xmm6[3,2]
; SSE2-NEXT: movaps %xmm6, %xmm4
; SSE2-NEXT: unpckhps {{.*#+}} xmm4 = xmm4[2],xmm0[2],xmm4[3],xmm0[3]
; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,3],xmm7[0,2]
; SSE2-NEXT: movaps %xmm0, %xmm7
; SSE2-NEXT: shufps {{.*#+}} xmm7 = xmm7[2,0],xmm5[2,1]
; SSE2-NEXT: movaps %xmm6, %xmm2
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,0],xmm5[1,0]
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm7[0,2]
; SSE2-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,0],xmm0[1,0]
; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm6[0,2]
; SSE2-NEXT: movaps %xmm8, %xmm5
; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[3,2],xmm3[3,2]
; SSE2-NEXT: movaps %xmm3, %xmm6
; SSE2-NEXT: unpckhps {{.*#+}} xmm6 = xmm6[2],xmm1[2],xmm6[3],xmm1[3]
; SSE2-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,3],xmm5[0,2]
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm8[2,1]
; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,0],xmm8[1,0]
; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0],xmm1[0,2]
; SSE2-NEXT: movups %xmm3, 16(%rdi)
; SSE2-NEXT: movups %xmm6, 32(%rdi)
; SSE2-NEXT: movups %xmm0, 48(%rdi)
; SSE2-NEXT: movups %xmm2, 64(%rdi)
; SSE2-NEXT: movups %xmm4, 80(%rdi)
; SSE2-NEXT: movups %xmm9, (%rdi)
; SSE2-NEXT: retq
;
; SSE42-LABEL: interleave_24i32_in:
; SSE42: # %bb.0:
; SSE42-NEXT: movdqu (%rsi), %xmm8
; SSE42-NEXT: movdqu 16(%rsi), %xmm4
; SSE42-NEXT: movdqu (%rdx), %xmm2
; SSE42-NEXT: movdqu 16(%rdx), %xmm5
; SSE42-NEXT: movdqu (%rcx), %xmm3
; SSE42-NEXT: movdqu 16(%rcx), %xmm6
; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,0,1,1]
; SSE42-NEXT: pshufd {{.*#+}} xmm7 = xmm8[0,1,0,1]
; SSE42-NEXT: pblendw {{.*#+}} xmm7 = xmm7[0,1],xmm1[2,3],xmm7[4,5,6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,1,0,1]
; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm7[0,1,2,3],xmm1[4,5],xmm7[6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm7 = xmm4[2,3,0,1]
; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm6[2,3,2,3]
; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm7[2,3],xmm0[4,5,6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm7 = xmm5[2,2,3,3]
; SSE42-NEXT: pblendw {{.*#+}} xmm7 = xmm0[0,1,2,3],xmm7[4,5],xmm0[6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm5[1,1,2,2]
; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm6[2,3],xmm0[4,5,6,7]
; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm4[4,5],xmm0[6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,0,1,1]
; SSE42-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,1,0,1]
; SSE42-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2,3],xmm4[4,5,6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm5 = xmm6[0,1,0,1]
; SSE42-NEXT: pblendw {{.*#+}} xmm5 = xmm4[0,1,2,3],xmm5[4,5],xmm4[6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm4 = xmm8[2,3,0,1]
; SSE42-NEXT: pshufd {{.*#+}} xmm6 = xmm3[2,3,2,3]
; SSE42-NEXT: pblendw {{.*#+}} xmm6 = xmm6[0,1],xmm4[2,3],xmm6[4,5,6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm4 = xmm2[2,2,3,3]
; SSE42-NEXT: pblendw {{.*#+}} xmm4 = xmm6[0,1,2,3],xmm4[4,5],xmm6[6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,2,2]
; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5,6,7]
; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm8[4,5],xmm2[6,7]
; SSE42-NEXT: movdqu %xmm2, 16(%rdi)
; SSE42-NEXT: movdqu %xmm4, 32(%rdi)
; SSE42-NEXT: movdqu %xmm5, 48(%rdi)
; SSE42-NEXT: movdqu %xmm0, 64(%rdi)
; SSE42-NEXT: movdqu %xmm7, 80(%rdi)
; SSE42-NEXT: movdqu %xmm1, (%rdi)
; SSE42-NEXT: retq
;
; AVX1-LABEL: interleave_24i32_in:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovupd (%rsi), %ymm0
; AVX1-NEXT: vmovups (%rdx), %xmm1
; AVX1-NEXT: vmovups 16(%rdx), %xmm2
; AVX1-NEXT: vmovups (%rsi), %xmm3
; AVX1-NEXT: vshufps {{.*#+}} xmm4 = xmm3[2,0],xmm1[2,0]
; AVX1-NEXT: vshufps {{.*#+}} xmm4 = xmm1[1,1],xmm4[0,2]
; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,0],xmm3[0,0]
; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm1[2,0],xmm3[2,1]
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm1, %ymm1
; AVX1-NEXT: vpermilps {{.*#+}} xmm3 = mem[0,1,0,1]
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm3, %ymm3
; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm3[2],ymm1[3,4],ymm3[5],ymm1[6,7]
; AVX1-NEXT: vmovups 16(%rcx), %xmm3
; AVX1-NEXT: vshufps {{.*#+}} xmm4 = xmm2[3,0],xmm3[3,0]
; AVX1-NEXT: vshufps {{.*#+}} xmm4 = xmm3[2,1],xmm4[0,2]
; AVX1-NEXT: vshufps {{.*#+}} xmm3 = xmm3[1,0],xmm2[1,0]
; AVX1-NEXT: vshufps {{.*#+}} xmm2 = xmm3[2,0],xmm2[2,2]
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm2, %ymm2
; AVX1-NEXT: vpermilpd {{.*#+}} ymm3 = ymm0[1,1,3,3]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm3 = ymm3[2,3,2,3]
; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7]
; AVX1-NEXT: vpermilpd {{.*#+}} ymm3 = mem[1,1,2,2]
; AVX1-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,2,2]
; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0],ymm0[1],ymm3[2,3],ymm0[4],ymm3[5,6],ymm0[7]
; AVX1-NEXT: vpermilps {{.*#+}} ymm3 = mem[0,0,3,3,4,4,7,7]
; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm3[2],ymm0[3,4],ymm3[5],ymm0[6,7]
; AVX1-NEXT: vmovups %ymm0, 32(%rdi)
; AVX1-NEXT: vmovups %ymm2, 64(%rdi)
; AVX1-NEXT: vmovups %ymm1, (%rdi)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-SLOW-LABEL: interleave_24i32_in:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vmovups (%rsi), %ymm0
; AVX2-SLOW-NEXT: vmovups (%rdx), %ymm1
; AVX2-SLOW-NEXT: vmovups (%rcx), %ymm2
; AVX2-SLOW-NEXT: vpermilps {{.*#+}} xmm3 = mem[1,0,2,2]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,1,0,1]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm4 = ymm0[0,0,2,1]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6],ymm3[7]
; AVX2-SLOW-NEXT: vbroadcastsd (%rcx), %ymm4
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm4 = ymm2[2,1,3,3]
; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm5 = ymm1[1,2,3,3,5,6,7,7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[2,2,2,3]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0],ymm4[1],ymm5[2,3],ymm4[4],ymm5[5,6],ymm4[7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm5 = ymm0[0,3,3,3]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,1,2,2]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[1,1,2,2]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2,3],ymm0[4],ymm2[5,6],ymm0[7]
; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm1 = ymm1[0,0,3,3,4,4,7,7]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
; AVX2-SLOW-NEXT: vmovups %ymm0, 32(%rdi)
; AVX2-SLOW-NEXT: vmovups %ymm4, 64(%rdi)
; AVX2-SLOW-NEXT: vmovups %ymm3, (%rdi)
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-LABEL: interleave_24i32_in:
; AVX2-FAST: # %bb.0:
; AVX2-FAST-NEXT: vmovups (%rsi), %ymm0
; AVX2-FAST-NEXT: vmovups (%rdx), %ymm1
; AVX2-FAST-NEXT: vmovups (%rcx), %ymm2
; AVX2-FAST-NEXT: vbroadcastf128 {{.*#+}} ymm3 = [1,0,2,2,1,0,2,2]
; AVX2-FAST-NEXT: # ymm3 = mem[0,1,0,1]
; AVX2-FAST-NEXT: vpermps %ymm1, %ymm3, %ymm3
; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm4 = ymm0[0,0,2,1]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6],ymm3[7]
; AVX2-FAST-NEXT: vbroadcastsd (%rcx), %ymm4
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7]
; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm4 = [5,6,5,6,5,6,7,7]
; AVX2-FAST-NEXT: vpermps %ymm1, %ymm4, %ymm4
; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm5 = ymm2[2,1,3,3]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0],ymm5[1],ymm4[2,3],ymm5[4],ymm4[5,6],ymm5[7]
; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm5 = ymm0[0,3,3,3]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7]
; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,1,2,2]
; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[1,1,2,2]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2,3],ymm0[4],ymm2[5,6],ymm0[7]
; AVX2-FAST-NEXT: vpermilps {{.*#+}} ymm1 = ymm1[0,0,3,3,4,4,7,7]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
; AVX2-FAST-NEXT: vmovups %ymm0, 32(%rdi)
; AVX2-FAST-NEXT: vmovups %ymm4, 64(%rdi)
; AVX2-FAST-NEXT: vmovups %ymm3, (%rdi)
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq
;
; XOP-LABEL: interleave_24i32_in:
; XOP: # %bb.0:
; XOP-NEXT: vmovupd (%rsi), %ymm0
; XOP-NEXT: vmovups (%rcx), %ymm1
; XOP-NEXT: vmovups (%rdx), %xmm2
; XOP-NEXT: vmovups 16(%rdx), %xmm3
; XOP-NEXT: vmovups (%rsi), %xmm4
; XOP-NEXT: vshufps {{.*#+}} xmm5 = xmm4[2,0],xmm2[2,0]
; XOP-NEXT: vshufps {{.*#+}} xmm5 = xmm2[1,1],xmm5[0,2]
; XOP-NEXT: vshufps {{.*#+}} xmm2 = xmm2[0,0],xmm4[0,0]
; XOP-NEXT: vshufps {{.*#+}} xmm2 = xmm2[2,0],xmm4[2,1]
; XOP-NEXT: vinsertf128 $1, %xmm5, %ymm2, %ymm2
; XOP-NEXT: vpermilps {{.*#+}} xmm4 = mem[0,1,0,1]
; XOP-NEXT: vinsertf128 $1, %xmm4, %ymm4, %ymm4
; XOP-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm4[2],ymm2[3,4],ymm4[5],ymm2[6,7]
; XOP-NEXT: vmovups 16(%rcx), %xmm4
; XOP-NEXT: vshufps {{.*#+}} xmm5 = xmm3[3,0],xmm4[3,0]
; XOP-NEXT: vshufps {{.*#+}} xmm5 = xmm4[2,1],xmm5[0,2]
; XOP-NEXT: vshufps {{.*#+}} xmm4 = xmm4[1,0],xmm3[1,0]
; XOP-NEXT: vshufps {{.*#+}} xmm3 = xmm4[2,0],xmm3[2,2]
; XOP-NEXT: vinsertf128 $1, %xmm5, %ymm3, %ymm3
; XOP-NEXT: vpermilpd {{.*#+}} ymm4 = ymm0[1,1,3,3]
; XOP-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm4[2,3,2,3]
; XOP-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7]
; XOP-NEXT: vpermil2ps {{.*#+}} ymm0 = ymm1[2],ymm0[3],ymm1[2,3],ymm0[4],ymm1[5,4],ymm0[5]
; XOP-NEXT: vpermilps {{.*#+}} ymm1 = mem[0,0,3,3,4,4,7,7]
; XOP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
; XOP-NEXT: vmovups %ymm0, 32(%rdi)
; XOP-NEXT: vmovups %ymm3, 64(%rdi)
; XOP-NEXT: vmovups %ymm2, (%rdi)
; XOP-NEXT: vzeroupper
; XOP-NEXT: retq
  %s1 = load <8 x i32>, <8 x i32>* %q1, align 4
  %s2 = load <8 x i32>, <8 x i32>* %q2, align 4
  %s3 = load <8 x i32>, <8 x i32>* %q3, align 4
  %t1 = shufflevector <8 x i32> %s1, <8 x i32> %s2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  %t2 = shufflevector <8 x i32> %s3, <8 x i32> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %interleaved = shufflevector <16 x i32> %t1, <16 x i32> %t2, <24 x i32> <i32 0, i32 8, i32 16, i32 1, i32 9, i32 17, i32 2, i32 10, i32 18, i32 3, i32 11, i32 19, i32 4, i32 12, i32 20, i32 5, i32 13, i32 21, i32 6, i32 14, i32 22, i32 7, i32 15, i32 23>
  store <24 x i32> %interleaved, <24 x i32>* %p, align 4
  ret void
}

; Repeat each element x 3 of <16 x i8> a0 + a1 to create a <96 x i8>.
define void @splat3_128(<16 x i8> %a0, <16 x i8> %a1, <96 x i8> *%a2) {
; SSE2-LABEL: splat3_128:
; SSE2: # %bb.0:
; SSE2-NEXT: pxor %xmm4, %xmm4
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm3[2,3,3,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm5 = xmm2[0,1,2,3,4,4,4,5]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,1,0,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,0,0,1,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,5,6,6]
; SSE2-NEXT: packuswb %xmm5, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm4[8],xmm0[9],xmm4[9],xmm0[10],xmm4[10],xmm0[11],xmm4[11],xmm0[12],xmm4[12],xmm0[13],xmm4[13],xmm0[14],xmm4[14],xmm0[15],xmm4[15]
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm0[0,1,0,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,0,0,1,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,6,6]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[1,1,2,2,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,6,7,7,7]
; SSE2-NEXT: packuswb %xmm5, %xmm3
; SSE2-NEXT: pshuflw {{.*#+}} xmm5 = xmm0[2,3,3,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,4,4,5]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,1,2,2,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,7,7,7]
; SSE2-NEXT: packuswb %xmm0, %xmm5
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm6 = xmm0[2,3,3,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,4,4,5]
; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm0[0,1,0,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[0,0,0,1,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,5,5,6,6]
; SSE2-NEXT: packuswb %xmm6, %xmm7
; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm4[8],xmm1[9],xmm4[9],xmm1[10],xmm4[10],xmm1[11],xmm4[11],xmm1[12],xmm4[12],xmm1[13],xmm4[13],xmm1[14],xmm4[14],xmm1[15],xmm4[15]
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[0,1,0,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,0,0,1,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,6,6]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,1,2,2,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,7,7,7]
; SSE2-NEXT: packuswb %xmm4, %xmm0
; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm1[2,3,3,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,4,4,5]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,1,2,2,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,7,7,7]
; SSE2-NEXT: packuswb %xmm1, %xmm4
; SSE2-NEXT: movdqa %xmm4, 80(%rdi)
; SSE2-NEXT: movdqa %xmm0, 64(%rdi)
; SSE2-NEXT: movdqa %xmm7, 48(%rdi)
; SSE2-NEXT: movdqa %xmm5, 32(%rdi)
; SSE2-NEXT: movdqa %xmm3, 16(%rdi)
; SSE2-NEXT: movdqa %xmm2, (%rdi)
; SSE2-NEXT: retq
;
; SSE42-LABEL: splat3_128:
; SSE42: # %bb.0:
; SSE42-NEXT: movdqa {{.*#+}} xmm2 = [0,0,0,1,1,1,2,2,2,3,3,3,4,4,4,5]
; SSE42-NEXT: movdqa %xmm0, %xmm3
; SSE42-NEXT: pshufb %xmm2, %xmm3
; SSE42-NEXT: movdqa {{.*#+}} xmm4 = [5,5,6,6,6,7,7,7,8,8,8,9,9,9,10,10]
; SSE42-NEXT: movdqa %xmm0, %xmm5
; SSE42-NEXT: pshufb %xmm4, %xmm5
; SSE42-NEXT: movdqa {{.*#+}} xmm6 = [10,11,11,11,12,12,12,13,13,13,14,14,14,15,15,15]
; SSE42-NEXT: pshufb %xmm6, %xmm0
; SSE42-NEXT: movdqa %xmm1, %xmm7
; SSE42-NEXT: pshufb %xmm2, %xmm7
; SSE42-NEXT: movdqa %xmm1, %xmm2
; SSE42-NEXT: pshufb %xmm4, %xmm2
; SSE42-NEXT: pshufb %xmm6, %xmm1
; SSE42-NEXT: movdqa %xmm1, 80(%rdi)
; SSE42-NEXT: movdqa %xmm2, 64(%rdi)
; SSE42-NEXT: movdqa %xmm7, 48(%rdi)
; SSE42-NEXT: movdqa %xmm0, 32(%rdi)
; SSE42-NEXT: movdqa %xmm5, 16(%rdi)
; SSE42-NEXT: movdqa %xmm3, (%rdi)
; SSE42-NEXT: retq
;
; AVX1-LABEL: splat3_128:
; AVX1: # %bb.0:
; AVX1-NEXT: vpalignr {{.*#+}} xmm2 = xmm1[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5]
; AVX1-NEXT: vpalignr {{.*#+}} xmm3 = xmm0[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5]
; AVX1-NEXT: vpalignr {{.*#+}} xmm4 = xmm1[11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10]
; AVX1-NEXT: vpalignr {{.*#+}} xmm5 = xmm0[11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10]
; AVX1-NEXT: vpalignr {{.*#+}} xmm6 = xmm3[5,6,7,8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm7 = xmm2[5,6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm3 = xmm5[5,6,7,8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm2 = xmm4[5,6,7,8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm5 = xmm0[5,6,7,8,9,10,11,12,13,14,15],xmm5[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm4 = xmm1[5,6,7,8,9,10,11,12,13,14,15],xmm4[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm1 = xmm7[5,6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm0 = xmm6[5,6,7,8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm2 = xmm2[5,6,7,8,9,10,11,12,13,14,15],xmm4[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm3 = xmm3[5,6,7,8,9,10,11,12,13,14,15],xmm5[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm4 = xmm4[5,6,7,8,9,10,11,12,13,14,15],xmm7[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm5 = xmm5[5,6,7,8,9,10,11,12,13,14,15],xmm6[0,1,2,3,4]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [0,11,6,1,12,7,2,13,8,3,14,9,4,15,10,5]
; AVX1-NEXT: vpshufb %xmm6, %xmm3, %xmm3
; AVX1-NEXT: vpshufb %xmm6, %xmm0, %xmm0
; AVX1-NEXT: vpshufb %xmm6, %xmm5, %xmm5
; AVX1-NEXT: vpshufb %xmm6, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm6, %xmm2, %xmm2
; AVX1-NEXT: vpshufb %xmm6, %xmm4, %xmm4
; AVX1-NEXT: vmovdqa %xmm4, 80(%rdi)
; AVX1-NEXT: vmovdqa %xmm2, 64(%rdi)
; AVX1-NEXT: vmovdqa %xmm1, 48(%rdi)
; AVX1-NEXT: vmovdqa %xmm5, 32(%rdi)
; AVX1-NEXT: vmovdqa %xmm3, 16(%rdi)
; AVX1-NEXT: vmovdqa %xmm0, (%rdi)
; AVX1-NEXT: retq
;
; AVX2-LABEL: splat3_128:
; AVX2: # %bb.0:
; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vpalignr {{.*#+}} ymm1 = ymm0[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5,22,23,24,25,26,27,28,29,30,31,16,17,18,19,20,21]
; AVX2-NEXT: vpalignr {{.*#+}} ymm2 = ymm0[11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10,27,28,29,30,31,16,17,18,19,20,21,22,23,24,25,26]
; AVX2-NEXT: vpalignr {{.*#+}} ymm3 = ymm1[5,6,7,8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4],ymm1[21,22,23,24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20]
; AVX2-NEXT: vpalignr {{.*#+}} ymm1 = ymm2[5,6,7,8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4],ymm2[21,22,23,24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20]
; AVX2-NEXT: vpalignr {{.*#+}} ymm2 = ymm0[5,6,7,8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4],ymm0[21,22,23,24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20]
; AVX2-NEXT: vpalignr {{.*#+}} ymm0 = ymm3[5,6,7,8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4],ymm3[21,22,23,24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20]
; AVX2-NEXT: vpalignr {{.*#+}} ymm1 = ymm1[5,6,7,8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4],ymm1[21,22,23,24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20]
; AVX2-NEXT: vpalignr {{.*#+}} ymm2 = ymm2[5,6,7,8,9,10,11,12,13,14,15],ymm3[0,1,2,3,4],ymm2[21,22,23,24,25,26,27,28,29,30,31],ymm3[16,17,18,19,20]
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm3
; AVX2-NEXT: vmovdqa {{.*#+}} ymm4 = [0,11,6,1,12,7,2,13,8,3,14,9,4,15,10,5,0,11,6,1,12,7,2,13,8,3,14,9,4,15,10,5]
; AVX2-NEXT: vpshufb %ymm4, %ymm3, %ymm3
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
; AVX2-NEXT: vpshufb %ymm4, %ymm0, %ymm0
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm1[2,3],ymm2[2,3]
; AVX2-NEXT: vpshufb %ymm4, %ymm1, %ymm1
; AVX2-NEXT: vmovdqa %ymm1, 64(%rdi)
; AVX2-NEXT: vmovdqa %ymm0, 32(%rdi)
; AVX2-NEXT: vmovdqa %ymm3, (%rdi)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; XOP-LABEL: splat3_128:
; XOP: # %bb.0:
; XOP-NEXT: vpalignr {{.*#+}} xmm2 = xmm0[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5]
; XOP-NEXT: vpalignr {{.*#+}} xmm3 = xmm1[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5]
; XOP-NEXT: vpalignr {{.*#+}} xmm4 = xmm0[11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10]
; XOP-NEXT: vpalignr {{.*#+}} xmm5 = xmm1[11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10]
; XOP-NEXT: vpalignr {{.*#+}} xmm6 = xmm3[5,6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4]
; XOP-NEXT: vpalignr {{.*#+}} xmm7 = xmm2[5,6,7,8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4]
; XOP-NEXT: vpalignr {{.*#+}} xmm8 = xmm5[5,6,7,8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4]
; XOP-NEXT: vpalignr {{.*#+}} xmm2 = xmm4[5,6,7,8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4]
; XOP-NEXT: vpalignr {{.*#+}} xmm5 = xmm1[5,6,7,8,9,10,11,12,13,14,15],xmm5[0,1,2,3,4]
; XOP-NEXT: vpalignr {{.*#+}} xmm4 = xmm0[5,6,7,8,9,10,11,12,13,14,15],xmm4[0,1,2,3,4]
; XOP-NEXT: vmovdqa {{.*#+}} xmm3 = [5,16,11,6,17,12,7,18,13,8,19,14,9,20,15,10]
; XOP-NEXT: vpperm %xmm3, %xmm4, %xmm2, %xmm2
; XOP-NEXT: vpperm %xmm3, %xmm0, %xmm7, %xmm0
; XOP-NEXT: vpperm %xmm3, %xmm7, %xmm4, %xmm4
; XOP-NEXT: vpperm %xmm3, %xmm1, %xmm6, %xmm1
; XOP-NEXT: vpperm %xmm3, %xmm5, %xmm8, %xmm7
; XOP-NEXT: vpperm %xmm3, %xmm6, %xmm5, %xmm3
; XOP-NEXT: vmovdqa %xmm3, 80(%rdi)
; XOP-NEXT: vmovdqa %xmm7, 64(%rdi)
; XOP-NEXT: vmovdqa %xmm1, 48(%rdi)
; XOP-NEXT: vmovdqa %xmm4, 32(%rdi)
; XOP-NEXT: vmovdqa %xmm2, 16(%rdi)
; XOP-NEXT: vmovdqa %xmm0, (%rdi)
; XOP-NEXT: retq
  %1 = shufflevector <16 x i8> %a0, <16 x i8> %a1, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
  %2 = shufflevector <16 x i8> %a0, <16 x i8> %a1, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %3 = shufflevector <64 x i8> %1, <64 x i8> %2, <96 x i32> <i32 0, i32 32, i32 64, i32 1, i32 33, i32 65, i32 2, i32 34, i32 66, i32 3, i32 35, i32 67, i32 4, i32 36, i32 68, i32 5, i32 37, i32 69, i32 6, i32 38, i32 70, i32 7, i32 39, i32 71, i32 8, i32 40, i32 72, i32 9, i32 41, i32 73, i32 10, i32 42, i32 74, i32 11, i32 43, i32 75, i32 12, i32 44, i32 76, i32 13, i32 45, i32 77, i32 14, i32 46, i32 78, i32 15, i32 47, i32 79, i32 16, i32 48, i32 80, i32 17, i32 49, i32 81, i32 18, i32 50, i32 82, i32 19, i32 51, i32 83, i32 20, i32 52, i32 84, i32 21, i32 53, i32 85, i32 22, i32 54, i32 86, i32 23, i32 55, i32 87, i32 24, i32 56, i32 88, i32 25, i32 57, i32 89, i32 26, i32 58, i32 90, i32 27, i32 59, i32 91, i32 28, i32 60, i32 92, i32 29, i32 61, i32 93, i32 30, i32 62, i32 94, i32 31, i32 63, i32 95>
  store <96 x i8> %3, <96 x i8>* %a2
  ret void
}

; Repeat each element x 3 of <32 x i8> a0 to create a <96 x i8>.
define void @splat3_256(<32 x i8> %a0, <96 x i8> *%a1) {
; SSE2-LABEL: splat3_256:
; SSE2: # %bb.0:
; SSE2-NEXT: pxor %xmm4, %xmm4
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm3[2,3,3,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm5 = xmm2[0,1,2,3,4,4,4,5]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,1,0,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,0,0,1,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,5,6,6]
; SSE2-NEXT: packuswb %xmm5, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm4[8],xmm0[9],xmm4[9],xmm0[10],xmm4[10],xmm0[11],xmm4[11],xmm0[12],xmm4[12],xmm0[13],xmm4[13],xmm0[14],xmm4[14],xmm0[15],xmm4[15]
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm0[0,1,0,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,0,0,1,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,6,6]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[1,1,2,2,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,6,7,7,7]
; SSE2-NEXT: packuswb %xmm5, %xmm3
; SSE2-NEXT: pshuflw {{.*#+}} xmm5 = xmm0[2,3,3,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,4,4,5]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,1,2,2,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,7,7,7]
; SSE2-NEXT: packuswb %xmm0, %xmm5
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm6 = xmm0[2,3,3,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,4,4,5]
; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm0[0,1,0,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[0,0,0,1,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,5,5,6,6]
; SSE2-NEXT: packuswb %xmm6, %xmm7
; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm4[8],xmm1[9],xmm4[9],xmm1[10],xmm4[10],xmm1[11],xmm4[11],xmm1[12],xmm4[12],xmm1[13],xmm4[13],xmm1[14],xmm4[14],xmm1[15],xmm4[15]
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[0,1,0,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,0,0,1,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,6,6]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,1,2,2,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,7,7,7]
; SSE2-NEXT: packuswb %xmm4, %xmm0
; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm1[2,3,3,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,4,4,5]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,1,2,2,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,7,7,7]
; SSE2-NEXT: packuswb %xmm1, %xmm4
; SSE2-NEXT: movdqa %xmm4, 80(%rdi)
; SSE2-NEXT: movdqa %xmm0, 64(%rdi)
; SSE2-NEXT: movdqa %xmm7, 48(%rdi)
; SSE2-NEXT: movdqa %xmm5, 32(%rdi)
; SSE2-NEXT: movdqa %xmm3, 16(%rdi)
; SSE2-NEXT: movdqa %xmm2, (%rdi)
; SSE2-NEXT: retq
;
; SSE42-LABEL: splat3_256:
; SSE42: # %bb.0:
; SSE42-NEXT: movdqa {{.*#+}} xmm2 = [0,0,0,1,1,1,2,2,2,3,3,3,4,4,4,5]
; SSE42-NEXT: movdqa %xmm0, %xmm3
; SSE42-NEXT: pshufb %xmm2, %xmm3
; SSE42-NEXT: movdqa {{.*#+}} xmm4 = [5,5,6,6,6,7,7,7,8,8,8,9,9,9,10,10]
; SSE42-NEXT: movdqa %xmm0, %xmm5
; SSE42-NEXT: pshufb %xmm4, %xmm5
; SSE42-NEXT: movdqa {{.*#+}} xmm6 = [10,11,11,11,12,12,12,13,13,13,14,14,14,15,15,15]
; SSE42-NEXT: pshufb %xmm6, %xmm0
; SSE42-NEXT: movdqa %xmm1, %xmm7
; SSE42-NEXT: pshufb %xmm2, %xmm7
; SSE42-NEXT: movdqa %xmm1, %xmm2
; SSE42-NEXT: pshufb %xmm4, %xmm2
; SSE42-NEXT: pshufb %xmm6, %xmm1
; SSE42-NEXT: movdqa %xmm1, 80(%rdi)
; SSE42-NEXT: movdqa %xmm2, 64(%rdi)
; SSE42-NEXT: movdqa %xmm7, 48(%rdi)
; SSE42-NEXT: movdqa %xmm0, 32(%rdi)
; SSE42-NEXT: movdqa %xmm5, 16(%rdi)
; SSE42-NEXT: movdqa %xmm3, (%rdi)
; SSE42-NEXT: retq
;
; AVX1-LABEL: splat3_256:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpalignr {{.*#+}} xmm2 = xmm1[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5]
; AVX1-NEXT: vpalignr {{.*#+}} xmm3 = xmm0[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5]
; AVX1-NEXT: vpalignr {{.*#+}} xmm4 = xmm1[11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10]
; AVX1-NEXT: vpalignr {{.*#+}} xmm5 = xmm0[11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10]
; AVX1-NEXT: vpalignr {{.*#+}} xmm6 = xmm3[5,6,7,8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm7 = xmm2[5,6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm3 = xmm5[5,6,7,8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm2 = xmm4[5,6,7,8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm5 = xmm0[5,6,7,8,9,10,11,12,13,14,15],xmm5[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm4 = xmm1[5,6,7,8,9,10,11,12,13,14,15],xmm4[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm1 = xmm7[5,6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm0 = xmm6[5,6,7,8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm2 = xmm2[5,6,7,8,9,10,11,12,13,14,15],xmm4[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm3 = xmm3[5,6,7,8,9,10,11,12,13,14,15],xmm5[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm4 = xmm4[5,6,7,8,9,10,11,12,13,14,15],xmm7[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm5 = xmm5[5,6,7,8,9,10,11,12,13,14,15],xmm6[0,1,2,3,4]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [0,11,6,1,12,7,2,13,8,3,14,9,4,15,10,5]
; AVX1-NEXT: vpshufb %xmm6, %xmm3, %xmm3
; AVX1-NEXT: vpshufb %xmm6, %xmm0, %xmm0
; AVX1-NEXT: vpshufb %xmm6, %xmm5, %xmm5
; AVX1-NEXT: vpshufb %xmm6, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm6, %xmm2, %xmm2
; AVX1-NEXT: vpshufb %xmm6, %xmm4, %xmm4
; AVX1-NEXT: vmovdqa %xmm4, 80(%rdi)
; AVX1-NEXT: vmovdqa %xmm2, 64(%rdi)
; AVX1-NEXT: vmovdqa %xmm1, 48(%rdi)
; AVX1-NEXT: vmovdqa %xmm5, 32(%rdi)
; AVX1-NEXT: vmovdqa %xmm3, 16(%rdi)
; AVX1-NEXT: vmovdqa %xmm0, (%rdi)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: splat3_256:
; AVX2: # %bb.0:
; AVX2-NEXT: vpalignr {{.*#+}} ymm1 = ymm0[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5,22,23,24,25,26,27,28,29,30,31,16,17,18,19,20,21]
; AVX2-NEXT: vpalignr {{.*#+}} ymm2 = ymm0[11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10,27,28,29,30,31,16,17,18,19,20,21,22,23,24,25,26]
; AVX2-NEXT: vpalignr {{.*#+}} ymm3 = ymm1[5,6,7,8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4],ymm1[21,22,23,24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20]
; AVX2-NEXT: vpalignr {{.*#+}} ymm1 = ymm2[5,6,7,8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4],ymm2[21,22,23,24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20]
; AVX2-NEXT: vpalignr {{.*#+}} ymm2 = ymm0[5,6,7,8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4],ymm0[21,22,23,24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20]
; AVX2-NEXT: vpalignr {{.*#+}} ymm0 = ymm3[5,6,7,8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4],ymm3[21,22,23,24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20]
; AVX2-NEXT: vpalignr {{.*#+}} ymm1 = ymm1[5,6,7,8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4],ymm1[21,22,23,24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20]
; AVX2-NEXT: vpalignr {{.*#+}} ymm2 = ymm2[5,6,7,8,9,10,11,12,13,14,15],ymm3[0,1,2,3,4],ymm2[21,22,23,24,25,26,27,28,29,30,31],ymm3[16,17,18,19,20]
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm3
; AVX2-NEXT: vmovdqa {{.*#+}} ymm4 = [0,11,6,1,12,7,2,13,8,3,14,9,4,15,10,5,0,11,6,1,12,7,2,13,8,3,14,9,4,15,10,5]
; AVX2-NEXT: vpshufb %ymm4, %ymm3, %ymm3
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
; AVX2-NEXT: vpshufb %ymm4, %ymm0, %ymm0
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm1[2,3],ymm2[2,3]
; AVX2-NEXT: vpshufb %ymm4, %ymm1, %ymm1
; AVX2-NEXT: vmovdqa %ymm1, 64(%rdi)
; AVX2-NEXT: vmovdqa %ymm0, 32(%rdi)
; AVX2-NEXT: vmovdqa %ymm3, (%rdi)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; XOP-LABEL: splat3_256:
; XOP: # %bb.0:
; XOP-NEXT: vpalignr {{.*#+}} xmm1 = xmm0[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5]
; XOP-NEXT: vextractf128 $1, %ymm0, %xmm2
; XOP-NEXT: vpalignr {{.*#+}} xmm3 = xmm2[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5]
; XOP-NEXT: vpalignr {{.*#+}} xmm4 = xmm0[11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10]
; XOP-NEXT: vpalignr {{.*#+}} xmm5 = xmm2[11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10]
; XOP-NEXT: vpalignr {{.*#+}} xmm6 = xmm3[5,6,7,8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4]
; XOP-NEXT: vpalignr {{.*#+}} xmm7 = xmm1[5,6,7,8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4]
; XOP-NEXT: vpalignr {{.*#+}} xmm8 = xmm5[5,6,7,8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4]
; XOP-NEXT: vpalignr {{.*#+}} xmm1 = xmm4[5,6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4]
; XOP-NEXT: vpalignr {{.*#+}} xmm5 = xmm2[5,6,7,8,9,10,11,12,13,14,15],xmm5[0,1,2,3,4]
; XOP-NEXT: vpalignr {{.*#+}} xmm4 = xmm0[5,6,7,8,9,10,11,12,13,14,15],xmm4[0,1,2,3,4]
; XOP-NEXT: vmovdqa {{.*#+}} xmm3 = [5,16,11,6,17,12,7,18,13,8,19,14,9,20,15,10]
; XOP-NEXT: vpperm %xmm3, %xmm4, %xmm1, %xmm1
; XOP-NEXT: vpperm %xmm3, %xmm0, %xmm7, %xmm0
; XOP-NEXT: vpperm %xmm3, %xmm7, %xmm4, %xmm4
; XOP-NEXT: vpperm %xmm3, %xmm2, %xmm6, %xmm2
; XOP-NEXT: vpperm %xmm3, %xmm5, %xmm8, %xmm7
; XOP-NEXT: vpperm %xmm3, %xmm6, %xmm5, %xmm3
; XOP-NEXT: vmovdqa %xmm3, 80(%rdi)
; XOP-NEXT: vmovdqa %xmm7, 64(%rdi)
; XOP-NEXT: vmovdqa %xmm2, 48(%rdi)
; XOP-NEXT: vmovdqa %xmm4, 32(%rdi)
; XOP-NEXT: vmovdqa %xmm1, 16(%rdi)
; XOP-NEXT: vmovdqa %xmm0, (%rdi)
; XOP-NEXT: vzeroupper
; XOP-NEXT: retq
  %1 = shufflevector <32 x i8> %a0, <32 x i8> undef, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
  %2 = shufflevector <32 x i8> %a0, <32 x i8> undef, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %3 = shufflevector <64 x i8> %1, <64 x i8> %2, <96 x i32> <i32 0, i32 32, i32 64, i32 1, i32 33, i32 65, i32 2, i32 34, i32 66, i32 3, i32 35, i32 67, i32 4, i32 36, i32 68, i32 5, i32 37, i32 69, i32 6, i32 38, i32 70, i32 7, i32 39, i32 71, i32 8, i32 40, i32 72, i32 9, i32 41, i32 73, i32 10, i32 42, i32 74, i32 11, i32 43, i32 75, i32 12, i32 44, i32 76, i32 13, i32 45, i32 77, i32 14, i32 46, i32 78, i32 15, i32 47, i32 79, i32 16, i32 48, i32 80, i32 17, i32 49, i32 81, i32 18, i32 50, i32 82, i32 19, i32 51, i32 83, i32 20, i32 52, i32 84, i32 21, i32 53, i32 85, i32 22, i32 54, i32 86, i32 23, i32 55, i32 87, i32 24, i32 56, i32 88, i32 25, i32 57, i32 89, i32 26, i32 58, i32 90, i32 27, i32 59, i32 91, i32 28, i32 60, i32 92, i32 29, i32 61, i32 93, i32 30, i32 62, i32 94, i32 31, i32 63, i32 95>
  store <96 x i8> %3, <96 x i8>* %a1
  ret void
}

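; Splat element 0 of %A into an <8 x double> store, reload it, and return two
; lanes; the whole sequence should reduce to a single broadcast of the low double.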
define <2 x double> @wrongorder(<4 x double> %A, <8 x double>* %P) #0 {
; SSE2-LABEL: wrongorder:
; SSE2: # %bb.0:
; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0,0]
; SSE2-NEXT: movaps %xmm0, 48(%rdi)
; SSE2-NEXT: movaps %xmm0, 32(%rdi)
; SSE2-NEXT: movaps %xmm0, 16(%rdi)
; SSE2-NEXT: movaps %xmm0, (%rdi)
; SSE2-NEXT: retq
;
; SSE42-LABEL: wrongorder:
; SSE42: # %bb.0:
; SSE42-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
; SSE42-NEXT: movapd %xmm0, 48(%rdi)
; SSE42-NEXT: movapd %xmm0, 32(%rdi)
; SSE42-NEXT: movapd %xmm0, 16(%rdi)
; SSE42-NEXT: movapd %xmm0, (%rdi)
; SSE42-NEXT: retq
;
; AVX1-LABEL: wrongorder:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm1
; AVX1-NEXT: vmovaps %ymm1, 32(%rdi)
; AVX1-NEXT: vmovaps %ymm1, (%rdi)
; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: wrongorder:
; AVX2: # %bb.0:
; AVX2-NEXT: vbroadcastsd %xmm0, %ymm0
; AVX2-NEXT: vmovaps %ymm0, 32(%rdi)
; AVX2-NEXT: vmovaps %ymm0, (%rdi)
; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; XOP-LABEL: wrongorder:
; XOP: # %bb.0:
; XOP-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; XOP-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm1
; XOP-NEXT: vmovaps %ymm1, 32(%rdi)
; XOP-NEXT: vmovaps %ymm1, (%rdi)
; XOP-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; XOP-NEXT: vzeroupper
; XOP-NEXT: retq
  %shuffle = shufflevector <4 x double> %A, <4 x double> %A, <8 x i32> zeroinitializer
  store <8 x double> %shuffle, <8 x double>* %P, align 64
  %m2 = load <8 x double>, <8 x double>* %P, align 64
  store <8 x double> %m2, <8 x double>* %P, align 64
  %m3 = load <8 x double>, <8 x double>* %P, align 64
  %m4 = shufflevector <8 x double> %m3, <8 x double> undef, <2 x i32> <i32 2, i32 0>
  ret <2 x double> %m4
}

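; Reduced from PR41097: sign-extend a <2 x i8> strided subvector of a <6 x i8>
; load, zero-extend the result to <2 x i64>, and store it.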
define void @PR41097() {
; SSE2-LABEL: PR41097:
; SSE2: # %bb.0:
; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,2,3,4,5,6,7]
; SSE2-NEXT: psrad $24, %xmm0
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: movdqu %xmm0, (%rax)
; SSE2-NEXT: retq
;
; SSE42-LABEL: PR41097:
; SSE42: # %bb.0:
; SSE42-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE42-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,3,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; SSE42-NEXT: pmovsxbd %xmm0, %xmm0
; SSE42-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; SSE42-NEXT: movdqu %xmm0, (%rax)
; SSE42-NEXT: retq
;
; AVX-LABEL: PR41097:
; AVX: # %bb.0:
; AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,3,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX-NEXT: vpmovsxbd %xmm0, %xmm0
; AVX-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; AVX-NEXT: vmovdqu %xmm0, (%rax)
; AVX-NEXT: retq
;
; XOP-LABEL: PR41097:
; XOP: # %bb.0:
; XOP-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; XOP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,3,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; XOP-NEXT: vpmovsxbd %xmm0, %xmm0
; XOP-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; XOP-NEXT: vmovdqu %xmm0, (%rax)
; XOP-NEXT: retq
  %wide.vec = load <6 x i8>, <6 x i8>* undef, align 1
  %strided.vec = shufflevector <6 x i8> %wide.vec, <6 x i8> undef, <2 x i32> <i32 0, i32 3>
  %tmp = sext <2 x i8> %strided.vec to <2 x i32>
  %tmp7 = zext <2 x i32> %tmp to <2 x i64>
  store <2 x i64> %tmp7, <2 x i64>* undef, align 8
  ret void
}