; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-pc-linux -mattr=+sse2 | FileCheck %s --check-prefix=SSE --check-prefix=SSE2
; RUN: llc < %s -mtriple=x86_64-pc-linux -mattr=+sse4.2 | FileCheck %s --check-prefix=SSE --check-prefix=SSE42
; RUN: llc < %s -mtriple=x86_64-pc-linux -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-pc-linux -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2,AVX2-SLOW
; RUN: llc < %s -mtriple=x86_64-pc-linux -mattr=+avx2,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX2-FAST
; RUN: llc < %s -mtriple=x86_64-pc-linux -mattr=+xop | FileCheck %s --check-prefix=XOP

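; Store a <3 x i64> shuffle of two <2 x i64> inputs (mask <0,2,1>).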
define void @v3i64(<2 x i64> %a, <2 x i64> %b, <3 x i64>* %p) nounwind {
; SSE2-LABEL: v3i64:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE2-NEXT:    movq %xmm2, 16(%rdi)
; SSE2-NEXT:    movdqa %xmm0, (%rdi)
; SSE2-NEXT:    retq
;
; SSE42-LABEL: v3i64:
; SSE42:       # %bb.0:
; SSE42-NEXT:    pextrq $1, %xmm0, 16(%rdi)
; SSE42-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE42-NEXT:    movdqa %xmm0, (%rdi)
; SSE42-NEXT:    retq
;
; AVX-LABEL: v3i64:
; AVX:       # %bb.0:
; AVX-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm0[0],xmm1[0]
; AVX-NEXT:    vpextrq $1, %xmm0, 16(%rdi)
; AVX-NEXT:    vmovdqa %xmm1, (%rdi)
; AVX-NEXT:    retq
;
; XOP-LABEL: v3i64:
; XOP:       # %bb.0:
; XOP-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm0[0],xmm1[0]
; XOP-NEXT:    vpextrq $1, %xmm0, 16(%rdi)
; XOP-NEXT:    vmovdqa %xmm1, (%rdi)
; XOP-NEXT:    retq
  %r = shufflevector <2 x i64> %a, <2 x i64> %b, <3 x i32> <i32 0, i32 2, i32 1>
  store <3 x i64> %r, <3 x i64>* %p
  ret void
}

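; The same <0,2,1> shuffle with <2 x double> inputs stored as <3 x double>.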
define void @v3f64(<2 x double> %a, <2 x double> %b, <3 x double>* %p) nounwind {
; SSE-LABEL: v3f64:
; SSE:       # %bb.0:
; SSE-NEXT:    movhpd %xmm0, 16(%rdi)
; SSE-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT:    movapd %xmm0, (%rdi)
; SSE-NEXT:    retq
;
; AVX-LABEL: v3f64:
; AVX:       # %bb.0:
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm1 = xmm0[0],xmm1[0]
; AVX-NEXT:    vmovhpd %xmm0, 16(%rdi)
; AVX-NEXT:    vmovapd %xmm1, (%rdi)
; AVX-NEXT:    retq
;
; XOP-LABEL: v3f64:
; XOP:       # %bb.0:
; XOP-NEXT:    vunpcklpd {{.*#+}} xmm1 = xmm0[0],xmm1[0]
; XOP-NEXT:    vmovhpd %xmm0, 16(%rdi)
; XOP-NEXT:    vmovapd %xmm1, (%rdi)
; XOP-NEXT:    retq
  %r = shufflevector <2 x double> %a, <2 x double> %b, <3 x i32> <i32 0, i32 2, i32 1>
  store <3 x double> %r, <3 x double>* %p
  ret void
}

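; The same <0,2,1> shuffle with <2 x i32> inputs stored as <3 x i32>.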
define void @v3i32(<2 x i32> %a, <2 x i32> %b, <3 x i32>* %p) nounwind {
; SSE2-LABEL: v3i32:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT:    movd %xmm2, 8(%rdi)
; SSE2-NEXT:    movq %xmm0, (%rdi)
; SSE2-NEXT:    retq
;
; SSE42-LABEL: v3i32:
; SSE42:       # %bb.0:
; SSE42-NEXT:    extractps $2, %xmm0, 8(%rdi)
; SSE42-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE42-NEXT:    movlps %xmm0, (%rdi)
; SSE42-NEXT:    retq
;
; AVX-LABEL: v3i32:
; AVX:       # %bb.0:
; AVX-NEXT:    vunpcklps {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; AVX-NEXT:    vextractps $2, %xmm0, 8(%rdi)
; AVX-NEXT:    vmovlps %xmm1, (%rdi)
; AVX-NEXT:    retq
;
; XOP-LABEL: v3i32:
; XOP:       # %bb.0:
; XOP-NEXT:    vunpcklps {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; XOP-NEXT:    vextractps $2, %xmm0, 8(%rdi)
; XOP-NEXT:    vmovlps %xmm1, (%rdi)
; XOP-NEXT:    retq
  %r = shufflevector <2 x i32> %a, <2 x i32> %b, <3 x i32> <i32 0, i32 2, i32 1>
  store <3 x i32> %r, <3 x i32>* %p
  ret void
}

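; Mix two <4 x i16> inputs (mask <0,5,1,6,3>) into a <5 x i16> store.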
define void @v5i16(<4 x i16> %a, <4 x i16> %b, <5 x i16>* %p) nounwind {
; SSE2-LABEL: v5i16:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pshuflw {{.*#+}} xmm2 = xmm0[0,2,2,3,4,5,6,7]
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,1,2,3]
; SSE2-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[2,0,2,3,4,5,6,7]
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
; SSE2-NEXT:    pextrw $6, %xmm0, %eax
; SSE2-NEXT:    movw %ax, 8(%rdi)
; SSE2-NEXT:    movq %xmm2, (%rdi)
; SSE2-NEXT:    retq
;
; SSE42-LABEL: v5i16:
; SSE42:       # %bb.0:
; SSE42-NEXT:    pshuflw {{.*#+}} xmm2 = xmm0[0,2,2,3,4,5,6,7]
; SSE42-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,1,2,3]
; SSE42-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[2,0,2,3,4,5,6,7]
; SSE42-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
; SSE42-NEXT:    pextrw $6, %xmm0, 8(%rdi)
; SSE42-NEXT:    movq %xmm2, (%rdi)
; SSE42-NEXT:    retq
;
; AVX1-LABEL: v5i16:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm0[0,2,2,3,4,5,6,7]
; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,1,2,3]
; AVX1-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[2,0,2,3,4,5,6,7]
; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
; AVX1-NEXT:    vpextrw $6, %xmm0, 8(%rdi)
; AVX1-NEXT:    vmovq %xmm1, (%rdi)
; AVX1-NEXT:    retq
;
; AVX2-SLOW-LABEL: v5i16:
; AVX2-SLOW:       # %bb.0:
; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm0[0,2,2,3,4,5,6,7]
; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,1,2,3]
; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[2,0,2,3,4,5,6,7]
; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
; AVX2-SLOW-NEXT:    vpextrw $6, %xmm0, 8(%rdi)
; AVX2-SLOW-NEXT:    vmovq %xmm1, (%rdi)
; AVX2-SLOW-NEXT:    retq
;
; AVX2-FAST-LABEL: v5i16:
; AVX2-FAST:       # %bb.0:
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[4,5,8,9,4,5,6,7,8,9,10,11,12,13,14,15]
; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm0[0,2,2,3,4,5,6,7]
; AVX2-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
; AVX2-FAST-NEXT:    vpextrw $6, %xmm0, 8(%rdi)
; AVX2-FAST-NEXT:    vmovq %xmm1, (%rdi)
; AVX2-FAST-NEXT:    retq
;
; XOP-LABEL: v5i16:
; XOP:       # %bb.0:
; XOP-NEXT:    vpperm {{.*#+}} xmm1 = xmm0[0,1],xmm1[4,5],xmm0[4,5],xmm1[8,9],xmm0[4,5],xmm1[4,5],xmm0[6,7],xmm1[6,7]
; XOP-NEXT:    vpextrw $6, %xmm0, 8(%rdi)
; XOP-NEXT:    vmovq %xmm1, (%rdi)
; XOP-NEXT:    retq
  %r = shufflevector <4 x i16> %a, <4 x i16> %b, <5 x i32> <i32 0, i32 5, i32 1, i32 6, i32 3>
  store <5 x i16> %r, <5 x i16>* %p
  ret void
}

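; The same <0,5,1,6,3> mask with <4 x i32> inputs.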
define void @v5i32(<4 x i32> %a, <4 x i32> %b, <5 x i32>* %p) nounwind {
; SSE2-LABEL: v5i32:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,2,2,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[3,1,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT:    movd %xmm2, 16(%rdi)
; SSE2-NEXT:    movdqa %xmm0, (%rdi)
; SSE2-NEXT:    retq
;
; SSE42-LABEL: v5i32:
; SSE42:       # %bb.0:
; SSE42-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,1,2,2]
; SSE42-NEXT:    pmovzxdq {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero
; SSE42-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; SSE42-NEXT:    pextrd $3, %xmm0, 16(%rdi)
; SSE42-NEXT:    movdqa %xmm2, (%rdi)
; SSE42-NEXT:    retq
;
; AVX1-LABEL: v5i32:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,1,2,2]
; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero
; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; AVX1-NEXT:    vpextrd $3, %xmm0, 16(%rdi)
; AVX1-NEXT:    vmovdqa %xmm1, (%rdi)
; AVX1-NEXT:    retq
;
; AVX2-LABEL: v5i32:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,1,2,2]
; AVX2-NEXT:    vpmovzxdq {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero
; AVX2-NEXT:    vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3]
; AVX2-NEXT:    vpextrd $3, %xmm0, 16(%rdi)
; AVX2-NEXT:    vmovdqa %xmm1, (%rdi)
; AVX2-NEXT:    retq
;
; XOP-LABEL: v5i32:
; XOP:       # %bb.0:
; XOP-NEXT:    vpperm {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm1[4,5,6,7],xmm0[4,5,6,7],xmm1[8,9,10,11]
; XOP-NEXT:    vpextrd $3, %xmm0, 16(%rdi)
; XOP-NEXT:    vmovdqa %xmm1, (%rdi)
; XOP-NEXT:    retq
  %r = shufflevector <4 x i32> %a, <4 x i32> %b, <5 x i32> <i32 0, i32 5, i32 1, i32 6, i32 3>
  store <5 x i32> %r, <5 x i32>* %p
  ret void
}

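; The same <0,5,1,6,3> mask with <4 x float> inputs.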
define void @v5f32(<4 x float> %a, <4 x float> %b, <5 x float>* %p) nounwind {
; SSE2-LABEL: v5f32:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movaps %xmm0, %xmm2
; SSE2-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,1],xmm1[1,2]
; SSE2-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,2,1,3]
; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE2-NEXT:    movss %xmm0, 16(%rdi)
; SSE2-NEXT:    movaps %xmm2, (%rdi)
; SSE2-NEXT:    retq
;
; SSE42-LABEL: v5f32:
; SSE42:       # %bb.0:
; SSE42-NEXT:    extractps $3, %xmm0, 16(%rdi)
; SSE42-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[1,2]
; SSE42-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
; SSE42-NEXT:    movaps %xmm0, (%rdi)
; SSE42-NEXT:    retq
;
; AVX-LABEL: v5f32:
; AVX:       # %bb.0:
; AVX-NEXT:    vshufps {{.*#+}} xmm1 = xmm0[0,1],xmm1[1,2]
; AVX-NEXT:    vpermilps {{.*#+}} xmm1 = xmm1[0,2,1,3]
; AVX-NEXT:    vextractps $3, %xmm0, 16(%rdi)
; AVX-NEXT:    vmovaps %xmm1, (%rdi)
; AVX-NEXT:    retq
;
; XOP-LABEL: v5f32:
; XOP:       # %bb.0:
; XOP-NEXT:    vshufps {{.*#+}} xmm1 = xmm0[0,1],xmm1[1,2]
; XOP-NEXT:    vpermilps {{.*#+}} xmm1 = xmm1[0,2,1,3]
; XOP-NEXT:    vextractps $3, %xmm0, 16(%rdi)
; XOP-NEXT:    vmovaps %xmm1, (%rdi)
; XOP-NEXT:    retq
  %r = shufflevector <4 x float> %a, <4 x float> %b, <5 x i32> <i32 0, i32 5, i32 1, i32 6, i32 3>
  store <5 x float> %r, <5 x float>* %p
  ret void
}

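; Build a <7 x i8> store from two <4 x i8> inputs with the repeating mask <0,6,3,6,1,7,4>.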
define void @v7i8(<4 x i8> %a, <4 x i8> %b, <7 x i8>* %p) nounwind {
; SSE2-LABEL: v7i8:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,3,1,3]
; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [0,65535,0,65535,0,65535,65535,65535]
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,1,0,3]
; SSE2-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,0,2,0,4,5,6,7]
; SSE2-NEXT:    pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,4,7]
; SSE2-NEXT:    pand %xmm2, %xmm1
; SSE2-NEXT:    pandn %xmm0, %xmm2
; SSE2-NEXT:    por %xmm1, %xmm2
; SSE2-NEXT:    movdqa {{.*#+}} xmm0 = [255,255,255,255,255,255,255,255]
; SSE2-NEXT:    pand %xmm2, %xmm0
; SSE2-NEXT:    packuswb %xmm0, %xmm0
; SSE2-NEXT:    movdqa %xmm2, -{{[0-9]+}}(%rsp)
; SSE2-NEXT:    movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT:    movb %al, 6(%rdi)
; SSE2-NEXT:    movd %xmm0, (%rdi)
; SSE2-NEXT:    pextrw $2, %xmm0, %eax
; SSE2-NEXT:    movw %ax, 4(%rdi)
; SSE2-NEXT:    retq
;
; SSE42-LABEL: v7i8:
; SSE42:       # %bb.0:
; SSE42-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,3,1,3]
; SSE42-NEXT:    pextrb $0, %xmm1, 6(%rdi)
; SSE42-NEXT:    pshufb {{.*#+}} xmm1 = xmm1[8,9,8,9,4,5,8,9,0,1,12,13,0,1,14,15]
; SSE42-NEXT:    pblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5,6,7]
; SSE42-NEXT:    pshufb {{.*#+}} xmm1 = xmm1[0,2,4,6,8,10,12,u,u,u,u,u,u,u,u,u]
; SSE42-NEXT:    pextrw $2, %xmm1, 4(%rdi)
; SSE42-NEXT:    movd %xmm1, (%rdi)
; SSE42-NEXT:    retq
;
; AVX-LABEL: v7i8:
; AVX:       # %bb.0:
; AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,3,1,3]
; AVX-NEXT:    vpshufb {{.*#+}} xmm2 = xmm1[8,9,8,9,4,5,8,9,0,1,12,13,0,1,14,15]
; AVX-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3],xmm0[4],xmm2[5,6,7]
; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,u,u,u,u,u,u,u,u,u]
; AVX-NEXT:    vpextrb $0, %xmm1, 6(%rdi)
; AVX-NEXT:    vpextrw $2, %xmm0, 4(%rdi)
; AVX-NEXT:    vmovd %xmm0, (%rdi)
; AVX-NEXT:    retq
;
; XOP-LABEL: v7i8:
; XOP:       # %bb.0:
; XOP-NEXT:    vpperm {{.*#+}} xmm0 = xmm0[0],xmm1[8],xmm0[12],xmm1[8],xmm0[4],xmm1[12,0,u,u,u,u,u,u,u,u,u]
; XOP-NEXT:    vpextrb $0, %xmm1, 6(%rdi)
; XOP-NEXT:    vpextrw $2, %xmm0, 4(%rdi)
; XOP-NEXT:    vmovd %xmm0, (%rdi)
; XOP-NEXT:    retq
  %r = shufflevector <4 x i8> %a, <4 x i8> %b, <7 x i32> <i32 0, i32 6, i32 3, i32 6, i32 1, i32 7, i32 4>
  store <7 x i8> %r, <7 x i8>* %p
  ret void
}

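; The same <0,6,3,6,1,7,4> mask with <4 x i16> inputs.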
define void @v7i16(<4 x i16> %a, <4 x i16> %b, <7 x i16>* %p) nounwind {
; SSE2-LABEL: v7i16:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,3,1,3]
; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [0,65535,0,65535,0,65535,65535,65535]
; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[2,1,0,3]
; SSE2-NEXT:    pshuflw {{.*#+}} xmm3 = xmm3[0,0,2,0,4,5,6,7]
; SSE2-NEXT:    pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,6,4,7]
; SSE2-NEXT:    pand %xmm2, %xmm3
; SSE2-NEXT:    pandn %xmm0, %xmm2
; SSE2-NEXT:    por %xmm3, %xmm2
; SSE2-NEXT:    movd %xmm1, %eax
; SSE2-NEXT:    movw %ax, 12(%rdi)
; SSE2-NEXT:    movq %xmm2, (%rdi)
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
; SSE2-NEXT:    movd %xmm0, 8(%rdi)
; SSE2-NEXT:    retq
;
; SSE42-LABEL: v7i16:
; SSE42:       # %bb.0:
; SSE42-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,3,1,3]
; SSE42-NEXT:    pextrw $0, %xmm1, 12(%rdi)
; SSE42-NEXT:    pshufb {{.*#+}} xmm1 = xmm1[8,9,8,9,4,5,8,9,0,1,12,13,0,1,14,15]
; SSE42-NEXT:    pblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5,6,7]
; SSE42-NEXT:    pextrd $2, %xmm1, 8(%rdi)
; SSE42-NEXT:    movq %xmm1, (%rdi)
; SSE42-NEXT:    retq
;
; AVX-LABEL: v7i16:
; AVX:       # %bb.0:
; AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,3,1,3]
; AVX-NEXT:    vpshufb {{.*#+}} xmm2 = xmm1[8,9,8,9,4,5,8,9,0,1,12,13,0,1,14,15]
; AVX-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3],xmm0[4],xmm2[5,6,7]
; AVX-NEXT:    vpextrw $0, %xmm1, 12(%rdi)
; AVX-NEXT:    vpextrd $2, %xmm0, 8(%rdi)
; AVX-NEXT:    vmovq %xmm0, (%rdi)
; AVX-NEXT:    retq
;
; XOP-LABEL: v7i16:
; XOP:       # %bb.0:
; XOP-NEXT:    vpperm {{.*#+}} xmm0 = xmm0[0,1],xmm1[8,9],xmm0[12,13],xmm1[8,9],xmm0[4,5],xmm1[12,13,0,1,14,15]
; XOP-NEXT:    vpextrw $0, %xmm1, 12(%rdi)
; XOP-NEXT:    vpextrd $2, %xmm0, 8(%rdi)
; XOP-NEXT:    vmovq %xmm0, (%rdi)
; XOP-NEXT:    retq
  %r = shufflevector <4 x i16> %a, <4 x i16> %b, <7 x i32> <i32 0, i32 6, i32 3, i32 6, i32 1, i32 7, i32 4>
  store <7 x i16> %r, <7 x i16>* %p
  ret void
}

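; The same <0,6,3,6,1,7,4> mask with <4 x i32> inputs.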
define void @v7i32(<4 x i32> %a, <4 x i32> %b, <7 x i32>* %p) nounwind {
; SSE2-LABEL: v7i32:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[0,1,2,2]
; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[0,1,0,3]
; SSE2-NEXT:    punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm2[2],xmm3[3],xmm2[3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[3,1,2,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE2-NEXT:    movd %xmm1, 24(%rdi)
; SSE2-NEXT:    movq %xmm0, 16(%rdi)
; SSE2-NEXT:    movdqa %xmm3, (%rdi)
; SSE2-NEXT:    retq
;
; SSE42-LABEL: v7i32:
; SSE42:       # %bb.0:
; SSE42-NEXT:    movdqa %xmm0, %xmm2
; SSE42-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5],xmm2[6,7]
; SSE42-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,2,3,2]
; SSE42-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE42-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
; SSE42-NEXT:    movd %xmm1, 24(%rdi)
; SSE42-NEXT:    movq %xmm0, 16(%rdi)
; SSE42-NEXT:    movdqa %xmm2, (%rdi)
; SSE42-NEXT:    retq
;
; AVX1-LABEL: v7i32:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vblendps {{.*#+}} xmm2 = xmm0[0,1],xmm1[2],xmm0[3]
; AVX1-NEXT:    vpermilps {{.*#+}} xmm2 = xmm2[0,2,3,2]
; AVX1-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
; AVX1-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[1,3,0,3]
; AVX1-NEXT:    vmovss %xmm1, 24(%rdi)
; AVX1-NEXT:    vmovlps %xmm0, 16(%rdi)
; AVX1-NEXT:    vmovaps %xmm2, (%rdi)
; AVX1-NEXT:    retq
;
; AVX2-LABEL: v7i32:
; AVX2:       # %bb.0:
; AVX2-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX2-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT:    vmovaps {{.*#+}} ymm2 = <0,6,3,6,1,7,4,u>
; AVX2-NEXT:    vpermps %ymm0, %ymm2, %ymm0
; AVX2-NEXT:    vmovss %xmm1, 24(%rdi)
; AVX2-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX2-NEXT:    vmovlps %xmm1, 16(%rdi)
; AVX2-NEXT:    vmovaps %xmm0, (%rdi)
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
;
; XOP-LABEL: v7i32:
; XOP:       # %bb.0:
; XOP-NEXT:    vblendps {{.*#+}} xmm2 = xmm0[0,1],xmm1[2],xmm0[3]
; XOP-NEXT:    vpermilps {{.*#+}} xmm2 = xmm2[0,2,3,2]
; XOP-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
; XOP-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[1,3,0,3]
; XOP-NEXT:    vmovss %xmm1, 24(%rdi)
; XOP-NEXT:    vmovlps %xmm0, 16(%rdi)
; XOP-NEXT:    vmovaps %xmm2, (%rdi)
; XOP-NEXT:    retq
  %r = shufflevector <4 x i32> %a, <4 x i32> %b, <7 x i32> <i32 0, i32 6, i32 3, i32 6, i32 1, i32 7, i32 4>
  store <7 x i32> %r, <7 x i32>* %p
  ret void
}

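; Interleave three 4-element runs (both halves of %a plus the low half of %b) into a <12 x i8> store.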
define void @v12i8(<8 x i8> %a, <8 x i8> %b, <12 x i8>* %p) nounwind {
; SSE2-LABEL: v12i8:
; SSE2:       # %bb.0:
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,0,2,3,4,5,6,7]
; SSE2-NEXT:    pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,0,255,255,0,255,255,0,255,255,0,255,255,255,255]
; SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[3,1,2,3]
; SSE2-NEXT:    pshuflw {{.*#+}} xmm3 = xmm3[0,3,1,3,4,5,6,7]
; SSE2-NEXT:    pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,4,7]
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,3,2,1]
; SSE2-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,1,4,5,6,7]
; SSE2-NEXT:    pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,6,4]
; SSE2-NEXT:    packuswb %xmm3, %xmm0
; SSE2-NEXT:    pand %xmm2, %xmm0
; SSE2-NEXT:    pandn %xmm1, %xmm2
; SSE2-NEXT:    por %xmm0, %xmm2
; SSE2-NEXT:    movq %xmm2, (%rdi)
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
; SSE2-NEXT:    movd %xmm0, 8(%rdi)
; SSE2-NEXT:    retq
;
; SSE42-LABEL: v12i8:
; SSE42:       # %bb.0:
; SSE42-NEXT:    pshufb {{.*#+}} xmm1 = zero,zero,xmm1[0],zero,zero,xmm1[2],zero,zero,xmm1[4],zero,zero,xmm1[6,u,u,u,u]
; SSE42-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0,8],zero,xmm0[2,10],zero,xmm0[4,12],zero,xmm0[6,14],zero,xmm0[u,u,u,u]
; SSE42-NEXT:    por %xmm1, %xmm0
; SSE42-NEXT:    pextrd $2, %xmm0, 8(%rdi)
; SSE42-NEXT:    movq %xmm0, (%rdi)
; SSE42-NEXT:    retq
;
; AVX-LABEL: v12i8:
; AVX:       # %bb.0:
; AVX-NEXT:    vpshufb {{.*#+}} xmm1 = zero,zero,xmm1[0],zero,zero,xmm1[2],zero,zero,xmm1[4],zero,zero,xmm1[6,u,u,u,u]
; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,8],zero,xmm0[2,10],zero,xmm0[4,12],zero,xmm0[6,14],zero,xmm0[u,u,u,u]
; AVX-NEXT:    vpor %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpextrd $2, %xmm0, 8(%rdi)
; AVX-NEXT:    vmovq %xmm0, (%rdi)
; AVX-NEXT:    retq
;
; XOP-LABEL: v12i8:
; XOP:       # %bb.0:
; XOP-NEXT:    vpshufb {{.*#+}} xmm1 = zero,zero,xmm1[0],zero,zero,xmm1[2],zero,zero,xmm1[4],zero,zero,xmm1[6,u,u,u,u]
; XOP-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,8],zero,xmm0[2,10],zero,xmm0[4,12],zero,xmm0[6,14],zero,xmm0[u,u,u,u]
; XOP-NEXT:    vpor %xmm1, %xmm0, %xmm0
; XOP-NEXT:    vpextrd $2, %xmm0, 8(%rdi)
; XOP-NEXT:    vmovq %xmm0, (%rdi)
; XOP-NEXT:    retq
  %r = shufflevector <8 x i8> %a, <8 x i8> %b, <12 x i32> <i32 0, i32 4, i32 8, i32 1, i32 5, i32 9, i32 2, i32 6, i32 10, i32 3, i32 7, i32 11>
  store <12 x i8> %r, <12 x i8>* %p
  ret void
}

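; The same three-way interleave with <8 x i16> inputs and a <12 x i16> store.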
define void @v12i16(<8 x i16> %a, <8 x i16> %b, <12 x i16>* %p) nounwind {
; SSE2-LABEL: v12i16:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[0,0,0,3]
; SSE2-NEXT:    movdqa {{.*#+}} xmm3 = [65535,65535,0,65535,65535,0,65535,65535]
; SSE2-NEXT:    pshufhw {{.*#+}} xmm4 = xmm0[0,1,2,3,6,5,4,7]
; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[0,3,2,1]
; SSE2-NEXT:    pshuflw {{.*#+}} xmm4 = xmm4[0,2,2,1,4,5,6,7]
; SSE2-NEXT:    pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,6,4]
; SSE2-NEXT:    pand %xmm3, %xmm4
; SSE2-NEXT:    pandn %xmm2, %xmm3
; SSE2-NEXT:    por %xmm4, %xmm3
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [65535,0,0,65535,65535,65535,65535,65535]
; SSE2-NEXT:    pand %xmm2, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE2-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,3,1,3,4,5,6,7]
; SSE2-NEXT:    pandn %xmm0, %xmm2
; SSE2-NEXT:    por %xmm1, %xmm2
; SSE2-NEXT:    movq %xmm2, 16(%rdi)
; SSE2-NEXT:    movdqa %xmm3, (%rdi)
; SSE2-NEXT:    retq
;
; SSE42-LABEL: v12i16:
; SSE42:       # %bb.0:
; SSE42-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,1,2,3]
; SSE42-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[3,1,2,3]
; SSE42-NEXT:    pshuflw {{.*#+}} xmm3 = xmm3[0,3,1,3,4,5,6,7]
; SSE42-NEXT:    pblendw {{.*#+}} xmm3 = xmm2[0],xmm3[1,2],xmm2[3,4,5,6,7]
; SSE42-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,0,0,3]
; SSE42-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0,1,8,9,8,9,2,3,10,11,10,11,4,5,12,13]
; SSE42-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3,4],xmm1[5],xmm0[6,7]
; SSE42-NEXT:    movdqa %xmm0, (%rdi)
; SSE42-NEXT:    movq %xmm3, 16(%rdi)
; SSE42-NEXT:    retq
;
; AVX1-LABEL: v12i16:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[1,1,2,3]
; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[3,1,2,3]
; AVX1-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[0,3,1,3,4,5,6,7]
; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1,2],xmm2[3],xmm3[4,5,6,7]
; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,3]
; AVX1-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,8,9,8,9,2,3,10,11,10,11,4,5,12,13]
; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3,4],xmm1[5],xmm0[6,7]
; AVX1-NEXT:    vmovdqa %xmm0, (%rdi)
; AVX1-NEXT:    vmovq %xmm2, 16(%rdi)
; AVX1-NEXT:    retq
;
; AVX2-SLOW-LABEL: v12i16:
; AVX2-SLOW:       # %bb.0:
; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[1,1,2,3]
; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[3,1,2,3]
; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[0,3,1,3,4,5,6,7]
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1,2],xmm2[3],xmm3[4,5,6,7]
; AVX2-SLOW-NEXT:    vpbroadcastd %xmm1, %xmm1
; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,8,9,8,9,2,3,10,11,10,11,4,5,12,13]
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3,4],xmm1[5],xmm0[6,7]
; AVX2-SLOW-NEXT:    vmovdqa %xmm0, (%rdi)
; AVX2-SLOW-NEXT:    vmovq %xmm2, 16(%rdi)
; AVX2-SLOW-NEXT:    retq
;
; AVX2-FAST-LABEL: v12i16:
; AVX2-FAST:       # %bb.0:
; AVX2-FAST-NEXT:    vpbroadcastd %xmm1, %xmm2
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm0[0,1,8,9,8,9,2,3,10,11,10,11,4,5,12,13]
; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1],xmm2[2],xmm3[3,4],xmm2[5],xmm3[6,7]
; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[12,13,6,7,14,15,6,7,8,9,10,11,12,13,14,15]
; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3],xmm0[4,5,6,7]
; AVX2-FAST-NEXT:    vmovq %xmm0, 16(%rdi)
; AVX2-FAST-NEXT:    vmovdqa %xmm2, (%rdi)
; AVX2-FAST-NEXT:    retq
;
; XOP-LABEL: v12i16:
; XOP:       # %bb.0:
; XOP-NEXT:    vpperm {{.*#+}} xmm2 = xmm0[0,1,8,9],xmm1[0,1],xmm0[2,3,10,11],xmm1[2,3],xmm0[4,5,12,13]
; XOP-NEXT:    vpperm {{.*#+}} xmm0 = xmm1[4,5],xmm0[6,7,14,15],xmm1[6,7],xmm0[8,9,10,11,12,13,14,15]
; XOP-NEXT:    vmovq %xmm0, 16(%rdi)
; XOP-NEXT:    vmovdqa %xmm2, (%rdi)
; XOP-NEXT:    retq
  %r = shufflevector <8 x i16> %a, <8 x i16> %b, <12 x i32> <i32 0, i32 4, i32 8, i32 1, i32 5, i32 9, i32 2, i32 6, i32 10, i32 3, i32 7, i32 11>
  store <12 x i16> %r, <12 x i16>* %p
  ret void
}

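; The same three-way interleave with <8 x i32> inputs and a <12 x i32> store.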
define void @v12i32(<8 x i32> %a, <8 x i32> %b, <12 x i32>* %p) nounwind {
; SSE2-LABEL: v12i32:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movaps %xmm2, %xmm3
; SSE2-NEXT:    shufps {{.*#+}} xmm3 = xmm3[0,0],xmm0[1,0]
; SSE2-NEXT:    movaps %xmm0, %xmm4
; SSE2-NEXT:    unpcklps {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
; SSE2-NEXT:    shufps {{.*#+}} xmm4 = xmm4[0,1],xmm3[0,2]
; SSE2-NEXT:    movaps %xmm0, %xmm3
; SSE2-NEXT:    shufps {{.*#+}} xmm3 = xmm3[2,0],xmm1[2,1]
; SSE2-NEXT:    movaps %xmm2, %xmm5
; SSE2-NEXT:    shufps {{.*#+}} xmm5 = xmm5[1,0],xmm1[1,0]
; SSE2-NEXT:    shufps {{.*#+}} xmm5 = xmm5[2,0],xmm3[0,2]
; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[3,2],xmm2[3,2]
; SSE2-NEXT:    unpckhps {{.*#+}} xmm2 = xmm2[2],xmm0[2],xmm2[3],xmm0[3]
; SSE2-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,3],xmm1[0,2]
; SSE2-NEXT:    movaps %xmm2, 32(%rdi)
; SSE2-NEXT:    movaps %xmm5, 16(%rdi)
; SSE2-NEXT:    movaps %xmm4, (%rdi)
; SSE2-NEXT:    retq
;
; SSE42-LABEL: v12i32:
; SSE42:       # %bb.0:
; SSE42-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[0,0,1,1]
; SSE42-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[0,1,0,1]
; SSE42-NEXT:    pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm3[2,3],xmm4[4,5,6,7]
; SSE42-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,1,0,1]
; SSE42-NEXT:    pblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4,5],xmm4[6,7]
; SSE42-NEXT:    pshufd {{.*#+}} xmm4 = xmm1[1,1,2,2]
; SSE42-NEXT:    pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm2[2,3],xmm4[4,5,6,7]
; SSE42-NEXT:    pblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm0[4,5],xmm4[6,7]
; SSE42-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSE42-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
; SSE42-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm0[2,3],xmm2[4,5,6,7]
; SSE42-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[2,2,3,3]
; SSE42-NEXT:    pblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5],xmm2[6,7]
; SSE42-NEXT:    movdqa %xmm0, 32(%rdi)
; SSE42-NEXT:    movdqa %xmm4, 16(%rdi)
; SSE42-NEXT:    movdqa %xmm3, (%rdi)
; SSE42-NEXT:    retq
;
; AVX1-LABEL: v12i32:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm0[2,3,0,1]
; AVX1-NEXT:    vmovsldup {{.*#+}} ymm2 = ymm2[0,0,2,2,4,4,6,6]
; AVX1-NEXT:    vpermilps {{.*#+}} ymm3 = ymm0[0,u,u,1,5,u,u,6]
; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2,3,4,5],ymm2[6],ymm3[7]
; AVX1-NEXT:    vmovddup {{.*#+}} xmm3 = xmm1[0,0]
; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm3, %ymm3
; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7]
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,3],xmm3[3,3]
; AVX1-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm1[1,1]
; AVX1-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3]
; AVX1-NEXT:    vmovaps %xmm0, 32(%rdi)
; AVX1-NEXT:    vmovaps %ymm2, (%rdi)
; AVX1-NEXT:    vzeroupper
; AVX1-NEXT:    retq
;
; AVX2-SLOW-LABEL: v12i32:
; AVX2-SLOW:       # %bb.0:
; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} xmm2 = xmm1[2,3,2,3]
; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm3 = ymm0[3,3,2,3,7,7,6,7]
; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm3 = ymm3[0,2,2,3]
; AVX2-SLOW-NEXT:    vblendps {{.*#+}} xmm2 = xmm2[0],xmm3[1,2],xmm2[3]
; AVX2-SLOW-NEXT:    vmovaps {{.*#+}} ymm3 = <0,4,u,1,5,u,2,6>
; AVX2-SLOW-NEXT:    vpermps %ymm0, %ymm3, %ymm0
; AVX2-SLOW-NEXT:    vbroadcastsd %xmm1, %ymm1
; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
; AVX2-SLOW-NEXT:    vmovaps %ymm0, (%rdi)
; AVX2-SLOW-NEXT:    vmovaps %xmm2, 32(%rdi)
; AVX2-SLOW-NEXT:    vzeroupper
; AVX2-SLOW-NEXT:    retq
;
; AVX2-FAST-LABEL: v12i32:
; AVX2-FAST:       # %bb.0:
; AVX2-FAST-NEXT:    vmovaps {{.*#+}} ymm2 = <0,4,u,1,5,u,2,6>
; AVX2-FAST-NEXT:    vpermps %ymm0, %ymm2, %ymm2
; AVX2-FAST-NEXT:    vbroadcastsd %xmm1, %ymm3
; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7]
; AVX2-FAST-NEXT:    vmovaps {{.*#+}} ymm3 = [3,3,7,7,7,7,6,7]
; AVX2-FAST-NEXT:    vpermps %ymm0, %ymm3, %ymm0
; AVX2-FAST-NEXT:    vpermilps {{.*#+}} xmm1 = xmm1[2,3,2,3]
; AVX2-FAST-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3]
; AVX2-FAST-NEXT:    vmovaps %xmm0, 32(%rdi)
; AVX2-FAST-NEXT:    vmovaps %ymm2, (%rdi)
; AVX2-FAST-NEXT:    vzeroupper
; AVX2-FAST-NEXT:    retq
;
; XOP-LABEL: v12i32:
; XOP:       # %bb.0:
; XOP-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm0[2,3,0,1]
; XOP-NEXT:    vpermil2ps {{.*#+}} ymm2 = ymm0[0],ymm2[0],ymm0[u,1,5,u],ymm2[6],ymm0[6]
; XOP-NEXT:    vmovddup {{.*#+}} xmm3 = xmm1[0,0]
; XOP-NEXT:    vinsertf128 $1, %xmm3, %ymm3, %ymm3
; XOP-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7]
; XOP-NEXT:    vextractf128 $1, %ymm0, %xmm3
; XOP-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,3],xmm3[3,3]
; XOP-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm1[1,1]
; XOP-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3]
; XOP-NEXT:    vmovaps %xmm0, 32(%rdi)
; XOP-NEXT:    vmovaps %ymm2, (%rdi)
; XOP-NEXT:    vzeroupper
; XOP-NEXT:    retq
  %r = shufflevector <8 x i32> %a, <8 x i32> %b, <12 x i32> <i32 0, i32 4, i32 8, i32 1, i32 5, i32 9, i32 2, i32 6, i32 10, i32 3, i32 7, i32 11>
  store <12 x i32> %r, <12 x i32>* %p
  ret void
}

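; Named for PR29025: three <4 x i8> sources are concatenated and interleaved into a <12 x i8> store.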
define void @pr29025(<4 x i8> %a, <4 x i8> %b, <4 x i8> %c, <12 x i8> *%p) nounwind {
; SSE2-LABEL: pr29025:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa {{.*#+}} xmm3 = [255,255,255,255]
; SSE2-NEXT:    pand %xmm3, %xmm1
; SSE2-NEXT:    pand %xmm3, %xmm0
; SSE2-NEXT:    packuswb %xmm1, %xmm0
; SSE2-NEXT:    packuswb %xmm0, %xmm0
; SSE2-NEXT:    pxor %xmm1, %xmm1
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
; SSE2-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,3,1,3,4,5,6,7]
; SSE2-NEXT:    pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,4,7]
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,3,2,1]
; SSE2-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,1,4,5,6,7]
; SSE2-NEXT:    pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,6,4]
; SSE2-NEXT:    packuswb %xmm1, %xmm0
; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [255,255,0,255,255,0,255,255,0,255,255,0,255,255,255,255]
; SSE2-NEXT:    pand %xmm1, %xmm0
; SSE2-NEXT:    pand %xmm3, %xmm2
; SSE2-NEXT:    packuswb %xmm2, %xmm2
; SSE2-NEXT:    packuswb %xmm2, %xmm2
; SSE2-NEXT:    pshuflw {{.*#+}} xmm2 = xmm2[0,0,1,1,4,5,6,7]
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,0,1,3]
; SSE2-NEXT:    pandn %xmm2, %xmm1
; SSE2-NEXT:    por %xmm0, %xmm1
; SSE2-NEXT:    movq %xmm1, (%rdi)
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
; SSE2-NEXT:    movd %xmm0, 8(%rdi)
; SSE2-NEXT:    retq
;
; SSE42-LABEL: pr29025:
; SSE42:       # %bb.0:
; SSE42-NEXT:    movdqa {{.*#+}} xmm3 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
; SSE42-NEXT:    pshufb %xmm3, %xmm1
; SSE42-NEXT:    pshufb %xmm3, %xmm0
; SSE42-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE42-NEXT:    pshufb %xmm3, %xmm2
; SSE42-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
; SSE42-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0,8,1,2,10,3,4,12,5,6,14,7,u,u,u,u]
; SSE42-NEXT:    pextrd $2, %xmm0, 8(%rdi)
; SSE42-NEXT:    movq %xmm0, (%rdi)
; SSE42-NEXT:    retq
;
; AVX-LABEL: pr29025:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovdqa {{.*#+}} xmm3 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
; AVX-NEXT:    vpshufb %xmm3, %xmm1, %xmm1
; AVX-NEXT:    vpshufb %xmm3, %xmm0, %xmm0
; AVX-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; AVX-NEXT:    vpshufb %xmm3, %xmm2, %xmm1
; AVX-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,8,1,2,10,3,4,12,5,6,14,7,u,u,u,u]
; AVX-NEXT:    vpextrd $2, %xmm0, 8(%rdi)
; AVX-NEXT:    vmovq %xmm0, (%rdi)
; AVX-NEXT:    retq
;
; XOP-LABEL: pr29025:
; XOP:       # %bb.0:
; XOP-NEXT:    vpperm {{.*#+}} xmm0 = xmm0[0,4,8,12],xmm1[0,4,8,12],xmm0[u,u,u,u,u,u,u,u]
; XOP-NEXT:    vpperm {{.*#+}} xmm0 = xmm0[0,4],xmm2[0],xmm0[1,5],xmm2[4],xmm0[2,6],xmm2[8],xmm0[3,7],xmm2[12],xmm0[u,u,u,u]
; XOP-NEXT:    vpextrd $2, %xmm0, 8(%rdi)
; XOP-NEXT:    vmovq %xmm0, (%rdi)
; XOP-NEXT:    retq
  %s1 = shufflevector <4 x i8> %a, <4 x i8> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  %s2 = shufflevector <4 x i8> %c, <4 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
  %r = shufflevector <8 x i8> %s1, <8 x i8> %s2, <12 x i32> <i32 0, i32 4, i32 8, i32 1, i32 5, i32 9, i32 2, i32 6, i32 10, i32 3, i32 7, i32 11>
  store <12 x i8> %r, <12 x i8>* %p, align 1
  ret void
}

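; Deinterleave a <24 x i8> load (stride 3) into three <8 x i8> stores.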
define void @interleave_24i8_out(<24 x i8>* %p, <8 x i8>* %q1, <8 x i8>* %q2, <8 x i8>* %q3) nounwind {
; SSE2-LABEL: interleave_24i8_out:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqu (%rdi), %xmm0
; SSE2-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [255,255,0,255,255,0,255,255,255,255,255,255,255,255,255,255]
; SSE2-NEXT:    movdqa %xmm0, %xmm2
; SSE2-NEXT:    pand %xmm4, %xmm2
; SSE2-NEXT:    pandn %xmm1, %xmm4
; SSE2-NEXT:    por %xmm2, %xmm4
; SSE2-NEXT:    pxor %xmm2, %xmm2
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3],xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
; SSE2-NEXT:    movdqa {{.*#+}} xmm5 = [65535,0,65535,65535,0,65535,65535,0]
; SSE2-NEXT:    pand %xmm5, %xmm4
; SSE2-NEXT:    movdqa %xmm0, %xmm3
; SSE2-NEXT:    punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
; SSE2-NEXT:    pandn %xmm3, %xmm5
; SSE2-NEXT:    por %xmm4, %xmm5
; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm5[0,2,1,3]
; SSE2-NEXT:    pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,7,6,5]
; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[0,3,2,1]
; SSE2-NEXT:    pshuflw {{.*#+}} xmm4 = xmm4[0,3,2,1,4,5,6,7]
; SSE2-NEXT:    pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,6,5,4,7]
; SSE2-NEXT:    packuswb %xmm0, %xmm4
; SSE2-NEXT:    movq %xmm4, (%rsi)
; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [0,255,255,0,255,255,0,255,255,255,255,255,255,255,255,255]
; SSE2-NEXT:    movdqa %xmm0, %xmm5
; SSE2-NEXT:    pand %xmm4, %xmm5
; SSE2-NEXT:    pandn %xmm1, %xmm4
; SSE2-NEXT:    por %xmm5, %xmm4
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3],xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
; SSE2-NEXT:    movdqa {{.*#+}} xmm5 = [65535,65535,0,65535,65535,0,65535,65535]
; SSE2-NEXT:    pand %xmm5, %xmm4
; SSE2-NEXT:    pandn %xmm3, %xmm5
; SSE2-NEXT:    por %xmm4, %xmm5
; SSE2-NEXT:    pshuflw {{.*#+}} xmm4 = xmm5[2,1,0,3,4,5,6,7]
; SSE2-NEXT:    pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,6,5,4,7]
; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[0,3,2,1]
; SSE2-NEXT:    pshuflw {{.*#+}} xmm4 = xmm4[1,2,3,0,4,5,6,7]
; SSE2-NEXT:    pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,6,7,4]
; SSE2-NEXT:    packuswb %xmm0, %xmm4
; SSE2-NEXT:    movq %xmm4, (%rdx)
; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [255,0,255,255,0,255,255,0,255,255,255,255,255,255,255,255]
; SSE2-NEXT:    pand %xmm4, %xmm0
; SSE2-NEXT:    pandn %xmm1, %xmm4
; SSE2-NEXT:    por %xmm0, %xmm4
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3],xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
; SSE2-NEXT:    movdqa {{.*#+}} xmm0 = [0,65535,65535,0,65535,65535,0,65535]
; SSE2-NEXT:    pand %xmm0, %xmm4
; SSE2-NEXT:    pandn %xmm3, %xmm0
; SSE2-NEXT:    por %xmm4, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,1,2,0]
; SSE2-NEXT:    pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,5]
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,1,2,0]
; SSE2-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[2,1,0,3,4,5,6,7]
; SSE2-NEXT:    pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,4,7]
; SSE2-NEXT:    packuswb %xmm0, %xmm0
; SSE2-NEXT:    movq %xmm0, (%rcx)
; SSE2-NEXT:    retq
;
; SSE42-LABEL: interleave_24i8_out:
; SSE42:       # %bb.0:
; SSE42-NEXT:    movdqu (%rdi), %xmm0
; SSE42-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
; SSE42-NEXT:    movdqa %xmm1, %xmm2
; SSE42-NEXT:    pshufb {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,xmm2[2,5,u,u,u,u,u,u,u,u]
; SSE42-NEXT:    movdqa %xmm0, %xmm3
; SSE42-NEXT:    pshufb {{.*#+}} xmm3 = xmm3[0,3,6,9,12,15],zero,zero,xmm3[u,u,u,u,u,u,u,u]
; SSE42-NEXT:    por %xmm2, %xmm3
; SSE42-NEXT:    movq %xmm3, (%rsi)
; SSE42-NEXT:    movdqa %xmm1, %xmm2
; SSE42-NEXT:    pshufb {{.*#+}} xmm2 = zero,zero,zero,zero,zero,xmm2[0,3,6,u,u,u,u,u,u,u,u]
; SSE42-NEXT:    movdqa %xmm0, %xmm3
; SSE42-NEXT:    pshufb {{.*#+}} xmm3 = xmm3[1,4,7,10,13],zero,zero,zero,xmm3[u,u,u,u,u,u,u,u]
; SSE42-NEXT:    por %xmm2, %xmm3
; SSE42-NEXT:    movq %xmm3, (%rdx)
; SSE42-NEXT:    pshufb {{.*#+}} xmm1 = zero,zero,zero,zero,zero,xmm1[1,4,7,u,u,u,u,u,u,u,u]
; SSE42-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[2,5,8,11,14],zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
; SSE42-NEXT:    por %xmm1, %xmm0
; SSE42-NEXT:    movq %xmm0, (%rcx)
; SSE42-NEXT:    retq
;
; AVX-LABEL: interleave_24i8_out:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovdqu (%rdi), %xmm0
; AVX-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT:    vpshufb {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,xmm1[2,5,u,u,u,u,u,u,u,u]
; AVX-NEXT:    vpshufb {{.*#+}} xmm3 = xmm0[0,3,6,9,12,15],zero,zero,xmm0[u,u,u,u,u,u,u,u]
; AVX-NEXT:    vpor %xmm2, %xmm3, %xmm2
; AVX-NEXT:    vmovq %xmm2, (%rsi)
; AVX-NEXT:    vpshufb {{.*#+}} xmm2 = zero,zero,zero,zero,zero,xmm1[0,3,6,u,u,u,u,u,u,u,u]
; AVX-NEXT:    vpshufb {{.*#+}} xmm3 = xmm0[1,4,7,10,13],zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
; AVX-NEXT:    vpor %xmm2, %xmm3, %xmm2
; AVX-NEXT:    vmovq %xmm2, (%rdx)
; AVX-NEXT:    vpshufb {{.*#+}} xmm1 = zero,zero,zero,zero,zero,xmm1[1,4,7,u,u,u,u,u,u,u,u]
; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[2,5,8,11,14],zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
; AVX-NEXT:    vpor %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vmovq %xmm0, (%rcx)
; AVX-NEXT:    retq
;
; XOP-LABEL: interleave_24i8_out:
; XOP:       # %bb.0:
; XOP-NEXT:    vmovdqu (%rdi), %xmm0
; XOP-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
; XOP-NEXT:    vpshufb {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,xmm1[2,5,u,u,u,u,u,u,u,u]
; XOP-NEXT:    vpshufb {{.*#+}} xmm3 = xmm0[0,3,6,9,12,15],zero,zero,xmm0[u,u,u,u,u,u,u,u]
; XOP-NEXT:    vpor %xmm2, %xmm3, %xmm2
; XOP-NEXT:    vmovq %xmm2, (%rsi)
; XOP-NEXT:    vpshufb {{.*#+}} xmm2 = zero,zero,zero,zero,zero,xmm1[0,3,6,u,u,u,u,u,u,u,u]
; XOP-NEXT:    vpshufb {{.*#+}} xmm3 = xmm0[1,4,7,10,13],zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
; XOP-NEXT:    vpor %xmm2, %xmm3, %xmm2
; XOP-NEXT:    vmovq %xmm2, (%rdx)
; XOP-NEXT:    vpshufb {{.*#+}} xmm1 = zero,zero,zero,zero,zero,xmm1[1,4,7,u,u,u,u,u,u,u,u]
; XOP-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[2,5,8,11,14],zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
; XOP-NEXT:    vpor %xmm1, %xmm0, %xmm0
; XOP-NEXT:    vmovq %xmm0, (%rcx)
; XOP-NEXT:    retq
  %wide.vec = load <24 x i8>, <24 x i8>* %p, align 4
  %s1 = shufflevector <24 x i8> %wide.vec, <24 x i8> undef, <8 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21>
  %s2 = shufflevector <24 x i8> %wide.vec, <24 x i8> undef, <8 x i32> <i32 1, i32 4, i32 7, i32 10, i32 13, i32 16, i32 19, i32 22>
  %s3 = shufflevector <24 x i8> %wide.vec, <24 x i8> undef, <8 x i32> <i32 2, i32 5, i32 8, i32 11, i32 14, i32 17, i32 20, i32 23>
  store <8 x i8> %s1, <8 x i8>* %q1, align 4
  store <8 x i8> %s2, <8 x i8>* %q2, align 4
  store <8 x i8> %s3, <8 x i8>* %q3, align 4
  ret void
}

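; Interleave three <8 x i8> loads into a single <24 x i8> store.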
define void @interleave_24i8_in(<24 x i8>* %p, <8 x i8>* %q1, <8 x i8>* %q2, <8 x i8>* %q3) nounwind {
; SSE2-LABEL: interleave_24i8_in:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
; SSE2-NEXT:    movq {{.*#+}} xmm2 = mem[0],zero
; SSE2-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
; SSE2-NEXT:    pxor %xmm3, %xmm3
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm1[1,1,2,2]
; SSE2-NEXT:    movdqa {{.*#+}} xmm5 = [65535,65535,0,65535,65535,0,65535,65535]
; SSE2-NEXT:    pand %xmm5, %xmm4
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
; SSE2-NEXT:    pshuflw {{.*#+}} xmm3 = xmm2[0,1,3,3,4,5,6,7]
; SSE2-NEXT:    pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,6,7]
; SSE2-NEXT:    pandn %xmm3, %xmm5
; SSE2-NEXT:    por %xmm4, %xmm5
; SSE2-NEXT:    movdqa %xmm1, %xmm3
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[0,1,2,1]
; SSE2-NEXT:    pshuflw {{.*#+}} xmm3 = xmm3[0,1,2,2,4,5,6,7]
; SSE2-NEXT:    pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,5,4,5]
; SSE2-NEXT:    packuswb %xmm5, %xmm3
; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [255,255,0,255,255,0,255,255,0,255,255,0,255,255,0,255]
; SSE2-NEXT:    pand %xmm4, %xmm3
; SSE2-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[0,1,0,1]
; SSE2-NEXT:    pshuflw {{.*#+}} xmm5 = xmm5[0,0,0,3,4,5,6,7]
; SSE2-NEXT:    pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,6,6]
; SSE2-NEXT:    pandn %xmm5, %xmm4
; SSE2-NEXT:    por %xmm3, %xmm4
; SSE2-NEXT:    punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[2,1,2,3]
; SSE2-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[2,1,1,0,4,5,6,7]
; SSE2-NEXT:    pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,6,7]
; SSE2-NEXT:    packuswb %xmm0, %xmm1
; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [255,0,255,255,0,255,255,0,255,255,255,255,255,255,255,255]
; SSE2-NEXT:    pand %xmm2, %xmm1
; SSE2-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[2,1,3,3,4,5,6,7]
; SSE2-NEXT:    pandn %xmm0, %xmm2
; SSE2-NEXT:    por %xmm1, %xmm2
; SSE2-NEXT:    movq %xmm2, 16(%rdi)
; SSE2-NEXT:    movdqu %xmm4, (%rdi)
; SSE2-NEXT:    retq
;
; SSE42-LABEL: interleave_24i8_in:
; SSE42:       # %bb.0:
; SSE42-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
; SSE42-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
; SSE42-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE42-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
; SSE42-NEXT:    movdqa %xmm0, %xmm2
; SSE42-NEXT:    pshufb {{.*#+}} xmm2 = xmm2[0,8],zero,xmm2[1,9],zero,xmm2[2,10],zero,xmm2[3,11],zero,xmm2[4,12],zero,xmm2[5]
; SSE42-NEXT:    movdqa %xmm1, %xmm3
; SSE42-NEXT:    pshufb {{.*#+}} xmm3 = zero,zero,xmm3[0],zero,zero,xmm3[1],zero,zero,xmm3[2],zero,zero,xmm3[3],zero,zero,xmm3[4],zero
; SSE42-NEXT:    por %xmm2, %xmm3
; SSE42-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[13],zero,xmm0[6,14],zero,xmm0[7,15],zero,xmm0[u,u,u,u,u,u,u,u]
; SSE42-NEXT:    pshufb {{.*#+}} xmm1 = zero,xmm1[5],zero,zero,xmm1[6],zero,zero,xmm1[7,u,u,u,u,u,u,u,u]
; SSE42-NEXT:    por %xmm0, %xmm1
; SSE42-NEXT:    movq %xmm1, 16(%rdi)
; SSE42-NEXT:    movdqu %xmm3, (%rdi)
; SSE42-NEXT:    retq
;
; AVX-LABEL: interleave_24i8_in:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT:    vpshufb {{.*#+}} xmm2 = xmm0[0,8],zero,xmm0[1,9],zero,xmm0[2,10],zero,xmm0[3,11],zero,xmm0[4,12],zero,xmm0[5]
; AVX-NEXT:    vpshufb {{.*#+}} xmm3 = zero,zero,xmm1[0],zero,zero,xmm1[1],zero,zero,xmm1[2],zero,zero,xmm1[3],zero,zero,xmm1[4],zero
; AVX-NEXT:    vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[13],zero,xmm0[6,14],zero,xmm0[7,15],zero,xmm0[u,u,u,u,u,u,u,u]
; AVX-NEXT:    vpshufb {{.*#+}} xmm1 = zero,xmm1[5],zero,zero,xmm1[6],zero,zero,xmm1[7,u,u,u,u,u,u,u,u]
; AVX-NEXT:    vpor %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vmovq %xmm0, 16(%rdi)
; AVX-NEXT:    vmovdqu %xmm2, (%rdi)
; AVX-NEXT:    retq
;
; XOP-LABEL: interleave_24i8_in:
; XOP:       # %bb.0:
; XOP-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
; XOP-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
; XOP-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; XOP-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
; XOP-NEXT:    vpshufb {{.*#+}} xmm2 = xmm0[0,8],zero,xmm0[1,9],zero,xmm0[2,10],zero,xmm0[3,11],zero,xmm0[4,12],zero,xmm0[5]
; XOP-NEXT:    vpshufb {{.*#+}} xmm3 = zero,zero,xmm1[0],zero,zero,xmm1[1],zero,zero,xmm1[2],zero,zero,xmm1[3],zero,zero,xmm1[4],zero
; XOP-NEXT:    vpor %xmm3, %xmm2, %xmm2
; XOP-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[13],zero,xmm0[6,14],zero,xmm0[7,15],zero,xmm0[u,u,u,u,u,u,u,u]
; XOP-NEXT:    vpshufb {{.*#+}} xmm1 = zero,xmm1[5],zero,zero,xmm1[6],zero,zero,xmm1[7,u,u,u,u,u,u,u,u]
; XOP-NEXT:    vpor %xmm1, %xmm0, %xmm0
; XOP-NEXT:    vmovq %xmm0, 16(%rdi)
; XOP-NEXT:    vmovdqu %xmm2, (%rdi)
; XOP-NEXT:    retq
  %s1 = load <8 x i8>, <8 x i8>* %q1, align 4
  %s2 = load <8 x i8>, <8 x i8>* %q2, align 4
  %s3 = load <8 x i8>, <8 x i8>* %q3, align 4
  %t1 = shufflevector <8 x i8> %s1, <8 x i8> %s2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  %t2 = shufflevector <8 x i8> %s3, <8 x i8> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %interleaved = shufflevector <16 x i8> %t1, <16 x i8> %t2, <24 x i32> <i32 0, i32 8, i32 16, i32 1, i32 9, i32 17, i32 2, i32 10, i32 18, i32 3, i32 11, i32 19, i32 4, i32 12, i32 20, i32 5, i32 13, i32 21, i32 6, i32 14, i32 22, i32 7, i32 15, i32 23>
  store <24 x i8> %interleaved, <24 x i8>* %p, align 4
  ret void
}

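; Deinterleave a <24 x i16> load (stride 3) into three <8 x i16> stores.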
define void @interleave_24i16_out(<24 x i16>* %p, <8 x i16>* %q1, <8 x i16>* %q2, <8 x i16>* %q3) nounwind {
; SSE2-LABEL: interleave_24i16_out:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqu (%rdi), %xmm3
; SSE2-NEXT:    movdqu 16(%rdi), %xmm2
; SSE2-NEXT:    movdqu 32(%rdi), %xmm8
; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [65535,0,65535,65535,0,65535,65535,0]
; SSE2-NEXT:    movdqa %xmm3, %xmm4
; SSE2-NEXT:    pand %xmm1, %xmm4
; SSE2-NEXT:    pandn %xmm2, %xmm1
; SSE2-NEXT:    por %xmm4, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,1,3]
; SSE2-NEXT:    pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,6,7]
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,1,3]
; SSE2-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,3,2,1,4,5,6,7]
; SSE2-NEXT:    pshufhw {{.*#+}} xmm4 = xmm1[0,1,2,3,4,7,6,7]
; SSE2-NEXT:    pshufd {{.*#+}} xmm5 = xmm8[0,1,2,1]
; SSE2-NEXT:    pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,5,6,5]
; SSE2-NEXT:    shufps {{.*#+}} xmm5 = xmm5[3,0],xmm4[2,0]
; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,1],xmm5[2,0]
; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [65535,65535,0,65535,65535,0,65535,65535]
; SSE2-NEXT:    movdqa %xmm4, %xmm5
; SSE2-NEXT:    pandn %xmm2, %xmm5
; SSE2-NEXT:    movdqa %xmm3, %xmm6
; SSE2-NEXT:    pand %xmm4, %xmm6
; SSE2-NEXT:    por %xmm5, %xmm6
; SSE2-NEXT:    pshuflw {{.*#+}} xmm5 = xmm6[2,1,2,3,4,5,6,7]
; SSE2-NEXT:    pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,5,4,7]
; SSE2-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[0,3,2,3]
; SSE2-NEXT:    pshuflw {{.*#+}} xmm5 = xmm5[1,2,3,0,4,5,6,7]
; SSE2-NEXT:    pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,6,7]
; SSE2-NEXT:    movdqa {{.*#+}} xmm6 = [65535,65535,65535,65535,65535,0,0,0]
; SSE2-NEXT:    pand %xmm6, %xmm5
; SSE2-NEXT:    pshuflw {{.*#+}} xmm7 = xmm8[0,3,2,3,4,5,6,7]
; SSE2-NEXT:    pshufd {{.*#+}} xmm7 = xmm7[0,1,0,3]
; SSE2-NEXT:    pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,4,5,6]
; SSE2-NEXT:    movdqa %xmm6, %xmm0
; SSE2-NEXT:    pandn %xmm7, %xmm0
; SSE2-NEXT:    por %xmm5, %xmm0
; SSE2-NEXT:    pand %xmm4, %xmm2
; SSE2-NEXT:    pandn %xmm3, %xmm4
; SSE2-NEXT:    por %xmm2, %xmm4
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm4[3,1,2,0]
; SSE2-NEXT:    pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,6,5,6,7]
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[2,1,0,3]
; SSE2-NEXT:    pshuflw {{.*#+}} xmm2 = xmm2[2,1,0,3,4,5,6,7]
; SSE2-NEXT:    pand %xmm6, %xmm2
; SSE2-NEXT:    pshufhw {{.*#+}} xmm3 = xmm8[0,1,2,3,4,7,6,7]
; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[0,1,2,0]
; SSE2-NEXT:    pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,7,4,5]
; SSE2-NEXT:    pandn %xmm3, %xmm6
; SSE2-NEXT:    por %xmm2, %xmm6
; SSE2-NEXT:    movups %xmm1, (%rsi)
; SSE2-NEXT:    movdqu %xmm0, (%rdx)
; SSE2-NEXT:    movdqu %xmm6, (%rcx)
; SSE2-NEXT:    retq
;
; SSE42-LABEL: interleave_24i16_out:
; SSE42:       # %bb.0:
; SSE42-NEXT:    movdqu (%rdi), %xmm0
; SSE42-NEXT:    movdqu 16(%rdi), %xmm1
; SSE42-NEXT:    movdqu 32(%rdi), %xmm2
; SSE42-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,1,2,1]
; SSE42-NEXT:    pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,6,5]
; SSE42-NEXT:    movdqa %xmm0, %xmm4
; SSE42-NEXT:    pblendw {{.*#+}} xmm4 = xmm4[0],xmm1[1],xmm4[2,3],xmm1[4],xmm4[5,6],xmm1[7]
; SSE42-NEXT:    pshufb {{.*#+}} xmm4 = xmm4[0,1,6,7,12,13,2,3,8,9,14,15,u,u,u,u]
; SSE42-NEXT:    pblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5],xmm3[6,7]
; SSE42-NEXT:    movdqa %xmm2, %xmm3
; SSE42-NEXT:    pshufb {{.*#+}} xmm3 = xmm3[0,1,6,7,4,5,6,7,0,1,0,1,6,7,12,13]
; SSE42-NEXT:    movdqa %xmm0, %xmm5
; SSE42-NEXT:    pblendw {{.*#+}} xmm5 = xmm5[0,1],xmm1[2],xmm5[3,4],xmm1[5],xmm5[6,7]
; SSE42-NEXT:    pshufb {{.*#+}} xmm5 = xmm5[2,3,8,9,14,15,4,5,10,11,10,11,8,9,14,15]
; SSE42-NEXT:    pblendw {{.*#+}} xmm5 = xmm5[0,1,2,3,4],xmm3[5,6,7]
; SSE42-NEXT:    pshufb {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,6,7,8,9,2,3,8,9,14,15]
; SSE42-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm0[2],xmm1[3,4],xmm0[5],xmm1[6,7]
; SSE42-NEXT:    pshufb {{.*#+}} xmm1 = xmm1[4,5,10,11,0,1,6,7,12,13,14,15,0,1,2,3]
; SSE42-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm2[5,6,7]
; SSE42-NEXT:    movdqu %xmm4, (%rsi)
; SSE42-NEXT:    movdqu %xmm5, (%rdx)
; SSE42-NEXT:    movdqu %xmm1, (%rcx)
; SSE42-NEXT:    retq
;
; AVX1-LABEL: interleave_24i16_out:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vmovdqu (%rdi), %xmm0
; AVX1-NEXT:    vmovdqu 16(%rdi), %xmm1
; AVX1-NEXT:    vmovdqu 32(%rdi), %xmm2
; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm2[0,1,2,1]
; AVX1-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,6,5]
; AVX1-NEXT:    vpblendw {{.*#+}} xmm4 = xmm0[0],xmm1[1],xmm0[2,3],xmm1[4],xmm0[5,6],xmm1[7]
; AVX1-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[0,1,6,7,12,13,2,3,8,9,14,15,u,u,u,u]
; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3,4,5],xmm3[6,7]
; AVX1-NEXT:    vpshufb {{.*#+}} xmm4 = xmm2[0,1,6,7,4,5,6,7,0,1,0,1,6,7,12,13]
; AVX1-NEXT:    vpblendw {{.*#+}} xmm5 = xmm0[0,1],xmm1[2],xmm0[3,4],xmm1[5],xmm0[6,7]
; AVX1-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[2,3,8,9,14,15,4,5,10,11,u,u,u,u,u,u]
; AVX1-NEXT:    vpblendw {{.*#+}} xmm4 = xmm5[0,1,2,3,4],xmm4[5,6,7]
; AVX1-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,6,7,8,9,2,3,8,9,14,15]
; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2],xmm1[3,4],xmm0[5],xmm1[6,7]
; AVX1-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[4,5,10,11,0,1,6,7,12,13,14,15,0,1,2,3]
; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm2[5,6,7]
; AVX1-NEXT:    vmovdqu %xmm3, (%rsi)
; AVX1-NEXT:    vmovdqu %xmm4, (%rdx)
; AVX1-NEXT:    vmovdqu %xmm0, (%rcx)
; AVX1-NEXT:    retq
;
; AVX2-LABEL: interleave_24i16_out:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vmovdqu (%rdi), %ymm0
; AVX2-NEXT:    vmovdqu 32(%rdi), %xmm1
; AVX2-NEXT:    vpblendw {{.*#+}} ymm2 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7,8,9],ymm1[10],ymm0[11,12],ymm1[13],ymm0[14,15]
; AVX2-NEXT:    vextracti128 $1, %ymm2, %xmm3
; AVX2-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3],xmm3[4],xmm2[5,6],xmm3[7]
; AVX2-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[0,1,6,7,12,13,2,3,8,9,14,15,4,5,10,11]
; AVX2-NEXT:    vpblendw {{.*#+}} ymm3 = ymm1[0],ymm0[1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7],ymm1[8],ymm0[9,10],ymm1[11],ymm0[12,13],ymm1[14],ymm0[15]
; AVX2-NEXT:    vextracti128 $1, %ymm3, %xmm4
; AVX2-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2],xmm3[3,4],xmm4[5],xmm3[6,7]
; AVX2-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[2,3,8,9,14,15,4,5,10,11,0,1,6,7,12,13]
; AVX2-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7],ymm0[8],ymm1[9],ymm0[10,11],ymm1[12],ymm0[13,14],ymm1[15]
; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3],xmm0[4,5],xmm1[6],xmm0[7]
; AVX2-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[4,5,10,11,0,1,6,7,12,13,2,3,8,9,14,15]
; AVX2-NEXT:    vmovdqu %xmm2, (%rsi)
; AVX2-NEXT:    vmovdqu %xmm3, (%rdx)
; AVX2-NEXT:    vmovdqu %xmm0, (%rcx)
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
;
; XOP-LABEL: interleave_24i16_out:
; XOP:       # %bb.0:
; XOP-NEXT:    vmovdqu (%rdi), %xmm0
; XOP-NEXT:    vmovdqu 16(%rdi), %xmm1
; XOP-NEXT:    vmovdqu 32(%rdi), %xmm2
; XOP-NEXT:    vpblendw {{.*#+}} xmm3 = xmm0[0],xmm1[1],xmm0[2,3],xmm1[4],xmm0[5,6],xmm1[7]
; XOP-NEXT:    vpperm {{.*#+}} xmm3 = xmm3[0,1,6,7,12,13,2,3,8,9,14,15],xmm2[4,5,10,11]
; XOP-NEXT:    vpblendw {{.*#+}} xmm4 = xmm0[0,1],xmm1[2],xmm0[3,4],xmm1[5],xmm0[6,7]
; XOP-NEXT:    vpperm {{.*#+}} xmm4 = xmm4[2,3,8,9,14,15,4,5,10,11],xmm2[0,1,6,7,12,13]
; XOP-NEXT:    vpperm {{.*#+}} xmm0 = xmm0[4,5,10,11],xmm1[0,1,6,7,12,13,14,15,0,1,2,3]
; XOP-NEXT:    vpperm {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6,7,8,9],xmm2[2,3,8,9,14,15]
; XOP-NEXT:    vmovdqu %xmm3, (%rsi)
; XOP-NEXT:    vmovdqu %xmm4, (%rdx)
; XOP-NEXT:    vmovdqu %xmm0, (%rcx)
; XOP-NEXT:    retq
  %wide.vec = load <24 x i16>, <24 x i16>* %p, align 4
  %s1 = shufflevector <24 x i16> %wide.vec, <24 x i16> undef, <8 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21>
  %s2 = shufflevector <24 x i16> %wide.vec, <24 x i16> undef, <8 x i32> <i32 1, i32 4, i32 7, i32 10, i32 13, i32 16, i32 19, i32 22>
  %s3 = shufflevector <24 x i16> %wide.vec, <24 x i16> undef, <8 x i32> <i32 2, i32 5, i32 8, i32 11, i32 14, i32 17, i32 20, i32 23>
  store <8 x i16> %s1, <8 x i16>* %q1, align 4
  store <8 x i16> %s2, <8 x i16>* %q2, align 4
  store <8 x i16> %s3, <8 x i16>* %q3, align 4
  ret void
}

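; Interleave three <8 x i16> loads into a single <24 x i16> store.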
1126 define void @interleave_24i16_in(<24 x i16>* %p, <8 x i16>* %q1, <8 x i16>* %q2, <8 x i16>* %q3) nounwind {
1127 ; SSE2-LABEL: interleave_24i16_in:
1129 ; SSE2-NEXT: movdqu (%rsi), %xmm3
1130 ; SSE2-NEXT: movdqu (%rdx), %xmm2
1131 ; SSE2-NEXT: movdqu (%rcx), %xmm1
1132 ; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[0,0,0,3]
1133 ; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [65535,65535,0,65535,65535,0,65535,65535]
1134 ; SSE2-NEXT: movdqa %xmm0, %xmm5
1135 ; SSE2-NEXT: pandn %xmm4, %xmm5
1136 ; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,3,3,3]
1137 ; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm3[1,1,2,2]
1138 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
1139 ; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,2,1]
1140 ; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,2,2,4,5,6,7]
1141 ; SSE2-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,5,4,5]
1142 ; SSE2-NEXT: pand %xmm0, %xmm3
1143 ; SSE2-NEXT: por %xmm5, %xmm3
1144 ; SSE2-NEXT: movdqa %xmm0, %xmm5
1145 ; SSE2-NEXT: pandn %xmm4, %xmm5
1146 ; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[0,1,3,3,4,5,6,7]
1147 ; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
1148 ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,1,2,3]
1149 ; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[2,3,2,0,4,5,6,7]
1150 ; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,5,6,7]
1151 ; SSE2-NEXT: pand %xmm0, %xmm2
1152 ; SSE2-NEXT: por %xmm5, %xmm2
1153 ; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [65535,0,65535,65535,0,65535,65535,0]
1154 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,2,2]
1155 ; SSE2-NEXT: pand %xmm5, %xmm1
1156 ; SSE2-NEXT: pandn %xmm6, %xmm5
1157 ; SSE2-NEXT: por %xmm1, %xmm5
1158 ; SSE2-NEXT: pand %xmm0, %xmm5
1159 ; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm4[0,1,2,3,4,4,6,7]
1160 ; SSE2-NEXT: pandn %xmm1, %xmm0
1161 ; SSE2-NEXT: por %xmm5, %xmm0
1162 ; SSE2-NEXT: movdqu %xmm0, 16(%rdi)
1163 ; SSE2-NEXT: movdqu %xmm2, 32(%rdi)
1164 ; SSE2-NEXT: movdqu %xmm3, (%rdi)
1167 ; SSE42-LABEL: interleave_24i16_in:
1169 ; SSE42-NEXT: movdqu (%rsi), %xmm0
1170 ; SSE42-NEXT: movdqu (%rdx), %xmm1
1171 ; SSE42-NEXT: movdqu (%rcx), %xmm2
1172 ; SSE42-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,2,2]
1173 ; SSE42-NEXT: pshufd {{.*#+}} xmm4 = xmm0[0,3,3,3]
1174 ; SSE42-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
1175 ; SSE42-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
1176 ; SSE42-NEXT: pshufd {{.*#+}} xmm5 = xmm2[0,0,0,3]
1177 ; SSE42-NEXT: pblendw {{.*#+}} xmm5 = xmm0[0,1],xmm5[2],xmm0[3,4],xmm5[5],xmm0[6,7]
1178 ; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,2,2]
1179 ; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm3[1],xmm0[2,3],xmm3[4],xmm0[5,6],xmm3[7]
1180 ; SSE42-NEXT: pshuflw {{.*#+}} xmm3 = xmm1[0,1,3,3,4,5,6,7]
1181 ; SSE42-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,6,7]
1182 ; SSE42-NEXT: pblendw {{.*#+}} xmm3 = xmm0[0,1],xmm3[2],xmm0[3,4],xmm3[5],xmm0[6,7]
1183 ; SSE42-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
1184 ; SSE42-NEXT: pshufb {{.*#+}} xmm1 = xmm1[4,5,6,7,4,5,8,9,10,11,10,11,12,13,14,15]
1185 ; SSE42-NEXT: pblendw {{.*#+}} xmm4 = xmm1[0,1],xmm4[2],xmm1[3,4],xmm4[5],xmm1[6,7]
1186 ; SSE42-NEXT: movdqu %xmm4, 32(%rdi)
1187 ; SSE42-NEXT: movdqu %xmm3, 16(%rdi)
1188 ; SSE42-NEXT: movdqu %xmm5, (%rdi)
1191 ; AVX1-LABEL: interleave_24i16_in:
1193 ; AVX1-NEXT: vmovdqu (%rsi), %xmm0
1194 ; AVX1-NEXT: vmovdqu (%rdx), %xmm1
1195 ; AVX1-NEXT: vmovdqu (%rcx), %xmm2
1196 ; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,2,2]
1197 ; AVX1-NEXT: vpshuflw {{.*#+}} xmm4 = xmm1[0,1,3,3,4,5,6,7]
1198 ; AVX1-NEXT: vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,4,6,7]
1199 ; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2],xmm3[3,4],xmm4[5],xmm3[6,7]
1200 ; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm2[1,1,2,2]
1201 ; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0],xmm3[1,2],xmm4[3],xmm3[4,5],xmm4[6],xmm3[7]
1202 ; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
1203 ; AVX1-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[0,1,2,3,u,u,4,5,6,7,u,u,8,9,10,11]
1204 ; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm2[0,0,0,3]
1205 ; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2],xmm4[3,4],xmm5[5],xmm4[6,7]
1206 ; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
1207 ; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
1208 ; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[4,5,10,11,10,11,8,9,8,9,14,15,12,13,14,15]
1209 ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm2[2,2,3,3]
1210 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3],xmm1[4],xmm0[5,6],xmm1[7]
1211 ; AVX1-NEXT: vmovdqu %xmm0, 32(%rdi)
1212 ; AVX1-NEXT: vmovups %ymm3, (%rdi)
1213 ; AVX1-NEXT: vzeroupper
1216 ; AVX2-LABEL: interleave_24i16_in:
1218 ; AVX2-NEXT: vmovdqu (%rsi), %xmm0
1219 ; AVX2-NEXT: vmovdqu (%rdx), %xmm1
1220 ; AVX2-NEXT: vmovdqu (%rcx), %xmm2
1221 ; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm3
1222 ; AVX2-NEXT: vpshufb {{.*#+}} ymm4 = ymm3[0,1,2,3,6,7,2,3,8,9,8,9,4,5,6,7,16,17,18,19,22,23,18,19,24,25,24,25,20,21,22,23]
1223 ; AVX2-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,3,0,1]
1224 ; AVX2-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,0,1,u,u,u,u,2,3,u,u,u,u,4,5,u,u,22,23,u,u,u,u,24,25,u,u,u,u,26,27]
1225 ; AVX2-NEXT: vpblendw {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6],ymm3[7],ymm4[8],ymm3[9],ymm4[10,11],ymm3[12],ymm4[13,14],ymm3[15]
1226 ; AVX2-NEXT: vmovdqa {{.*#+}} ymm4 = <u,0,0,u,1,1,u,2>
1227 ; AVX2-NEXT: vpermd %ymm2, %ymm4, %ymm4
1228 ; AVX2-NEXT: vmovdqa {{.*#+}} ymm5 = [255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255]
1229 ; AVX2-NEXT: vpblendvb %ymm5, %ymm3, %ymm4, %ymm3
1230 ; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
1231 ; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[4,5,10,11,10,11,8,9,8,9,14,15,12,13,14,15]
1232 ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm2[2,2,3,3]
1233 ; AVX2-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3],xmm1[4],xmm0[5,6],xmm1[7]
1234 ; AVX2-NEXT: vmovdqu %xmm0, 32(%rdi)
1235 ; AVX2-NEXT: vmovdqu %ymm3, (%rdi)
1236 ; AVX2-NEXT: vzeroupper
1239 ; XOP-LABEL: interleave_24i16_in:
; XOP-NEXT: vmovdqu (%rsi), %xmm0
; XOP-NEXT: vmovdqu (%rdx), %xmm1
; XOP-NEXT: vmovdqu (%rcx), %xmm2
; XOP-NEXT: vpperm {{.*#+}} xmm3 = xmm0[4,5,6,7],xmm1[6,7],xmm0[6,7,8,9],xmm1[8,9],xmm0[8,9,10,11]
; XOP-NEXT: vpshufd {{.*#+}} xmm4 = xmm2[1,1,2,2]
; XOP-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0],xmm3[1,2],xmm4[3],xmm3[4,5],xmm4[6],xmm3[7]
; XOP-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; XOP-NEXT: vpperm {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm2[0,1],xmm4[4,5,6,7],xmm2[2,3],xmm4[8,9,10,11]
; XOP-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
; XOP-NEXT: vpperm {{.*#+}} xmm0 = xmm1[10,11],xmm0[12,13,12,13],xmm1[12,13,12,13],xmm0[14,15],xmm1[14,15],xmm0[14,15]
; XOP-NEXT: vpshufd {{.*#+}} xmm1 = xmm2[2,2,3,3]
; XOP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3],xmm1[4],xmm0[5,6],xmm1[7]
; XOP-NEXT: vmovdqu %xmm0, 32(%rdi)
; XOP-NEXT: vmovups %ymm3, (%rdi)
; XOP-NEXT: vzeroupper
; XOP-NEXT: retq
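; The IR below builds the interleaved result in two steps: %t1 concatenates
; %s1 and %s2 into 16 lanes, %t2 widens %s3 with undef, and a single
; 24-element mask then picks s1[j], s2[j], s3[j] for every j, producing the
; a0,b0,c0,a1,b1,c1,... pattern the stores above are checked against.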
%s1 = load <8 x i16>, <8 x i16>* %q1, align 4
%s2 = load <8 x i16>, <8 x i16>* %q2, align 4
%s3 = load <8 x i16>, <8 x i16>* %q3, align 4
%t1 = shufflevector <8 x i16> %s1, <8 x i16> %s2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
%t2 = shufflevector <8 x i16> %s3, <8 x i16> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
%interleaved = shufflevector <16 x i16> %t1, <16 x i16> %t2, <24 x i32> <i32 0, i32 8, i32 16, i32 1, i32 9, i32 17, i32 2, i32 10, i32 18, i32 3, i32 11, i32 19, i32 4, i32 12, i32 20, i32 5, i32 13, i32 21, i32 6, i32 14, i32 22, i32 7, i32 15, i32 23>
store <24 x i16> %interleaved, <24 x i16>* %p, align 4
ret void
}

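; interleave_24i32_out performs the inverse operation at i32: one contiguous
; 24 x i32 load is split into three stride-3 subsequences and each 8 x i32
; result is stored to its own destination.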
define void @interleave_24i32_out(<24 x i32>* %p, <8 x i32>* %q1, <8 x i32>* %q2, <8 x i32>* %q3) nounwind {
; SSE2-LABEL: interleave_24i32_out:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqu 64(%rdi), %xmm10
; SSE2-NEXT: movups 80(%rdi), %xmm8
; SSE2-NEXT: movdqu (%rdi), %xmm0
; SSE2-NEXT: movdqu 16(%rdi), %xmm11
; SSE2-NEXT: movups 32(%rdi), %xmm5
; SSE2-NEXT: movdqu 48(%rdi), %xmm9
; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm0[2,3,0,1]
; SSE2-NEXT: movaps %xmm5, %xmm7
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm11[1,1,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm3[0],xmm6[1],xmm3[1]
; SSE2-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,1],xmm5[0,3]
; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,0],xmm11[2,0]
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,3],xmm5[2,0]
; SSE2-NEXT: movaps %xmm8, %xmm5
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm10[1,1,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm9[2,3,0,1]
; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm8[0,3]
; SSE2-NEXT: shufps {{.*#+}} xmm8 = xmm8[1,0],xmm10[2,0]
; SSE2-NEXT: movdqa %xmm9, %xmm2
; SSE2-NEXT: shufps {{.*#+}} xmm9 = xmm9[0,3],xmm8[2,0]
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,0],xmm10[0,0]
; SSE2-NEXT: movaps %xmm2, %xmm4
; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm10[3,3]
; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,0],xmm4[2,0]
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm5[2,0]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm11[0,0]
; SSE2-NEXT: movaps %xmm0, %xmm4
; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm11[3,3]
; SSE2-NEXT: shufps {{.*#+}} xmm7 = xmm7[2,0],xmm4[2,0]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm7[2,0]
; SSE2-NEXT: movups %xmm9, 16(%rsi)
; SSE2-NEXT: movups %xmm3, (%rsi)
; SSE2-NEXT: movups %xmm2, 16(%rdx)
; SSE2-NEXT: movups %xmm0, (%rdx)
; SSE2-NEXT: movups %xmm1, 16(%rcx)
; SSE2-NEXT: movups %xmm6, (%rcx)
; SSE2-NEXT: retq
;
; SSE42-LABEL: interleave_24i32_out:
; SSE42: # %bb.0:
; SSE42-NEXT: movdqu 80(%rdi), %xmm9
; SSE42-NEXT: movdqu 64(%rdi), %xmm10
; SSE42-NEXT: movdqu (%rdi), %xmm4
; SSE42-NEXT: movdqu 16(%rdi), %xmm2
; SSE42-NEXT: movdqu 32(%rdi), %xmm11
; SSE42-NEXT: movdqu 48(%rdi), %xmm5
; SSE42-NEXT: pshufd {{.*#+}} xmm8 = xmm11[0,1,0,1]
; SSE42-NEXT: movdqa %xmm2, %xmm7
; SSE42-NEXT: pblendw {{.*#+}} xmm7 = xmm7[0,1],xmm4[2,3],xmm7[4,5,6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm6 = xmm4[2,3,0,1]
; SSE42-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,3],xmm2[2,3]
; SSE42-NEXT: blendps {{.*#+}} xmm4 = xmm4[0,1,2],xmm8[3]
; SSE42-NEXT: movdqa %xmm10, %xmm1
; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm5[2,3],xmm1[4,5,6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm5[2,3,0,1]
; SSE42-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,3],xmm10[2,3]
; SSE42-NEXT: pshufd {{.*#+}} xmm3 = xmm9[0,1,0,1]
; SSE42-NEXT: pblendw {{.*#+}} xmm3 = xmm5[0,1,2,3,4,5],xmm3[6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm5 = xmm11[0,1,2,2]
; SSE42-NEXT: pshufd {{.*#+}} xmm7 = xmm7[1,0,3,3]
; SSE42-NEXT: pblendw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,5],xmm5[6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,0,3,3]
; SSE42-NEXT: pshufd {{.*#+}} xmm5 = xmm9[0,1,2,2]
; SSE42-NEXT: pblendw {{.*#+}} xmm5 = xmm1[0,1,2,3,4,5],xmm5[6,7]
; SSE42-NEXT: pblendw {{.*#+}} xmm6 = xmm6[0,1],xmm2[2,3],xmm6[4,5,6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm11[0,1,0,3]
; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm6[0,1,2,3],xmm1[4,5,6,7]
; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm10[2,3],xmm0[4,5,6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm9[0,1,0,3]
; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0,1,2,3],xmm2[4,5,6,7]
; SSE42-NEXT: movdqu %xmm3, 16(%rsi)
; SSE42-NEXT: movups %xmm4, (%rsi)
; SSE42-NEXT: movdqu %xmm5, 16(%rdx)
; SSE42-NEXT: movdqu %xmm7, (%rdx)
; SSE42-NEXT: movdqu %xmm2, 16(%rcx)
; SSE42-NEXT: movdqu %xmm1, (%rcx)
; SSE42-NEXT: retq
;
; AVX1-LABEL: interleave_24i32_out:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovups (%rdi), %ymm0
; AVX1-NEXT: vmovups 32(%rdi), %ymm1
; AVX1-NEXT: vmovups 80(%rdi), %xmm2
; AVX1-NEXT: vmovups 64(%rdi), %xmm3
; AVX1-NEXT: vinsertps {{.*#+}} xmm4 = zero,zero,xmm3[2],xmm2[1]
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm4
; AVX1-NEXT: vblendps {{.*#+}} ymm5 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm6
; AVX1-NEXT: vblendps {{.*#+}} xmm5 = xmm5[0,1],xmm6[2],xmm5[3]
; AVX1-NEXT: vpermilps {{.*#+}} xmm5 = xmm5[0,3,2,1]
; AVX1-NEXT: vpermilps {{.*#+}} xmm6 = xmm6[0,3,2,3]
; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm5, %ymm5
; AVX1-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1,2,3,4,5],ymm4[6,7]
; AVX1-NEXT: vblendps {{.*#+}} xmm5 = xmm3[0,1],xmm2[2],xmm3[3]
; AVX1-NEXT: vpermilps {{.*#+}} xmm5 = xmm5[0,0,3,2]
; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm5
; AVX1-NEXT: vblendps {{.*#+}} ymm6 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
; AVX1-NEXT: vextractf128 $1, %ymm6, %xmm7
; AVX1-NEXT: vblendps {{.*#+}} xmm6 = xmm7[0],xmm6[1,2],xmm7[3]
; AVX1-NEXT: vpermilps {{.*#+}} xmm6 = xmm6[1,0,3,2]
; AVX1-NEXT: vmovshdup {{.*#+}} xmm7 = xmm7[1,1,3,3]
; AVX1-NEXT: vinsertf128 $1, %xmm7, %ymm6, %ymm6
; AVX1-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1,2,3,4],ymm5[5,6,7]
; AVX1-NEXT: vshufps {{.*#+}} xmm2 = xmm3[0,1],xmm2[0,3]
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,1,0,3]
; AVX1-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5,6,7]
; AVX1-NEXT: vmovups %ymm4, (%rsi)
; AVX1-NEXT: vmovups %ymm5, (%rdx)
; AVX1-NEXT: vmovups %ymm0, (%rcx)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-SLOW-LABEL: interleave_24i32_out:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vmovups (%rdi), %ymm0
; AVX2-SLOW-NEXT: vmovups 32(%rdi), %ymm1
; AVX2-SLOW-NEXT: vmovups 64(%rdi), %ymm2
; AVX2-SLOW-NEXT: vmovaps {{.*#+}} ymm3 = <u,u,u,u,u,u,2,5>
; AVX2-SLOW-NEXT: vpermps %ymm2, %ymm3, %ymm3
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
; AVX2-SLOW-NEXT: vmovaps {{.*#+}} ymm5 = <0,3,6,1,4,7,u,u>
; AVX2-SLOW-NEXT: vpermps %ymm4, %ymm5, %ymm4
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3,4,5],ymm3[6,7]
; AVX2-SLOW-NEXT: vmovaps {{.*#+}} ymm4 = <u,u,u,u,u,0,3,6>
; AVX2-SLOW-NEXT: vpermps %ymm2, %ymm4, %ymm4
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm5 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
; AVX2-SLOW-NEXT: vmovaps {{.*#+}} ymm6 = <1,4,7,2,5,u,u,u>
; AVX2-SLOW-NEXT: vpermps %ymm5, %ymm6, %ymm5
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1,2,3,4],ymm4[5,6,7]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
; AVX2-SLOW-NEXT: vmovaps {{.*#+}} ymm1 = <2,5,0,3,6,u,u,u>
; AVX2-SLOW-NEXT: vpermps %ymm0, %ymm1, %ymm0
; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm1 = ymm2[0,1,0,3,4,5,4,7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
; AVX2-SLOW-NEXT: vmovups %ymm3, (%rsi)
; AVX2-SLOW-NEXT: vmovups %ymm4, (%rdx)
; AVX2-SLOW-NEXT: vmovups %ymm0, (%rcx)
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-LABEL: interleave_24i32_out:
; AVX2-FAST: # %bb.0:
; AVX2-FAST-NEXT: vmovups (%rdi), %ymm0
; AVX2-FAST-NEXT: vmovups 32(%rdi), %ymm1
; AVX2-FAST-NEXT: vmovups 64(%rdi), %ymm2
; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm3 = <u,u,u,u,u,u,2,5>
; AVX2-FAST-NEXT: vpermps %ymm2, %ymm3, %ymm3
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm4 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm5 = <0,3,6,1,4,7,u,u>
; AVX2-FAST-NEXT: vpermps %ymm4, %ymm5, %ymm4
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3,4,5],ymm3[6,7]
; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm4 = <u,u,u,u,u,0,3,6>
; AVX2-FAST-NEXT: vpermps %ymm2, %ymm4, %ymm4
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm5 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm6 = <1,4,7,2,5,u,u,u>
; AVX2-FAST-NEXT: vpermps %ymm5, %ymm6, %ymm5
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1,2,3,4],ymm4[5,6,7]
; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm5 = [0,1,0,3,0,1,4,7]
; AVX2-FAST-NEXT: vpermps %ymm2, %ymm5, %ymm2
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm1 = <2,5,0,3,6,u,u,u>
; AVX2-FAST-NEXT: vpermps %ymm0, %ymm1, %ymm0
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5,6,7]
; AVX2-FAST-NEXT: vmovups %ymm3, (%rsi)
; AVX2-FAST-NEXT: vmovups %ymm4, (%rdx)
; AVX2-FAST-NEXT: vmovups %ymm0, (%rcx)
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq
;
; XOP-LABEL: interleave_24i32_out:
; XOP: # %bb.0:
; XOP-NEXT: vmovups (%rdi), %ymm0
; XOP-NEXT: vmovups 32(%rdi), %ymm1
; XOP-NEXT: vmovups 80(%rdi), %xmm2
; XOP-NEXT: vmovups 64(%rdi), %xmm3
; XOP-NEXT: vinsertps {{.*#+}} xmm4 = zero,zero,xmm3[2],xmm2[1]
; XOP-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm4
; XOP-NEXT: vblendps {{.*#+}} ymm5 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
; XOP-NEXT: vextractf128 $1, %ymm5, %xmm6
; XOP-NEXT: vblendps {{.*#+}} xmm5 = xmm5[0,1],xmm6[2],xmm5[3]
; XOP-NEXT: vpermilps {{.*#+}} xmm5 = xmm5[0,3,2,1]
; XOP-NEXT: vpermilps {{.*#+}} xmm6 = xmm6[0,3,2,3]
; XOP-NEXT: vinsertf128 $1, %xmm6, %ymm5, %ymm5
; XOP-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1,2,3,4,5],ymm4[6,7]
; XOP-NEXT: vblendps {{.*#+}} xmm5 = xmm3[0,1],xmm2[2],xmm3[3]
; XOP-NEXT: vpermilps {{.*#+}} xmm5 = xmm5[0,0,3,2]
; XOP-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm5
; XOP-NEXT: vblendps {{.*#+}} ymm6 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
; XOP-NEXT: vextractf128 $1, %ymm6, %xmm7
; XOP-NEXT: vblendps {{.*#+}} xmm6 = xmm7[0],xmm6[1,2],xmm7[3]
; XOP-NEXT: vpermilps {{.*#+}} xmm6 = xmm6[1,0,3,2]
; XOP-NEXT: vmovshdup {{.*#+}} xmm7 = xmm7[1,1,3,3]
; XOP-NEXT: vinsertf128 $1, %xmm7, %ymm6, %ymm6
; XOP-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1,2,3,4],ymm5[5,6,7]
; XOP-NEXT: vshufps {{.*#+}} xmm2 = xmm3[0,1],xmm2[0,3]
; XOP-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
; XOP-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
; XOP-NEXT: vextractf128 $1, %ymm0, %xmm1
; XOP-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
; XOP-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,1,0,3]
; XOP-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; XOP-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; XOP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5,6,7]
; XOP-NEXT: vmovups %ymm4, (%rsi)
; XOP-NEXT: vmovups %ymm5, (%rdx)
; XOP-NEXT: vmovups %ymm0, (%rcx)
; XOP-NEXT: vzeroupper
; XOP-NEXT: retq
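; The three shuffle masks below each take every third element of the wide
; load, starting at offsets 0, 1, and 2 respectively.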
%wide.vec = load <24 x i32>, <24 x i32>* %p, align 4
%s1 = shufflevector <24 x i32> %wide.vec, <24 x i32> undef, <8 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21>
%s2 = shufflevector <24 x i32> %wide.vec, <24 x i32> undef, <8 x i32> <i32 1, i32 4, i32 7, i32 10, i32 13, i32 16, i32 19, i32 22>
%s3 = shufflevector <24 x i32> %wide.vec, <24 x i32> undef, <8 x i32> <i32 2, i32 5, i32 8, i32 11, i32 14, i32 17, i32 20, i32 23>
store <8 x i32> %s1, <8 x i32>* %q1, align 4
store <8 x i32> %s2, <8 x i32>* %q2, align 4
store <8 x i32> %s3, <8 x i32>* %q3, align 4
ret void
}

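; interleave_24i32_in is the i32 counterpart of interleave_24i16_in: three
; 8 x i32 inputs are merged into a single interleaved 24 x i32 store.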
define void @interleave_24i32_in(<24 x i32>* %p, <8 x i32>* %q1, <8 x i32>* %q2, <8 x i32>* %q3) nounwind {
; SSE2-LABEL: interleave_24i32_in:
; SSE2: # %bb.0:
; SSE2-NEXT: movups (%rsi), %xmm5
; SSE2-NEXT: movups 16(%rsi), %xmm8
; SSE2-NEXT: movups (%rdx), %xmm6
; SSE2-NEXT: movups 16(%rdx), %xmm3
; SSE2-NEXT: movups (%rcx), %xmm0
; SSE2-NEXT: movups 16(%rcx), %xmm4
; SSE2-NEXT: movaps %xmm0, %xmm7
; SSE2-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,0],xmm5[1,0]
; SSE2-NEXT: movaps %xmm5, %xmm1
; SSE2-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1]
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm7[0,2]
; SSE2-NEXT: movaps %xmm5, %xmm7
; SSE2-NEXT: shufps {{.*#+}} xmm7 = xmm7[2,0],xmm6[2,1]
; SSE2-NEXT: movaps %xmm0, %xmm2
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,0],xmm6[1,0]
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm7[0,2]
; SSE2-NEXT: shufps {{.*#+}} xmm6 = xmm6[3,2],xmm0[3,2]
; SSE2-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm5[2],xmm0[3],xmm5[3]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],xmm6[0,2]
; SSE2-NEXT: movaps %xmm4, %xmm5
; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,0],xmm8[1,0]
; SSE2-NEXT: movaps %xmm8, %xmm6
; SSE2-NEXT: unpcklps {{.*#+}} xmm6 = xmm6[0],xmm3[0],xmm6[1],xmm3[1]
; SSE2-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,1],xmm5[0,2]
; SSE2-NEXT: movaps %xmm8, %xmm5
; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,0],xmm3[2,1]
; SSE2-NEXT: movaps %xmm4, %xmm7
; SSE2-NEXT: shufps {{.*#+}} xmm7 = xmm7[1,0],xmm3[1,0]
; SSE2-NEXT: shufps {{.*#+}} xmm7 = xmm7[2,0],xmm5[0,2]
; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,2],xmm4[3,2]
; SSE2-NEXT: unpckhps {{.*#+}} xmm4 = xmm4[2],xmm8[2],xmm4[3],xmm8[3]
; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,3],xmm3[0,2]
; SSE2-NEXT: movups %xmm4, 80(%rdi)
; SSE2-NEXT: movups %xmm7, 64(%rdi)
; SSE2-NEXT: movups %xmm6, 48(%rdi)
; SSE2-NEXT: movups %xmm0, 32(%rdi)
; SSE2-NEXT: movups %xmm2, 16(%rdi)
; SSE2-NEXT: movups %xmm1, (%rdi)
; SSE2-NEXT: retq
;
; SSE42-LABEL: interleave_24i32_in:
; SSE42: # %bb.0:
; SSE42-NEXT: movdqu (%rsi), %xmm5
; SSE42-NEXT: movdqu 16(%rsi), %xmm2
; SSE42-NEXT: movdqu (%rdx), %xmm6
; SSE42-NEXT: movdqu 16(%rdx), %xmm1
; SSE42-NEXT: movdqu (%rcx), %xmm7
; SSE42-NEXT: movdqu 16(%rcx), %xmm4
; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm6[0,0,1,1]
; SSE42-NEXT: pshufd {{.*#+}} xmm3 = xmm5[0,1,0,1]
; SSE42-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm0[2,3],xmm3[4,5,6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm7[0,1,0,1]
; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4,5],xmm3[6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm3 = xmm6[1,1,2,2]
; SSE42-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm7[2,3],xmm3[4,5,6,7]
; SSE42-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm5[4,5],xmm3[6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm5 = xmm5[2,3,0,1]
; SSE42-NEXT: pshufd {{.*#+}} xmm7 = xmm7[2,3,2,3]
; SSE42-NEXT: pblendw {{.*#+}} xmm7 = xmm7[0,1],xmm5[2,3],xmm7[4,5,6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm5 = xmm6[2,2,3,3]
; SSE42-NEXT: pblendw {{.*#+}} xmm5 = xmm7[0,1,2,3],xmm5[4,5],xmm7[6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm6 = xmm1[0,0,1,1]
; SSE42-NEXT: pshufd {{.*#+}} xmm7 = xmm2[0,1,0,1]
; SSE42-NEXT: pblendw {{.*#+}} xmm7 = xmm7[0,1],xmm6[2,3],xmm7[4,5,6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm6 = xmm4[0,1,0,1]
; SSE42-NEXT: pblendw {{.*#+}} xmm6 = xmm7[0,1,2,3],xmm6[4,5],xmm7[6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm7 = xmm1[1,1,2,2]
; SSE42-NEXT: pblendw {{.*#+}} xmm7 = xmm7[0,1],xmm4[2,3],xmm7[4,5,6,7]
; SSE42-NEXT: pblendw {{.*#+}} xmm7 = xmm7[0,1,2,3],xmm2[4,5],xmm7[6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
; SSE42-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,3,2,3]
; SSE42-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm2[2,3],xmm4[4,5,6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm4[0,1,2,3],xmm1[4,5],xmm4[6,7]
; SSE42-NEXT: movdqu %xmm1, 80(%rdi)
; SSE42-NEXT: movdqu %xmm7, 64(%rdi)
; SSE42-NEXT: movdqu %xmm6, 48(%rdi)
; SSE42-NEXT: movdqu %xmm5, 32(%rdi)
; SSE42-NEXT: movdqu %xmm3, 16(%rdi)
; SSE42-NEXT: movdqu %xmm0, (%rdi)
; SSE42-NEXT: retq
;
; AVX1-LABEL: interleave_24i32_in:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovupd (%rsi), %ymm0
; AVX1-NEXT: vmovupd (%rcx), %ymm1
; AVX1-NEXT: vmovups 16(%rcx), %xmm2
; AVX1-NEXT: vmovups (%rdx), %xmm3
; AVX1-NEXT: vmovups 16(%rdx), %xmm4
; AVX1-NEXT: vshufps {{.*#+}} xmm5 = xmm4[3,0],xmm2[3,0]
; AVX1-NEXT: vshufps {{.*#+}} xmm5 = xmm2[2,1],xmm5[0,2]
; AVX1-NEXT: vshufps {{.*#+}} xmm2 = xmm2[1,0],xmm4[1,0]
; AVX1-NEXT: vshufps {{.*#+}} xmm2 = xmm2[2,0],xmm4[2,2]
; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm2, %ymm2
; AVX1-NEXT: vpermilpd {{.*#+}} ymm4 = ymm0[1,1,3,3]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm4[2,3,2,3]
; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm4[2],ymm2[3,4],ymm4[5],ymm2[6,7]
; AVX1-NEXT: vmovups (%rsi), %xmm4
; AVX1-NEXT: vshufps {{.*#+}} xmm5 = xmm4[2,0],xmm3[2,0]
; AVX1-NEXT: vshufps {{.*#+}} xmm5 = xmm3[1,1],xmm5[0,2]
; AVX1-NEXT: vshufps {{.*#+}} xmm3 = xmm3[0,0],xmm4[0,0]
; AVX1-NEXT: vshufps {{.*#+}} xmm3 = xmm3[2,0],xmm4[2,1]
; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm3, %ymm3
; AVX1-NEXT: vmovddup {{.*#+}} xmm4 = xmm1[0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm4, %ymm4
; AVX1-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7]
; AVX1-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,2,2]
; AVX1-NEXT: vpermilpd {{.*#+}} ymm1 = ymm1[1,1,2,2]
; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
; AVX1-NEXT: vpermilps {{.*#+}} ymm1 = mem[0,0,3,3,4,4,7,7]
; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
; AVX1-NEXT: vmovups %ymm0, 32(%rdi)
; AVX1-NEXT: vmovups %ymm3, (%rdi)
; AVX1-NEXT: vmovups %ymm2, 64(%rdi)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-SLOW-LABEL: interleave_24i32_in:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vmovups (%rsi), %ymm0
; AVX2-SLOW-NEXT: vmovups (%rdx), %ymm1
; AVX2-SLOW-NEXT: vmovups (%rcx), %ymm2
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm3 = ymm2[2,1,3,3]
; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm4 = ymm1[1,2,3,3,5,6,7,7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[2,2,2,3]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6],ymm3[7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm4 = ymm0[0,3,3,3]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7]
; AVX2-SLOW-NEXT: vpermilps {{.*#+}} xmm4 = mem[1,0,2,2]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,1,0,1]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm5 = ymm0[0,0,2,1]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0],ymm4[1],ymm5[2,3],ymm4[4],ymm5[5,6],ymm4[7]
; AVX2-SLOW-NEXT: vbroadcastsd (%rcx), %ymm5
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,1,2,2]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[1,1,2,2]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2,3],ymm0[4],ymm2[5,6],ymm0[7]
; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm1 = ymm1[0,0,3,3,4,4,7,7]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
; AVX2-SLOW-NEXT: vmovups %ymm0, 32(%rdi)
; AVX2-SLOW-NEXT: vmovups %ymm4, (%rdi)
; AVX2-SLOW-NEXT: vmovups %ymm3, 64(%rdi)
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-LABEL: interleave_24i32_in:
; AVX2-FAST: # %bb.0:
; AVX2-FAST-NEXT: vmovups (%rsi), %ymm0
; AVX2-FAST-NEXT: vmovups (%rdx), %ymm1
; AVX2-FAST-NEXT: vmovups (%rcx), %ymm2
; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm3 = [5,6,5,6,5,6,7,7]
; AVX2-FAST-NEXT: vpermps %ymm1, %ymm3, %ymm3
; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm4 = ymm2[2,1,3,3]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7]
; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm4 = ymm0[0,3,3,3]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7]
; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm4 = [1,0,2,2,1,0,2,2]
; AVX2-FAST-NEXT: vpermps %ymm1, %ymm4, %ymm4
; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm5 = ymm0[0,0,2,1]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0],ymm4[1],ymm5[2,3],ymm4[4],ymm5[5,6],ymm4[7]
; AVX2-FAST-NEXT: vbroadcastsd (%rcx), %ymm5
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7]
; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,1,2,2]
; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[1,1,2,2]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2,3],ymm0[4],ymm2[5,6],ymm0[7]
; AVX2-FAST-NEXT: vpermilps {{.*#+}} ymm1 = ymm1[0,0,3,3,4,4,7,7]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
; AVX2-FAST-NEXT: vmovups %ymm0, 32(%rdi)
; AVX2-FAST-NEXT: vmovups %ymm4, (%rdi)
; AVX2-FAST-NEXT: vmovups %ymm3, 64(%rdi)
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq
;
; XOP-LABEL: interleave_24i32_in:
; XOP: # %bb.0:
; XOP-NEXT: vmovupd (%rsi), %ymm0
; XOP-NEXT: vmovups (%rcx), %ymm1
; XOP-NEXT: vmovups 16(%rcx), %xmm2
; XOP-NEXT: vmovups (%rdx), %xmm3
; XOP-NEXT: vmovups 16(%rdx), %xmm4
; XOP-NEXT: vshufps {{.*#+}} xmm5 = xmm4[3,0],xmm2[3,0]
; XOP-NEXT: vshufps {{.*#+}} xmm5 = xmm2[2,1],xmm5[0,2]
; XOP-NEXT: vshufps {{.*#+}} xmm2 = xmm2[1,0],xmm4[1,0]
; XOP-NEXT: vshufps {{.*#+}} xmm2 = xmm2[2,0],xmm4[2,2]
; XOP-NEXT: vinsertf128 $1, %xmm5, %ymm2, %ymm2
; XOP-NEXT: vpermilpd {{.*#+}} ymm4 = ymm0[1,1,3,3]
; XOP-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm4[2,3,2,3]
; XOP-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm4[2],ymm2[3,4],ymm4[5],ymm2[6,7]
; XOP-NEXT: vmovups (%rsi), %xmm4
; XOP-NEXT: vshufps {{.*#+}} xmm5 = xmm4[2,0],xmm3[2,0]
; XOP-NEXT: vshufps {{.*#+}} xmm5 = xmm3[1,1],xmm5[0,2]
; XOP-NEXT: vshufps {{.*#+}} xmm3 = xmm3[0,0],xmm4[0,0]
; XOP-NEXT: vshufps {{.*#+}} xmm3 = xmm3[2,0],xmm4[2,1]
; XOP-NEXT: vinsertf128 $1, %xmm5, %ymm3, %ymm3
; XOP-NEXT: vmovddup {{.*#+}} xmm4 = xmm1[0,0]
; XOP-NEXT: vinsertf128 $1, %xmm4, %ymm4, %ymm4
; XOP-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7]
; XOP-NEXT: vpermil2ps {{.*#+}} ymm0 = ymm1[2],ymm0[3],ymm1[2,3],ymm0[4],ymm1[5,4],ymm0[5]
; XOP-NEXT: vpermilps {{.*#+}} ymm1 = mem[0,0,3,3,4,4,7,7]
; XOP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
; XOP-NEXT: vmovups %ymm0, 32(%rdi)
; XOP-NEXT: vmovups %ymm3, (%rdi)
; XOP-NEXT: vmovups %ymm2, 64(%rdi)
; XOP-NEXT: vzeroupper
; XOP-NEXT: retq
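; As in the i16 case, the IR first concatenates the inputs (padding the third
; vector with undef) and then applies one 24-element interleaving mask.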
%s1 = load <8 x i32>, <8 x i32>* %q1, align 4
%s2 = load <8 x i32>, <8 x i32>* %q2, align 4
%s3 = load <8 x i32>, <8 x i32>* %q3, align 4
%t1 = shufflevector <8 x i32> %s1, <8 x i32> %s2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
%t2 = shufflevector <8 x i32> %s3, <8 x i32> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
%interleaved = shufflevector <16 x i32> %t1, <16 x i32> %t2, <24 x i32> <i32 0, i32 8, i32 16, i32 1, i32 9, i32 17, i32 2, i32 10, i32 18, i32 3, i32 11, i32 19, i32 4, i32 12, i32 20, i32 5, i32 13, i32 21, i32 6, i32 14, i32 22, i32 7, i32 15, i32 23>
store <24 x i32> %interleaved, <24 x i32>* %p, align 4
ret void
}

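; wrongorder splats element 0 of %A across all eight doubles, round-trips the
; splat through memory, and returns two lanes of the reload.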
define <2 x double> @wrongorder(<4 x double> %A, <8 x double>* %P) #0 {
; SSE2-LABEL: wrongorder:
; SSE2: # %bb.0:
; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0,0]
; SSE2-NEXT: movaps %xmm0, 48(%rdi)
; SSE2-NEXT: movaps %xmm0, 32(%rdi)
; SSE2-NEXT: movaps %xmm0, 16(%rdi)
; SSE2-NEXT: movaps %xmm0, (%rdi)
; SSE2-NEXT: retq
;
; SSE42-LABEL: wrongorder:
; SSE42: # %bb.0:
; SSE42-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
; SSE42-NEXT: movapd %xmm0, 48(%rdi)
; SSE42-NEXT: movapd %xmm0, 32(%rdi)
; SSE42-NEXT: movapd %xmm0, 16(%rdi)
; SSE42-NEXT: movapd %xmm0, (%rdi)
; SSE42-NEXT: retq
;
; AVX1-LABEL: wrongorder:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm1
; AVX1-NEXT: vmovaps %ymm1, 32(%rdi)
; AVX1-NEXT: vmovaps %ymm1, (%rdi)
; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: wrongorder:
; AVX2: # %bb.0:
; AVX2-NEXT: vbroadcastsd %xmm0, %ymm0
; AVX2-NEXT: vmovaps %ymm0, 32(%rdi)
; AVX2-NEXT: vmovaps %ymm0, (%rdi)
; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; XOP-LABEL: wrongorder:
; XOP: # %bb.0:
; XOP-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; XOP-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm1
; XOP-NEXT: vmovaps %ymm1, 32(%rdi)
; XOP-NEXT: vmovaps %ymm1, (%rdi)
; XOP-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; XOP-NEXT: vzeroupper
; XOP-NEXT: retq
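; Every lane of %m3 holds a copy of %A[0], so even the out-of-order <2, 0>
; mask (hence the test name) yields the same splat; the checks above show the
; final extract folding away into the initial movddup/broadcast.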
%shuffle = shufflevector <4 x double> %A, <4 x double> %A, <8 x i32> zeroinitializer
store <8 x double> %shuffle, <8 x double>* %P, align 64
%m2 = load <8 x double>, <8 x double>* %P, align 64
store <8 x double> %m2, <8 x double>* %P, align 64
%m3 = load <8 x double>, <8 x double>* %P, align 64
%m4 = shufflevector <8 x double> %m3, <8 x double> undef, <2 x i32> <i32 2, i32 0>
ret <2 x double> %m4
}