; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE --check-prefix=SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefix=SSE --check-prefix=SSE42
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2 --check-prefix=AVX2-SLOW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2,+fast-variable-shuffle | FileCheck %s --check-prefix=AVX --check-prefix=AVX2 --check-prefix=AVX2-FAST
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX512 --check-prefix=AVX512F
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512VL
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512BW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512BWVL
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vbmi,+avx512vl,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512BWVL

; Pairs of shufflevector:trunc functions with functional equivalence.
; Ideally, the shuffles should be lowered to code with the same quality as the truncates.
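;
; As an illustrative sketch (not one of the checked functions below), the two
; forms are equivalent on little-endian x86: taking the even bytes of a
; <16 x i8> with a shufflevector selects exactly the bytes that survive a
; bitcast to <8 x i16> followed by a trunc to <8 x i8>, since truncation keeps
; the low byte of each i16. The names %v, %s, %b and %t are hypothetical:
;   %v = load <16 x i8>, <16 x i8>* %p
;   %s = shufflevector <16 x i8> %v, <16 x i8> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
;   %b = bitcast <16 x i8> %v to <8 x i16>
;   %t = trunc <8 x i16> %b to <8 x i8>   ; %s and %t hold identical bytes
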
define void @shuffle_v16i8_to_v8i8(<16 x i8>* %L, <8 x i8>* %S) nounwind {
; SSE2-LABEL: shuffle_v16i8_to_v8i8:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa (%rdi), %xmm0
; SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE2-NEXT:    packuswb %xmm0, %xmm0
; SSE2-NEXT:    movq %xmm0, (%rsi)
; SSE2-NEXT:    retq
;
; SSE42-LABEL: shuffle_v16i8_to_v8i8:
; SSE42:       # %bb.0:
; SSE42-NEXT:    movdqa (%rdi), %xmm0
; SSE42-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; SSE42-NEXT:    movq %xmm0, (%rsi)
; SSE42-NEXT:    retq
;
; AVX-LABEL: shuffle_v16i8_to_v8i8:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovdqa (%rdi), %xmm0
; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX-NEXT:    vmovq %xmm0, (%rsi)
; AVX-NEXT:    retq
;
; AVX512-LABEL: shuffle_v16i8_to_v8i8:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vmovdqa (%rdi), %xmm0
; AVX512-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX512-NEXT:    vmovq %xmm0, (%rsi)
; AVX512-NEXT:    retq
  %vec = load <16 x i8>, <16 x i8>* %L
  %strided.vec = shufflevector <16 x i8> %vec, <16 x i8> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
  store <8 x i8> %strided.vec, <8 x i8>* %S
  ret void
}

define void @trunc_v8i16_to_v8i8(<16 x i8>* %L, <8 x i8>* %S) nounwind {
; SSE2-LABEL: trunc_v8i16_to_v8i8:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa (%rdi), %xmm0
; SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE2-NEXT:    packuswb %xmm0, %xmm0
; SSE2-NEXT:    movq %xmm0, (%rsi)
; SSE2-NEXT:    retq
;
; SSE42-LABEL: trunc_v8i16_to_v8i8:
; SSE42:       # %bb.0:
; SSE42-NEXT:    movdqa (%rdi), %xmm0
; SSE42-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; SSE42-NEXT:    movq %xmm0, (%rsi)
; SSE42-NEXT:    retq
;
; AVX-LABEL: trunc_v8i16_to_v8i8:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovdqa (%rdi), %xmm0
; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX-NEXT:    vmovq %xmm0, (%rsi)
; AVX-NEXT:    retq
;
; AVX512F-LABEL: trunc_v8i16_to_v8i8:
; AVX512F:       # %bb.0:
; AVX512F-NEXT:    vmovdqa (%rdi), %xmm0
; AVX512F-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX512F-NEXT:    vmovq %xmm0, (%rsi)
; AVX512F-NEXT:    retq
;
; AVX512VL-LABEL: trunc_v8i16_to_v8i8:
; AVX512VL:       # %bb.0:
; AVX512VL-NEXT:    vmovdqa (%rdi), %xmm0
; AVX512VL-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX512VL-NEXT:    vmovq %xmm0, (%rsi)
; AVX512VL-NEXT:    retq
;
; AVX512BW-LABEL: trunc_v8i16_to_v8i8:
; AVX512BW:       # %bb.0:
; AVX512BW-NEXT:    vmovdqa (%rdi), %xmm0
; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX512BW-NEXT:    vmovq %xmm0, (%rsi)
; AVX512BW-NEXT:    retq
;
; AVX512BWVL-LABEL: trunc_v8i16_to_v8i8:
; AVX512BWVL:       # %bb.0:
; AVX512BWVL-NEXT:    vmovdqa (%rdi), %xmm0
; AVX512BWVL-NEXT:    vpmovwb %xmm0, (%rsi)
; AVX512BWVL-NEXT:    retq
  %vec = load <16 x i8>, <16 x i8>* %L
  %bc = bitcast <16 x i8> %vec to <8 x i16>
  %strided.vec = trunc <8 x i16> %bc to <8 x i8>
  store <8 x i8> %strided.vec, <8 x i8>* %S
  ret void
}

define void @shuffle_v8i16_to_v4i16(<8 x i16>* %L, <4 x i16>* %S) nounwind {
; SSE2-LABEL: shuffle_v8i16_to_v4i16:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pshuflw {{.*#+}} xmm0 = mem[0,2,2,3,4,5,6,7]
; SSE2-NEXT:    pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT:    movq %xmm0, (%rsi)
; SSE2-NEXT:    retq
;
; SSE42-LABEL: shuffle_v8i16_to_v4i16:
; SSE42:       # %bb.0:
; SSE42-NEXT:    movdqa (%rdi), %xmm0
; SSE42-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; SSE42-NEXT:    movq %xmm0, (%rsi)
; SSE42-NEXT:    retq
;
; AVX-LABEL: shuffle_v8i16_to_v4i16:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovdqa (%rdi), %xmm0
; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX-NEXT:    vmovq %xmm0, (%rsi)
; AVX-NEXT:    retq
;
; AVX512-LABEL: shuffle_v8i16_to_v4i16:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vmovdqa (%rdi), %xmm0
; AVX512-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX512-NEXT:    vmovq %xmm0, (%rsi)
; AVX512-NEXT:    retq
  %vec = load <8 x i16>, <8 x i16>* %L
  %strided.vec = shufflevector <8 x i16> %vec, <8 x i16> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
  store <4 x i16> %strided.vec, <4 x i16>* %S
  ret void
}

define void @trunc_v4i32_to_v4i16(<8 x i16>* %L, <4 x i16>* %S) nounwind {
; SSE2-LABEL: trunc_v4i32_to_v4i16:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pshuflw {{.*#+}} xmm0 = mem[0,2,2,3,4,5,6,7]
; SSE2-NEXT:    pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT:    movq %xmm0, (%rsi)
; SSE2-NEXT:    retq
;
; SSE42-LABEL: trunc_v4i32_to_v4i16:
; SSE42:       # %bb.0:
; SSE42-NEXT:    movdqa (%rdi), %xmm0
; SSE42-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; SSE42-NEXT:    movq %xmm0, (%rsi)
; SSE42-NEXT:    retq
;
; AVX-LABEL: trunc_v4i32_to_v4i16:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovdqa (%rdi), %xmm0
; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX-NEXT:    vmovq %xmm0, (%rsi)
; AVX-NEXT:    retq
;
; AVX512F-LABEL: trunc_v4i32_to_v4i16:
; AVX512F:       # %bb.0:
; AVX512F-NEXT:    vmovdqa (%rdi), %xmm0
; AVX512F-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX512F-NEXT:    vmovq %xmm0, (%rsi)
; AVX512F-NEXT:    retq
;
; AVX512VL-LABEL: trunc_v4i32_to_v4i16:
; AVX512VL:       # %bb.0:
; AVX512VL-NEXT:    vmovdqa (%rdi), %xmm0
; AVX512VL-NEXT:    vpmovdw %xmm0, (%rsi)
; AVX512VL-NEXT:    retq
;
; AVX512BW-LABEL: trunc_v4i32_to_v4i16:
; AVX512BW:       # %bb.0:
; AVX512BW-NEXT:    vmovdqa (%rdi), %xmm0
; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX512BW-NEXT:    vmovq %xmm0, (%rsi)
; AVX512BW-NEXT:    retq
;
; AVX512BWVL-LABEL: trunc_v4i32_to_v4i16:
; AVX512BWVL:       # %bb.0:
; AVX512BWVL-NEXT:    vmovdqa (%rdi), %xmm0
; AVX512BWVL-NEXT:    vpmovdw %xmm0, (%rsi)
; AVX512BWVL-NEXT:    retq
  %vec = load <8 x i16>, <8 x i16>* %L
  %bc = bitcast <8 x i16> %vec to <4 x i32>
  %strided.vec = trunc <4 x i32> %bc to <4 x i16>
  store <4 x i16> %strided.vec, <4 x i16>* %S
  ret void
}

define void @shuffle_v4i32_to_v2i32(<4 x i32>* %L, <2 x i32>* %S) nounwind {
; SSE-LABEL: shuffle_v4i32_to_v2i32:
; SSE:       # %bb.0:
; SSE-NEXT:    pshufd {{.*#+}} xmm0 = mem[0,2,2,3]
; SSE-NEXT:    movq %xmm0, (%rsi)
; SSE-NEXT:    retq
;
; AVX-LABEL: shuffle_v4i32_to_v2i32:
; AVX:       # %bb.0:
; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = mem[0,2,2,3]
; AVX-NEXT:    vmovlps %xmm0, (%rsi)
; AVX-NEXT:    retq
;
; AVX512-LABEL: shuffle_v4i32_to_v2i32:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpermilps {{.*#+}} xmm0 = mem[0,2,2,3]
; AVX512-NEXT:    vmovlps %xmm0, (%rsi)
; AVX512-NEXT:    retq
  %vec = load <4 x i32>, <4 x i32>* %L
  %strided.vec = shufflevector <4 x i32> %vec, <4 x i32> undef, <2 x i32> <i32 0, i32 2>
  store <2 x i32> %strided.vec, <2 x i32>* %S
  ret void
}

define void @trunc_v2i64_to_v2i32(<4 x i32>* %L, <2 x i32>* %S) nounwind {
; SSE-LABEL: trunc_v2i64_to_v2i32:
; SSE:       # %bb.0:
; SSE-NEXT:    pshufd {{.*#+}} xmm0 = mem[0,2,2,3]
; SSE-NEXT:    movq %xmm0, (%rsi)
; SSE-NEXT:    retq
;
; AVX-LABEL: trunc_v2i64_to_v2i32:
; AVX:       # %bb.0:
; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = mem[0,2,2,3]
; AVX-NEXT:    vmovlps %xmm0, (%rsi)
; AVX-NEXT:    retq
;
; AVX512F-LABEL: trunc_v2i64_to_v2i32:
; AVX512F:       # %bb.0:
; AVX512F-NEXT:    vpermilps {{.*#+}} xmm0 = mem[0,2,2,3]
; AVX512F-NEXT:    vmovlps %xmm0, (%rsi)
; AVX512F-NEXT:    retq
;
; AVX512VL-LABEL: trunc_v2i64_to_v2i32:
; AVX512VL:       # %bb.0:
; AVX512VL-NEXT:    vmovdqa (%rdi), %xmm0
; AVX512VL-NEXT:    vpmovqd %xmm0, (%rsi)
; AVX512VL-NEXT:    retq
;
; AVX512BW-LABEL: trunc_v2i64_to_v2i32:
; AVX512BW:       # %bb.0:
; AVX512BW-NEXT:    vpermilps {{.*#+}} xmm0 = mem[0,2,2,3]
; AVX512BW-NEXT:    vmovlps %xmm0, (%rsi)
; AVX512BW-NEXT:    retq
;
; AVX512BWVL-LABEL: trunc_v2i64_to_v2i32:
; AVX512BWVL:       # %bb.0:
; AVX512BWVL-NEXT:    vmovdqa (%rdi), %xmm0
; AVX512BWVL-NEXT:    vpmovqd %xmm0, (%rsi)
; AVX512BWVL-NEXT:    retq
  %vec = load <4 x i32>, <4 x i32>* %L
  %bc = bitcast <4 x i32> %vec to <2 x i64>
  %strided.vec = trunc <2 x i64> %bc to <2 x i32>
  store <2 x i32> %strided.vec, <2 x i32>* %S
  ret void
}

define void @shuffle_v16i8_to_v4i8(<16 x i8>* %L, <4 x i8>* %S) nounwind {
; SSE2-LABEL: shuffle_v16i8_to_v4i8:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa (%rdi), %xmm0
; SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE2-NEXT:    packuswb %xmm0, %xmm0
; SSE2-NEXT:    packuswb %xmm0, %xmm0
; SSE2-NEXT:    movd %xmm0, (%rsi)
; SSE2-NEXT:    retq
;
; SSE42-LABEL: shuffle_v16i8_to_v4i8:
; SSE42:       # %bb.0:
; SSE42-NEXT:    movdqa (%rdi), %xmm0
; SSE42-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
; SSE42-NEXT:    movd %xmm0, (%rsi)
; SSE42-NEXT:    retq
;
; AVX-LABEL: shuffle_v16i8_to_v4i8:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovdqa (%rdi), %xmm0
; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX-NEXT:    vmovd %xmm0, (%rsi)
; AVX-NEXT:    retq
;
; AVX512-LABEL: shuffle_v16i8_to_v4i8:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vmovdqa (%rdi), %xmm0
; AVX512-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512-NEXT:    vmovd %xmm0, (%rsi)
; AVX512-NEXT:    retq
  %vec = load <16 x i8>, <16 x i8>* %L
  %strided.vec = shufflevector <16 x i8> %vec, <16 x i8> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
  store <4 x i8> %strided.vec, <4 x i8>* %S
  ret void
}

define void @trunc_v4i32_to_v4i8(<16 x i8>* %L, <4 x i8>* %S) nounwind {
; SSE2-LABEL: trunc_v4i32_to_v4i8:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa (%rdi), %xmm0
; SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE2-NEXT:    packuswb %xmm0, %xmm0
; SSE2-NEXT:    packuswb %xmm0, %xmm0
; SSE2-NEXT:    movd %xmm0, (%rsi)
; SSE2-NEXT:    retq
;
; SSE42-LABEL: trunc_v4i32_to_v4i8:
; SSE42:       # %bb.0:
; SSE42-NEXT:    movdqa (%rdi), %xmm0
; SSE42-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
; SSE42-NEXT:    movd %xmm0, (%rsi)
; SSE42-NEXT:    retq
;
; AVX-LABEL: trunc_v4i32_to_v4i8:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovdqa (%rdi), %xmm0
; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX-NEXT:    vmovd %xmm0, (%rsi)
; AVX-NEXT:    retq
;
; AVX512F-LABEL: trunc_v4i32_to_v4i8:
; AVX512F:       # %bb.0:
; AVX512F-NEXT:    vmovdqa (%rdi), %xmm0
; AVX512F-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512F-NEXT:    vmovd %xmm0, (%rsi)
; AVX512F-NEXT:    retq
;
; AVX512VL-LABEL: trunc_v4i32_to_v4i8:
; AVX512VL:       # %bb.0:
; AVX512VL-NEXT:    vmovdqa (%rdi), %xmm0
; AVX512VL-NEXT:    vpmovdb %xmm0, (%rsi)
; AVX512VL-NEXT:    retq
;
; AVX512BW-LABEL: trunc_v4i32_to_v4i8:
; AVX512BW:       # %bb.0:
; AVX512BW-NEXT:    vmovdqa (%rdi), %xmm0
; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512BW-NEXT:    vmovd %xmm0, (%rsi)
; AVX512BW-NEXT:    retq
;
; AVX512BWVL-LABEL: trunc_v4i32_to_v4i8:
; AVX512BWVL:       # %bb.0:
; AVX512BWVL-NEXT:    vmovdqa (%rdi), %xmm0
; AVX512BWVL-NEXT:    vpmovdb %xmm0, (%rsi)
; AVX512BWVL-NEXT:    retq
  %vec = load <16 x i8>, <16 x i8>* %L
  %bc = bitcast <16 x i8> %vec to <4 x i32>
  %strided.vec = trunc <4 x i32> %bc to <4 x i8>
  store <4 x i8> %strided.vec, <4 x i8>* %S
  ret void
}

define void @shuffle_v8i16_to_v2i16(<8 x i16>* %L, <2 x i16>* %S) nounwind {
; SSE-LABEL: shuffle_v8i16_to_v2i16:
; SSE:       # %bb.0:
; SSE-NEXT:    pshufd {{.*#+}} xmm0 = mem[0,2,2,3]
; SSE-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; SSE-NEXT:    movd %xmm0, (%rsi)
; SSE-NEXT:    retq
;
; AVX1-LABEL: shuffle_v8i16_to_v2i16:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = mem[0,2,2,3]
; AVX1-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; AVX1-NEXT:    vmovd %xmm0, (%rsi)
; AVX1-NEXT:    retq
;
; AVX2-SLOW-LABEL: shuffle_v8i16_to_v2i16:
; AVX2-SLOW:       # %bb.0:
; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = mem[0,2,2,3]
; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; AVX2-SLOW-NEXT:    vmovd %xmm0, (%rsi)
; AVX2-SLOW-NEXT:    retq
;
; AVX2-FAST-LABEL: shuffle_v8i16_to_v2i16:
; AVX2-FAST:       # %bb.0:
; AVX2-FAST-NEXT:    vmovdqa (%rdi), %xmm0
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,8,9,8,9,10,11,8,9,10,11,12,13,14,15]
; AVX2-FAST-NEXT:    vmovd %xmm0, (%rsi)
; AVX2-FAST-NEXT:    retq
;
; AVX512F-LABEL: shuffle_v8i16_to_v2i16:
; AVX512F:       # %bb.0:
; AVX512F-NEXT:    vpshufd {{.*#+}} xmm0 = mem[0,2,2,3]
; AVX512F-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; AVX512F-NEXT:    vmovd %xmm0, (%rsi)
; AVX512F-NEXT:    retq
;
; AVX512VL-LABEL: shuffle_v8i16_to_v2i16:
; AVX512VL:       # %bb.0:
; AVX512VL-NEXT:    vmovdqa (%rdi), %xmm0
; AVX512VL-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,8,9,8,9,10,11,8,9,10,11,12,13,14,15]
; AVX512VL-NEXT:    vmovd %xmm0, (%rsi)
; AVX512VL-NEXT:    retq
;
; AVX512BW-LABEL: shuffle_v8i16_to_v2i16:
; AVX512BW:       # %bb.0:
; AVX512BW-NEXT:    vmovdqa (%rdi), %xmm0
; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,8,9,8,9,10,11,8,9,10,11,12,13,14,15]
; AVX512BW-NEXT:    vmovd %xmm0, (%rsi)
; AVX512BW-NEXT:    retq
;
; AVX512BWVL-LABEL: shuffle_v8i16_to_v2i16:
; AVX512BWVL:       # %bb.0:
; AVX512BWVL-NEXT:    vmovdqa (%rdi), %xmm0
; AVX512BWVL-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,8,9,8,9,10,11,8,9,10,11,12,13,14,15]
; AVX512BWVL-NEXT:    vmovd %xmm0, (%rsi)
; AVX512BWVL-NEXT:    retq
  %vec = load <8 x i16>, <8 x i16>* %L
  %strided.vec = shufflevector <8 x i16> %vec, <8 x i16> undef, <2 x i32> <i32 0, i32 4>
  store <2 x i16> %strided.vec, <2 x i16>* %S
  ret void
}

define void @trunc_v2i64_to_v2i16(<8 x i16>* %L, <2 x i16>* %S) nounwind {
; SSE-LABEL: trunc_v2i64_to_v2i16:
; SSE:       # %bb.0:
; SSE-NEXT:    pshufd {{.*#+}} xmm0 = mem[0,2,2,3]
; SSE-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; SSE-NEXT:    movd %xmm0, (%rsi)
; SSE-NEXT:    retq
;
; AVX1-LABEL: trunc_v2i64_to_v2i16:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = mem[0,2,2,3]
; AVX1-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; AVX1-NEXT:    vmovd %xmm0, (%rsi)
; AVX1-NEXT:    retq
;
; AVX2-SLOW-LABEL: trunc_v2i64_to_v2i16:
; AVX2-SLOW:       # %bb.0:
; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = mem[0,2,2,3]
; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; AVX2-SLOW-NEXT:    vmovd %xmm0, (%rsi)
; AVX2-SLOW-NEXT:    retq
;
; AVX2-FAST-LABEL: trunc_v2i64_to_v2i16:
; AVX2-FAST:       # %bb.0:
; AVX2-FAST-NEXT:    vmovdqa (%rdi), %xmm0
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,8,9,8,9,10,11,8,9,10,11,12,13,14,15]
; AVX2-FAST-NEXT:    vmovd %xmm0, (%rsi)
; AVX2-FAST-NEXT:    retq
;
; AVX512F-LABEL: trunc_v2i64_to_v2i16:
; AVX512F:       # %bb.0:
; AVX512F-NEXT:    vpshufd {{.*#+}} xmm0 = mem[0,2,2,3]
; AVX512F-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; AVX512F-NEXT:    vmovd %xmm0, (%rsi)
; AVX512F-NEXT:    retq
;
; AVX512VL-LABEL: trunc_v2i64_to_v2i16:
; AVX512VL:       # %bb.0:
; AVX512VL-NEXT:    vmovdqa (%rdi), %xmm0
; AVX512VL-NEXT:    vpmovqw %xmm0, (%rsi)
; AVX512VL-NEXT:    retq
;
; AVX512BW-LABEL: trunc_v2i64_to_v2i16:
; AVX512BW:       # %bb.0:
; AVX512BW-NEXT:    vmovdqa (%rdi), %xmm0
; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,8,9,8,9,10,11,8,9,10,11,12,13,14,15]
; AVX512BW-NEXT:    vmovd %xmm0, (%rsi)
; AVX512BW-NEXT:    retq
;
; AVX512BWVL-LABEL: trunc_v2i64_to_v2i16:
; AVX512BWVL:       # %bb.0:
; AVX512BWVL-NEXT:    vmovdqa (%rdi), %xmm0
; AVX512BWVL-NEXT:    vpmovqw %xmm0, (%rsi)
; AVX512BWVL-NEXT:    retq
  %vec = load <8 x i16>, <8 x i16>* %L
  %bc = bitcast <8 x i16> %vec to <2 x i64>
  %strided.vec = trunc <2 x i64> %bc to <2 x i16>
  store <2 x i16> %strided.vec, <2 x i16>* %S
  ret void
}

define void @shuffle_v16i8_to_v2i8(<16 x i8>* %L, <2 x i8>* %S) nounwind {
; SSE2-LABEL: shuffle_v16i8_to_v2i8:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa (%rdi), %xmm0
; SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE2-NEXT:    packuswb %xmm0, %xmm0
; SSE2-NEXT:    packuswb %xmm0, %xmm0
; SSE2-NEXT:    packuswb %xmm0, %xmm0
; SSE2-NEXT:    movd %xmm0, %eax
; SSE2-NEXT:    movw %ax, (%rsi)
; SSE2-NEXT:    retq
;
; SSE42-LABEL: shuffle_v16i8_to_v2i8:
; SSE42:       # %bb.0:
; SSE42-NEXT:    movdqa (%rdi), %xmm0
; SSE42-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; SSE42-NEXT:    pextrw $0, %xmm0, (%rsi)
; SSE42-NEXT:    retq
;
; AVX-LABEL: shuffle_v16i8_to_v2i8:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovdqa (%rdi), %xmm0
; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX-NEXT:    vpextrw $0, %xmm0, (%rsi)
; AVX-NEXT:    retq
;
; AVX512-LABEL: shuffle_v16i8_to_v2i8:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vmovdqa (%rdi), %xmm0
; AVX512-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512-NEXT:    vpextrw $0, %xmm0, (%rsi)
; AVX512-NEXT:    retq
  %vec = load <16 x i8>, <16 x i8>* %L
  %strided.vec = shufflevector <16 x i8> %vec, <16 x i8> undef, <2 x i32> <i32 0, i32 8>
  store <2 x i8> %strided.vec, <2 x i8>* %S
  ret void
}

define void @trunc_v2i64_to_v2i8(<16 x i8>* %L, <2 x i8>* %S) nounwind {
; SSE2-LABEL: trunc_v2i64_to_v2i8:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa (%rdi), %xmm0
; SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE2-NEXT:    packuswb %xmm0, %xmm0
; SSE2-NEXT:    packuswb %xmm0, %xmm0
; SSE2-NEXT:    packuswb %xmm0, %xmm0
; SSE2-NEXT:    movd %xmm0, %eax
; SSE2-NEXT:    movw %ax, (%rsi)
; SSE2-NEXT:    retq
;
; SSE42-LABEL: trunc_v2i64_to_v2i8:
; SSE42:       # %bb.0:
; SSE42-NEXT:    movdqa (%rdi), %xmm0
; SSE42-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; SSE42-NEXT:    pextrw $0, %xmm0, (%rsi)
; SSE42-NEXT:    retq
;
; AVX-LABEL: trunc_v2i64_to_v2i8:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovdqa (%rdi), %xmm0
; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX-NEXT:    vpextrw $0, %xmm0, (%rsi)
; AVX-NEXT:    retq
;
; AVX512F-LABEL: trunc_v2i64_to_v2i8:
; AVX512F:       # %bb.0:
; AVX512F-NEXT:    vmovdqa (%rdi), %xmm0
; AVX512F-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512F-NEXT:    vpextrw $0, %xmm0, (%rsi)
; AVX512F-NEXT:    retq
;
; AVX512VL-LABEL: trunc_v2i64_to_v2i8:
; AVX512VL:       # %bb.0:
; AVX512VL-NEXT:    vmovdqa (%rdi), %xmm0
; AVX512VL-NEXT:    vpmovqb %xmm0, (%rsi)
; AVX512VL-NEXT:    retq
;
; AVX512BW-LABEL: trunc_v2i64_to_v2i8:
; AVX512BW:       # %bb.0:
; AVX512BW-NEXT:    vmovdqa (%rdi), %xmm0
; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512BW-NEXT:    vpextrw $0, %xmm0, (%rsi)
; AVX512BW-NEXT:    retq
;
; AVX512BWVL-LABEL: trunc_v2i64_to_v2i8:
; AVX512BWVL:       # %bb.0:
; AVX512BWVL-NEXT:    vmovdqa (%rdi), %xmm0
; AVX512BWVL-NEXT:    vpmovqb %xmm0, (%rsi)
; AVX512BWVL-NEXT:    retq
  %vec = load <16 x i8>, <16 x i8>* %L
  %bc = bitcast <16 x i8> %vec to <2 x i64>
  %strided.vec = trunc <2 x i64> %bc to <2 x i8>
  store <2 x i8> %strided.vec, <2 x i8>* %S
  ret void
}