; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefixes=SSE,SSE42
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2-SLOW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2-FAST
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2-FAST
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX512,AVX512F
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512VL
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512VL
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512BW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512BW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512BWVL
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512BWVL
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vbmi,+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512BWVL
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vbmi,+avx512vl,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512BWVL

; Pairs of shufflevector:trunc functions with functional equivalence.
; Ideally, the shuffles should be lowered to code with the same quality as the truncates.
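;
; As the checks below show, targets with AVX512VL (plus AVX512BW for the
; byte-element cases) can lower both forms to a single truncating store
; (vpmovwb/vpmovdw/vpmovdb/vpmovqd/vpmovqw/vpmovqb), while the other targets
; fall back to shuffle sequences (pshufb/packuswb/pshufd) plus a scalar store.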
define void @shuffle_v16i8_to_v8i8(ptr %L, ptr %S) nounwind {
; SSE2-LABEL: shuffle_v16i8_to_v8i8:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm0
; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: packuswb %xmm0, %xmm0
; SSE2-NEXT: movq %xmm0, (%rsi)
; SSE2-NEXT: retq
;
; SSE42-LABEL: shuffle_v16i8_to_v8i8:
; SSE42: # %bb.0:
; SSE42-NEXT: movdqa (%rdi), %xmm0
; SSE42-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; SSE42-NEXT: movq %xmm0, (%rsi)
; SSE42-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_to_v8i8:
; AVX: # %bb.0:
; AVX-NEXT: vmovdqa (%rdi), %xmm0
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX-NEXT: vmovq %xmm0, (%rsi)
; AVX-NEXT: retq
;
; AVX512F-LABEL: shuffle_v16i8_to_v8i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX512F-NEXT: vmovq %xmm0, (%rsi)
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i8_to_v8i8:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512VL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX512VL-NEXT: vmovq %xmm0, (%rsi)
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v16i8_to_v8i8:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX512BW-NEXT: vmovq %xmm0, (%rsi)
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v16i8_to_v8i8:
; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BWVL-NEXT: vpmovwb %xmm0, (%rsi)
; AVX512BWVL-NEXT: retq
  %vec = load <16 x i8>, ptr %L
  %strided.vec = shufflevector <16 x i8> %vec, <16 x i8> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
  store <8 x i8> %strided.vec, ptr %S
  ret void
}
define void @trunc_v8i16_to_v8i8(ptr %L, ptr %S) nounwind {
; SSE2-LABEL: trunc_v8i16_to_v8i8:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm0
; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: packuswb %xmm0, %xmm0
; SSE2-NEXT: movq %xmm0, (%rsi)
; SSE2-NEXT: retq
;
; SSE42-LABEL: trunc_v8i16_to_v8i8:
; SSE42: # %bb.0:
; SSE42-NEXT: movdqa (%rdi), %xmm0
; SSE42-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; SSE42-NEXT: movq %xmm0, (%rsi)
; SSE42-NEXT: retq
;
; AVX-LABEL: trunc_v8i16_to_v8i8:
; AVX: # %bb.0:
; AVX-NEXT: vmovdqa (%rdi), %xmm0
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX-NEXT: vmovq %xmm0, (%rsi)
; AVX-NEXT: retq
;
; AVX512F-LABEL: trunc_v8i16_to_v8i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX512F-NEXT: vmovq %xmm0, (%rsi)
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: trunc_v8i16_to_v8i8:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512VL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX512VL-NEXT: vmovq %xmm0, (%rsi)
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: trunc_v8i16_to_v8i8:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX512BW-NEXT: vmovq %xmm0, (%rsi)
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: trunc_v8i16_to_v8i8:
; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BWVL-NEXT: vpmovwb %xmm0, (%rsi)
; AVX512BWVL-NEXT: retq
  %vec = load <16 x i8>, ptr %L
  %bc = bitcast <16 x i8> %vec to <8 x i16>
  %strided.vec = trunc <8 x i16> %bc to <8 x i8>
  store <8 x i8> %strided.vec, ptr %S
  ret void
}
define void @shuffle_v8i16_to_v4i16(ptr %L, ptr %S) nounwind {
; SSE2-LABEL: shuffle_v8i16_to_v4i16:
; SSE2: # %bb.0:
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = mem[0,2,2,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: movq %xmm0, (%rsi)
; SSE2-NEXT: retq
;
; SSE42-LABEL: shuffle_v8i16_to_v4i16:
; SSE42: # %bb.0:
; SSE42-NEXT: movdqa (%rdi), %xmm0
; SSE42-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,u,u,u,u,u,u,u,u]
; SSE42-NEXT: movq %xmm0, (%rsi)
; SSE42-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_to_v4i16:
; AVX: # %bb.0:
; AVX-NEXT: vmovdqa (%rdi), %xmm0
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,u,u,u,u,u,u,u,u]
; AVX-NEXT: vmovq %xmm0, (%rsi)
; AVX-NEXT: retq
;
; AVX512F-LABEL: shuffle_v8i16_to_v4i16:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,u,u,u,u,u,u,u,u]
; AVX512F-NEXT: vmovq %xmm0, (%rsi)
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v8i16_to_v4i16:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512VL-NEXT: vpmovdw %xmm0, (%rsi)
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v8i16_to_v4i16:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,u,u,u,u,u,u,u,u]
; AVX512BW-NEXT: vmovq %xmm0, (%rsi)
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v8i16_to_v4i16:
; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BWVL-NEXT: vpmovdw %xmm0, (%rsi)
; AVX512BWVL-NEXT: retq
  %vec = load <8 x i16>, ptr %L
  %strided.vec = shufflevector <8 x i16> %vec, <8 x i16> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
  store <4 x i16> %strided.vec, ptr %S
  ret void
}
define void @trunc_v4i32_to_v4i16(ptr %L, ptr %S) nounwind {
; SSE2-LABEL: trunc_v4i32_to_v4i16:
; SSE2: # %bb.0:
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = mem[0,2,2,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: movq %xmm0, (%rsi)
; SSE2-NEXT: retq
;
; SSE42-LABEL: trunc_v4i32_to_v4i16:
; SSE42: # %bb.0:
; SSE42-NEXT: movdqa (%rdi), %xmm0
; SSE42-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,u,u,u,u,u,u,u,u]
; SSE42-NEXT: movq %xmm0, (%rsi)
; SSE42-NEXT: retq
;
; AVX-LABEL: trunc_v4i32_to_v4i16:
; AVX: # %bb.0:
; AVX-NEXT: vmovdqa (%rdi), %xmm0
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,u,u,u,u,u,u,u,u]
; AVX-NEXT: vmovq %xmm0, (%rsi)
; AVX-NEXT: retq
;
; AVX512F-LABEL: trunc_v4i32_to_v4i16:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,u,u,u,u,u,u,u,u]
; AVX512F-NEXT: vmovq %xmm0, (%rsi)
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: trunc_v4i32_to_v4i16:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512VL-NEXT: vpmovdw %xmm0, (%rsi)
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: trunc_v4i32_to_v4i16:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,u,u,u,u,u,u,u,u]
; AVX512BW-NEXT: vmovq %xmm0, (%rsi)
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: trunc_v4i32_to_v4i16:
; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BWVL-NEXT: vpmovdw %xmm0, (%rsi)
; AVX512BWVL-NEXT: retq
  %vec = load <8 x i16>, ptr %L
  %bc = bitcast <8 x i16> %vec to <4 x i32>
  %strided.vec = trunc <4 x i32> %bc to <4 x i16>
  store <4 x i16> %strided.vec, ptr %S
  ret void
}
define void @shuffle_v4i32_to_v2i32(ptr %L, ptr %S) nounwind {
; SSE-LABEL: shuffle_v4i32_to_v2i32:
; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = mem[0,2,2,3]
; SSE-NEXT: movq %xmm0, (%rsi)
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v4i32_to_v2i32:
; AVX: # %bb.0:
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = mem[0,2,2,3]
; AVX-NEXT: vmovlps %xmm0, (%rsi)
; AVX-NEXT: retq
;
; AVX512-LABEL: shuffle_v4i32_to_v2i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpermilps {{.*#+}} xmm0 = mem[0,2,2,3]
; AVX512-NEXT: vmovlps %xmm0, (%rsi)
; AVX512-NEXT: retq
  %vec = load <4 x i32>, ptr %L
  %strided.vec = shufflevector <4 x i32> %vec, <4 x i32> undef, <2 x i32> <i32 0, i32 2>
  store <2 x i32> %strided.vec, ptr %S
  ret void
}
define void @trunc_v2i64_to_v2i32(ptr %L, ptr %S) nounwind {
; SSE-LABEL: trunc_v2i64_to_v2i32:
; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = mem[0,2,2,3]
; SSE-NEXT: movq %xmm0, (%rsi)
; SSE-NEXT: retq
;
; AVX-LABEL: trunc_v2i64_to_v2i32:
; AVX: # %bb.0:
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = mem[0,2,2,3]
; AVX-NEXT: vmovlps %xmm0, (%rsi)
; AVX-NEXT: retq
;
; AVX512F-LABEL: trunc_v2i64_to_v2i32:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpermilps {{.*#+}} xmm0 = mem[0,2,2,3]
; AVX512F-NEXT: vmovlps %xmm0, (%rsi)
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: trunc_v2i64_to_v2i32:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512VL-NEXT: vpmovqd %xmm0, (%rsi)
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: trunc_v2i64_to_v2i32:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpermilps {{.*#+}} xmm0 = mem[0,2,2,3]
; AVX512BW-NEXT: vmovlps %xmm0, (%rsi)
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: trunc_v2i64_to_v2i32:
; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BWVL-NEXT: vpmovqd %xmm0, (%rsi)
; AVX512BWVL-NEXT: retq
  %vec = load <4 x i32>, ptr %L
  %bc = bitcast <4 x i32> %vec to <2 x i64>
  %strided.vec = trunc <2 x i64> %bc to <2 x i32>
  store <2 x i32> %strided.vec, ptr %S
  ret void
}
define void @shuffle_v16i8_to_v4i8(ptr %L, ptr %S) nounwind {
; SSE2-LABEL: shuffle_v16i8_to_v4i8:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm0
; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: packuswb %xmm0, %xmm0
; SSE2-NEXT: packuswb %xmm0, %xmm0
; SSE2-NEXT: movd %xmm0, (%rsi)
; SSE2-NEXT: retq
;
; SSE42-LABEL: shuffle_v16i8_to_v4i8:
; SSE42: # %bb.0:
; SSE42-NEXT: movdqa (%rdi), %xmm0
; SSE42-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
; SSE42-NEXT: movd %xmm0, (%rsi)
; SSE42-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_to_v4i8:
; AVX: # %bb.0:
; AVX-NEXT: vmovdqa (%rdi), %xmm0
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX-NEXT: vmovd %xmm0, (%rsi)
; AVX-NEXT: retq
;
; AVX512F-LABEL: shuffle_v16i8_to_v4i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512F-NEXT: vmovd %xmm0, (%rsi)
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i8_to_v4i8:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512VL-NEXT: vpmovdb %xmm0, (%rsi)
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v16i8_to_v4i8:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512BW-NEXT: vmovd %xmm0, (%rsi)
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v16i8_to_v4i8:
; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BWVL-NEXT: vpmovdb %xmm0, (%rsi)
; AVX512BWVL-NEXT: retq
  %vec = load <16 x i8>, ptr %L
  %strided.vec = shufflevector <16 x i8> %vec, <16 x i8> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
  store <4 x i8> %strided.vec, ptr %S
  ret void
}
define void @trunc_v4i32_to_v4i8(ptr %L, ptr %S) nounwind {
; SSE2-LABEL: trunc_v4i32_to_v4i8:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm0
; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: packuswb %xmm0, %xmm0
; SSE2-NEXT: packuswb %xmm0, %xmm0
; SSE2-NEXT: movd %xmm0, (%rsi)
; SSE2-NEXT: retq
;
; SSE42-LABEL: trunc_v4i32_to_v4i8:
; SSE42: # %bb.0:
; SSE42-NEXT: movdqa (%rdi), %xmm0
; SSE42-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
; SSE42-NEXT: movd %xmm0, (%rsi)
; SSE42-NEXT: retq
;
; AVX-LABEL: trunc_v4i32_to_v4i8:
; AVX: # %bb.0:
; AVX-NEXT: vmovdqa (%rdi), %xmm0
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX-NEXT: vmovd %xmm0, (%rsi)
; AVX-NEXT: retq
;
; AVX512F-LABEL: trunc_v4i32_to_v4i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512F-NEXT: vmovd %xmm0, (%rsi)
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: trunc_v4i32_to_v4i8:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512VL-NEXT: vpmovdb %xmm0, (%rsi)
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: trunc_v4i32_to_v4i8:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512BW-NEXT: vmovd %xmm0, (%rsi)
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: trunc_v4i32_to_v4i8:
; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BWVL-NEXT: vpmovdb %xmm0, (%rsi)
; AVX512BWVL-NEXT: retq
  %vec = load <16 x i8>, ptr %L
  %bc = bitcast <16 x i8> %vec to <4 x i32>
  %strided.vec = trunc <4 x i32> %bc to <4 x i8>
  store <4 x i8> %strided.vec, ptr %S
  ret void
}
define void @shuffle_v8i16_to_v2i16(ptr %L, ptr %S) nounwind {
; SSE-LABEL: shuffle_v8i16_to_v2i16:
; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = mem[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; SSE-NEXT: movd %xmm0, (%rsi)
; SSE-NEXT: retq
;
; AVX1-LABEL: shuffle_v8i16_to_v2i16:
; AVX1: # %bb.0:
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = mem[0,2,2,3]
; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; AVX1-NEXT: vmovd %xmm0, (%rsi)
; AVX1-NEXT: retq
;
; AVX2-SLOW-LABEL: shuffle_v8i16_to_v2i16:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = mem[0,2,2,3]
; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; AVX2-SLOW-NEXT: vmovd %xmm0, (%rsi)
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-LABEL: shuffle_v8i16_to_v2i16:
; AVX2-FAST: # %bb.0:
; AVX2-FAST-NEXT: vmovdqa (%rdi), %xmm0
; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,8,9,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX2-FAST-NEXT: vmovd %xmm0, (%rsi)
; AVX2-FAST-NEXT: retq
;
; AVX512F-LABEL: shuffle_v8i16_to_v2i16:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpshufd {{.*#+}} xmm0 = mem[0,2,2,3]
; AVX512F-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; AVX512F-NEXT: vmovd %xmm0, (%rsi)
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v8i16_to_v2i16:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512VL-NEXT: vpmovqw %xmm0, (%rsi)
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v8i16_to_v2i16:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,8,9,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512BW-NEXT: vmovd %xmm0, (%rsi)
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v8i16_to_v2i16:
; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BWVL-NEXT: vpmovqw %xmm0, (%rsi)
; AVX512BWVL-NEXT: retq
  %vec = load <8 x i16>, ptr %L
  %strided.vec = shufflevector <8 x i16> %vec, <8 x i16> undef, <2 x i32> <i32 0, i32 4>
  store <2 x i16> %strided.vec, ptr %S
  ret void
}
define void @trunc_v2i64_to_v2i16(ptr %L, ptr %S) nounwind {
; SSE-LABEL: trunc_v2i64_to_v2i16:
; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = mem[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; SSE-NEXT: movd %xmm0, (%rsi)
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_v2i64_to_v2i16:
; AVX1: # %bb.0:
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = mem[0,2,2,3]
; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; AVX1-NEXT: vmovd %xmm0, (%rsi)
; AVX1-NEXT: retq
;
; AVX2-SLOW-LABEL: trunc_v2i64_to_v2i16:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = mem[0,2,2,3]
; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; AVX2-SLOW-NEXT: vmovd %xmm0, (%rsi)
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-LABEL: trunc_v2i64_to_v2i16:
; AVX2-FAST: # %bb.0:
; AVX2-FAST-NEXT: vmovdqa (%rdi), %xmm0
; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,8,9,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX2-FAST-NEXT: vmovd %xmm0, (%rsi)
; AVX2-FAST-NEXT: retq
;
; AVX512F-LABEL: trunc_v2i64_to_v2i16:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpshufd {{.*#+}} xmm0 = mem[0,2,2,3]
; AVX512F-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; AVX512F-NEXT: vmovd %xmm0, (%rsi)
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: trunc_v2i64_to_v2i16:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512VL-NEXT: vpmovqw %xmm0, (%rsi)
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: trunc_v2i64_to_v2i16:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,8,9,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512BW-NEXT: vmovd %xmm0, (%rsi)
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: trunc_v2i64_to_v2i16:
; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BWVL-NEXT: vpmovqw %xmm0, (%rsi)
; AVX512BWVL-NEXT: retq
  %vec = load <8 x i16>, ptr %L
  %bc = bitcast <8 x i16> %vec to <2 x i64>
  %strided.vec = trunc <2 x i64> %bc to <2 x i16>
  store <2 x i16> %strided.vec, ptr %S
  ret void
}
define void @shuffle_v16i8_to_v2i8(ptr %L, ptr %S) nounwind {
; SSE2-LABEL: shuffle_v16i8_to_v2i8:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm0
; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: packuswb %xmm0, %xmm0
; SSE2-NEXT: packuswb %xmm0, %xmm0
; SSE2-NEXT: packuswb %xmm0, %xmm0
; SSE2-NEXT: movd %xmm0, %eax
; SSE2-NEXT: movw %ax, (%rsi)
; SSE2-NEXT: retq
;
; SSE42-LABEL: shuffle_v16i8_to_v2i8:
; SSE42: # %bb.0:
; SSE42-NEXT: movdqa (%rdi), %xmm0
; SSE42-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; SSE42-NEXT: pextrw $0, %xmm0, (%rsi)
; SSE42-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_to_v2i8:
; AVX: # %bb.0:
; AVX-NEXT: vmovdqa (%rdi), %xmm0
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX-NEXT: vpextrw $0, %xmm0, (%rsi)
; AVX-NEXT: retq
;
; AVX512F-LABEL: shuffle_v16i8_to_v2i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512F-NEXT: vpextrw $0, %xmm0, (%rsi)
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v16i8_to_v2i8:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512VL-NEXT: vpmovqb %xmm0, (%rsi)
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: shuffle_v16i8_to_v2i8:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512BW-NEXT: vpextrw $0, %xmm0, (%rsi)
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: shuffle_v16i8_to_v2i8:
; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BWVL-NEXT: vpmovqb %xmm0, (%rsi)
; AVX512BWVL-NEXT: retq
  %vec = load <16 x i8>, ptr %L
  %strided.vec = shufflevector <16 x i8> %vec, <16 x i8> undef, <2 x i32> <i32 0, i32 8>
  store <2 x i8> %strided.vec, ptr %S
  ret void
}
define void @trunc_v2i64_to_v2i8(ptr %L, ptr %S) nounwind {
; SSE2-LABEL: trunc_v2i64_to_v2i8:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm0
; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: packuswb %xmm0, %xmm0
; SSE2-NEXT: packuswb %xmm0, %xmm0
; SSE2-NEXT: packuswb %xmm0, %xmm0
; SSE2-NEXT: movd %xmm0, %eax
; SSE2-NEXT: movw %ax, (%rsi)
; SSE2-NEXT: retq
;
; SSE42-LABEL: trunc_v2i64_to_v2i8:
; SSE42: # %bb.0:
; SSE42-NEXT: movdqa (%rdi), %xmm0
; SSE42-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; SSE42-NEXT: pextrw $0, %xmm0, (%rsi)
; SSE42-NEXT: retq
;
; AVX-LABEL: trunc_v2i64_to_v2i8:
; AVX: # %bb.0:
; AVX-NEXT: vmovdqa (%rdi), %xmm0
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX-NEXT: vpextrw $0, %xmm0, (%rsi)
; AVX-NEXT: retq
;
; AVX512F-LABEL: trunc_v2i64_to_v2i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512F-NEXT: vpextrw $0, %xmm0, (%rsi)
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: trunc_v2i64_to_v2i8:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512VL-NEXT: vpmovqb %xmm0, (%rsi)
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: trunc_v2i64_to_v2i8:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512BW-NEXT: vpextrw $0, %xmm0, (%rsi)
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: trunc_v2i64_to_v2i8:
; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BWVL-NEXT: vpmovqb %xmm0, (%rsi)
; AVX512BWVL-NEXT: retq
  %vec = load <16 x i8>, ptr %L
  %bc = bitcast <16 x i8> %vec to <2 x i64>
  %strided.vec = trunc <2 x i64> %bc to <2 x i8>
  store <2 x i8> %strided.vec, ptr %S
  ret void
}