; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2,AVX2-SLOW
; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx2,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX2-FAST
; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX512,AVX512F
; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512VL
; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512BW
; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512BWVL
; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx512vbmi,+avx512vl,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512VBMIVL

; Pairs of shufflevector:trunc functions with functional equivalence.
; Ideally, the shuffles should be lowered to code with the same quality as the truncates.
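; As a rough illustration of the equivalence being tested (a sketch only, not one
; of the checked functions below): taking the even lanes of a vector is the same
; as truncating the wider view of the same bits on a little-endian target:
;   %even  = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 0, i32 2>
;   %wide  = bitcast <4 x i32> %v to <2 x i64>
;   %trunc = trunc <2 x i64> %wide to <2 x i32>   ; same lanes as %even
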
15 define void @shuffle_v32i8_to_v16i8(<32 x i8>* %L, <16 x i8>* %S) nounwind {
16 ; AVX-LABEL: shuffle_v32i8_to_v16i8:
18 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
19 ; AVX-NEXT: vmovdqa 16(%rdi), %xmm1
20 ; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
21 ; AVX-NEXT: vpshufb %xmm2, %xmm1, %xmm1
22 ; AVX-NEXT: vpshufb %xmm2, %xmm0, %xmm0
23 ; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
24 ; AVX-NEXT: vmovdqa %xmm0, (%rsi)
27 ; AVX512F-LABEL: shuffle_v32i8_to_v16i8:
29 ; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
30 ; AVX512F-NEXT: vmovdqa 16(%rdi), %xmm1
31 ; AVX512F-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
32 ; AVX512F-NEXT: vpshufb %xmm2, %xmm1, %xmm1
33 ; AVX512F-NEXT: vpshufb %xmm2, %xmm0, %xmm0
34 ; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
35 ; AVX512F-NEXT: vmovdqa %xmm0, (%rsi)
38 ; AVX512VL-LABEL: shuffle_v32i8_to_v16i8:
40 ; AVX512VL-NEXT: vmovdqa (%rdi), %xmm0
41 ; AVX512VL-NEXT: vmovdqa 16(%rdi), %xmm1
42 ; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
43 ; AVX512VL-NEXT: vpshufb %xmm2, %xmm1, %xmm1
44 ; AVX512VL-NEXT: vpshufb %xmm2, %xmm0, %xmm0
45 ; AVX512VL-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
46 ; AVX512VL-NEXT: vmovdqa %xmm0, (%rsi)
49 ; AVX512BW-LABEL: shuffle_v32i8_to_v16i8:
51 ; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
52 ; AVX512BW-NEXT: vmovdqa 16(%rdi), %xmm1
53 ; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
54 ; AVX512BW-NEXT: vpshufb %xmm2, %xmm1, %xmm1
55 ; AVX512BW-NEXT: vpshufb %xmm2, %xmm0, %xmm0
56 ; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
57 ; AVX512BW-NEXT: vmovdqa %xmm0, (%rsi)
60 ; AVX512BWVL-LABEL: shuffle_v32i8_to_v16i8:
61 ; AVX512BWVL: # %bb.0:
62 ; AVX512BWVL-NEXT: vmovdqa (%rdi), %xmm0
63 ; AVX512BWVL-NEXT: vmovdqa 16(%rdi), %xmm1
64 ; AVX512BWVL-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
65 ; AVX512BWVL-NEXT: vpshufb %xmm2, %xmm1, %xmm1
66 ; AVX512BWVL-NEXT: vpshufb %xmm2, %xmm0, %xmm0
67 ; AVX512BWVL-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
68 ; AVX512BWVL-NEXT: vmovdqa %xmm0, (%rsi)
69 ; AVX512BWVL-NEXT: retq
71 ; AVX512VBMIVL-LABEL: shuffle_v32i8_to_v16i8:
72 ; AVX512VBMIVL: # %bb.0:
73 ; AVX512VBMIVL-NEXT: vmovdqa (%rdi), %xmm0
74 ; AVX512VBMIVL-NEXT: vmovdqa {{.*#+}} xmm1 = [0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30]
75 ; AVX512VBMIVL-NEXT: vpermi2b 16(%rdi), %xmm0, %xmm1
76 ; AVX512VBMIVL-NEXT: vmovdqa %xmm1, (%rsi)
77 ; AVX512VBMIVL-NEXT: retq
78 %vec = load <32 x i8>, <32 x i8>* %L
79 %strided.vec = shufflevector <32 x i8> %vec, <32 x i8> undef, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
  store <16 x i8> %strided.vec, <16 x i8>* %S
  ret void
}

84 define void @trunc_v16i16_to_v16i8(<32 x i8>* %L, <16 x i8>* %S) nounwind {
85 ; AVX1-LABEL: trunc_v16i16_to_v16i8:
87 ; AVX1-NEXT: vmovaps (%rdi), %ymm0
88 ; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
89 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
90 ; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
91 ; AVX1-NEXT: vmovdqa %xmm0, (%rsi)
92 ; AVX1-NEXT: vzeroupper
95 ; AVX2-LABEL: trunc_v16i16_to_v16i8:
97 ; AVX2-NEXT: vmovdqa (%rdi), %ymm0
98 ; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
99 ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
100 ; AVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
101 ; AVX2-NEXT: vmovdqa %xmm0, (%rsi)
102 ; AVX2-NEXT: vzeroupper
105 ; AVX512F-LABEL: trunc_v16i16_to_v16i8:
107 ; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
108 ; AVX512F-NEXT: vpmovdb %zmm0, (%rsi)
109 ; AVX512F-NEXT: vzeroupper
112 ; AVX512VL-LABEL: trunc_v16i16_to_v16i8:
114 ; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
115 ; AVX512VL-NEXT: vpmovdb %zmm0, (%rsi)
116 ; AVX512VL-NEXT: vzeroupper
117 ; AVX512VL-NEXT: retq
119 ; AVX512BW-LABEL: trunc_v16i16_to_v16i8:
121 ; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
122 ; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
123 ; AVX512BW-NEXT: vmovdqa %xmm0, (%rsi)
124 ; AVX512BW-NEXT: vzeroupper
125 ; AVX512BW-NEXT: retq
127 ; AVX512BWVL-LABEL: trunc_v16i16_to_v16i8:
128 ; AVX512BWVL: # %bb.0:
129 ; AVX512BWVL-NEXT: vmovdqa (%rdi), %ymm0
130 ; AVX512BWVL-NEXT: vpmovwb %ymm0, (%rsi)
131 ; AVX512BWVL-NEXT: vzeroupper
132 ; AVX512BWVL-NEXT: retq
134 ; AVX512VBMIVL-LABEL: trunc_v16i16_to_v16i8:
135 ; AVX512VBMIVL: # %bb.0:
136 ; AVX512VBMIVL-NEXT: vmovdqa (%rdi), %ymm0
137 ; AVX512VBMIVL-NEXT: vpmovwb %ymm0, (%rsi)
138 ; AVX512VBMIVL-NEXT: vzeroupper
139 ; AVX512VBMIVL-NEXT: retq
140 %vec = load <32 x i8>, <32 x i8>* %L
141 %bc = bitcast <32 x i8> %vec to <16 x i16>
142 %strided.vec = trunc <16 x i16> %bc to <16 x i8>
  store <16 x i8> %strided.vec, <16 x i8>* %S
  ret void
}

147 define void @shuffle_v16i16_to_v8i16(<16 x i16>* %L, <8 x i16>* %S) nounwind {
148 ; AVX-LABEL: shuffle_v16i16_to_v8i16:
150 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
151 ; AVX-NEXT: vmovdqa 16(%rdi), %xmm1
152 ; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
153 ; AVX-NEXT: vpshufb %xmm2, %xmm1, %xmm1
154 ; AVX-NEXT: vpshufb %xmm2, %xmm0, %xmm0
155 ; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
156 ; AVX-NEXT: vmovdqa %xmm0, (%rsi)
159 ; AVX512F-LABEL: shuffle_v16i16_to_v8i16:
161 ; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
162 ; AVX512F-NEXT: vmovdqa 16(%rdi), %xmm1
163 ; AVX512F-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
164 ; AVX512F-NEXT: vpshufb %xmm2, %xmm1, %xmm1
165 ; AVX512F-NEXT: vpshufb %xmm2, %xmm0, %xmm0
166 ; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
167 ; AVX512F-NEXT: vmovdqa %xmm0, (%rsi)
170 ; AVX512VL-LABEL: shuffle_v16i16_to_v8i16:
172 ; AVX512VL-NEXT: vmovdqa (%rdi), %xmm0
173 ; AVX512VL-NEXT: vmovdqa 16(%rdi), %xmm1
174 ; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
175 ; AVX512VL-NEXT: vpshufb %xmm2, %xmm1, %xmm1
176 ; AVX512VL-NEXT: vpshufb %xmm2, %xmm0, %xmm0
177 ; AVX512VL-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
178 ; AVX512VL-NEXT: vmovdqa %xmm0, (%rsi)
179 ; AVX512VL-NEXT: retq
181 ; AVX512BW-LABEL: shuffle_v16i16_to_v8i16:
183 ; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
184 ; AVX512BW-NEXT: vmovdqa 16(%rdi), %xmm1
185 ; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
186 ; AVX512BW-NEXT: vpshufb %xmm2, %xmm1, %xmm1
187 ; AVX512BW-NEXT: vpshufb %xmm2, %xmm0, %xmm0
188 ; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
189 ; AVX512BW-NEXT: vmovdqa %xmm0, (%rsi)
190 ; AVX512BW-NEXT: retq
192 ; AVX512BWVL-LABEL: shuffle_v16i16_to_v8i16:
193 ; AVX512BWVL: # %bb.0:
194 ; AVX512BWVL-NEXT: vmovdqa (%rdi), %xmm0
195 ; AVX512BWVL-NEXT: vmovdqa {{.*#+}} xmm1 = [0,2,4,6,8,10,12,14]
196 ; AVX512BWVL-NEXT: vpermi2w 16(%rdi), %xmm0, %xmm1
197 ; AVX512BWVL-NEXT: vmovdqa %xmm1, (%rsi)
198 ; AVX512BWVL-NEXT: retq
200 ; AVX512VBMIVL-LABEL: shuffle_v16i16_to_v8i16:
201 ; AVX512VBMIVL: # %bb.0:
202 ; AVX512VBMIVL-NEXT: vmovdqa (%rdi), %xmm0
203 ; AVX512VBMIVL-NEXT: vmovdqa {{.*#+}} xmm1 = [0,2,4,6,8,10,12,14]
204 ; AVX512VBMIVL-NEXT: vpermi2w 16(%rdi), %xmm0, %xmm1
205 ; AVX512VBMIVL-NEXT: vmovdqa %xmm1, (%rsi)
206 ; AVX512VBMIVL-NEXT: retq
207 %vec = load <16 x i16>, <16 x i16>* %L
208 %strided.vec = shufflevector <16 x i16> %vec, <16 x i16> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
  store <8 x i16> %strided.vec, <8 x i16>* %S
  ret void
}

213 define void @trunc_v8i32_to_v8i16(<16 x i16>* %L, <8 x i16>* %S) nounwind {
214 ; AVX1-LABEL: trunc_v8i32_to_v8i16:
216 ; AVX1-NEXT: vmovdqa (%rdi), %xmm0
217 ; AVX1-NEXT: vmovdqa 16(%rdi), %xmm1
218 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
219 ; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
220 ; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
221 ; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
222 ; AVX1-NEXT: vmovdqa %xmm0, (%rsi)
225 ; AVX2-LABEL: trunc_v8i32_to_v8i16:
227 ; AVX2-NEXT: vmovdqa (%rdi), %ymm0
228 ; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
229 ; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
230 ; AVX2-NEXT: vmovdqa %xmm0, (%rsi)
231 ; AVX2-NEXT: vzeroupper
234 ; AVX512F-LABEL: trunc_v8i32_to_v8i16:
236 ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
237 ; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
238 ; AVX512F-NEXT: vmovdqa %xmm0, (%rsi)
239 ; AVX512F-NEXT: vzeroupper
242 ; AVX512VL-LABEL: trunc_v8i32_to_v8i16:
244 ; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
245 ; AVX512VL-NEXT: vpmovdw %ymm0, (%rsi)
246 ; AVX512VL-NEXT: vzeroupper
247 ; AVX512VL-NEXT: retq
249 ; AVX512BW-LABEL: trunc_v8i32_to_v8i16:
251 ; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
252 ; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
253 ; AVX512BW-NEXT: vmovdqa %xmm0, (%rsi)
254 ; AVX512BW-NEXT: vzeroupper
255 ; AVX512BW-NEXT: retq
257 ; AVX512BWVL-LABEL: trunc_v8i32_to_v8i16:
258 ; AVX512BWVL: # %bb.0:
259 ; AVX512BWVL-NEXT: vmovdqa (%rdi), %ymm0
260 ; AVX512BWVL-NEXT: vpmovdw %ymm0, (%rsi)
261 ; AVX512BWVL-NEXT: vzeroupper
262 ; AVX512BWVL-NEXT: retq
264 ; AVX512VBMIVL-LABEL: trunc_v8i32_to_v8i16:
265 ; AVX512VBMIVL: # %bb.0:
266 ; AVX512VBMIVL-NEXT: vmovdqa (%rdi), %ymm0
267 ; AVX512VBMIVL-NEXT: vpmovdw %ymm0, (%rsi)
268 ; AVX512VBMIVL-NEXT: vzeroupper
269 ; AVX512VBMIVL-NEXT: retq
270 %vec = load <16 x i16>, <16 x i16>* %L
271 %bc = bitcast <16 x i16> %vec to <8 x i32>
272 %strided.vec = trunc <8 x i32> %bc to <8 x i16>
  store <8 x i16> %strided.vec, <8 x i16>* %S
  ret void
}

277 define void @shuffle_v8i32_to_v4i32(<8 x i32>* %L, <4 x i32>* %S) nounwind {
278 ; AVX-LABEL: shuffle_v8i32_to_v4i32:
280 ; AVX-NEXT: vmovaps (%rdi), %xmm0
281 ; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],mem[0,2]
282 ; AVX-NEXT: vmovaps %xmm0, (%rsi)
285 ; AVX512-LABEL: shuffle_v8i32_to_v4i32:
287 ; AVX512-NEXT: vmovaps (%rdi), %xmm0
288 ; AVX512-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],mem[0,2]
289 ; AVX512-NEXT: vmovaps %xmm0, (%rsi)
291 %vec = load <8 x i32>, <8 x i32>* %L
292 %strided.vec = shufflevector <8 x i32> %vec, <8 x i32> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
  store <4 x i32> %strided.vec, <4 x i32>* %S
  ret void
}

297 define void @trunc_v4i64_to_v4i32(<8 x i32>* %L, <4 x i32>* %S) nounwind {
298 ; AVX1-LABEL: trunc_v4i64_to_v4i32:
300 ; AVX1-NEXT: vmovaps (%rdi), %xmm0
301 ; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],mem[0,2]
302 ; AVX1-NEXT: vmovaps %xmm0, (%rsi)
305 ; AVX2-SLOW-LABEL: trunc_v4i64_to_v4i32:
306 ; AVX2-SLOW: # %bb.0:
307 ; AVX2-SLOW-NEXT: vmovaps (%rdi), %xmm0
308 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],mem[0,2]
309 ; AVX2-SLOW-NEXT: vmovaps %xmm0, (%rsi)
310 ; AVX2-SLOW-NEXT: retq
312 ; AVX2-FAST-LABEL: trunc_v4i64_to_v4i32:
313 ; AVX2-FAST: # %bb.0:
314 ; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm0 = [0,2,4,6,4,6,6,7]
315 ; AVX2-FAST-NEXT: vpermps (%rdi), %ymm0, %ymm0
316 ; AVX2-FAST-NEXT: vmovaps %xmm0, (%rsi)
317 ; AVX2-FAST-NEXT: vzeroupper
318 ; AVX2-FAST-NEXT: retq
320 ; AVX512F-LABEL: trunc_v4i64_to_v4i32:
322 ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
323 ; AVX512F-NEXT: vpmovqd %zmm0, %ymm0
324 ; AVX512F-NEXT: vmovdqa %xmm0, (%rsi)
325 ; AVX512F-NEXT: vzeroupper
328 ; AVX512VL-LABEL: trunc_v4i64_to_v4i32:
330 ; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
331 ; AVX512VL-NEXT: vpmovqd %ymm0, (%rsi)
332 ; AVX512VL-NEXT: vzeroupper
333 ; AVX512VL-NEXT: retq
335 ; AVX512BW-LABEL: trunc_v4i64_to_v4i32:
337 ; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
338 ; AVX512BW-NEXT: vpmovqd %zmm0, %ymm0
339 ; AVX512BW-NEXT: vmovdqa %xmm0, (%rsi)
340 ; AVX512BW-NEXT: vzeroupper
341 ; AVX512BW-NEXT: retq
343 ; AVX512BWVL-LABEL: trunc_v4i64_to_v4i32:
344 ; AVX512BWVL: # %bb.0:
345 ; AVX512BWVL-NEXT: vmovdqa (%rdi), %ymm0
346 ; AVX512BWVL-NEXT: vpmovqd %ymm0, (%rsi)
347 ; AVX512BWVL-NEXT: vzeroupper
348 ; AVX512BWVL-NEXT: retq
350 ; AVX512VBMIVL-LABEL: trunc_v4i64_to_v4i32:
351 ; AVX512VBMIVL: # %bb.0:
352 ; AVX512VBMIVL-NEXT: vmovdqa (%rdi), %ymm0
353 ; AVX512VBMIVL-NEXT: vpmovqd %ymm0, (%rsi)
354 ; AVX512VBMIVL-NEXT: vzeroupper
355 ; AVX512VBMIVL-NEXT: retq
356 %vec = load <8 x i32>, <8 x i32>* %L
357 %bc = bitcast <8 x i32> %vec to <4 x i64>
358 %strided.vec = trunc <4 x i64> %bc to <4 x i32>
  store <4 x i32> %strided.vec, <4 x i32>* %S
  ret void
}

363 define void @shuffle_v32i8_to_v8i8(<32 x i8>* %L, <8 x i8>* %S) nounwind {
364 ; AVX-LABEL: shuffle_v32i8_to_v8i8:
366 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
367 ; AVX-NEXT: vmovdqa 16(%rdi), %xmm1
368 ; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
369 ; AVX-NEXT: vpshufb %xmm2, %xmm1, %xmm1
370 ; AVX-NEXT: vpshufb %xmm2, %xmm0, %xmm0
371 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
372 ; AVX-NEXT: vmovq %xmm0, (%rsi)
375 ; AVX512F-LABEL: shuffle_v32i8_to_v8i8:
377 ; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
378 ; AVX512F-NEXT: vmovdqa 16(%rdi), %xmm1
379 ; AVX512F-NEXT: vmovdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
380 ; AVX512F-NEXT: vpshufb %xmm2, %xmm1, %xmm1
381 ; AVX512F-NEXT: vpshufb %xmm2, %xmm0, %xmm0
382 ; AVX512F-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
383 ; AVX512F-NEXT: vmovq %xmm0, (%rsi)
386 ; AVX512VL-LABEL: shuffle_v32i8_to_v8i8:
388 ; AVX512VL-NEXT: vmovdqa (%rdi), %xmm0
389 ; AVX512VL-NEXT: vmovdqa 16(%rdi), %xmm1
390 ; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
391 ; AVX512VL-NEXT: vpshufb %xmm2, %xmm1, %xmm1
392 ; AVX512VL-NEXT: vpshufb %xmm2, %xmm0, %xmm0
393 ; AVX512VL-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
394 ; AVX512VL-NEXT: vmovq %xmm0, (%rsi)
395 ; AVX512VL-NEXT: retq
397 ; AVX512BW-LABEL: shuffle_v32i8_to_v8i8:
399 ; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
400 ; AVX512BW-NEXT: vmovdqa 16(%rdi), %xmm1
401 ; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
402 ; AVX512BW-NEXT: vpshufb %xmm2, %xmm1, %xmm1
403 ; AVX512BW-NEXT: vpshufb %xmm2, %xmm0, %xmm0
404 ; AVX512BW-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
405 ; AVX512BW-NEXT: vmovq %xmm0, (%rsi)
406 ; AVX512BW-NEXT: retq
408 ; AVX512BWVL-LABEL: shuffle_v32i8_to_v8i8:
409 ; AVX512BWVL: # %bb.0:
410 ; AVX512BWVL-NEXT: vmovdqa (%rdi), %xmm0
411 ; AVX512BWVL-NEXT: vmovdqa 16(%rdi), %xmm1
412 ; AVX512BWVL-NEXT: vmovdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
413 ; AVX512BWVL-NEXT: vpshufb %xmm2, %xmm1, %xmm1
414 ; AVX512BWVL-NEXT: vpshufb %xmm2, %xmm0, %xmm0
415 ; AVX512BWVL-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
416 ; AVX512BWVL-NEXT: vmovq %xmm0, (%rsi)
417 ; AVX512BWVL-NEXT: retq
419 ; AVX512VBMIVL-LABEL: shuffle_v32i8_to_v8i8:
420 ; AVX512VBMIVL: # %bb.0:
421 ; AVX512VBMIVL-NEXT: vmovdqa (%rdi), %xmm0
422 ; AVX512VBMIVL-NEXT: vpbroadcastq {{.*#+}} xmm1 = [2024390091656922112,2024390091656922112]
423 ; AVX512VBMIVL-NEXT: vpermi2b 16(%rdi), %xmm0, %xmm1
424 ; AVX512VBMIVL-NEXT: vmovq %xmm1, (%rsi)
425 ; AVX512VBMIVL-NEXT: retq
426 %vec = load <32 x i8>, <32 x i8>* %L
427 %strided.vec = shufflevector <32 x i8> %vec, <32 x i8> undef, <8 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28>
  store <8 x i8> %strided.vec, <8 x i8>* %S
  ret void
}

432 define void @trunc_v8i32_to_v8i8(<32 x i8>* %L, <8 x i8>* %S) nounwind {
433 ; AVX-LABEL: trunc_v8i32_to_v8i8:
435 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
436 ; AVX-NEXT: vmovdqa 16(%rdi), %xmm1
437 ; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
438 ; AVX-NEXT: vpshufb %xmm2, %xmm1, %xmm1
439 ; AVX-NEXT: vpshufb %xmm2, %xmm0, %xmm0
440 ; AVX-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
441 ; AVX-NEXT: vmovq %xmm0, (%rsi)
444 ; AVX512F-LABEL: trunc_v8i32_to_v8i8:
446 ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
447 ; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
448 ; AVX512F-NEXT: vmovq %xmm0, (%rsi)
449 ; AVX512F-NEXT: vzeroupper
452 ; AVX512VL-LABEL: trunc_v8i32_to_v8i8:
454 ; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
455 ; AVX512VL-NEXT: vpmovdb %ymm0, (%rsi)
456 ; AVX512VL-NEXT: vzeroupper
457 ; AVX512VL-NEXT: retq
459 ; AVX512BW-LABEL: trunc_v8i32_to_v8i8:
461 ; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
462 ; AVX512BW-NEXT: vpmovdb %zmm0, %xmm0
463 ; AVX512BW-NEXT: vmovq %xmm0, (%rsi)
464 ; AVX512BW-NEXT: vzeroupper
465 ; AVX512BW-NEXT: retq
467 ; AVX512BWVL-LABEL: trunc_v8i32_to_v8i8:
468 ; AVX512BWVL: # %bb.0:
469 ; AVX512BWVL-NEXT: vmovdqa (%rdi), %ymm0
470 ; AVX512BWVL-NEXT: vpmovdb %ymm0, (%rsi)
471 ; AVX512BWVL-NEXT: vzeroupper
472 ; AVX512BWVL-NEXT: retq
474 ; AVX512VBMIVL-LABEL: trunc_v8i32_to_v8i8:
475 ; AVX512VBMIVL: # %bb.0:
476 ; AVX512VBMIVL-NEXT: vmovdqa (%rdi), %ymm0
477 ; AVX512VBMIVL-NEXT: vpmovdb %ymm0, (%rsi)
478 ; AVX512VBMIVL-NEXT: vzeroupper
479 ; AVX512VBMIVL-NEXT: retq
480 %vec = load <32 x i8>, <32 x i8>* %L
481 %bc = bitcast <32 x i8> %vec to <8 x i32>
482 %strided.vec = trunc <8 x i32> %bc to <8 x i8>
  store <8 x i8> %strided.vec, <8 x i8>* %S
  ret void
}

487 define <2 x i64> @trunc_v8i32_to_v8i8_return_v2i64(<8 x i32> %vec) nounwind {
489 ; return (__m128i) {(long long)__builtin_convertvector((__v8si)__A, __v8qi), 0};
490 ; AVX1-LABEL: trunc_v8i32_to_v8i8_return_v2i64:
492 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
493 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
494 ; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
495 ; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
496 ; AVX1-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
497 ; AVX1-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
498 ; AVX1-NEXT: vzeroupper
501 ; AVX2-LABEL: trunc_v8i32_to_v8i8_return_v2i64:
503 ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
504 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
505 ; AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
506 ; AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0
507 ; AVX2-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
508 ; AVX2-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
509 ; AVX2-NEXT: vzeroupper
512 ; AVX512F-LABEL: trunc_v8i32_to_v8i8_return_v2i64:
514 ; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
515 ; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
516 ; AVX512F-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
517 ; AVX512F-NEXT: vzeroupper
520 ; AVX512VL-LABEL: trunc_v8i32_to_v8i8_return_v2i64:
522 ; AVX512VL-NEXT: vpmovdb %ymm0, %xmm0
523 ; AVX512VL-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
524 ; AVX512VL-NEXT: vzeroupper
525 ; AVX512VL-NEXT: retq
527 ; AVX512BW-LABEL: trunc_v8i32_to_v8i8_return_v2i64:
529 ; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
530 ; AVX512BW-NEXT: vpmovdb %zmm0, %xmm0
531 ; AVX512BW-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
532 ; AVX512BW-NEXT: vzeroupper
533 ; AVX512BW-NEXT: retq
535 ; AVX512BWVL-LABEL: trunc_v8i32_to_v8i8_return_v2i64:
536 ; AVX512BWVL: # %bb.0:
537 ; AVX512BWVL-NEXT: vpmovdb %ymm0, %xmm0
538 ; AVX512BWVL-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
539 ; AVX512BWVL-NEXT: vzeroupper
540 ; AVX512BWVL-NEXT: retq
542 ; AVX512VBMIVL-LABEL: trunc_v8i32_to_v8i8_return_v2i64:
543 ; AVX512VBMIVL: # %bb.0:
544 ; AVX512VBMIVL-NEXT: vpmovdb %ymm0, %xmm0
545 ; AVX512VBMIVL-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
546 ; AVX512VBMIVL-NEXT: vzeroupper
547 ; AVX512VBMIVL-NEXT: retq
548 %truncated.vec = trunc <8 x i32> %vec to <8 x i8>
549 %bc = bitcast <8 x i8> %truncated.vec to i64
550 %result = insertelement <2 x i64> zeroinitializer, i64 %bc, i32 0
  ret <2 x i64> %result
}

554 define <16 x i8> @trunc_v8i32_to_v8i8_with_zext_return_v16i8(<8 x i32> %vec) nounwind {
555 ; AVX1-LABEL: trunc_v8i32_to_v8i8_with_zext_return_v16i8:
557 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
558 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
559 ; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
560 ; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
561 ; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
562 ; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14],zero,zero,zero,zero,zero,zero,zero,zero
563 ; AVX1-NEXT: vzeroupper
566 ; AVX2-LABEL: trunc_v8i32_to_v8i8_with_zext_return_v16i8:
568 ; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
569 ; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
570 ; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14],zero,zero,zero,zero,zero,zero,zero,zero
571 ; AVX2-NEXT: vzeroupper
574 ; AVX512F-LABEL: trunc_v8i32_to_v8i8_with_zext_return_v16i8:
576 ; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
577 ; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
578 ; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14],zero,zero,zero,zero,zero,zero,zero,zero
579 ; AVX512F-NEXT: vzeroupper
582 ; AVX512VL-LABEL: trunc_v8i32_to_v8i8_with_zext_return_v16i8:
584 ; AVX512VL-NEXT: vpmovdb %ymm0, %xmm0
585 ; AVX512VL-NEXT: vzeroupper
586 ; AVX512VL-NEXT: retq
588 ; AVX512BW-LABEL: trunc_v8i32_to_v8i8_with_zext_return_v16i8:
590 ; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
591 ; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
592 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14],zero,zero,zero,zero,zero,zero,zero,zero
593 ; AVX512BW-NEXT: vzeroupper
594 ; AVX512BW-NEXT: retq
596 ; AVX512BWVL-LABEL: trunc_v8i32_to_v8i8_with_zext_return_v16i8:
597 ; AVX512BWVL: # %bb.0:
598 ; AVX512BWVL-NEXT: vpmovdb %ymm0, %xmm0
599 ; AVX512BWVL-NEXT: vzeroupper
600 ; AVX512BWVL-NEXT: retq
602 ; AVX512VBMIVL-LABEL: trunc_v8i32_to_v8i8_with_zext_return_v16i8:
603 ; AVX512VBMIVL: # %bb.0:
604 ; AVX512VBMIVL-NEXT: vpmovdb %ymm0, %xmm0
605 ; AVX512VBMIVL-NEXT: vzeroupper
606 ; AVX512VBMIVL-NEXT: retq
607 %truncated = trunc <8 x i32> %vec to <8 x i8>
608 %truncated.ext = zext <8 x i8> %truncated to <8 x i16>
609 %bc = bitcast <8 x i16> %truncated.ext to <16 x i8>
610 %result = shufflevector <16 x i8> %bc, <16 x i8> zeroinitializer, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
  ret <16 x i8> %result
}

614 define <16 x i8> @trunc_v8i32_to_v8i8_via_v8i16_return_v16i8(<8 x i32> %vec) nounwind {
615 ; AVX1-LABEL: trunc_v8i32_to_v8i8_via_v8i16_return_v16i8:
617 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
618 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
619 ; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
620 ; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
621 ; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
622 ; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14],zero,zero,zero,zero,zero,zero,zero,zero
623 ; AVX1-NEXT: vzeroupper
626 ; AVX2-LABEL: trunc_v8i32_to_v8i8_via_v8i16_return_v16i8:
628 ; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
629 ; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
630 ; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14],zero,zero,zero,zero,zero,zero,zero,zero
631 ; AVX2-NEXT: vzeroupper
634 ; AVX512F-LABEL: trunc_v8i32_to_v8i8_via_v8i16_return_v16i8:
636 ; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
637 ; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
638 ; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14],zero,zero,zero,zero,zero,zero,zero,zero
639 ; AVX512F-NEXT: vzeroupper
642 ; AVX512VL-LABEL: trunc_v8i32_to_v8i8_via_v8i16_return_v16i8:
644 ; AVX512VL-NEXT: vpmovdb %ymm0, %xmm0
645 ; AVX512VL-NEXT: vzeroupper
646 ; AVX512VL-NEXT: retq
648 ; AVX512BW-LABEL: trunc_v8i32_to_v8i8_via_v8i16_return_v16i8:
650 ; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
651 ; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
652 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14],zero,zero,zero,zero,zero,zero,zero,zero
653 ; AVX512BW-NEXT: vzeroupper
654 ; AVX512BW-NEXT: retq
656 ; AVX512BWVL-LABEL: trunc_v8i32_to_v8i8_via_v8i16_return_v16i8:
657 ; AVX512BWVL: # %bb.0:
658 ; AVX512BWVL-NEXT: vpmovdb %ymm0, %xmm0
659 ; AVX512BWVL-NEXT: vzeroupper
660 ; AVX512BWVL-NEXT: retq
662 ; AVX512VBMIVL-LABEL: trunc_v8i32_to_v8i8_via_v8i16_return_v16i8:
663 ; AVX512VBMIVL: # %bb.0:
664 ; AVX512VBMIVL-NEXT: vpmovdb %ymm0, %xmm0
665 ; AVX512VBMIVL-NEXT: vzeroupper
666 ; AVX512VBMIVL-NEXT: retq
667 %truncated = trunc <8 x i32> %vec to <8 x i16>
668 %bc = bitcast <8 x i16> %truncated to <16 x i8>
669 %result = shufflevector <16 x i8> %bc, <16 x i8> zeroinitializer, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 17, i32 20, i32 24, i32 22, i32 31, i32 28, i32 28, i32 29>
  ret <16 x i8> %result
}

673 define <16 x i8> @trunc_v8i32_to_v8i8_return_v16i8(<8 x i32> %vec) nounwind {
674 ; AVX1-LABEL: trunc_v8i32_to_v8i8_return_v16i8:
676 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
677 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
678 ; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
679 ; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
680 ; AVX1-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
681 ; AVX1-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
682 ; AVX1-NEXT: vzeroupper
685 ; AVX2-LABEL: trunc_v8i32_to_v8i8_return_v16i8:
687 ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
688 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
689 ; AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
690 ; AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0
691 ; AVX2-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
692 ; AVX2-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
693 ; AVX2-NEXT: vzeroupper
696 ; AVX512F-LABEL: trunc_v8i32_to_v8i8_return_v16i8:
698 ; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
699 ; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
700 ; AVX512F-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
701 ; AVX512F-NEXT: vzeroupper
704 ; AVX512VL-LABEL: trunc_v8i32_to_v8i8_return_v16i8:
706 ; AVX512VL-NEXT: vpmovdb %ymm0, %xmm0
707 ; AVX512VL-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
708 ; AVX512VL-NEXT: vzeroupper
709 ; AVX512VL-NEXT: retq
711 ; AVX512BW-LABEL: trunc_v8i32_to_v8i8_return_v16i8:
713 ; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
714 ; AVX512BW-NEXT: vpmovdb %zmm0, %xmm0
715 ; AVX512BW-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
716 ; AVX512BW-NEXT: vzeroupper
717 ; AVX512BW-NEXT: retq
719 ; AVX512BWVL-LABEL: trunc_v8i32_to_v8i8_return_v16i8:
720 ; AVX512BWVL: # %bb.0:
721 ; AVX512BWVL-NEXT: vpmovdb %ymm0, %xmm0
722 ; AVX512BWVL-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
723 ; AVX512BWVL-NEXT: vzeroupper
724 ; AVX512BWVL-NEXT: retq
726 ; AVX512VBMIVL-LABEL: trunc_v8i32_to_v8i8_return_v16i8:
727 ; AVX512VBMIVL: # %bb.0:
728 ; AVX512VBMIVL-NEXT: vpmovdb %ymm0, %xmm0
729 ; AVX512VBMIVL-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
730 ; AVX512VBMIVL-NEXT: vzeroupper
731 ; AVX512VBMIVL-NEXT: retq
732 %truncated = trunc <8 x i32> %vec to <8 x i8>
733 %result = shufflevector <8 x i8> %truncated, <8 x i8> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  ret <16 x i8> %result
}

737 define <2 x i64> @trunc_v4i64_to_v4i16_return_v2i64(<4 x i64> %vec) nounwind {
739 ; return (__m128i) {(long long)__builtin_convertvector((__v4di)x, __v4hi), 0};
740 ; AVX1-LABEL: trunc_v4i64_to_v4i16_return_v2i64:
742 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
743 ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
744 ; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
745 ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
746 ; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
747 ; AVX1-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
748 ; AVX1-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
749 ; AVX1-NEXT: vzeroupper
752 ; AVX2-SLOW-LABEL: trunc_v4i64_to_v4i16_return_v2i64:
753 ; AVX2-SLOW: # %bb.0:
754 ; AVX2-SLOW-NEXT: vextracti128 $1, %ymm0, %xmm1
755 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
756 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
757 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
758 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
759 ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
760 ; AVX2-SLOW-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
761 ; AVX2-SLOW-NEXT: vzeroupper
762 ; AVX2-SLOW-NEXT: retq
764 ; AVX2-FAST-LABEL: trunc_v4i64_to_v4i16_return_v2i64:
765 ; AVX2-FAST: # %bb.0:
766 ; AVX2-FAST-NEXT: vextracti128 $1, %ymm0, %xmm1
767 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,8,9,8,9,10,11,8,9,10,11,12,13,14,15]
768 ; AVX2-FAST-NEXT: vpshufb %xmm2, %xmm1, %xmm1
769 ; AVX2-FAST-NEXT: vpshufb %xmm2, %xmm0, %xmm0
770 ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
771 ; AVX2-FAST-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
772 ; AVX2-FAST-NEXT: vzeroupper
773 ; AVX2-FAST-NEXT: retq
775 ; AVX512F-LABEL: trunc_v4i64_to_v4i16_return_v2i64:
777 ; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
778 ; AVX512F-NEXT: vpmovqw %zmm0, %xmm0
779 ; AVX512F-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
780 ; AVX512F-NEXT: vzeroupper
783 ; AVX512VL-LABEL: trunc_v4i64_to_v4i16_return_v2i64:
785 ; AVX512VL-NEXT: vpmovqw %ymm0, %xmm0
786 ; AVX512VL-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
787 ; AVX512VL-NEXT: vzeroupper
788 ; AVX512VL-NEXT: retq
790 ; AVX512BW-LABEL: trunc_v4i64_to_v4i16_return_v2i64:
792 ; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
793 ; AVX512BW-NEXT: vpmovqw %zmm0, %xmm0
794 ; AVX512BW-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
795 ; AVX512BW-NEXT: vzeroupper
796 ; AVX512BW-NEXT: retq
798 ; AVX512BWVL-LABEL: trunc_v4i64_to_v4i16_return_v2i64:
799 ; AVX512BWVL: # %bb.0:
800 ; AVX512BWVL-NEXT: vpmovqw %ymm0, %xmm0
801 ; AVX512BWVL-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
802 ; AVX512BWVL-NEXT: vzeroupper
803 ; AVX512BWVL-NEXT: retq
805 ; AVX512VBMIVL-LABEL: trunc_v4i64_to_v4i16_return_v2i64:
806 ; AVX512VBMIVL: # %bb.0:
807 ; AVX512VBMIVL-NEXT: vpmovqw %ymm0, %xmm0
808 ; AVX512VBMIVL-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
809 ; AVX512VBMIVL-NEXT: vzeroupper
810 ; AVX512VBMIVL-NEXT: retq
811 %truncated = trunc <4 x i64> %vec to <4 x i16>
812 %bc = bitcast <4 x i16> %truncated to i64
813 %result = insertelement <2 x i64> zeroinitializer, i64 %bc, i32 0
  ret <2 x i64> %result
}

817 define <8 x i16> @trunc_v4i64_to_v4i16_with_zext_return_v8i16(<4 x i64> %vec) nounwind {
818 ; AVX1-LABEL: trunc_v4i64_to_v4i16_with_zext_return_v8i16:
820 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
821 ; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
822 ; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero
823 ; AVX1-NEXT: vzeroupper
826 ; AVX2-SLOW-LABEL: trunc_v4i64_to_v4i16_with_zext_return_v8i16:
827 ; AVX2-SLOW: # %bb.0:
828 ; AVX2-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm1
829 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
830 ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero
831 ; AVX2-SLOW-NEXT: vzeroupper
832 ; AVX2-SLOW-NEXT: retq
834 ; AVX2-FAST-LABEL: trunc_v4i64_to_v4i16_with_zext_return_v8i16:
835 ; AVX2-FAST: # %bb.0:
836 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm1 = [0,2,4,6,4,6,6,7]
837 ; AVX2-FAST-NEXT: vpermd %ymm0, %ymm1, %ymm0
838 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero
839 ; AVX2-FAST-NEXT: vzeroupper
840 ; AVX2-FAST-NEXT: retq
842 ; AVX512F-LABEL: trunc_v4i64_to_v4i16_with_zext_return_v8i16:
844 ; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
845 ; AVX512F-NEXT: vpmovqd %zmm0, %ymm0
846 ; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero
847 ; AVX512F-NEXT: vzeroupper
850 ; AVX512VL-LABEL: trunc_v4i64_to_v4i16_with_zext_return_v8i16:
852 ; AVX512VL-NEXT: vpmovqw %ymm0, %xmm0
853 ; AVX512VL-NEXT: vzeroupper
854 ; AVX512VL-NEXT: retq
856 ; AVX512BW-LABEL: trunc_v4i64_to_v4i16_with_zext_return_v8i16:
858 ; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
859 ; AVX512BW-NEXT: vpmovqd %zmm0, %ymm0
860 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero
861 ; AVX512BW-NEXT: vzeroupper
862 ; AVX512BW-NEXT: retq
864 ; AVX512BWVL-LABEL: trunc_v4i64_to_v4i16_with_zext_return_v8i16:
865 ; AVX512BWVL: # %bb.0:
866 ; AVX512BWVL-NEXT: vpmovqw %ymm0, %xmm0
867 ; AVX512BWVL-NEXT: vzeroupper
868 ; AVX512BWVL-NEXT: retq
870 ; AVX512VBMIVL-LABEL: trunc_v4i64_to_v4i16_with_zext_return_v8i16:
871 ; AVX512VBMIVL: # %bb.0:
872 ; AVX512VBMIVL-NEXT: vpmovqw %ymm0, %xmm0
873 ; AVX512VBMIVL-NEXT: vzeroupper
874 ; AVX512VBMIVL-NEXT: retq
875 %truncated = trunc <4 x i64> %vec to <4 x i16>
876 %truncated.ext = zext <4 x i16> %truncated to <4 x i32>
877 %bc = bitcast <4 x i32> %truncated.ext to <8 x i16>
878 %result = shufflevector <8 x i16> %bc, <8 x i16> zeroinitializer, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 1, i32 3, i32 5, i32 7>
  ret <8 x i16> %result
}

882 define <8 x i16> @trunc_v4i64_to_v4i16_via_v4i32_return_v8i16(<4 x i64> %vec) nounwind {
883 ; AVX1-LABEL: trunc_v4i64_to_v4i16_via_v4i32_return_v8i16:
885 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
886 ; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
887 ; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero
888 ; AVX1-NEXT: vzeroupper
891 ; AVX2-SLOW-LABEL: trunc_v4i64_to_v4i16_via_v4i32_return_v8i16:
892 ; AVX2-SLOW: # %bb.0:
893 ; AVX2-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm1
894 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
895 ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero
896 ; AVX2-SLOW-NEXT: vzeroupper
897 ; AVX2-SLOW-NEXT: retq
899 ; AVX2-FAST-LABEL: trunc_v4i64_to_v4i16_via_v4i32_return_v8i16:
900 ; AVX2-FAST: # %bb.0:
901 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm1 = [0,2,4,6,4,6,6,7]
902 ; AVX2-FAST-NEXT: vpermd %ymm0, %ymm1, %ymm0
903 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero
904 ; AVX2-FAST-NEXT: vzeroupper
905 ; AVX2-FAST-NEXT: retq
907 ; AVX512F-LABEL: trunc_v4i64_to_v4i16_via_v4i32_return_v8i16:
909 ; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
910 ; AVX512F-NEXT: vpmovqd %zmm0, %ymm0
911 ; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero
912 ; AVX512F-NEXT: vzeroupper
915 ; AVX512VL-LABEL: trunc_v4i64_to_v4i16_via_v4i32_return_v8i16:
917 ; AVX512VL-NEXT: vpmovqw %ymm0, %xmm0
918 ; AVX512VL-NEXT: vzeroupper
919 ; AVX512VL-NEXT: retq
921 ; AVX512BW-LABEL: trunc_v4i64_to_v4i16_via_v4i32_return_v8i16:
923 ; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
924 ; AVX512BW-NEXT: vpmovqd %zmm0, %ymm0
925 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero
926 ; AVX512BW-NEXT: vzeroupper
927 ; AVX512BW-NEXT: retq
929 ; AVX512BWVL-LABEL: trunc_v4i64_to_v4i16_via_v4i32_return_v8i16:
930 ; AVX512BWVL: # %bb.0:
931 ; AVX512BWVL-NEXT: vpmovqw %ymm0, %xmm0
932 ; AVX512BWVL-NEXT: vzeroupper
933 ; AVX512BWVL-NEXT: retq
935 ; AVX512VBMIVL-LABEL: trunc_v4i64_to_v4i16_via_v4i32_return_v8i16:
936 ; AVX512VBMIVL: # %bb.0:
937 ; AVX512VBMIVL-NEXT: vpmovqw %ymm0, %xmm0
938 ; AVX512VBMIVL-NEXT: vzeroupper
939 ; AVX512VBMIVL-NEXT: retq
940 %truncated = trunc <4 x i64> %vec to <4 x i32>
941 %bc = bitcast <4 x i32> %truncated to <8 x i16>
942 %result = shufflevector <8 x i16> %bc, <8 x i16> zeroinitializer, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 8, i32 undef, i32 13>
  ret <8 x i16> %result
}

946 define <8 x i16> @trunc_v4i64_to_v4i16_return_v8i16(<4 x i64> %vec) nounwind {
947 ; AVX1-LABEL: trunc_v4i64_to_v4i16_return_v8i16:
949 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
950 ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
951 ; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
952 ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
953 ; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
954 ; AVX1-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
955 ; AVX1-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
956 ; AVX1-NEXT: vzeroupper
959 ; AVX2-SLOW-LABEL: trunc_v4i64_to_v4i16_return_v8i16:
960 ; AVX2-SLOW: # %bb.0:
961 ; AVX2-SLOW-NEXT: vextracti128 $1, %ymm0, %xmm1
962 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
963 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
964 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
965 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
966 ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
967 ; AVX2-SLOW-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
968 ; AVX2-SLOW-NEXT: vzeroupper
969 ; AVX2-SLOW-NEXT: retq
971 ; AVX2-FAST-LABEL: trunc_v4i64_to_v4i16_return_v8i16:
972 ; AVX2-FAST: # %bb.0:
973 ; AVX2-FAST-NEXT: vextracti128 $1, %ymm0, %xmm1
974 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,8,9,8,9,10,11,8,9,10,11,12,13,14,15]
975 ; AVX2-FAST-NEXT: vpshufb %xmm2, %xmm1, %xmm1
976 ; AVX2-FAST-NEXT: vpshufb %xmm2, %xmm0, %xmm0
977 ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
978 ; AVX2-FAST-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
979 ; AVX2-FAST-NEXT: vzeroupper
980 ; AVX2-FAST-NEXT: retq
982 ; AVX512F-LABEL: trunc_v4i64_to_v4i16_return_v8i16:
984 ; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
985 ; AVX512F-NEXT: vpmovqw %zmm0, %xmm0
986 ; AVX512F-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
987 ; AVX512F-NEXT: vzeroupper
990 ; AVX512VL-LABEL: trunc_v4i64_to_v4i16_return_v8i16:
992 ; AVX512VL-NEXT: vpmovqw %ymm0, %xmm0
993 ; AVX512VL-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
994 ; AVX512VL-NEXT: vzeroupper
995 ; AVX512VL-NEXT: retq
997 ; AVX512BW-LABEL: trunc_v4i64_to_v4i16_return_v8i16:
999 ; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
1000 ; AVX512BW-NEXT: vpmovqw %zmm0, %xmm0
1001 ; AVX512BW-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
1002 ; AVX512BW-NEXT: vzeroupper
1003 ; AVX512BW-NEXT: retq
1005 ; AVX512BWVL-LABEL: trunc_v4i64_to_v4i16_return_v8i16:
1006 ; AVX512BWVL: # %bb.0:
1007 ; AVX512BWVL-NEXT: vpmovqw %ymm0, %xmm0
1008 ; AVX512BWVL-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
1009 ; AVX512BWVL-NEXT: vzeroupper
1010 ; AVX512BWVL-NEXT: retq
1012 ; AVX512VBMIVL-LABEL: trunc_v4i64_to_v4i16_return_v8i16:
1013 ; AVX512VBMIVL: # %bb.0:
1014 ; AVX512VBMIVL-NEXT: vpmovqw %ymm0, %xmm0
1015 ; AVX512VBMIVL-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
1016 ; AVX512VBMIVL-NEXT: vzeroupper
1017 ; AVX512VBMIVL-NEXT: retq
1018 %truncated = trunc <4 x i64> %vec to <4 x i16>
1019 %result = shufflevector <4 x i16> %truncated, <4 x i16> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  ret <8 x i16> %result
}

1023 define <16 x i8> @trunc_v4i64_to_v4i8_return_v16i8(<4 x i64> %vec) nounwind {
1024 ; AVX1-LABEL: trunc_v4i64_to_v4i8_return_v16i8:
1026 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
1027 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = <0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
1028 ; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
1029 ; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
1030 ; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
1031 ; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
1032 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
1033 ; AVX1-NEXT: vzeroupper
1036 ; AVX2-LABEL: trunc_v4i64_to_v4i8_return_v16i8:
1038 ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
1039 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
1040 ; AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
1041 ; AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0
1042 ; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
1043 ; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
1044 ; AVX2-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
1045 ; AVX2-NEXT: vzeroupper
1048 ; AVX512F-LABEL: trunc_v4i64_to_v4i8_return_v16i8:
1050 ; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
1051 ; AVX512F-NEXT: vpmovqb %zmm0, %xmm0
1052 ; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1
1053 ; AVX512F-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
1054 ; AVX512F-NEXT: vzeroupper
1055 ; AVX512F-NEXT: retq
1057 ; AVX512VL-LABEL: trunc_v4i64_to_v4i8_return_v16i8:
1058 ; AVX512VL: # %bb.0:
1059 ; AVX512VL-NEXT: vpmovqb %ymm0, %xmm0
1060 ; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
1061 ; AVX512VL-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
1062 ; AVX512VL-NEXT: vzeroupper
1063 ; AVX512VL-NEXT: retq
1065 ; AVX512BW-LABEL: trunc_v4i64_to_v4i8_return_v16i8:
1066 ; AVX512BW: # %bb.0:
1067 ; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
1068 ; AVX512BW-NEXT: vpmovqb %zmm0, %xmm0
1069 ; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
1070 ; AVX512BW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
1071 ; AVX512BW-NEXT: vzeroupper
1072 ; AVX512BW-NEXT: retq
1074 ; AVX512BWVL-LABEL: trunc_v4i64_to_v4i8_return_v16i8:
1075 ; AVX512BWVL: # %bb.0:
1076 ; AVX512BWVL-NEXT: vpmovqb %ymm0, %xmm0
1077 ; AVX512BWVL-NEXT: vpxor %xmm1, %xmm1, %xmm1
1078 ; AVX512BWVL-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
1079 ; AVX512BWVL-NEXT: vzeroupper
1080 ; AVX512BWVL-NEXT: retq
1082 ; AVX512VBMIVL-LABEL: trunc_v4i64_to_v4i8_return_v16i8:
1083 ; AVX512VBMIVL: # %bb.0:
1084 ; AVX512VBMIVL-NEXT: vpmovqb %ymm0, %xmm0
1085 ; AVX512VBMIVL-NEXT: vpxor %xmm1, %xmm1, %xmm1
1086 ; AVX512VBMIVL-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
1087 ; AVX512VBMIVL-NEXT: vzeroupper
1088 ; AVX512VBMIVL-NEXT: retq
1089 %truncated = trunc <4 x i64> %vec to <4 x i8>
1090 %result = shufflevector <4 x i8> %truncated, <4 x i8> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 5, i32 5, i32 undef, i32 7>
  ret <16 x i8> %result
}

1094 define void @shuffle_v16i16_to_v4i16(<16 x i16>* %L, <4 x i16>* %S) nounwind {
1095 ; AVX1-LABEL: shuffle_v16i16_to_v4i16:
1097 ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = mem[0,2,2,3]
1098 ; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
1099 ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = mem[0,2,2,3]
1100 ; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
1101 ; AVX1-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
1102 ; AVX1-NEXT: vmovq %xmm0, (%rsi)
1105 ; AVX2-SLOW-LABEL: shuffle_v16i16_to_v4i16:
1106 ; AVX2-SLOW: # %bb.0:
1107 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = mem[0,2,2,3]
1108 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
1109 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = mem[0,2,2,3]
1110 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
1111 ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
1112 ; AVX2-SLOW-NEXT: vmovq %xmm0, (%rsi)
1113 ; AVX2-SLOW-NEXT: retq
1115 ; AVX2-FAST-LABEL: shuffle_v16i16_to_v4i16:
1116 ; AVX2-FAST: # %bb.0:
1117 ; AVX2-FAST-NEXT: vmovdqa (%rdi), %xmm0
1118 ; AVX2-FAST-NEXT: vmovdqa 16(%rdi), %xmm1
1119 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,8,9,8,9,10,11,8,9,10,11,12,13,14,15]
1120 ; AVX2-FAST-NEXT: vpshufb %xmm2, %xmm1, %xmm1
1121 ; AVX2-FAST-NEXT: vpshufb %xmm2, %xmm0, %xmm0
1122 ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
1123 ; AVX2-FAST-NEXT: vmovq %xmm0, (%rsi)
1124 ; AVX2-FAST-NEXT: retq
1126 ; AVX512F-LABEL: shuffle_v16i16_to_v4i16:
1128 ; AVX512F-NEXT: vpshufd {{.*#+}} xmm0 = mem[0,2,2,3]
1129 ; AVX512F-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
1130 ; AVX512F-NEXT: vpshufd {{.*#+}} xmm1 = mem[0,2,2,3]
1131 ; AVX512F-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
1132 ; AVX512F-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
1133 ; AVX512F-NEXT: vmovq %xmm0, (%rsi)
1134 ; AVX512F-NEXT: retq
1136 ; AVX512VL-LABEL: shuffle_v16i16_to_v4i16:
1137 ; AVX512VL: # %bb.0:
1138 ; AVX512VL-NEXT: vmovdqa (%rdi), %xmm0
1139 ; AVX512VL-NEXT: vmovdqa 16(%rdi), %xmm1
1140 ; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,8,9,8,9,10,11,8,9,10,11,12,13,14,15]
1141 ; AVX512VL-NEXT: vpshufb %xmm2, %xmm1, %xmm1
1142 ; AVX512VL-NEXT: vpshufb %xmm2, %xmm0, %xmm0
1143 ; AVX512VL-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
1144 ; AVX512VL-NEXT: vmovq %xmm0, (%rsi)
1145 ; AVX512VL-NEXT: retq
1147 ; AVX512BW-LABEL: shuffle_v16i16_to_v4i16:
1148 ; AVX512BW: # %bb.0:
1149 ; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
1150 ; AVX512BW-NEXT: vmovdqa 16(%rdi), %xmm1
1151 ; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,8,9,8,9,10,11,8,9,10,11,12,13,14,15]
1152 ; AVX512BW-NEXT: vpshufb %xmm2, %xmm1, %xmm1
1153 ; AVX512BW-NEXT: vpshufb %xmm2, %xmm0, %xmm0
1154 ; AVX512BW-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
1155 ; AVX512BW-NEXT: vmovq %xmm0, (%rsi)
1156 ; AVX512BW-NEXT: retq
1158 ; AVX512BWVL-LABEL: shuffle_v16i16_to_v4i16:
1159 ; AVX512BWVL: # %bb.0:
1160 ; AVX512BWVL-NEXT: vmovdqa (%rdi), %xmm0
1161 ; AVX512BWVL-NEXT: vmovdqa {{.*#+}} xmm1 = [0,4,8,12,4,5,12,13]
1162 ; AVX512BWVL-NEXT: vpermi2w 16(%rdi), %xmm0, %xmm1
1163 ; AVX512BWVL-NEXT: vmovq %xmm1, (%rsi)
1164 ; AVX512BWVL-NEXT: retq
1166 ; AVX512VBMIVL-LABEL: shuffle_v16i16_to_v4i16:
1167 ; AVX512VBMIVL: # %bb.0:
1168 ; AVX512VBMIVL-NEXT: vmovdqa (%rdi), %xmm0
1169 ; AVX512VBMIVL-NEXT: vmovdqa {{.*#+}} xmm1 = [0,4,8,12,4,5,12,13]
1170 ; AVX512VBMIVL-NEXT: vpermi2w 16(%rdi), %xmm0, %xmm1
1171 ; AVX512VBMIVL-NEXT: vmovq %xmm1, (%rsi)
1172 ; AVX512VBMIVL-NEXT: retq
1173 %vec = load <16 x i16>, <16 x i16>* %L
1174 %strided.vec = shufflevector <16 x i16> %vec, <16 x i16> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
  store <4 x i16> %strided.vec, <4 x i16>* %S
  ret void
}

1179 define void @trunc_v4i64_to_v4i16(<16 x i16>* %L, <4 x i16>* %S) nounwind {
1180 ; AVX1-LABEL: trunc_v4i64_to_v4i16:
1182 ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = mem[0,2,2,3]
1183 ; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
1184 ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = mem[0,2,2,3]
1185 ; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
1186 ; AVX1-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
1187 ; AVX1-NEXT: vmovq %xmm0, (%rsi)
1190 ; AVX2-SLOW-LABEL: trunc_v4i64_to_v4i16:
1191 ; AVX2-SLOW: # %bb.0:
1192 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = mem[0,2,2,3]
1193 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
1194 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = mem[0,2,2,3]
1195 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
1196 ; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
1197 ; AVX2-SLOW-NEXT: vmovq %xmm0, (%rsi)
1198 ; AVX2-SLOW-NEXT: retq
1200 ; AVX2-FAST-LABEL: trunc_v4i64_to_v4i16:
1201 ; AVX2-FAST: # %bb.0:
1202 ; AVX2-FAST-NEXT: vmovdqa (%rdi), %xmm0
1203 ; AVX2-FAST-NEXT: vmovdqa 16(%rdi), %xmm1
1204 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,8,9,8,9,10,11,8,9,10,11,12,13,14,15]
1205 ; AVX2-FAST-NEXT: vpshufb %xmm2, %xmm1, %xmm1
1206 ; AVX2-FAST-NEXT: vpshufb %xmm2, %xmm0, %xmm0
1207 ; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
1208 ; AVX2-FAST-NEXT: vmovq %xmm0, (%rsi)
1209 ; AVX2-FAST-NEXT: retq
1211 ; AVX512F-LABEL: trunc_v4i64_to_v4i16:
1213 ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
1214 ; AVX512F-NEXT: vpmovqw %zmm0, %xmm0
1215 ; AVX512F-NEXT: vmovq %xmm0, (%rsi)
1216 ; AVX512F-NEXT: vzeroupper
1217 ; AVX512F-NEXT: retq
1219 ; AVX512VL-LABEL: trunc_v4i64_to_v4i16:
1220 ; AVX512VL: # %bb.0:
1221 ; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
1222 ; AVX512VL-NEXT: vpmovqw %ymm0, (%rsi)
1223 ; AVX512VL-NEXT: vzeroupper
1224 ; AVX512VL-NEXT: retq
1226 ; AVX512BW-LABEL: trunc_v4i64_to_v4i16:
1227 ; AVX512BW: # %bb.0:
1228 ; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
1229 ; AVX512BW-NEXT: vpmovqw %zmm0, %xmm0
1230 ; AVX512BW-NEXT: vmovq %xmm0, (%rsi)
1231 ; AVX512BW-NEXT: vzeroupper
1232 ; AVX512BW-NEXT: retq
1234 ; AVX512BWVL-LABEL: trunc_v4i64_to_v4i16:
1235 ; AVX512BWVL: # %bb.0:
1236 ; AVX512BWVL-NEXT: vmovdqa (%rdi), %ymm0
1237 ; AVX512BWVL-NEXT: vpmovqw %ymm0, (%rsi)
1238 ; AVX512BWVL-NEXT: vzeroupper
1239 ; AVX512BWVL-NEXT: retq
1241 ; AVX512VBMIVL-LABEL: trunc_v4i64_to_v4i16:
1242 ; AVX512VBMIVL: # %bb.0:
1243 ; AVX512VBMIVL-NEXT: vmovdqa (%rdi), %ymm0
1244 ; AVX512VBMIVL-NEXT: vpmovqw %ymm0, (%rsi)
1245 ; AVX512VBMIVL-NEXT: vzeroupper
1246 ; AVX512VBMIVL-NEXT: retq
1247 %vec = load <16 x i16>, <16 x i16>* %L
1248 %bc = bitcast <16 x i16> %vec to <4 x i64>
1249 %strided.vec = trunc <4 x i64> %bc to <4 x i16>
  store <4 x i16> %strided.vec, <4 x i16>* %S
  ret void
}

1254 define void @shuffle_v32i8_to_v4i8(<32 x i8>* %L, <4 x i8>* %S) nounwind {
1255 ; AVX-LABEL: shuffle_v32i8_to_v4i8:
1257 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
1258 ; AVX-NEXT: vmovdqa 16(%rdi), %xmm1
1259 ; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = <0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
1260 ; AVX-NEXT: vpshufb %xmm2, %xmm1, %xmm1
1261 ; AVX-NEXT: vpshufb %xmm2, %xmm0, %xmm0
1262 ; AVX-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
1263 ; AVX-NEXT: vmovd %xmm0, (%rsi)
1266 ; AVX512F-LABEL: shuffle_v32i8_to_v4i8:
1268 ; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
1269 ; AVX512F-NEXT: vmovdqa 16(%rdi), %xmm1
1270 ; AVX512F-NEXT: vmovdqa {{.*#+}} xmm2 = <0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
1271 ; AVX512F-NEXT: vpshufb %xmm2, %xmm1, %xmm1
1272 ; AVX512F-NEXT: vpshufb %xmm2, %xmm0, %xmm0
1273 ; AVX512F-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
1274 ; AVX512F-NEXT: vmovd %xmm0, (%rsi)
1275 ; AVX512F-NEXT: retq
1277 ; AVX512VL-LABEL: shuffle_v32i8_to_v4i8:
1278 ; AVX512VL: # %bb.0:
1279 ; AVX512VL-NEXT: vmovdqa (%rdi), %xmm0
1280 ; AVX512VL-NEXT: vmovdqa 16(%rdi), %xmm1
1281 ; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm2 = <0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
1282 ; AVX512VL-NEXT: vpshufb %xmm2, %xmm1, %xmm1
1283 ; AVX512VL-NEXT: vpshufb %xmm2, %xmm0, %xmm0
1284 ; AVX512VL-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
1285 ; AVX512VL-NEXT: vmovd %xmm0, (%rsi)
1286 ; AVX512VL-NEXT: retq
1288 ; AVX512BW-LABEL: shuffle_v32i8_to_v4i8:
1289 ; AVX512BW: # %bb.0:
1290 ; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
1291 ; AVX512BW-NEXT: vmovdqa 16(%rdi), %xmm1
1292 ; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = <0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
1293 ; AVX512BW-NEXT: vpshufb %xmm2, %xmm1, %xmm1
1294 ; AVX512BW-NEXT: vpshufb %xmm2, %xmm0, %xmm0
1295 ; AVX512BW-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
1296 ; AVX512BW-NEXT: vmovd %xmm0, (%rsi)
1297 ; AVX512BW-NEXT: retq
1299 ; AVX512BWVL-LABEL: shuffle_v32i8_to_v4i8:
1300 ; AVX512BWVL: # %bb.0:
1301 ; AVX512BWVL-NEXT: vmovdqa (%rdi), %xmm0
1302 ; AVX512BWVL-NEXT: vmovdqa 16(%rdi), %xmm1
1303 ; AVX512BWVL-NEXT: vmovdqa {{.*#+}} xmm2 = <0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
1304 ; AVX512BWVL-NEXT: vpshufb %xmm2, %xmm1, %xmm1
1305 ; AVX512BWVL-NEXT: vpshufb %xmm2, %xmm0, %xmm0
1306 ; AVX512BWVL-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
1307 ; AVX512BWVL-NEXT: vmovd %xmm0, (%rsi)
1308 ; AVX512BWVL-NEXT: retq
1310 ; AVX512VBMIVL-LABEL: shuffle_v32i8_to_v4i8:
1311 ; AVX512VBMIVL: # %bb.0:
1312 ; AVX512VBMIVL-NEXT: vmovdqa (%rdi), %xmm0
1313 ; AVX512VBMIVL-NEXT: vpbroadcastd {{.*#+}} xmm1 = [403703808,403703808,403703808,403703808]
1314 ; AVX512VBMIVL-NEXT: vpermi2b 16(%rdi), %xmm0, %xmm1
1315 ; AVX512VBMIVL-NEXT: vmovd %xmm1, (%rsi)
1316 ; AVX512VBMIVL-NEXT: retq
1317 %vec = load <32 x i8>, <32 x i8>* %L
1318 %strided.vec = shufflevector <32 x i8> %vec, <32 x i8> undef, <4 x i32> <i32 0, i32 8, i32 16, i32 24>
  store <4 x i8> %strided.vec, <4 x i8>* %S
  ret void
}

1323 define void @trunc_v4i64_to_v4i8(<32 x i8>* %L, <4 x i8>* %S) nounwind {
1324 ; AVX-LABEL: trunc_v4i64_to_v4i8:
1326 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
1327 ; AVX-NEXT: vmovdqa 16(%rdi), %xmm1
1328 ; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = <0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
1329 ; AVX-NEXT: vpshufb %xmm2, %xmm1, %xmm1
1330 ; AVX-NEXT: vpshufb %xmm2, %xmm0, %xmm0
1331 ; AVX-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
1332 ; AVX-NEXT: vmovd %xmm0, (%rsi)
1335 ; AVX512F-LABEL: trunc_v4i64_to_v4i8:
1337 ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
1338 ; AVX512F-NEXT: vpmovqb %zmm0, %xmm0
1339 ; AVX512F-NEXT: vmovd %xmm0, (%rsi)
1340 ; AVX512F-NEXT: vzeroupper
1341 ; AVX512F-NEXT: retq
1343 ; AVX512VL-LABEL: trunc_v4i64_to_v4i8:
1344 ; AVX512VL: # %bb.0:
1345 ; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
1346 ; AVX512VL-NEXT: vpmovqb %ymm0, (%rsi)
1347 ; AVX512VL-NEXT: vzeroupper
1348 ; AVX512VL-NEXT: retq
1350 ; AVX512BW-LABEL: trunc_v4i64_to_v4i8:
1351 ; AVX512BW: # %bb.0:
1352 ; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
1353 ; AVX512BW-NEXT: vpmovqb %zmm0, %xmm0
1354 ; AVX512BW-NEXT: vmovd %xmm0, (%rsi)
1355 ; AVX512BW-NEXT: vzeroupper
1356 ; AVX512BW-NEXT: retq
1358 ; AVX512BWVL-LABEL: trunc_v4i64_to_v4i8:
1359 ; AVX512BWVL: # %bb.0:
1360 ; AVX512BWVL-NEXT: vmovdqa (%rdi), %ymm0
1361 ; AVX512BWVL-NEXT: vpmovqb %ymm0, (%rsi)
1362 ; AVX512BWVL-NEXT: vzeroupper
1363 ; AVX512BWVL-NEXT: retq
1365 ; AVX512VBMIVL-LABEL: trunc_v4i64_to_v4i8:
1366 ; AVX512VBMIVL: # %bb.0:
1367 ; AVX512VBMIVL-NEXT: vmovdqa (%rdi), %ymm0
1368 ; AVX512VBMIVL-NEXT: vpmovqb %ymm0, (%rsi)
1369 ; AVX512VBMIVL-NEXT: vzeroupper
1370 ; AVX512VBMIVL-NEXT: retq
1371 %vec = load <32 x i8>, <32 x i8>* %L
1372 %bc = bitcast <32 x i8> %vec to <4 x i64>
1373 %strided.vec = trunc <4 x i64> %bc to <4 x i8>
  store <4 x i8> %strided.vec, <4 x i8>* %S
  ret void
}

1378 ; In this case not all elements are collected from the same source vector, so
1379 ; the resulting BUILD_VECTOR should not be combined to a truncate.
1380 define <16 x i8> @negative(<32 x i8> %v, <32 x i8> %w) nounwind {
1381 ; AVX1-LABEL: negative:
1383 ; AVX1-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[u,2,4,6,8,10,12,14],zero,zero,zero,zero,zero,zero,zero,zero
1384 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
1385 ; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u],zero,zero,zero,zero,zero,zero,zero,xmm0[0,2,4,6,8,10,12,14]
1386 ; AVX1-NEXT: vpor %xmm2, %xmm0, %xmm0
1387 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
1388 ; AVX1-NEXT: vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
1389 ; AVX1-NEXT: vzeroupper
1392 ; AVX2-LABEL: negative:
1394 ; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14,u,18,20,22,24,26,28,30,16,18,20,22,24,26,28,30]
1395 ; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
1396 ; AVX2-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
1397 ; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,2,3]
1398 ; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
1399 ; AVX2-NEXT: vzeroupper
1402 ; AVX512F-LABEL: negative:
1404 ; AVX512F-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14,u,18,20,22,24,26,28,30,16,18,20,22,24,26,28,30]
1405 ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
1406 ; AVX512F-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
1407 ; AVX512F-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,2,3]
1408 ; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
1409 ; AVX512F-NEXT: vzeroupper
1410 ; AVX512F-NEXT: retq
1412 ; AVX512VL-LABEL: negative:
1413 ; AVX512VL: # %bb.0:
1414 ; AVX512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14,u,18,20,22,24,26,28,30,16,18,20,22,24,26,28,30]
1415 ; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
1416 ; AVX512VL-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
1417 ; AVX512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,2,3]
1418 ; AVX512VL-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
1419 ; AVX512VL-NEXT: vzeroupper
1420 ; AVX512VL-NEXT: retq
1422 ; AVX512BW-LABEL: negative:
1423 ; AVX512BW: # %bb.0:
1424 ; AVX512BW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14,u,18,20,22,24,26,28,30,16,18,20,22,24,26,28,30]
1425 ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
1426 ; AVX512BW-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
1427 ; AVX512BW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,2,3]
1428 ; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
1429 ; AVX512BW-NEXT: vzeroupper
1430 ; AVX512BW-NEXT: retq
1432 ; AVX512BWVL-LABEL: negative:
1433 ; AVX512BWVL: # %bb.0:
1434 ; AVX512BWVL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14,u,18,20,22,24,26,28,30,16,18,20,22,24,26,28,30]
1435 ; AVX512BWVL-NEXT: movl $65537, %eax # imm = 0x10001
1436 ; AVX512BWVL-NEXT: kmovd %eax, %k1
1437 ; AVX512BWVL-NEXT: vmovdqu8 %ymm1, %ymm0 {%k1}
1438 ; AVX512BWVL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,2,3]
1439 ; AVX512BWVL-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
1440 ; AVX512BWVL-NEXT: vzeroupper
1441 ; AVX512BWVL-NEXT: retq
1443 ; AVX512VBMIVL-LABEL: negative:
1444 ; AVX512VBMIVL: # %bb.0:
1445 ; AVX512VBMIVL-NEXT: vmovdqa {{.*#+}} ymm2 = [32,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30,48,18,20,22,24,26,28,30,16,18,20,22,24,26,28,30]
1446 ; AVX512VBMIVL-NEXT: vpermt2b %ymm1, %ymm2, %ymm0
1447 ; AVX512VBMIVL-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
1448 ; AVX512VBMIVL-NEXT: vzeroupper
1449 ; AVX512VBMIVL-NEXT: retq
1450 %strided.vec = shufflevector <32 x i8> %v, <32 x i8> undef, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
1451 %w0 = extractelement <32 x i8> %w, i32 0
1452 %merged = insertelement <16 x i8> %strided.vec, i8 %w0, i32 0
  ret <16 x i8> %merged
}