1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,FALLBACK0
3 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1,AVX1-ONLY,FALLBACK1
4 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
5 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
6 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
7 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
8 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
9 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
10 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
11 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
12 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
13 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
14 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
16 ; These patterns are produced by LoopVectorizer for interleaved loads.
; vf2: de-interleave a stride-2 load of <4 x i16> into two <2 x i16> results
; (even lanes -> %out.vec0, odd lanes -> %out.vec1). CHECK lines below are
; autogenerated FileCheck assertions for the expected llc codegen per target.
18 define void @load_i16_stride2_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1) nounwind {
19 ; SSE-LABEL: load_i16_stride2_vf2:
21 ; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
22 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm0[0,2,2,3,4,5,6,7]
23 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,3,2,3,4,5,6,7]
24 ; SSE-NEXT: movd %xmm1, (%rsi)
25 ; SSE-NEXT: movd %xmm0, (%rdx)
28 ; AVX-LABEL: load_i16_stride2_vf2:
30 ; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
31 ; AVX-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[0,2,2,3,4,5,6,7]
32 ; AVX-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[1,3,2,3,4,5,6,7]
33 ; AVX-NEXT: vmovd %xmm1, (%rsi)
34 ; AVX-NEXT: vmovd %xmm0, (%rdx)
; IR under test: wide load, then two shufflevectors picking even/odd indices.
36 %wide.vec = load <4 x i16>, ptr %in.vec, align 64
37 %strided.vec0 = shufflevector <4 x i16> %wide.vec, <4 x i16> poison, <2 x i32> <i32 0, i32 2>
38 %strided.vec1 = shufflevector <4 x i16> %wide.vec, <4 x i16> poison, <2 x i32> <i32 1, i32 3>
39 store <2 x i16> %strided.vec0, ptr %out.vec0, align 64
40 store <2 x i16> %strided.vec1, ptr %out.vec1, align 64
; vf4: de-interleave a stride-2 load of <8 x i16> into two <4 x i16> results
; (even lanes -> %out.vec0, odd lanes -> %out.vec1). AVX512 can use vpmovdw
; (dword->word truncate) for the even lanes.
44 define void @load_i16_stride2_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1) nounwind {
45 ; SSE-LABEL: load_i16_stride2_vf4:
47 ; SSE-NEXT: movdqa (%rdi), %xmm0
48 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm0[0,2,2,3,4,5,6,7]
49 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
50 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
51 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7]
52 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,5,6,7]
53 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
54 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,0,3,2,4,5,6,7]
55 ; SSE-NEXT: movq %xmm1, (%rsi)
56 ; SSE-NEXT: movq %xmm0, (%rdx)
59 ; AVX1-LABEL: load_i16_stride2_vf4:
61 ; AVX1-NEXT: vmovdqa (%rdi), %xmm0
62 ; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[0,1,4,5,8,9,12,13,u,u,u,u,u,u,u,u]
63 ; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u]
64 ; AVX1-NEXT: vmovq %xmm1, (%rsi)
65 ; AVX1-NEXT: vmovq %xmm0, (%rdx)
68 ; AVX512-LABEL: load_i16_stride2_vf4:
70 ; AVX512-NEXT: vmovdqa (%rdi), %xmm0
71 ; AVX512-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u]
72 ; AVX512-NEXT: vpmovdw %xmm0, (%rsi)
73 ; AVX512-NEXT: vmovq %xmm1, (%rdx)
; IR under test: wide load, then even/odd index shufflevectors.
75 %wide.vec = load <8 x i16>, ptr %in.vec, align 64
76 %strided.vec0 = shufflevector <8 x i16> %wide.vec, <8 x i16> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
77 %strided.vec1 = shufflevector <8 x i16> %wide.vec, <8 x i16> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
78 store <4 x i16> %strided.vec0, ptr %out.vec0, align 64
79 store <4 x i16> %strided.vec1, ptr %out.vec1, align 64
; vf8: de-interleave a stride-2 load of <16 x i16> into two <8 x i16> results.
; SSE uses pslld/psrad word-isolation plus packssdw; AVX512 truncates via
; vpmovdw (even lanes) and a 16-bit right shift + vpmovdw (odd lanes).
83 define void @load_i16_stride2_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1) nounwind {
84 ; SSE-LABEL: load_i16_stride2_vf8:
86 ; SSE-NEXT: movdqa (%rdi), %xmm0
87 ; SSE-NEXT: movdqa 16(%rdi), %xmm1
88 ; SSE-NEXT: movdqa %xmm1, %xmm2
89 ; SSE-NEXT: pslld $16, %xmm2
90 ; SSE-NEXT: psrad $16, %xmm2
91 ; SSE-NEXT: movdqa %xmm0, %xmm3
92 ; SSE-NEXT: pslld $16, %xmm3
93 ; SSE-NEXT: psrad $16, %xmm3
94 ; SSE-NEXT: packssdw %xmm2, %xmm3
95 ; SSE-NEXT: psrad $16, %xmm1
96 ; SSE-NEXT: psrad $16, %xmm0
97 ; SSE-NEXT: packssdw %xmm1, %xmm0
98 ; SSE-NEXT: movdqa %xmm3, (%rsi)
99 ; SSE-NEXT: movdqa %xmm0, (%rdx)
102 ; AVX1-LABEL: load_i16_stride2_vf8:
104 ; AVX1-NEXT: vpxor %xmm0, %xmm0, %xmm0
105 ; AVX1-NEXT: vmovdqa (%rdi), %xmm1
106 ; AVX1-NEXT: vmovdqa 16(%rdi), %xmm2
107 ; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm2[0],xmm0[1],xmm2[2],xmm0[3],xmm2[4],xmm0[5],xmm2[6],xmm0[7]
108 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3],xmm1[4],xmm0[5],xmm1[6],xmm0[7]
109 ; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
110 ; AVX1-NEXT: vpsrld $16, %xmm2, %xmm2
111 ; AVX1-NEXT: vpsrld $16, %xmm1, %xmm1
112 ; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
113 ; AVX1-NEXT: vmovdqa %xmm0, (%rsi)
114 ; AVX1-NEXT: vmovdqa %xmm1, (%rdx)
117 ; AVX512-LABEL: load_i16_stride2_vf8:
119 ; AVX512-NEXT: vmovdqa (%rdi), %ymm0
120 ; AVX512-NEXT: vpsrld $16, %ymm0, %ymm1
121 ; AVX512-NEXT: vpmovdw %ymm0, (%rsi)
122 ; AVX512-NEXT: vpmovdw %ymm1, (%rdx)
123 ; AVX512-NEXT: vzeroupper
; IR under test: wide load, then even/odd index shufflevectors.
125 %wide.vec = load <16 x i16>, ptr %in.vec, align 64
126 %strided.vec0 = shufflevector <16 x i16> %wide.vec, <16 x i16> poison, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
127 %strided.vec1 = shufflevector <16 x i16> %wide.vec, <16 x i16> poison, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
128 store <8 x i16> %strided.vec0, ptr %out.vec0, align 64
129 store <8 x i16> %strided.vec1, ptr %out.vec1, align 64
; vf16: de-interleave a stride-2 load of <32 x i16> into two <16 x i16>
; results. Codegen diverges per subtarget: SSE works 16B at a time with
; pslld/psrad + packssdw; AVX2 variants differ in whether the even-lane
; extraction uses pshuflw/pshufhw (SLOW) or a vpshufb mask (FAST/PERLANE);
; AVX512 truncates a zmm via vpmovdw.
133 define void @load_i16_stride2_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1) nounwind {
134 ; SSE-LABEL: load_i16_stride2_vf16:
136 ; SSE-NEXT: movdqa (%rdi), %xmm0
137 ; SSE-NEXT: movdqa 16(%rdi), %xmm2
138 ; SSE-NEXT: movdqa 32(%rdi), %xmm1
139 ; SSE-NEXT: movdqa 48(%rdi), %xmm3
140 ; SSE-NEXT: movdqa %xmm3, %xmm4
141 ; SSE-NEXT: pslld $16, %xmm4
142 ; SSE-NEXT: psrad $16, %xmm4
143 ; SSE-NEXT: movdqa %xmm1, %xmm5
144 ; SSE-NEXT: pslld $16, %xmm5
145 ; SSE-NEXT: psrad $16, %xmm5
146 ; SSE-NEXT: packssdw %xmm4, %xmm5
147 ; SSE-NEXT: movdqa %xmm2, %xmm4
148 ; SSE-NEXT: pslld $16, %xmm4
149 ; SSE-NEXT: psrad $16, %xmm4
150 ; SSE-NEXT: movdqa %xmm0, %xmm6
151 ; SSE-NEXT: pslld $16, %xmm6
152 ; SSE-NEXT: psrad $16, %xmm6
153 ; SSE-NEXT: packssdw %xmm4, %xmm6
154 ; SSE-NEXT: psrad $16, %xmm3
155 ; SSE-NEXT: psrad $16, %xmm1
156 ; SSE-NEXT: packssdw %xmm3, %xmm1
157 ; SSE-NEXT: psrad $16, %xmm2
158 ; SSE-NEXT: psrad $16, %xmm0
159 ; SSE-NEXT: packssdw %xmm2, %xmm0
160 ; SSE-NEXT: movdqa %xmm6, (%rsi)
161 ; SSE-NEXT: movdqa %xmm5, 16(%rsi)
162 ; SSE-NEXT: movdqa %xmm0, (%rdx)
163 ; SSE-NEXT: movdqa %xmm1, 16(%rdx)
166 ; AVX1-ONLY-LABEL: load_i16_stride2_vf16:
167 ; AVX1-ONLY: # %bb.0:
168 ; AVX1-ONLY-NEXT: vpxor %xmm0, %xmm0, %xmm0
169 ; AVX1-ONLY-NEXT: vmovdqa (%rdi), %xmm1
170 ; AVX1-ONLY-NEXT: vmovdqa 16(%rdi), %xmm2
171 ; AVX1-ONLY-NEXT: vmovdqa 32(%rdi), %xmm3
172 ; AVX1-ONLY-NEXT: vmovdqa 48(%rdi), %xmm4
173 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm5 = xmm4[0],xmm0[1],xmm4[2],xmm0[3],xmm4[4],xmm0[5],xmm4[6],xmm0[7]
174 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm6 = xmm3[0],xmm0[1],xmm3[2],xmm0[3],xmm3[4],xmm0[5],xmm3[6],xmm0[7]
175 ; AVX1-ONLY-NEXT: vpackusdw %xmm5, %xmm6, %xmm5
176 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm6 = xmm2[0],xmm0[1],xmm2[2],xmm0[3],xmm2[4],xmm0[5],xmm2[6],xmm0[7]
177 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3],xmm1[4],xmm0[5],xmm1[6],xmm0[7]
178 ; AVX1-ONLY-NEXT: vpackusdw %xmm6, %xmm0, %xmm0
179 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm4, %xmm4
180 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm3, %xmm3
181 ; AVX1-ONLY-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
182 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm2, %xmm2
183 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm1, %xmm1
184 ; AVX1-ONLY-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
185 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, (%rsi)
186 ; AVX1-ONLY-NEXT: vmovdqa %xmm5, 16(%rsi)
187 ; AVX1-ONLY-NEXT: vmovdqa %xmm1, (%rdx)
188 ; AVX1-ONLY-NEXT: vmovdqa %xmm3, 16(%rdx)
189 ; AVX1-ONLY-NEXT: retq
191 ; AVX2-SLOW-LABEL: load_i16_stride2_vf16:
192 ; AVX2-SLOW: # %bb.0:
193 ; AVX2-SLOW-NEXT: vmovdqa (%rdi), %ymm0
194 ; AVX2-SLOW-NEXT: vmovdqa 32(%rdi), %ymm1
195 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} ymm2 = ymm1[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
196 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} ymm2 = ymm2[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15]
197 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} ymm3 = ymm0[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
198 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} ymm3 = ymm3[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15]
199 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm2 = ymm3[0,2],ymm2[0,2],ymm3[4,6],ymm2[4,6]
200 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,1,3]
201 ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31]
202 ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31,u,u,u,u,u,u,u,u]
203 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
204 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
205 ; AVX2-SLOW-NEXT: vmovaps %ymm2, (%rsi)
206 ; AVX2-SLOW-NEXT: vmovdqa %ymm0, (%rdx)
207 ; AVX2-SLOW-NEXT: vzeroupper
208 ; AVX2-SLOW-NEXT: retq
210 ; AVX2-FAST-LABEL: load_i16_stride2_vf16:
211 ; AVX2-FAST: # %bb.0:
212 ; AVX2-FAST-NEXT: vmovdqa (%rdi), %ymm0
213 ; AVX2-FAST-NEXT: vmovdqa 32(%rdi), %ymm1
214 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm2 = <0,1,4,5,u,u,u,u,8,9,12,13,u,u,u,u,16,17,20,21,u,u,u,u,24,25,28,29,u,u,u,u>
215 ; AVX2-FAST-NEXT: vpshufb %ymm2, %ymm1, %ymm3
216 ; AVX2-FAST-NEXT: vpshufb %ymm2, %ymm0, %ymm2
217 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm2 = ymm2[0,2],ymm3[0,2],ymm2[4,6],ymm3[4,6]
218 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,1,3]
219 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31]
220 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31,u,u,u,u,u,u,u,u]
221 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
222 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
223 ; AVX2-FAST-NEXT: vmovaps %ymm2, (%rsi)
224 ; AVX2-FAST-NEXT: vmovdqa %ymm0, (%rdx)
225 ; AVX2-FAST-NEXT: vzeroupper
226 ; AVX2-FAST-NEXT: retq
228 ; AVX2-FAST-PERLANE-LABEL: load_i16_stride2_vf16:
229 ; AVX2-FAST-PERLANE: # %bb.0:
230 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdi), %ymm0
231 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%rdi), %ymm1
232 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm2 = <0,1,4,5,u,u,u,u,8,9,12,13,u,u,u,u,16,17,20,21,u,u,u,u,24,25,28,29,u,u,u,u>
233 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm2, %ymm1, %ymm3
234 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm2, %ymm0, %ymm2
235 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm2 = ymm2[0,2],ymm3[0,2],ymm2[4,6],ymm3[4,6]
236 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,1,3]
237 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31]
238 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31,u,u,u,u,u,u,u,u]
239 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
240 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
241 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm2, (%rsi)
242 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm0, (%rdx)
243 ; AVX2-FAST-PERLANE-NEXT: vzeroupper
244 ; AVX2-FAST-PERLANE-NEXT: retq
246 ; AVX512-LABEL: load_i16_stride2_vf16:
248 ; AVX512-NEXT: vmovdqa64 (%rdi), %zmm0
249 ; AVX512-NEXT: vpsrld $16, %zmm0, %zmm1
250 ; AVX512-NEXT: vpmovdw %zmm0, (%rsi)
251 ; AVX512-NEXT: vpmovdw %zmm1, (%rdx)
252 ; AVX512-NEXT: vzeroupper
; IR under test: wide load, then even/odd index shufflevectors.
254 %wide.vec = load <32 x i16>, ptr %in.vec, align 64
255 %strided.vec0 = shufflevector <32 x i16> %wide.vec, <32 x i16> poison, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
256 %strided.vec1 = shufflevector <32 x i16> %wide.vec, <32 x i16> poison, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
257 store <16 x i16> %strided.vec0, ptr %out.vec0, align 64
258 store <16 x i16> %strided.vec1, ptr %out.vec1, align 64
; vf32: de-interleave a stride-2 load of <64 x i16> into two <32 x i16>
; results. Same per-subtarget strategies as vf16, doubled up; AVX512BW can
; do the whole split with two vpermi2w cross-register word permutes.
262 define void @load_i16_stride2_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1) nounwind {
263 ; SSE-LABEL: load_i16_stride2_vf32:
265 ; SSE-NEXT: movdqa 64(%rdi), %xmm0
266 ; SSE-NEXT: movdqa 80(%rdi), %xmm4
267 ; SSE-NEXT: movdqa 96(%rdi), %xmm1
268 ; SSE-NEXT: movdqa 112(%rdi), %xmm6
269 ; SSE-NEXT: movdqa (%rdi), %xmm2
270 ; SSE-NEXT: movdqa 16(%rdi), %xmm7
271 ; SSE-NEXT: movdqa 32(%rdi), %xmm3
272 ; SSE-NEXT: movdqa 48(%rdi), %xmm9
273 ; SSE-NEXT: movdqa %xmm9, %xmm8
274 ; SSE-NEXT: pslld $16, %xmm8
275 ; SSE-NEXT: psrad $16, %xmm8
276 ; SSE-NEXT: movdqa %xmm3, %xmm5
277 ; SSE-NEXT: pslld $16, %xmm5
278 ; SSE-NEXT: psrad $16, %xmm5
279 ; SSE-NEXT: packssdw %xmm8, %xmm5
280 ; SSE-NEXT: movdqa %xmm7, %xmm10
281 ; SSE-NEXT: pslld $16, %xmm10
282 ; SSE-NEXT: psrad $16, %xmm10
283 ; SSE-NEXT: movdqa %xmm2, %xmm8
284 ; SSE-NEXT: pslld $16, %xmm8
285 ; SSE-NEXT: psrad $16, %xmm8
286 ; SSE-NEXT: packssdw %xmm10, %xmm8
287 ; SSE-NEXT: movdqa %xmm6, %xmm11
288 ; SSE-NEXT: pslld $16, %xmm11
289 ; SSE-NEXT: psrad $16, %xmm11
290 ; SSE-NEXT: movdqa %xmm1, %xmm10
291 ; SSE-NEXT: pslld $16, %xmm10
292 ; SSE-NEXT: psrad $16, %xmm10
293 ; SSE-NEXT: packssdw %xmm11, %xmm10
294 ; SSE-NEXT: movdqa %xmm4, %xmm11
295 ; SSE-NEXT: pslld $16, %xmm11
296 ; SSE-NEXT: psrad $16, %xmm11
297 ; SSE-NEXT: movdqa %xmm0, %xmm12
298 ; SSE-NEXT: pslld $16, %xmm12
299 ; SSE-NEXT: psrad $16, %xmm12
300 ; SSE-NEXT: packssdw %xmm11, %xmm12
301 ; SSE-NEXT: psrad $16, %xmm9
302 ; SSE-NEXT: psrad $16, %xmm3
303 ; SSE-NEXT: packssdw %xmm9, %xmm3
304 ; SSE-NEXT: psrad $16, %xmm7
305 ; SSE-NEXT: psrad $16, %xmm2
306 ; SSE-NEXT: packssdw %xmm7, %xmm2
307 ; SSE-NEXT: psrad $16, %xmm6
308 ; SSE-NEXT: psrad $16, %xmm1
309 ; SSE-NEXT: packssdw %xmm6, %xmm1
310 ; SSE-NEXT: psrad $16, %xmm4
311 ; SSE-NEXT: psrad $16, %xmm0
312 ; SSE-NEXT: packssdw %xmm4, %xmm0
313 ; SSE-NEXT: movdqa %xmm12, 32(%rsi)
314 ; SSE-NEXT: movdqa %xmm10, 48(%rsi)
315 ; SSE-NEXT: movdqa %xmm8, (%rsi)
316 ; SSE-NEXT: movdqa %xmm5, 16(%rsi)
317 ; SSE-NEXT: movdqa %xmm0, 32(%rdx)
318 ; SSE-NEXT: movdqa %xmm1, 48(%rdx)
319 ; SSE-NEXT: movdqa %xmm2, (%rdx)
320 ; SSE-NEXT: movdqa %xmm3, 16(%rdx)
323 ; AVX1-ONLY-LABEL: load_i16_stride2_vf32:
324 ; AVX1-ONLY: # %bb.0:
325 ; AVX1-ONLY-NEXT: vpxor %xmm0, %xmm0, %xmm0
326 ; AVX1-ONLY-NEXT: vmovdqa (%rdi), %xmm1
327 ; AVX1-ONLY-NEXT: vmovdqa 16(%rdi), %xmm2
328 ; AVX1-ONLY-NEXT: vmovdqa 32(%rdi), %xmm3
329 ; AVX1-ONLY-NEXT: vmovdqa 48(%rdi), %xmm4
330 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm5 = xmm4[0],xmm0[1],xmm4[2],xmm0[3],xmm4[4],xmm0[5],xmm4[6],xmm0[7]
331 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm6 = xmm3[0],xmm0[1],xmm3[2],xmm0[3],xmm3[4],xmm0[5],xmm3[6],xmm0[7]
332 ; AVX1-ONLY-NEXT: vpackusdw %xmm5, %xmm6, %xmm5
333 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm6 = xmm2[0],xmm0[1],xmm2[2],xmm0[3],xmm2[4],xmm0[5],xmm2[6],xmm0[7]
334 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm7 = xmm1[0],xmm0[1],xmm1[2],xmm0[3],xmm1[4],xmm0[5],xmm1[6],xmm0[7]
335 ; AVX1-ONLY-NEXT: vpackusdw %xmm6, %xmm7, %xmm6
336 ; AVX1-ONLY-NEXT: vmovdqa 112(%rdi), %xmm7
337 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm8 = xmm7[0],xmm0[1],xmm7[2],xmm0[3],xmm7[4],xmm0[5],xmm7[6],xmm0[7]
338 ; AVX1-ONLY-NEXT: vmovdqa 96(%rdi), %xmm9
339 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm10 = xmm9[0],xmm0[1],xmm9[2],xmm0[3],xmm9[4],xmm0[5],xmm9[6],xmm0[7]
340 ; AVX1-ONLY-NEXT: vpackusdw %xmm8, %xmm10, %xmm8
341 ; AVX1-ONLY-NEXT: vmovdqa 80(%rdi), %xmm10
342 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm11 = xmm10[0],xmm0[1],xmm10[2],xmm0[3],xmm10[4],xmm0[5],xmm10[6],xmm0[7]
343 ; AVX1-ONLY-NEXT: vmovdqa 64(%rdi), %xmm12
344 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm0 = xmm12[0],xmm0[1],xmm12[2],xmm0[3],xmm12[4],xmm0[5],xmm12[6],xmm0[7]
345 ; AVX1-ONLY-NEXT: vpackusdw %xmm11, %xmm0, %xmm0
346 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm4, %xmm4
347 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm3, %xmm3
348 ; AVX1-ONLY-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
349 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm2, %xmm2
350 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm1, %xmm1
351 ; AVX1-ONLY-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
352 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm7, %xmm2
353 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm9, %xmm4
354 ; AVX1-ONLY-NEXT: vpackusdw %xmm2, %xmm4, %xmm2
355 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm10, %xmm4
356 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm12, %xmm7
357 ; AVX1-ONLY-NEXT: vpackusdw %xmm4, %xmm7, %xmm4
358 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, 32(%rsi)
359 ; AVX1-ONLY-NEXT: vmovdqa %xmm8, 48(%rsi)
360 ; AVX1-ONLY-NEXT: vmovdqa %xmm6, (%rsi)
361 ; AVX1-ONLY-NEXT: vmovdqa %xmm5, 16(%rsi)
362 ; AVX1-ONLY-NEXT: vmovdqa %xmm4, 32(%rdx)
363 ; AVX1-ONLY-NEXT: vmovdqa %xmm2, 48(%rdx)
364 ; AVX1-ONLY-NEXT: vmovdqa %xmm1, (%rdx)
365 ; AVX1-ONLY-NEXT: vmovdqa %xmm3, 16(%rdx)
366 ; AVX1-ONLY-NEXT: retq
368 ; AVX2-SLOW-LABEL: load_i16_stride2_vf32:
369 ; AVX2-SLOW: # %bb.0:
370 ; AVX2-SLOW-NEXT: vmovdqa (%rdi), %ymm0
371 ; AVX2-SLOW-NEXT: vmovdqa 32(%rdi), %ymm1
372 ; AVX2-SLOW-NEXT: vmovdqa 64(%rdi), %ymm2
373 ; AVX2-SLOW-NEXT: vmovdqa 96(%rdi), %ymm3
374 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} ymm4 = ymm3[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
375 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} ymm4 = ymm4[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15]
376 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} ymm5 = ymm2[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
377 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} ymm5 = ymm5[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15]
378 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm4 = ymm5[0,2],ymm4[0,2],ymm5[4,6],ymm4[4,6]
379 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,2,1,3]
380 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} ymm5 = ymm1[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
381 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} ymm5 = ymm5[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15]
382 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} ymm6 = ymm0[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
383 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} ymm6 = ymm6[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15]
384 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm5 = ymm6[0,2],ymm5[0,2],ymm6[4,6],ymm5[4,6]
385 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,2,1,3]
386 ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm6 = <u,u,u,u,u,u,u,u,2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31>
387 ; AVX2-SLOW-NEXT: vpshufb %ymm6, %ymm3, %ymm3
388 ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm7 = <2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31,u,u,u,u,u,u,u,u>
389 ; AVX2-SLOW-NEXT: vpshufb %ymm7, %ymm2, %ymm2
390 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3],ymm2[4,5],ymm3[6,7]
391 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,1,3]
392 ; AVX2-SLOW-NEXT: vpshufb %ymm6, %ymm1, %ymm1
393 ; AVX2-SLOW-NEXT: vpshufb %ymm7, %ymm0, %ymm0
394 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
395 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
396 ; AVX2-SLOW-NEXT: vmovaps %ymm5, (%rsi)
397 ; AVX2-SLOW-NEXT: vmovaps %ymm4, 32(%rsi)
398 ; AVX2-SLOW-NEXT: vmovdqa %ymm0, (%rdx)
399 ; AVX2-SLOW-NEXT: vmovdqa %ymm2, 32(%rdx)
400 ; AVX2-SLOW-NEXT: vzeroupper
401 ; AVX2-SLOW-NEXT: retq
403 ; AVX2-FAST-LABEL: load_i16_stride2_vf32:
404 ; AVX2-FAST: # %bb.0:
405 ; AVX2-FAST-NEXT: vmovdqa (%rdi), %ymm0
406 ; AVX2-FAST-NEXT: vmovdqa 32(%rdi), %ymm1
407 ; AVX2-FAST-NEXT: vmovdqa 64(%rdi), %ymm2
408 ; AVX2-FAST-NEXT: vmovdqa 96(%rdi), %ymm3
409 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm4 = <0,1,4,5,u,u,u,u,8,9,12,13,u,u,u,u,16,17,20,21,u,u,u,u,24,25,28,29,u,u,u,u>
410 ; AVX2-FAST-NEXT: vpshufb %ymm4, %ymm3, %ymm5
411 ; AVX2-FAST-NEXT: vpshufb %ymm4, %ymm2, %ymm6
412 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm5 = ymm6[0,2],ymm5[0,2],ymm6[4,6],ymm5[4,6]
413 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,2,1,3]
414 ; AVX2-FAST-NEXT: vpshufb %ymm4, %ymm1, %ymm6
415 ; AVX2-FAST-NEXT: vpshufb %ymm4, %ymm0, %ymm4
416 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm4 = ymm4[0,2],ymm6[0,2],ymm4[4,6],ymm6[4,6]
417 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,2,1,3]
418 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm6 = <u,u,u,u,u,u,u,u,2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31>
419 ; AVX2-FAST-NEXT: vpshufb %ymm6, %ymm3, %ymm3
420 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm7 = <2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31,u,u,u,u,u,u,u,u>
421 ; AVX2-FAST-NEXT: vpshufb %ymm7, %ymm2, %ymm2
422 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3],ymm2[4,5],ymm3[6,7]
423 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,1,3]
424 ; AVX2-FAST-NEXT: vpshufb %ymm6, %ymm1, %ymm1
425 ; AVX2-FAST-NEXT: vpshufb %ymm7, %ymm0, %ymm0
426 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
427 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
428 ; AVX2-FAST-NEXT: vmovaps %ymm4, (%rsi)
429 ; AVX2-FAST-NEXT: vmovaps %ymm5, 32(%rsi)
430 ; AVX2-FAST-NEXT: vmovdqa %ymm0, (%rdx)
431 ; AVX2-FAST-NEXT: vmovdqa %ymm2, 32(%rdx)
432 ; AVX2-FAST-NEXT: vzeroupper
433 ; AVX2-FAST-NEXT: retq
435 ; AVX2-FAST-PERLANE-LABEL: load_i16_stride2_vf32:
436 ; AVX2-FAST-PERLANE: # %bb.0:
437 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdi), %ymm0
438 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%rdi), %ymm1
439 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 64(%rdi), %ymm2
440 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 96(%rdi), %ymm3
441 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm4 = <0,1,4,5,u,u,u,u,8,9,12,13,u,u,u,u,16,17,20,21,u,u,u,u,24,25,28,29,u,u,u,u>
442 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm4, %ymm3, %ymm5
443 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm4, %ymm2, %ymm6
444 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm5 = ymm6[0,2],ymm5[0,2],ymm6[4,6],ymm5[4,6]
445 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,2,1,3]
446 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm4, %ymm1, %ymm6
447 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm4, %ymm0, %ymm4
448 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm4 = ymm4[0,2],ymm6[0,2],ymm4[4,6],ymm6[4,6]
449 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,2,1,3]
450 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm6 = <u,u,u,u,u,u,u,u,2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31>
451 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm6, %ymm3, %ymm3
452 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm7 = <2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31,u,u,u,u,u,u,u,u>
453 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm7, %ymm2, %ymm2
454 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3],ymm2[4,5],ymm3[6,7]
455 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,1,3]
456 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm6, %ymm1, %ymm1
457 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm7, %ymm0, %ymm0
458 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
459 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
460 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm4, (%rsi)
461 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm5, 32(%rsi)
462 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm0, (%rdx)
463 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm2, 32(%rdx)
464 ; AVX2-FAST-PERLANE-NEXT: vzeroupper
465 ; AVX2-FAST-PERLANE-NEXT: retq
467 ; AVX512F-LABEL: load_i16_stride2_vf32:
469 ; AVX512F-NEXT: vmovdqa64 (%rdi), %zmm0
470 ; AVX512F-NEXT: vmovdqa64 64(%rdi), %zmm1
471 ; AVX512F-NEXT: vpsrld $16, %zmm0, %zmm2
472 ; AVX512F-NEXT: vpsrld $16, %zmm1, %zmm3
473 ; AVX512F-NEXT: vpmovdw %zmm1, 32(%rsi)
474 ; AVX512F-NEXT: vpmovdw %zmm0, (%rsi)
475 ; AVX512F-NEXT: vpmovdw %zmm3, 32(%rdx)
476 ; AVX512F-NEXT: vpmovdw %zmm2, (%rdx)
477 ; AVX512F-NEXT: vzeroupper
480 ; AVX512BW-LABEL: load_i16_stride2_vf32:
482 ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
483 ; AVX512BW-NEXT: vmovdqa64 64(%rdi), %zmm1
484 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30,32,34,36,38,40,42,44,46,48,50,52,54,56,58,60,62]
485 ; AVX512BW-NEXT: vpermi2w %zmm1, %zmm0, %zmm2
486 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31,33,35,37,39,41,43,45,47,49,51,53,55,57,59,61,63]
487 ; AVX512BW-NEXT: vpermi2w %zmm1, %zmm0, %zmm3
488 ; AVX512BW-NEXT: vmovdqa64 %zmm2, (%rsi)
489 ; AVX512BW-NEXT: vmovdqa64 %zmm3, (%rdx)
490 ; AVX512BW-NEXT: vzeroupper
491 ; AVX512BW-NEXT: retq
; IR under test: wide load, then even/odd index shufflevectors.
492 %wide.vec = load <64 x i16>, ptr %in.vec, align 64
493 %strided.vec0 = shufflevector <64 x i16> %wide.vec, <64 x i16> poison, <32 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30, i32 32, i32 34, i32 36, i32 38, i32 40, i32 42, i32 44, i32 46, i32 48, i32 50, i32 52, i32 54, i32 56, i32 58, i32 60, i32 62>
494 %strided.vec1 = shufflevector <64 x i16> %wide.vec, <64 x i16> poison, <32 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31, i32 33, i32 35, i32 37, i32 39, i32 41, i32 43, i32 45, i32 47, i32 49, i32 51, i32 53, i32 55, i32 57, i32 59, i32 61, i32 63>
495 store <32 x i16> %strided.vec0, ptr %out.vec0, align 64
496 store <32 x i16> %strided.vec1, ptr %out.vec1, align 64
500 define void @load_i16_stride2_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1) nounwind {
501 ; SSE-LABEL: load_i16_stride2_vf64:
503 ; SSE-NEXT: subq $40, %rsp
504 ; SSE-NEXT: movdqa 96(%rdi), %xmm13
505 ; SSE-NEXT: movdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
506 ; SSE-NEXT: movdqa 112(%rdi), %xmm3
507 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
508 ; SSE-NEXT: movdqa 128(%rdi), %xmm11
509 ; SSE-NEXT: movdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
510 ; SSE-NEXT: movdqa 144(%rdi), %xmm2
511 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
512 ; SSE-NEXT: movdqa 160(%rdi), %xmm10
513 ; SSE-NEXT: movdqa %xmm10, (%rsp) # 16-byte Spill
514 ; SSE-NEXT: movdqa 176(%rdi), %xmm4
515 ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
516 ; SSE-NEXT: movdqa (%rdi), %xmm9
517 ; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
518 ; SSE-NEXT: movdqa 16(%rdi), %xmm1
519 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
520 ; SSE-NEXT: movdqa 32(%rdi), %xmm12
521 ; SSE-NEXT: movdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
522 ; SSE-NEXT: movdqa 48(%rdi), %xmm14
523 ; SSE-NEXT: movdqa %xmm14, %xmm0
524 ; SSE-NEXT: pslld $16, %xmm0
525 ; SSE-NEXT: psrad $16, %xmm0
526 ; SSE-NEXT: pslld $16, %xmm12
527 ; SSE-NEXT: psrad $16, %xmm12
528 ; SSE-NEXT: packssdw %xmm0, %xmm12
529 ; SSE-NEXT: movdqa %xmm4, %xmm0
530 ; SSE-NEXT: pslld $16, %xmm0
531 ; SSE-NEXT: psrad $16, %xmm0
532 ; SSE-NEXT: pslld $16, %xmm10
533 ; SSE-NEXT: psrad $16, %xmm10
534 ; SSE-NEXT: packssdw %xmm0, %xmm10
535 ; SSE-NEXT: movdqa %xmm1, %xmm0
536 ; SSE-NEXT: pslld $16, %xmm0
537 ; SSE-NEXT: psrad $16, %xmm0
538 ; SSE-NEXT: pslld $16, %xmm9
539 ; SSE-NEXT: psrad $16, %xmm9
540 ; SSE-NEXT: packssdw %xmm0, %xmm9
541 ; SSE-NEXT: movdqa %xmm2, %xmm0
542 ; SSE-NEXT: pslld $16, %xmm0
543 ; SSE-NEXT: psrad $16, %xmm0
544 ; SSE-NEXT: pslld $16, %xmm11
545 ; SSE-NEXT: psrad $16, %xmm11
546 ; SSE-NEXT: packssdw %xmm0, %xmm11
547 ; SSE-NEXT: movdqa %xmm3, %xmm0
548 ; SSE-NEXT: pslld $16, %xmm0
549 ; SSE-NEXT: psrad $16, %xmm0
550 ; SSE-NEXT: pslld $16, %xmm13
551 ; SSE-NEXT: psrad $16, %xmm13
552 ; SSE-NEXT: packssdw %xmm0, %xmm13
553 ; SSE-NEXT: movdqa 240(%rdi), %xmm0
554 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
555 ; SSE-NEXT: pslld $16, %xmm0
556 ; SSE-NEXT: psrad $16, %xmm0
557 ; SSE-NEXT: movdqa 224(%rdi), %xmm7
558 ; SSE-NEXT: movdqa %xmm7, %xmm15
559 ; SSE-NEXT: pslld $16, %xmm15
560 ; SSE-NEXT: psrad $16, %xmm15
561 ; SSE-NEXT: packssdw %xmm0, %xmm15
562 ; SSE-NEXT: movdqa 80(%rdi), %xmm3
563 ; SSE-NEXT: movdqa %xmm3, %xmm1
564 ; SSE-NEXT: pslld $16, %xmm1
565 ; SSE-NEXT: psrad $16, %xmm1
566 ; SSE-NEXT: movdqa 64(%rdi), %xmm5
567 ; SSE-NEXT: movdqa %xmm5, %xmm4
568 ; SSE-NEXT: pslld $16, %xmm4
569 ; SSE-NEXT: psrad $16, %xmm4
570 ; SSE-NEXT: packssdw %xmm1, %xmm4
571 ; SSE-NEXT: movdqa 208(%rdi), %xmm8
572 ; SSE-NEXT: movdqa %xmm8, %xmm6
573 ; SSE-NEXT: pslld $16, %xmm6
574 ; SSE-NEXT: psrad $16, %xmm6
575 ; SSE-NEXT: movdqa 192(%rdi), %xmm2
576 ; SSE-NEXT: movdqa %xmm2, %xmm1
577 ; SSE-NEXT: pslld $16, %xmm1
578 ; SSE-NEXT: psrad $16, %xmm1
579 ; SSE-NEXT: packssdw %xmm6, %xmm1
580 ; SSE-NEXT: psrad $16, %xmm14
581 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
582 ; SSE-NEXT: psrad $16, %xmm0
583 ; SSE-NEXT: packssdw %xmm14, %xmm0
584 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
585 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
586 ; SSE-NEXT: psrad $16, %xmm0
587 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
588 ; SSE-NEXT: psrad $16, %xmm6
589 ; SSE-NEXT: packssdw %xmm0, %xmm6
590 ; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
591 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
592 ; SSE-NEXT: psrad $16, %xmm0
593 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
594 ; SSE-NEXT: psrad $16, %xmm14
595 ; SSE-NEXT: packssdw %xmm0, %xmm14
596 ; SSE-NEXT: psrad $16, %xmm3
597 ; SSE-NEXT: psrad $16, %xmm5
598 ; SSE-NEXT: packssdw %xmm3, %xmm5
599 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
600 ; SSE-NEXT: psrad $16, %xmm0
601 ; SSE-NEXT: movdqa (%rsp), %xmm6 # 16-byte Reload
602 ; SSE-NEXT: psrad $16, %xmm6
603 ; SSE-NEXT: packssdw %xmm0, %xmm6
604 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
605 ; SSE-NEXT: psrad $16, %xmm0
606 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
607 ; SSE-NEXT: psrad $16, %xmm3
608 ; SSE-NEXT: packssdw %xmm0, %xmm3
609 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
610 ; SSE-NEXT: psrad $16, %xmm0
611 ; SSE-NEXT: psrad $16, %xmm7
612 ; SSE-NEXT: packssdw %xmm0, %xmm7
613 ; SSE-NEXT: psrad $16, %xmm8
614 ; SSE-NEXT: psrad $16, %xmm2
615 ; SSE-NEXT: packssdw %xmm8, %xmm2
616 ; SSE-NEXT: movdqa %xmm1, 96(%rsi)
617 ; SSE-NEXT: movdqa %xmm4, 32(%rsi)
618 ; SSE-NEXT: movdqa %xmm15, 112(%rsi)
619 ; SSE-NEXT: movdqa %xmm13, 48(%rsi)
620 ; SSE-NEXT: movdqa %xmm11, 64(%rsi)
621 ; SSE-NEXT: movdqa %xmm9, (%rsi)
622 ; SSE-NEXT: movdqa %xmm10, 80(%rsi)
623 ; SSE-NEXT: movdqa %xmm12, 16(%rsi)
624 ; SSE-NEXT: movdqa %xmm2, 96(%rdx)
625 ; SSE-NEXT: movdqa %xmm7, 112(%rdx)
626 ; SSE-NEXT: movdqa %xmm3, 64(%rdx)
627 ; SSE-NEXT: movdqa %xmm6, 80(%rdx)
628 ; SSE-NEXT: movdqa %xmm5, 32(%rdx)
629 ; SSE-NEXT: movdqa %xmm14, 48(%rdx)
630 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
631 ; SSE-NEXT: movaps %xmm0, (%rdx)
632 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
633 ; SSE-NEXT: movaps %xmm0, 16(%rdx)
634 ; SSE-NEXT: addq $40, %rsp
637 ; AVX1-ONLY-LABEL: load_i16_stride2_vf64:
638 ; AVX1-ONLY: # %bb.0:
639 ; AVX1-ONLY-NEXT: subq $24, %rsp
640 ; AVX1-ONLY-NEXT: vpxor %xmm11, %xmm11, %xmm11
641 ; AVX1-ONLY-NEXT: vmovdqa 176(%rdi), %xmm0
642 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
643 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm11[1],xmm0[2],xmm11[3],xmm0[4],xmm11[5],xmm0[6],xmm11[7]
644 ; AVX1-ONLY-NEXT: vmovdqa 160(%rdi), %xmm9
645 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm1 = xmm9[0],xmm11[1],xmm9[2],xmm11[3],xmm9[4],xmm11[5],xmm9[6],xmm11[7]
646 ; AVX1-ONLY-NEXT: vpackusdw %xmm0, %xmm1, %xmm0
647 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
648 ; AVX1-ONLY-NEXT: vmovdqa (%rdi), %xmm6
649 ; AVX1-ONLY-NEXT: vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
650 ; AVX1-ONLY-NEXT: vmovdqa 16(%rdi), %xmm8
651 ; AVX1-ONLY-NEXT: vmovdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
652 ; AVX1-ONLY-NEXT: vmovdqa 32(%rdi), %xmm7
653 ; AVX1-ONLY-NEXT: vmovdqa 48(%rdi), %xmm10
654 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm0 = xmm10[0],xmm11[1],xmm10[2],xmm11[3],xmm10[4],xmm11[5],xmm10[6],xmm11[7]
655 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm1 = xmm7[0],xmm11[1],xmm7[2],xmm11[3],xmm7[4],xmm11[5],xmm7[6],xmm11[7]
656 ; AVX1-ONLY-NEXT: vpackusdw %xmm0, %xmm1, %xmm0
657 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
658 ; AVX1-ONLY-NEXT: vmovdqa 144(%rdi), %xmm5
659 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm2 = xmm5[0],xmm11[1],xmm5[2],xmm11[3],xmm5[4],xmm11[5],xmm5[6],xmm11[7]
660 ; AVX1-ONLY-NEXT: vmovdqa 128(%rdi), %xmm4
661 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0],xmm11[1],xmm4[2],xmm11[3],xmm4[4],xmm11[5],xmm4[6],xmm11[7]
662 ; AVX1-ONLY-NEXT: vpackusdw %xmm2, %xmm3, %xmm0
663 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
664 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm2 = xmm8[0],xmm11[1],xmm8[2],xmm11[3],xmm8[4],xmm11[5],xmm8[6],xmm11[7]
665 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm3 = xmm6[0],xmm11[1],xmm6[2],xmm11[3],xmm6[4],xmm11[5],xmm6[6],xmm11[7]
666 ; AVX1-ONLY-NEXT: vpackusdw %xmm2, %xmm3, %xmm0
667 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
668 ; AVX1-ONLY-NEXT: vmovdqa 112(%rdi), %xmm12
669 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm2 = xmm12[0],xmm11[1],xmm12[2],xmm11[3],xmm12[4],xmm11[5],xmm12[6],xmm11[7]
670 ; AVX1-ONLY-NEXT: vmovdqa 96(%rdi), %xmm13
671 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm8 = xmm13[0],xmm11[1],xmm13[2],xmm11[3],xmm13[4],xmm11[5],xmm13[6],xmm11[7]
672 ; AVX1-ONLY-NEXT: vpackusdw %xmm2, %xmm8, %xmm0
673 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
674 ; AVX1-ONLY-NEXT: vmovdqa 80(%rdi), %xmm15
675 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm14 = xmm15[0],xmm11[1],xmm15[2],xmm11[3],xmm15[4],xmm11[5],xmm15[6],xmm11[7]
676 ; AVX1-ONLY-NEXT: vmovdqa 64(%rdi), %xmm6
677 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm3 = xmm6[0],xmm11[1],xmm6[2],xmm11[3],xmm6[4],xmm11[5],xmm6[6],xmm11[7]
678 ; AVX1-ONLY-NEXT: vpackusdw %xmm14, %xmm3, %xmm0
679 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
680 ; AVX1-ONLY-NEXT: vmovdqa 240(%rdi), %xmm3
681 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0],xmm11[1],xmm3[2],xmm11[3],xmm3[4],xmm11[5],xmm3[6],xmm11[7]
682 ; AVX1-ONLY-NEXT: vmovdqa 224(%rdi), %xmm0
683 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm8 = xmm0[0],xmm11[1],xmm0[2],xmm11[3],xmm0[4],xmm11[5],xmm0[6],xmm11[7]
684 ; AVX1-ONLY-NEXT: vpackusdw %xmm1, %xmm8, %xmm14
685 ; AVX1-ONLY-NEXT: vmovdqa 208(%rdi), %xmm8
686 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm1 = xmm8[0],xmm11[1],xmm8[2],xmm11[3],xmm8[4],xmm11[5],xmm8[6],xmm11[7]
687 ; AVX1-ONLY-NEXT: vmovdqa 192(%rdi), %xmm2
688 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm11 = xmm2[0],xmm11[1],xmm2[2],xmm11[3],xmm2[4],xmm11[5],xmm2[6],xmm11[7]
689 ; AVX1-ONLY-NEXT: vpackusdw %xmm1, %xmm11, %xmm1
690 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm3, %xmm3
691 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm0, %xmm0
692 ; AVX1-ONLY-NEXT: vpackusdw %xmm3, %xmm0, %xmm11
693 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
694 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm0, %xmm0
695 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm9, %xmm3
696 ; AVX1-ONLY-NEXT: vpackusdw %xmm0, %xmm3, %xmm0
697 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm5, %xmm3
698 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm4, %xmm4
699 ; AVX1-ONLY-NEXT: vpackusdw %xmm3, %xmm4, %xmm3
700 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm10, %xmm4
701 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm7, %xmm5
702 ; AVX1-ONLY-NEXT: vpackusdw %xmm4, %xmm5, %xmm4
703 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
704 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm5, %xmm5
705 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
706 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm7, %xmm7
707 ; AVX1-ONLY-NEXT: vpackusdw %xmm5, %xmm7, %xmm5
708 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm8, %xmm7
709 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm2, %xmm2
710 ; AVX1-ONLY-NEXT: vpackusdw %xmm7, %xmm2, %xmm2
711 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm12, %xmm7
712 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm13, %xmm8
713 ; AVX1-ONLY-NEXT: vpackusdw %xmm7, %xmm8, %xmm7
714 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm15, %xmm8
715 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm6, %xmm6
716 ; AVX1-ONLY-NEXT: vpackusdw %xmm8, %xmm6, %xmm6
717 ; AVX1-ONLY-NEXT: vmovdqa %xmm1, 96(%rsi)
718 ; AVX1-ONLY-NEXT: vmovdqa %xmm14, 112(%rsi)
719 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
720 ; AVX1-ONLY-NEXT: vmovaps %xmm1, 32(%rsi)
721 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
722 ; AVX1-ONLY-NEXT: vmovaps %xmm1, 48(%rsi)
723 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
724 ; AVX1-ONLY-NEXT: vmovaps %xmm1, (%rsi)
725 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
726 ; AVX1-ONLY-NEXT: vmovaps %xmm1, 64(%rsi)
727 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
728 ; AVX1-ONLY-NEXT: vmovaps %xmm1, 16(%rsi)
729 ; AVX1-ONLY-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
730 ; AVX1-ONLY-NEXT: vmovaps %xmm1, 80(%rsi)
731 ; AVX1-ONLY-NEXT: vmovdqa %xmm6, 32(%rdx)
732 ; AVX1-ONLY-NEXT: vmovdqa %xmm7, 48(%rdx)
733 ; AVX1-ONLY-NEXT: vmovdqa %xmm2, 96(%rdx)
734 ; AVX1-ONLY-NEXT: vmovdqa %xmm5, (%rdx)
735 ; AVX1-ONLY-NEXT: vmovdqa %xmm4, 16(%rdx)
736 ; AVX1-ONLY-NEXT: vmovdqa %xmm3, 64(%rdx)
737 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, 80(%rdx)
738 ; AVX1-ONLY-NEXT: vmovdqa %xmm11, 112(%rdx)
739 ; AVX1-ONLY-NEXT: addq $24, %rsp
740 ; AVX1-ONLY-NEXT: retq
742 ; AVX2-SLOW-LABEL: load_i16_stride2_vf64:
743 ; AVX2-SLOW: # %bb.0:
744 ; AVX2-SLOW-NEXT: vmovdqa 128(%rdi), %ymm0
745 ; AVX2-SLOW-NEXT: vmovdqa 160(%rdi), %ymm1
746 ; AVX2-SLOW-NEXT: vmovdqa 192(%rdi), %ymm4
747 ; AVX2-SLOW-NEXT: vmovdqa 224(%rdi), %ymm6
748 ; AVX2-SLOW-NEXT: vmovdqa (%rdi), %ymm3
749 ; AVX2-SLOW-NEXT: vmovdqa 32(%rdi), %ymm5
750 ; AVX2-SLOW-NEXT: vmovdqa 64(%rdi), %ymm8
751 ; AVX2-SLOW-NEXT: vmovdqa 96(%rdi), %ymm9
752 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} ymm2 = ymm9[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
753 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} ymm2 = ymm2[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15]
754 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} ymm7 = ymm8[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
755 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} ymm7 = ymm7[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15]
756 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm2 = ymm7[0,2],ymm2[0,2],ymm7[4,6],ymm2[4,6]
757 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,1,3]
758 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} ymm7 = ymm6[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
759 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} ymm7 = ymm7[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15]
760 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} ymm10 = ymm4[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
761 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} ymm10 = ymm10[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15]
762 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm7 = ymm10[0,2],ymm7[0,2],ymm10[4,6],ymm7[4,6]
763 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[0,2,1,3]
764 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} ymm10 = ymm5[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
765 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} ymm10 = ymm10[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15]
766 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} ymm11 = ymm3[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
767 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} ymm11 = ymm11[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15]
768 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm10 = ymm11[0,2],ymm10[0,2],ymm11[4,6],ymm10[4,6]
769 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[0,2,1,3]
770 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} ymm11 = ymm1[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
771 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} ymm11 = ymm11[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15]
772 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} ymm12 = ymm0[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
773 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} ymm12 = ymm12[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15]
774 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm11 = ymm12[0,2],ymm11[0,2],ymm12[4,6],ymm11[4,6]
775 ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[0,2,1,3]
776 ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm12 = <u,u,u,u,u,u,u,u,2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31>
777 ; AVX2-SLOW-NEXT: vpshufb %ymm12, %ymm9, %ymm9
778 ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm13 = <2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31,u,u,u,u,u,u,u,u>
779 ; AVX2-SLOW-NEXT: vpshufb %ymm13, %ymm8, %ymm8
780 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1],ymm9[2,3],ymm8[4,5],ymm9[6,7]
781 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,2,1,3]
782 ; AVX2-SLOW-NEXT: vpshufb %ymm12, %ymm6, %ymm6
783 ; AVX2-SLOW-NEXT: vpshufb %ymm13, %ymm4, %ymm4
784 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1],ymm6[2,3],ymm4[4,5],ymm6[6,7]
785 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,2,1,3]
786 ; AVX2-SLOW-NEXT: vpshufb %ymm12, %ymm5, %ymm5
787 ; AVX2-SLOW-NEXT: vpshufb %ymm13, %ymm3, %ymm3
788 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm5[2,3],ymm3[4,5],ymm5[6,7]
789 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,2,1,3]
790 ; AVX2-SLOW-NEXT: vpshufb %ymm12, %ymm1, %ymm1
791 ; AVX2-SLOW-NEXT: vpshufb %ymm13, %ymm0, %ymm0
792 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
793 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
794 ; AVX2-SLOW-NEXT: vmovaps %ymm11, 64(%rsi)
795 ; AVX2-SLOW-NEXT: vmovaps %ymm10, (%rsi)
796 ; AVX2-SLOW-NEXT: vmovaps %ymm7, 96(%rsi)
797 ; AVX2-SLOW-NEXT: vmovaps %ymm2, 32(%rsi)
798 ; AVX2-SLOW-NEXT: vmovdqa %ymm0, 64(%rdx)
799 ; AVX2-SLOW-NEXT: vmovdqa %ymm3, (%rdx)
800 ; AVX2-SLOW-NEXT: vmovdqa %ymm4, 96(%rdx)
801 ; AVX2-SLOW-NEXT: vmovdqa %ymm8, 32(%rdx)
802 ; AVX2-SLOW-NEXT: vzeroupper
803 ; AVX2-SLOW-NEXT: retq
805 ; AVX2-FAST-LABEL: load_i16_stride2_vf64:
806 ; AVX2-FAST: # %bb.0:
807 ; AVX2-FAST-NEXT: vmovdqa 128(%rdi), %ymm0
808 ; AVX2-FAST-NEXT: vmovdqa 160(%rdi), %ymm1
809 ; AVX2-FAST-NEXT: vmovdqa 192(%rdi), %ymm3
810 ; AVX2-FAST-NEXT: vmovdqa 224(%rdi), %ymm4
811 ; AVX2-FAST-NEXT: vmovdqa (%rdi), %ymm5
812 ; AVX2-FAST-NEXT: vmovdqa 32(%rdi), %ymm6
813 ; AVX2-FAST-NEXT: vmovdqa 64(%rdi), %ymm7
814 ; AVX2-FAST-NEXT: vmovdqa 96(%rdi), %ymm8
815 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm9 = <0,1,4,5,u,u,u,u,8,9,12,13,u,u,u,u,16,17,20,21,u,u,u,u,24,25,28,29,u,u,u,u>
816 ; AVX2-FAST-NEXT: vpshufb %ymm9, %ymm8, %ymm2
817 ; AVX2-FAST-NEXT: vpshufb %ymm9, %ymm7, %ymm10
818 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm2 = ymm10[0,2],ymm2[0,2],ymm10[4,6],ymm2[4,6]
819 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,1,3]
820 ; AVX2-FAST-NEXT: vpshufb %ymm9, %ymm4, %ymm10
821 ; AVX2-FAST-NEXT: vpshufb %ymm9, %ymm3, %ymm11
822 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm10 = ymm11[0,2],ymm10[0,2],ymm11[4,6],ymm10[4,6]
823 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[0,2,1,3]
824 ; AVX2-FAST-NEXT: vpshufb %ymm9, %ymm6, %ymm11
825 ; AVX2-FAST-NEXT: vpshufb %ymm9, %ymm5, %ymm12
826 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm11 = ymm12[0,2],ymm11[0,2],ymm12[4,6],ymm11[4,6]
827 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[0,2,1,3]
828 ; AVX2-FAST-NEXT: vpshufb %ymm9, %ymm1, %ymm12
829 ; AVX2-FAST-NEXT: vpshufb %ymm9, %ymm0, %ymm9
830 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm9 = ymm9[0,2],ymm12[0,2],ymm9[4,6],ymm12[4,6]
831 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm9 = ymm9[0,2,1,3]
832 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm12 = <u,u,u,u,u,u,u,u,2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31>
833 ; AVX2-FAST-NEXT: vpshufb %ymm12, %ymm8, %ymm8
834 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm13 = <2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31,u,u,u,u,u,u,u,u>
835 ; AVX2-FAST-NEXT: vpshufb %ymm13, %ymm7, %ymm7
836 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1],ymm8[2,3],ymm7[4,5],ymm8[6,7]
837 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,2,1,3]
838 ; AVX2-FAST-NEXT: vpshufb %ymm12, %ymm4, %ymm4
839 ; AVX2-FAST-NEXT: vpshufb %ymm13, %ymm3, %ymm3
840 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm4[2,3],ymm3[4,5],ymm4[6,7]
841 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,2,1,3]
842 ; AVX2-FAST-NEXT: vpshufb %ymm12, %ymm6, %ymm4
843 ; AVX2-FAST-NEXT: vpshufb %ymm13, %ymm5, %ymm5
844 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0,1],ymm4[2,3],ymm5[4,5],ymm4[6,7]
845 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,2,1,3]
846 ; AVX2-FAST-NEXT: vpshufb %ymm12, %ymm1, %ymm1
847 ; AVX2-FAST-NEXT: vpshufb %ymm13, %ymm0, %ymm0
848 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
849 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
850 ; AVX2-FAST-NEXT: vmovaps %ymm9, 64(%rsi)
851 ; AVX2-FAST-NEXT: vmovaps %ymm11, (%rsi)
852 ; AVX2-FAST-NEXT: vmovaps %ymm10, 96(%rsi)
853 ; AVX2-FAST-NEXT: vmovaps %ymm2, 32(%rsi)
854 ; AVX2-FAST-NEXT: vmovdqa %ymm0, 64(%rdx)
855 ; AVX2-FAST-NEXT: vmovdqa %ymm4, (%rdx)
856 ; AVX2-FAST-NEXT: vmovdqa %ymm3, 96(%rdx)
857 ; AVX2-FAST-NEXT: vmovdqa %ymm7, 32(%rdx)
858 ; AVX2-FAST-NEXT: vzeroupper
859 ; AVX2-FAST-NEXT: retq
861 ; AVX2-FAST-PERLANE-LABEL: load_i16_stride2_vf64:
862 ; AVX2-FAST-PERLANE: # %bb.0:
863 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 128(%rdi), %ymm0
864 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 160(%rdi), %ymm1
865 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 192(%rdi), %ymm3
866 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 224(%rdi), %ymm4
867 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdi), %ymm5
868 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%rdi), %ymm6
869 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 64(%rdi), %ymm7
870 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 96(%rdi), %ymm8
871 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm9 = <0,1,4,5,u,u,u,u,8,9,12,13,u,u,u,u,16,17,20,21,u,u,u,u,24,25,28,29,u,u,u,u>
872 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm9, %ymm8, %ymm2
873 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm9, %ymm7, %ymm10
874 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm2 = ymm10[0,2],ymm2[0,2],ymm10[4,6],ymm2[4,6]
875 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,1,3]
876 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm9, %ymm4, %ymm10
877 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm9, %ymm3, %ymm11
878 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm10 = ymm11[0,2],ymm10[0,2],ymm11[4,6],ymm10[4,6]
879 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[0,2,1,3]
880 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm9, %ymm6, %ymm11
881 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm9, %ymm5, %ymm12
882 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm11 = ymm12[0,2],ymm11[0,2],ymm12[4,6],ymm11[4,6]
883 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[0,2,1,3]
884 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm9, %ymm1, %ymm12
885 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm9, %ymm0, %ymm9
886 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm9 = ymm9[0,2],ymm12[0,2],ymm9[4,6],ymm12[4,6]
887 ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm9 = ymm9[0,2,1,3]
888 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm12 = <u,u,u,u,u,u,u,u,2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31>
889 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm12, %ymm8, %ymm8
890 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm13 = <2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31,u,u,u,u,u,u,u,u>
891 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm13, %ymm7, %ymm7
892 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1],ymm8[2,3],ymm7[4,5],ymm8[6,7]
893 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,2,1,3]
894 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm12, %ymm4, %ymm4
895 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm13, %ymm3, %ymm3
896 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm4[2,3],ymm3[4,5],ymm4[6,7]
897 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,2,1,3]
898 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm12, %ymm6, %ymm4
899 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm13, %ymm5, %ymm5
900 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0,1],ymm4[2,3],ymm5[4,5],ymm4[6,7]
901 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,2,1,3]
902 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm12, %ymm1, %ymm1
903 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm13, %ymm0, %ymm0
904 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
905 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
906 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm9, 64(%rsi)
907 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm11, (%rsi)
908 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm10, 96(%rsi)
909 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm2, 32(%rsi)
910 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm0, 64(%rdx)
911 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm4, (%rdx)
912 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm3, 96(%rdx)
913 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm7, 32(%rdx)
914 ; AVX2-FAST-PERLANE-NEXT: vzeroupper
915 ; AVX2-FAST-PERLANE-NEXT: retq
917 ; AVX512F-LABEL: load_i16_stride2_vf64:
919 ; AVX512F-NEXT: vmovdqa64 (%rdi), %zmm0
920 ; AVX512F-NEXT: vmovdqa64 64(%rdi), %zmm1
921 ; AVX512F-NEXT: vmovdqa64 128(%rdi), %zmm2
922 ; AVX512F-NEXT: vmovdqa64 192(%rdi), %zmm3
923 ; AVX512F-NEXT: vpmovdw %zmm1, %ymm4
924 ; AVX512F-NEXT: vpsrld $16, %zmm1, %zmm1
925 ; AVX512F-NEXT: vpsrld $16, %zmm0, %zmm5
926 ; AVX512F-NEXT: vpsrld $16, %zmm3, %zmm6
927 ; AVX512F-NEXT: vpsrld $16, %zmm2, %zmm7
928 ; AVX512F-NEXT: vpmovdw %zmm0, (%rsi)
929 ; AVX512F-NEXT: vmovdqa %ymm4, 32(%rsi)
930 ; AVX512F-NEXT: vpmovdw %zmm2, 64(%rsi)
931 ; AVX512F-NEXT: vpmovdw %zmm3, 96(%rsi)
932 ; AVX512F-NEXT: vpmovdw %zmm7, 64(%rdx)
933 ; AVX512F-NEXT: vpmovdw %zmm6, 96(%rdx)
934 ; AVX512F-NEXT: vpmovdw %zmm5, (%rdx)
935 ; AVX512F-NEXT: vpmovdw %zmm1, 32(%rdx)
936 ; AVX512F-NEXT: vzeroupper
939 ; AVX512BW-LABEL: load_i16_stride2_vf64:
941 ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
942 ; AVX512BW-NEXT: vmovdqa64 64(%rdi), %zmm1
943 ; AVX512BW-NEXT: vmovdqa64 128(%rdi), %zmm2
944 ; AVX512BW-NEXT: vmovdqa64 192(%rdi), %zmm3
945 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm4 = [0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30,32,34,36,38,40,42,44,46,48,50,52,54,56,58,60,62]
946 ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm5
947 ; AVX512BW-NEXT: vpermt2w %zmm1, %zmm4, %zmm5
948 ; AVX512BW-NEXT: vpermi2w %zmm3, %zmm2, %zmm4
949 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm6 = [1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31,33,35,37,39,41,43,45,47,49,51,53,55,57,59,61,63]
950 ; AVX512BW-NEXT: vpermt2w %zmm1, %zmm6, %zmm0
951 ; AVX512BW-NEXT: vpermt2w %zmm3, %zmm6, %zmm2
952 ; AVX512BW-NEXT: vmovdqa64 %zmm4, 64(%rsi)
953 ; AVX512BW-NEXT: vmovdqa64 %zmm5, (%rsi)
954 ; AVX512BW-NEXT: vmovdqa64 %zmm2, 64(%rdx)
955 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rdx)
956 ; AVX512BW-NEXT: vzeroupper
957 ; AVX512BW-NEXT: retq
958 %wide.vec = load <128 x i16>, ptr %in.vec, align 64
959 %strided.vec0 = shufflevector <128 x i16> %wide.vec, <128 x i16> poison, <64 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30, i32 32, i32 34, i32 36, i32 38, i32 40, i32 42, i32 44, i32 46, i32 48, i32 50, i32 52, i32 54, i32 56, i32 58, i32 60, i32 62, i32 64, i32 66, i32 68, i32 70, i32 72, i32 74, i32 76, i32 78, i32 80, i32 82, i32 84, i32 86, i32 88, i32 90, i32 92, i32 94, i32 96, i32 98, i32 100, i32 102, i32 104, i32 106, i32 108, i32 110, i32 112, i32 114, i32 116, i32 118, i32 120, i32 122, i32 124, i32 126>
960 %strided.vec1 = shufflevector <128 x i16> %wide.vec, <128 x i16> poison, <64 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31, i32 33, i32 35, i32 37, i32 39, i32 41, i32 43, i32 45, i32 47, i32 49, i32 51, i32 53, i32 55, i32 57, i32 59, i32 61, i32 63, i32 65, i32 67, i32 69, i32 71, i32 73, i32 75, i32 77, i32 79, i32 81, i32 83, i32 85, i32 87, i32 89, i32 91, i32 93, i32 95, i32 97, i32 99, i32 101, i32 103, i32 105, i32 107, i32 109, i32 111, i32 113, i32 115, i32 117, i32 119, i32 121, i32 123, i32 125, i32 127>
961 store <64 x i16> %strided.vec0, ptr %out.vec0, align 64
962 store <64 x i16> %strided.vec1, ptr %out.vec1, align 64
965 ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
968 ; AVX512-FAST: {{.*}}
969 ; AVX512-SLOW: {{.*}}
970 ; AVX512BW-FAST: {{.*}}
971 ; AVX512BW-ONLY-FAST: {{.*}}
972 ; AVX512BW-ONLY-SLOW: {{.*}}
973 ; AVX512BW-SLOW: {{.*}}
974 ; AVX512DQ-FAST: {{.*}}
975 ; AVX512DQ-SLOW: {{.*}}
976 ; AVX512DQBW-FAST: {{.*}}
977 ; AVX512DQBW-SLOW: {{.*}}
978 ; AVX512F-FAST: {{.*}}
979 ; AVX512F-ONLY-FAST: {{.*}}
980 ; AVX512F-ONLY-SLOW: {{.*}}
981 ; AVX512F-SLOW: {{.*}}