; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,FALLBACK0
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1,AVX1-ONLY,FALLBACK1
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12

; These patterns are produced by LoopVectorizer for interleaved loads.
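;
; A rough illustration (not part of the generated checks): assuming a simple
; scalar source loop such as
;
;   /* hypothetical C loop; all names are illustrative only */
;   for (int i = 0; i < n; ++i) {
;     out0[i] = in[2 * i];     /* even elements */
;     out1[i] = in[2 * i + 1]; /* odd elements  */
;   }
;
; the LoopVectorizer emits one wide load covering the whole interleave group
; and two shufflevectors selecting the even and the odd lanes, which is the
; shape of the IR bodies in the functions below.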

define void @load_i32_stride2_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1) nounwind {
; SSE-LABEL: load_i32_stride2_vf2:
; SSE:       # %bb.0:
; SSE-NEXT:    movdqa (%rdi), %xmm0
; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[0,2,2,3]
; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
; SSE-NEXT:    movq %xmm1, (%rsi)
; SSE-NEXT:    movq %xmm0, (%rdx)
; SSE-NEXT:    retq
;
; AVX-LABEL: load_i32_stride2_vf2:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovaps (%rdi), %xmm0
; AVX-NEXT:    vshufps {{.*#+}} xmm1 = xmm0[0,2,2,3]
; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[1,3,2,3]
; AVX-NEXT:    vmovlps %xmm1, (%rsi)
; AVX-NEXT:    vmovlps %xmm0, (%rdx)
; AVX-NEXT:    retq
  %wide.vec = load <4 x i32>, ptr %in.vec, align 64
  %strided.vec0 = shufflevector <4 x i32> %wide.vec, <4 x i32> poison, <2 x i32> <i32 0, i32 2>
  %strided.vec1 = shufflevector <4 x i32> %wide.vec, <4 x i32> poison, <2 x i32> <i32 1, i32 3>
  store <2 x i32> %strided.vec0, ptr %out.vec0, align 64
  store <2 x i32> %strided.vec1, ptr %out.vec1, align 64
  ret void
}

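; The same even/odd split for an <8 x i32> load producing two <4 x i32> results.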
define void @load_i32_stride2_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1) nounwind {
; SSE-LABEL: load_i32_stride2_vf4:
; SSE:       # %bb.0:
; SSE-NEXT:    movaps (%rdi), %xmm0
; SSE-NEXT:    movaps 16(%rdi), %xmm1
; SSE-NEXT:    movaps %xmm0, %xmm2
; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,2],xmm1[0,2]
; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
; SSE-NEXT:    movaps %xmm2, (%rsi)
; SSE-NEXT:    movaps %xmm0, (%rdx)
; SSE-NEXT:    retq
;
; AVX1-LABEL: load_i32_stride2_vf4:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vmovaps (%rdi), %xmm0
; AVX1-NEXT:    vmovaps 16(%rdi), %xmm1
; AVX1-NEXT:    vshufps {{.*#+}} xmm2 = xmm0[0,2],xmm1[0,2]
; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
; AVX1-NEXT:    vmovaps %xmm2, (%rsi)
; AVX1-NEXT:    vmovaps %xmm0, (%rdx)
; AVX1-NEXT:    retq
;
; AVX512-LABEL: load_i32_stride2_vf4:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vmovdqa (%rdi), %ymm0
; AVX512-NEXT:    vmovaps (%rdi), %xmm1
; AVX512-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[1,3],mem[1,3]
; AVX512-NEXT:    vpmovqd %ymm0, (%rsi)
; AVX512-NEXT:    vmovaps %xmm1, (%rdx)
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %wide.vec = load <8 x i32>, ptr %in.vec, align 64
  %strided.vec0 = shufflevector <8 x i32> %wide.vec, <8 x i32> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
  %strided.vec1 = shufflevector <8 x i32> %wide.vec, <8 x i32> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
  store <4 x i32> %strided.vec0, ptr %out.vec0, align 64
  store <4 x i32> %strided.vec1, ptr %out.vec1, align 64
  ret void
}

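; vf8: a <16 x i32> load is deinterleaved into two <8 x i32> results.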
define void @load_i32_stride2_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1) nounwind {
; SSE-LABEL: load_i32_stride2_vf8:
; SSE:       # %bb.0:
; SSE-NEXT:    movaps (%rdi), %xmm0
; SSE-NEXT:    movaps 16(%rdi), %xmm1
; SSE-NEXT:    movaps 32(%rdi), %xmm2
; SSE-NEXT:    movaps 48(%rdi), %xmm3
; SSE-NEXT:    movaps %xmm2, %xmm4
; SSE-NEXT:    shufps {{.*#+}} xmm4 = xmm4[0,2],xmm3[0,2]
; SSE-NEXT:    movaps %xmm0, %xmm5
; SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[0,2],xmm1[0,2]
; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[1,3],xmm3[1,3]
; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
; SSE-NEXT:    movaps %xmm5, (%rsi)
; SSE-NEXT:    movaps %xmm4, 16(%rsi)
; SSE-NEXT:    movaps %xmm0, (%rdx)
; SSE-NEXT:    movaps %xmm2, 16(%rdx)
; SSE-NEXT:    retq
;
; AVX1-ONLY-LABEL: load_i32_stride2_vf8:
; AVX1-ONLY:       # %bb.0:
; AVX1-ONLY-NEXT:    vmovaps (%rdi), %ymm0
; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm0[2,3],mem[2,3]
; AVX1-ONLY-NEXT:    vinsertf128 $1, 32(%rdi), %ymm0, %ymm0
; AVX1-ONLY-NEXT:    vshufps {{.*#+}} ymm2 = ymm0[0,2],ymm1[0,2],ymm0[4,6],ymm1[4,6]
; AVX1-ONLY-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[1,3],ymm1[1,3],ymm0[5,7],ymm1[5,7]
; AVX1-ONLY-NEXT:    vmovaps %ymm2, (%rsi)
; AVX1-ONLY-NEXT:    vmovaps %ymm0, (%rdx)
; AVX1-ONLY-NEXT:    vzeroupper
; AVX1-ONLY-NEXT:    retq
;
; AVX2-ONLY-LABEL: load_i32_stride2_vf8:
; AVX2-ONLY:       # %bb.0:
; AVX2-ONLY-NEXT:    vmovaps (%rdi), %ymm0
; AVX2-ONLY-NEXT:    vmovaps 32(%rdi), %ymm1
; AVX2-ONLY-NEXT:    vshufps {{.*#+}} ymm2 = ymm0[0,2],ymm1[0,2],ymm0[4,6],ymm1[4,6]
; AVX2-ONLY-NEXT:    vpermpd {{.*#+}} ymm2 = ymm2[0,2,1,3]
; AVX2-ONLY-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[1,3],ymm1[1,3],ymm0[5,7],ymm1[5,7]
; AVX2-ONLY-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-ONLY-NEXT:    vmovaps %ymm2, (%rsi)
; AVX2-ONLY-NEXT:    vmovaps %ymm0, (%rdx)
; AVX2-ONLY-NEXT:    vzeroupper
; AVX2-ONLY-NEXT:    retq
;
; AVX512-SLOW-LABEL: load_i32_stride2_vf8:
; AVX512-SLOW:       # %bb.0:
; AVX512-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm0
; AVX512-SLOW-NEXT:    vmovaps (%rdi), %ymm1
; AVX512-SLOW-NEXT:    vshufps {{.*#+}} ymm1 = ymm1[1,3],mem[1,3],ymm1[5,7],mem[5,7]
; AVX512-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,1,3]
; AVX512-SLOW-NEXT:    vpmovqd %zmm0, (%rsi)
; AVX512-SLOW-NEXT:    vmovaps %ymm1, (%rdx)
; AVX512-SLOW-NEXT:    vzeroupper
; AVX512-SLOW-NEXT:    retq
;
; AVX512-FAST-LABEL: load_i32_stride2_vf8:
; AVX512-FAST:       # %bb.0:
; AVX512-FAST-NEXT:    vmovdqa64 (%rdi), %zmm0
; AVX512-FAST-NEXT:    vmovdqa (%rdi), %ymm1
; AVX512-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [1,3,5,7,9,11,13,15]
; AVX512-FAST-NEXT:    vpermi2d 32(%rdi), %ymm1, %ymm2
; AVX512-FAST-NEXT:    vpmovqd %zmm0, (%rsi)
; AVX512-FAST-NEXT:    vmovdqa %ymm2, (%rdx)
; AVX512-FAST-NEXT:    vzeroupper
; AVX512-FAST-NEXT:    retq
  %wide.vec = load <16 x i32>, ptr %in.vec, align 64
  %strided.vec0 = shufflevector <16 x i32> %wide.vec, <16 x i32> poison, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
  %strided.vec1 = shufflevector <16 x i32> %wide.vec, <16 x i32> poison, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
  store <8 x i32> %strided.vec0, ptr %out.vec0, align 64
  store <8 x i32> %strided.vec1, ptr %out.vec1, align 64
  ret void
}

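; vf16: a <32 x i32> load is deinterleaved into two <16 x i32> results.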
define void @load_i32_stride2_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1) nounwind {
; SSE-LABEL: load_i32_stride2_vf16:
; SSE:       # %bb.0:
; SSE-NEXT:    movaps (%rdi), %xmm0
; SSE-NEXT:    movaps 16(%rdi), %xmm1
; SSE-NEXT:    movaps 32(%rdi), %xmm2
; SSE-NEXT:    movaps 48(%rdi), %xmm3
; SSE-NEXT:    movaps 80(%rdi), %xmm4
; SSE-NEXT:    movaps 64(%rdi), %xmm5
; SSE-NEXT:    movaps 112(%rdi), %xmm6
; SSE-NEXT:    movaps 96(%rdi), %xmm7
; SSE-NEXT:    movaps %xmm7, %xmm8
; SSE-NEXT:    shufps {{.*#+}} xmm8 = xmm8[0,2],xmm6[0,2]
; SSE-NEXT:    movaps %xmm5, %xmm9
; SSE-NEXT:    shufps {{.*#+}} xmm9 = xmm9[0,2],xmm4[0,2]
; SSE-NEXT:    movaps %xmm2, %xmm10
; SSE-NEXT:    shufps {{.*#+}} xmm10 = xmm10[0,2],xmm3[0,2]
; SSE-NEXT:    movaps %xmm0, %xmm11
; SSE-NEXT:    shufps {{.*#+}} xmm11 = xmm11[0,2],xmm1[0,2]
; SSE-NEXT:    shufps {{.*#+}} xmm7 = xmm7[1,3],xmm6[1,3]
; SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[1,3],xmm4[1,3]
; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[1,3],xmm3[1,3]
; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
; SSE-NEXT:    movaps %xmm9, 32(%rsi)
; SSE-NEXT:    movaps %xmm8, 48(%rsi)
; SSE-NEXT:    movaps %xmm11, (%rsi)
; SSE-NEXT:    movaps %xmm10, 16(%rsi)
; SSE-NEXT:    movaps %xmm5, 32(%rdx)
; SSE-NEXT:    movaps %xmm7, 48(%rdx)
; SSE-NEXT:    movaps %xmm0, (%rdx)
; SSE-NEXT:    movaps %xmm2, 16(%rdx)
; SSE-NEXT:    retq
;
; AVX1-ONLY-LABEL: load_i32_stride2_vf16:
; AVX1-ONLY:       # %bb.0:
; AVX1-ONLY-NEXT:    vmovaps (%rdi), %ymm0
; AVX1-ONLY-NEXT:    vmovaps 64(%rdi), %ymm1
; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm1[2,3],mem[2,3]
; AVX1-ONLY-NEXT:    vinsertf128 $1, 96(%rdi), %ymm1, %ymm1
; AVX1-ONLY-NEXT:    vshufps {{.*#+}} ymm3 = ymm1[0,2],ymm2[0,2],ymm1[4,6],ymm2[4,6]
; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm0[2,3],mem[2,3]
; AVX1-ONLY-NEXT:    vinsertf128 $1, 32(%rdi), %ymm0, %ymm0
; AVX1-ONLY-NEXT:    vshufps {{.*#+}} ymm5 = ymm0[0,2],ymm4[0,2],ymm0[4,6],ymm4[4,6]
; AVX1-ONLY-NEXT:    vshufps {{.*#+}} ymm1 = ymm1[1,3],ymm2[1,3],ymm1[5,7],ymm2[5,7]
; AVX1-ONLY-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[1,3],ymm4[1,3],ymm0[5,7],ymm4[5,7]
; AVX1-ONLY-NEXT:    vmovaps %ymm5, (%rsi)
; AVX1-ONLY-NEXT:    vmovaps %ymm3, 32(%rsi)
; AVX1-ONLY-NEXT:    vmovaps %ymm0, (%rdx)
; AVX1-ONLY-NEXT:    vmovaps %ymm1, 32(%rdx)
; AVX1-ONLY-NEXT:    vzeroupper
; AVX1-ONLY-NEXT:    retq
;
; AVX2-ONLY-LABEL: load_i32_stride2_vf16:
; AVX2-ONLY:       # %bb.0:
; AVX2-ONLY-NEXT:    vmovaps (%rdi), %ymm0
; AVX2-ONLY-NEXT:    vmovaps 32(%rdi), %ymm1
; AVX2-ONLY-NEXT:    vmovaps 64(%rdi), %ymm2
; AVX2-ONLY-NEXT:    vmovaps 96(%rdi), %ymm3
; AVX2-ONLY-NEXT:    vshufps {{.*#+}} ymm4 = ymm2[0,2],ymm3[0,2],ymm2[4,6],ymm3[4,6]
; AVX2-ONLY-NEXT:    vpermpd {{.*#+}} ymm4 = ymm4[0,2,1,3]
; AVX2-ONLY-NEXT:    vshufps {{.*#+}} ymm5 = ymm0[0,2],ymm1[0,2],ymm0[4,6],ymm1[4,6]
; AVX2-ONLY-NEXT:    vpermpd {{.*#+}} ymm5 = ymm5[0,2,1,3]
; AVX2-ONLY-NEXT:    vshufps {{.*#+}} ymm2 = ymm2[1,3],ymm3[1,3],ymm2[5,7],ymm3[5,7]
; AVX2-ONLY-NEXT:    vpermpd {{.*#+}} ymm2 = ymm2[0,2,1,3]
; AVX2-ONLY-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[1,3],ymm1[1,3],ymm0[5,7],ymm1[5,7]
; AVX2-ONLY-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-ONLY-NEXT:    vmovaps %ymm5, (%rsi)
; AVX2-ONLY-NEXT:    vmovaps %ymm4, 32(%rsi)
; AVX2-ONLY-NEXT:    vmovaps %ymm0, (%rdx)
; AVX2-ONLY-NEXT:    vmovaps %ymm2, 32(%rdx)
; AVX2-ONLY-NEXT:    vzeroupper
; AVX2-ONLY-NEXT:    retq
;
; AVX512-LABEL: load_i32_stride2_vf16:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vmovdqa64 (%rdi), %zmm0
; AVX512-NEXT:    vmovdqa64 64(%rdi), %zmm1
; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30]
; AVX512-NEXT:    vpermi2d %zmm1, %zmm0, %zmm2
; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31]
; AVX512-NEXT:    vpermi2d %zmm1, %zmm0, %zmm3
; AVX512-NEXT:    vmovdqa64 %zmm2, (%rsi)
; AVX512-NEXT:    vmovdqa64 %zmm3, (%rdx)
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %wide.vec = load <32 x i32>, ptr %in.vec, align 64
  %strided.vec0 = shufflevector <32 x i32> %wide.vec, <32 x i32> poison, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
  %strided.vec1 = shufflevector <32 x i32> %wide.vec, <32 x i32> poison, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
  store <16 x i32> %strided.vec0, ptr %out.vec0, align 64
  store <16 x i32> %strided.vec1, ptr %out.vec1, align 64
  ret void
}

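; vf32: a <64 x i32> load is deinterleaved into two <32 x i32> results; the SSE lowering already needs one spill.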
define void @load_i32_stride2_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1) nounwind {
; SSE-LABEL: load_i32_stride2_vf32:
; SSE:       # %bb.0:
; SSE-NEXT:    movaps (%rdi), %xmm1
; SSE-NEXT:    movaps 16(%rdi), %xmm8
; SSE-NEXT:    movaps 32(%rdi), %xmm0
; SSE-NEXT:    movaps 208(%rdi), %xmm11
; SSE-NEXT:    movaps 192(%rdi), %xmm2
; SSE-NEXT:    movaps 240(%rdi), %xmm10
; SSE-NEXT:    movaps 224(%rdi), %xmm4
; SSE-NEXT:    movaps 144(%rdi), %xmm14
; SSE-NEXT:    movaps 128(%rdi), %xmm3
; SSE-NEXT:    movaps 176(%rdi), %xmm12
; SSE-NEXT:    movaps 160(%rdi), %xmm6
; SSE-NEXT:    movaps 80(%rdi), %xmm13
; SSE-NEXT:    movaps 64(%rdi), %xmm5
; SSE-NEXT:    movaps 112(%rdi), %xmm15
; SSE-NEXT:    movaps 96(%rdi), %xmm7
; SSE-NEXT:    movaps %xmm5, %xmm9
; SSE-NEXT:    shufps {{.*#+}} xmm9 = xmm9[0,2],xmm13[0,2]
; SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[1,3],xmm13[1,3]
; SSE-NEXT:    movaps %xmm7, %xmm13
; SSE-NEXT:    shufps {{.*#+}} xmm13 = xmm13[0,2],xmm15[0,2]
; SSE-NEXT:    shufps {{.*#+}} xmm7 = xmm7[1,3],xmm15[1,3]
; SSE-NEXT:    movaps %xmm3, %xmm15
; SSE-NEXT:    shufps {{.*#+}} xmm15 = xmm15[0,2],xmm14[0,2]
; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[1,3],xmm14[1,3]
; SSE-NEXT:    movaps %xmm6, %xmm14
; SSE-NEXT:    shufps {{.*#+}} xmm14 = xmm14[0,2],xmm12[0,2]
; SSE-NEXT:    shufps {{.*#+}} xmm6 = xmm6[1,3],xmm12[1,3]
; SSE-NEXT:    movaps %xmm2, %xmm12
; SSE-NEXT:    shufps {{.*#+}} xmm12 = xmm12[0,2],xmm11[0,2]
; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[1,3],xmm11[1,3]
; SSE-NEXT:    movaps %xmm4, %xmm11
; SSE-NEXT:    shufps {{.*#+}} xmm11 = xmm11[0,2],xmm10[0,2]
; SSE-NEXT:    shufps {{.*#+}} xmm4 = xmm4[1,3],xmm10[1,3]
; SSE-NEXT:    movaps %xmm1, %xmm10
; SSE-NEXT:    shufps {{.*#+}} xmm10 = xmm10[0,2],xmm8[0,2]
; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,3],xmm8[1,3]
; SSE-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movaps 48(%rdi), %xmm8
; SSE-NEXT:    movaps %xmm0, %xmm1
; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,2],xmm8[0,2]
; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,3],xmm8[1,3]
; SSE-NEXT:    movaps %xmm12, 96(%rsi)
; SSE-NEXT:    movaps %xmm11, 112(%rsi)
; SSE-NEXT:    movaps %xmm15, 64(%rsi)
; SSE-NEXT:    movaps %xmm14, 80(%rsi)
; SSE-NEXT:    movaps %xmm9, 32(%rsi)
; SSE-NEXT:    movaps %xmm13, 48(%rsi)
; SSE-NEXT:    movaps %xmm10, (%rsi)
; SSE-NEXT:    movaps %xmm1, 16(%rsi)
; SSE-NEXT:    movaps %xmm4, 112(%rdx)
; SSE-NEXT:    movaps %xmm2, 96(%rdx)
; SSE-NEXT:    movaps %xmm6, 80(%rdx)
; SSE-NEXT:    movaps %xmm3, 64(%rdx)
; SSE-NEXT:    movaps %xmm7, 48(%rdx)
; SSE-NEXT:    movaps %xmm5, 32(%rdx)
; SSE-NEXT:    movaps %xmm0, 16(%rdx)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, (%rdx)
; SSE-NEXT:    retq
;
; AVX1-ONLY-LABEL: load_i32_stride2_vf32:
; AVX1-ONLY:       # %bb.0:
; AVX1-ONLY-NEXT:    vmovaps (%rdi), %ymm0
; AVX1-ONLY-NEXT:    vmovaps 64(%rdi), %ymm1
; AVX1-ONLY-NEXT:    vmovaps 128(%rdi), %ymm2
; AVX1-ONLY-NEXT:    vmovaps 192(%rdi), %ymm3
; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm3[2,3],mem[2,3]
; AVX1-ONLY-NEXT:    vinsertf128 $1, 224(%rdi), %ymm3, %ymm3
; AVX1-ONLY-NEXT:    vshufps {{.*#+}} ymm5 = ymm3[0,2],ymm4[0,2],ymm3[4,6],ymm4[4,6]
; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm6 = ymm1[2,3],mem[2,3]
; AVX1-ONLY-NEXT:    vinsertf128 $1, 96(%rdi), %ymm1, %ymm1
; AVX1-ONLY-NEXT:    vshufps {{.*#+}} ymm7 = ymm1[0,2],ymm6[0,2],ymm1[4,6],ymm6[4,6]
; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm8 = ymm0[2,3],mem[2,3]
; AVX1-ONLY-NEXT:    vinsertf128 $1, 32(%rdi), %ymm0, %ymm0
; AVX1-ONLY-NEXT:    vshufps {{.*#+}} ymm9 = ymm0[0,2],ymm8[0,2],ymm0[4,6],ymm8[4,6]
; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm10 = ymm2[2,3],mem[2,3]
; AVX1-ONLY-NEXT:    vinsertf128 $1, 160(%rdi), %ymm2, %ymm2
; AVX1-ONLY-NEXT:    vshufps {{.*#+}} ymm11 = ymm2[0,2],ymm10[0,2],ymm2[4,6],ymm10[4,6]
; AVX1-ONLY-NEXT:    vshufps {{.*#+}} ymm1 = ymm1[1,3],ymm6[1,3],ymm1[5,7],ymm6[5,7]
; AVX1-ONLY-NEXT:    vshufps {{.*#+}} ymm3 = ymm3[1,3],ymm4[1,3],ymm3[5,7],ymm4[5,7]
; AVX1-ONLY-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[1,3],ymm8[1,3],ymm0[5,7],ymm8[5,7]
; AVX1-ONLY-NEXT:    vshufps {{.*#+}} ymm2 = ymm2[1,3],ymm10[1,3],ymm2[5,7],ymm10[5,7]
; AVX1-ONLY-NEXT:    vmovaps %ymm11, 64(%rsi)
; AVX1-ONLY-NEXT:    vmovaps %ymm9, (%rsi)
; AVX1-ONLY-NEXT:    vmovaps %ymm7, 32(%rsi)
; AVX1-ONLY-NEXT:    vmovaps %ymm5, 96(%rsi)
; AVX1-ONLY-NEXT:    vmovaps %ymm2, 64(%rdx)
; AVX1-ONLY-NEXT:    vmovaps %ymm0, (%rdx)
; AVX1-ONLY-NEXT:    vmovaps %ymm3, 96(%rdx)
; AVX1-ONLY-NEXT:    vmovaps %ymm1, 32(%rdx)
; AVX1-ONLY-NEXT:    vzeroupper
; AVX1-ONLY-NEXT:    retq
;
; AVX2-ONLY-LABEL: load_i32_stride2_vf32:
; AVX2-ONLY:       # %bb.0:
; AVX2-ONLY-NEXT:    vmovaps (%rdi), %ymm0
; AVX2-ONLY-NEXT:    vmovaps 32(%rdi), %ymm1
; AVX2-ONLY-NEXT:    vmovaps 64(%rdi), %ymm2
; AVX2-ONLY-NEXT:    vmovaps 96(%rdi), %ymm3
; AVX2-ONLY-NEXT:    vmovaps 160(%rdi), %ymm4
; AVX2-ONLY-NEXT:    vmovaps 128(%rdi), %ymm5
; AVX2-ONLY-NEXT:    vmovaps 224(%rdi), %ymm6
; AVX2-ONLY-NEXT:    vmovaps 192(%rdi), %ymm7
; AVX2-ONLY-NEXT:    vshufps {{.*#+}} ymm8 = ymm7[0,2],ymm6[0,2],ymm7[4,6],ymm6[4,6]
; AVX2-ONLY-NEXT:    vpermpd {{.*#+}} ymm8 = ymm8[0,2,1,3]
; AVX2-ONLY-NEXT:    vshufps {{.*#+}} ymm9 = ymm5[0,2],ymm4[0,2],ymm5[4,6],ymm4[4,6]
; AVX2-ONLY-NEXT:    vpermpd {{.*#+}} ymm9 = ymm9[0,2,1,3]
; AVX2-ONLY-NEXT:    vshufps {{.*#+}} ymm10 = ymm2[0,2],ymm3[0,2],ymm2[4,6],ymm3[4,6]
; AVX2-ONLY-NEXT:    vpermpd {{.*#+}} ymm10 = ymm10[0,2,1,3]
; AVX2-ONLY-NEXT:    vshufps {{.*#+}} ymm11 = ymm0[0,2],ymm1[0,2],ymm0[4,6],ymm1[4,6]
; AVX2-ONLY-NEXT:    vpermpd {{.*#+}} ymm11 = ymm11[0,2,1,3]
; AVX2-ONLY-NEXT:    vshufps {{.*#+}} ymm6 = ymm7[1,3],ymm6[1,3],ymm7[5,7],ymm6[5,7]
; AVX2-ONLY-NEXT:    vpermpd {{.*#+}} ymm6 = ymm6[0,2,1,3]
; AVX2-ONLY-NEXT:    vshufps {{.*#+}} ymm4 = ymm5[1,3],ymm4[1,3],ymm5[5,7],ymm4[5,7]
; AVX2-ONLY-NEXT:    vpermpd {{.*#+}} ymm4 = ymm4[0,2,1,3]
; AVX2-ONLY-NEXT:    vshufps {{.*#+}} ymm2 = ymm2[1,3],ymm3[1,3],ymm2[5,7],ymm3[5,7]
; AVX2-ONLY-NEXT:    vpermpd {{.*#+}} ymm2 = ymm2[0,2,1,3]
; AVX2-ONLY-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[1,3],ymm1[1,3],ymm0[5,7],ymm1[5,7]
; AVX2-ONLY-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-ONLY-NEXT:    vmovaps %ymm9, 64(%rsi)
; AVX2-ONLY-NEXT:    vmovaps %ymm11, (%rsi)
; AVX2-ONLY-NEXT:    vmovaps %ymm8, 96(%rsi)
; AVX2-ONLY-NEXT:    vmovaps %ymm10, 32(%rsi)
; AVX2-ONLY-NEXT:    vmovaps %ymm4, 64(%rdx)
; AVX2-ONLY-NEXT:    vmovaps %ymm0, (%rdx)
; AVX2-ONLY-NEXT:    vmovaps %ymm6, 96(%rdx)
; AVX2-ONLY-NEXT:    vmovaps %ymm2, 32(%rdx)
; AVX2-ONLY-NEXT:    vzeroupper
; AVX2-ONLY-NEXT:    retq
;
; AVX512-LABEL: load_i32_stride2_vf32:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vmovdqa64 (%rdi), %zmm0
; AVX512-NEXT:    vmovdqa64 64(%rdi), %zmm1
; AVX512-NEXT:    vmovdqa64 128(%rdi), %zmm2
; AVX512-NEXT:    vmovdqa64 192(%rdi), %zmm3
; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30]
; AVX512-NEXT:    vmovdqa64 %zmm0, %zmm5
; AVX512-NEXT:    vpermt2d %zmm1, %zmm4, %zmm5
; AVX512-NEXT:    vpermi2d %zmm3, %zmm2, %zmm4
; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31]
; AVX512-NEXT:    vpermt2d %zmm1, %zmm6, %zmm0
; AVX512-NEXT:    vpermt2d %zmm3, %zmm6, %zmm2
; AVX512-NEXT:    vmovdqa64 %zmm4, 64(%rsi)
; AVX512-NEXT:    vmovdqa64 %zmm5, (%rsi)
; AVX512-NEXT:    vmovdqa64 %zmm2, 64(%rdx)
; AVX512-NEXT:    vmovdqa64 %zmm0, (%rdx)
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %wide.vec = load <64 x i32>, ptr %in.vec, align 64
  %strided.vec0 = shufflevector <64 x i32> %wide.vec, <64 x i32> poison, <32 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30, i32 32, i32 34, i32 36, i32 38, i32 40, i32 42, i32 44, i32 46, i32 48, i32 50, i32 52, i32 54, i32 56, i32 58, i32 60, i32 62>
  %strided.vec1 = shufflevector <64 x i32> %wide.vec, <64 x i32> poison, <32 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31, i32 33, i32 35, i32 37, i32 39, i32 41, i32 43, i32 45, i32 47, i32 49, i32 51, i32 53, i32 55, i32 57, i32 59, i32 61, i32 63>
  store <32 x i32> %strided.vec0, ptr %out.vec0, align 64
  store <32 x i32> %strided.vec1, ptr %out.vec1, align 64
  ret void
}

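; vf64: a <128 x i32> load is deinterleaved into two <64 x i32> results; the SSE lowering needs a stack frame for its spills.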
define void @load_i32_stride2_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1) nounwind {
; SSE-LABEL: load_i32_stride2_vf64:
; SSE:       # %bb.0:
; SSE-NEXT:    subq $152, %rsp
; SSE-NEXT:    movaps 208(%rdi), %xmm11
; SSE-NEXT:    movaps 192(%rdi), %xmm6
; SSE-NEXT:    movaps 80(%rdi), %xmm1
; SSE-NEXT:    movaps 64(%rdi), %xmm5
; SSE-NEXT:    movaps 240(%rdi), %xmm14
; SSE-NEXT:    movaps 224(%rdi), %xmm8
; SSE-NEXT:    movaps 112(%rdi), %xmm3
; SSE-NEXT:    movaps 96(%rdi), %xmm7
; SSE-NEXT:    movaps 272(%rdi), %xmm12
; SSE-NEXT:    movaps 144(%rdi), %xmm2
; SSE-NEXT:    movaps 128(%rdi), %xmm9
; SSE-NEXT:    movaps 304(%rdi), %xmm0
; SSE-NEXT:    movaps 288(%rdi), %xmm13
; SSE-NEXT:    movaps 176(%rdi), %xmm4
; SSE-NEXT:    movaps 160(%rdi), %xmm10
; SSE-NEXT:    movaps %xmm7, %xmm15
; SSE-NEXT:    shufps {{.*#+}} xmm15 = xmm15[0,2],xmm3[0,2]
; SSE-NEXT:    movaps %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    shufps {{.*#+}} xmm7 = xmm7[1,3],xmm3[1,3]
; SSE-NEXT:    movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movaps %xmm5, %xmm3
; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[0,2],xmm1[0,2]
; SSE-NEXT:    movaps %xmm3, (%rsp) # 16-byte Spill
; SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[1,3],xmm1[1,3]
; SSE-NEXT:    movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movaps %xmm10, %xmm1
; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,2],xmm4[0,2]
; SSE-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    shufps {{.*#+}} xmm10 = xmm10[1,3],xmm4[1,3]
; SSE-NEXT:    movaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movaps %xmm9, %xmm1
; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; SSE-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    shufps {{.*#+}} xmm9 = xmm9[1,3],xmm2[1,3]
; SSE-NEXT:    movaps %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movaps %xmm8, %xmm1
; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,2],xmm14[0,2]
; SSE-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    shufps {{.*#+}} xmm8 = xmm8[1,3],xmm14[1,3]
; SSE-NEXT:    movaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movaps %xmm6, %xmm1
; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,2],xmm11[0,2]
; SSE-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    shufps {{.*#+}} xmm6 = xmm6[1,3],xmm11[1,3]
; SSE-NEXT:    movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movaps %xmm13, %xmm1
; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[0,2]
; SSE-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    shufps {{.*#+}} xmm13 = xmm13[1,3],xmm0[1,3]
; SSE-NEXT:    movaps %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movaps 256(%rdi), %xmm0
; SSE-NEXT:    movaps %xmm0, %xmm1
; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,2],xmm12[0,2]
; SSE-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,3],xmm12[1,3]
; SSE-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movaps 368(%rdi), %xmm0
; SSE-NEXT:    movaps 352(%rdi), %xmm15
; SSE-NEXT:    movaps %xmm15, %xmm1
; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[0,2]
; SSE-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    shufps {{.*#+}} xmm15 = xmm15[1,3],xmm0[1,3]
; SSE-NEXT:    movaps 336(%rdi), %xmm0
; SSE-NEXT:    movaps 320(%rdi), %xmm13
; SSE-NEXT:    movaps %xmm13, %xmm11
; SSE-NEXT:    shufps {{.*#+}} xmm11 = xmm11[0,2],xmm0[0,2]
; SSE-NEXT:    shufps {{.*#+}} xmm13 = xmm13[1,3],xmm0[1,3]
; SSE-NEXT:    movaps 432(%rdi), %xmm0
; SSE-NEXT:    movaps 416(%rdi), %xmm12
; SSE-NEXT:    movaps %xmm12, %xmm14
; SSE-NEXT:    shufps {{.*#+}} xmm14 = xmm14[0,2],xmm0[0,2]
; SSE-NEXT:    shufps {{.*#+}} xmm12 = xmm12[1,3],xmm0[1,3]
; SSE-NEXT:    movaps 400(%rdi), %xmm0
; SSE-NEXT:    movaps 384(%rdi), %xmm9
; SSE-NEXT:    movaps %xmm9, %xmm10
; SSE-NEXT:    shufps {{.*#+}} xmm10 = xmm10[0,2],xmm0[0,2]
; SSE-NEXT:    shufps {{.*#+}} xmm9 = xmm9[1,3],xmm0[1,3]
; SSE-NEXT:    movaps 496(%rdi), %xmm0
; SSE-NEXT:    movaps 480(%rdi), %xmm7
; SSE-NEXT:    movaps %xmm7, %xmm6
; SSE-NEXT:    shufps {{.*#+}} xmm6 = xmm6[0,2],xmm0[0,2]
; SSE-NEXT:    shufps {{.*#+}} xmm7 = xmm7[1,3],xmm0[1,3]
; SSE-NEXT:    movaps 464(%rdi), %xmm1
; SSE-NEXT:    movaps 448(%rdi), %xmm3
; SSE-NEXT:    movaps %xmm3, %xmm2
; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,2],xmm1[0,2]
; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[1,3],xmm1[1,3]
; SSE-NEXT:    movaps 32(%rdi), %xmm8
; SSE-NEXT:    movaps 48(%rdi), %xmm1
; SSE-NEXT:    movaps %xmm8, %xmm5
; SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[0,2],xmm1[0,2]
; SSE-NEXT:    shufps {{.*#+}} xmm8 = xmm8[1,3],xmm1[1,3]
; SSE-NEXT:    movaps (%rdi), %xmm4
; SSE-NEXT:    movaps 16(%rdi), %xmm0
; SSE-NEXT:    movaps %xmm4, %xmm1
; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[0,2]
; SSE-NEXT:    shufps {{.*#+}} xmm4 = xmm4[1,3],xmm0[1,3]
; SSE-NEXT:    movaps %xmm2, 224(%rsi)
; SSE-NEXT:    movaps %xmm11, 160(%rsi)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 96(%rsi)
; SSE-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 32(%rsi)
; SSE-NEXT:    movaps %xmm6, 240(%rsi)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 176(%rsi)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 112(%rsi)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 48(%rsi)
; SSE-NEXT:    movaps %xmm10, 192(%rsi)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 128(%rsi)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 64(%rsi)
; SSE-NEXT:    movaps %xmm1, (%rsi)
; SSE-NEXT:    movaps %xmm14, 208(%rsi)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 144(%rsi)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 80(%rsi)
; SSE-NEXT:    movaps %xmm5, 16(%rsi)
; SSE-NEXT:    movaps %xmm3, 224(%rdx)
; SSE-NEXT:    movaps %xmm7, 240(%rdx)
; SSE-NEXT:    movaps %xmm9, 192(%rdx)
; SSE-NEXT:    movaps %xmm12, 208(%rdx)
; SSE-NEXT:    movaps %xmm13, 160(%rdx)
; SSE-NEXT:    movaps %xmm15, 176(%rdx)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 128(%rdx)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 144(%rdx)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 96(%rdx)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 112(%rdx)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 64(%rdx)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 80(%rdx)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 32(%rdx)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 48(%rdx)
; SSE-NEXT:    movaps %xmm4, (%rdx)
; SSE-NEXT:    movaps %xmm8, 16(%rdx)
; SSE-NEXT:    addq $152, %rsp
; SSE-NEXT:    retq
;
; AVX1-ONLY-LABEL: load_i32_stride2_vf64:
; AVX1-ONLY:       # %bb.0:
; AVX1-ONLY-NEXT:    vmovaps 384(%rdi), %ymm4
; AVX1-ONLY-NEXT:    vmovaps 256(%rdi), %ymm6
; AVX1-ONLY-NEXT:    vmovaps 320(%rdi), %ymm5
; AVX1-ONLY-NEXT:    vmovaps (%rdi), %ymm1
; AVX1-ONLY-NEXT:    vmovaps 64(%rdi), %ymm2
; AVX1-ONLY-NEXT:    vmovaps 128(%rdi), %ymm9
; AVX1-ONLY-NEXT:    vmovaps 192(%rdi), %ymm3
; AVX1-ONLY-NEXT:    vmovaps 448(%rdi), %ymm0
; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm8 = ymm0[2,3],mem[2,3]
; AVX1-ONLY-NEXT:    vinsertf128 $1, 480(%rdi), %ymm0, %ymm10
; AVX1-ONLY-NEXT:    vshufps {{.*#+}} ymm0 = ymm10[0,2],ymm8[0,2],ymm10[4,6],ymm8[4,6]
; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm7 = ymm2[2,3],mem[2,3]
; AVX1-ONLY-NEXT:    vinsertf128 $1, 96(%rdi), %ymm2, %ymm11
; AVX1-ONLY-NEXT:    vshufps {{.*#+}} ymm2 = ymm11[0,2],ymm7[0,2],ymm11[4,6],ymm7[4,6]
; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm12 = ymm3[2,3],mem[2,3]
; AVX1-ONLY-NEXT:    vinsertf128 $1, 224(%rdi), %ymm3, %ymm13
; AVX1-ONLY-NEXT:    vshufps {{.*#+}} ymm3 = ymm13[0,2],ymm12[0,2],ymm13[4,6],ymm12[4,6]
; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm14 = ymm5[2,3],mem[2,3]
; AVX1-ONLY-NEXT:    vinsertf128 $1, 352(%rdi), %ymm5, %ymm15
; AVX1-ONLY-NEXT:    vshufps {{.*#+}} ymm5 = ymm15[0,2],ymm14[0,2],ymm15[4,6],ymm14[4,6]
; AVX1-ONLY-NEXT:    vshufps {{.*#+}} ymm7 = ymm11[1,3],ymm7[1,3],ymm11[5,7],ymm7[5,7]
; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm11 = ymm9[2,3],mem[2,3]
; AVX1-ONLY-NEXT:    vinsertf128 $1, 160(%rdi), %ymm9, %ymm9
; AVX1-ONLY-NEXT:    vshufps {{.*#+}} ymm12 = ymm13[1,3],ymm12[1,3],ymm13[5,7],ymm12[5,7]
; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm13 = ymm6[2,3],mem[2,3]
; AVX1-ONLY-NEXT:    vinsertf128 $1, 288(%rdi), %ymm6, %ymm6
; AVX1-ONLY-NEXT:    vshufps {{.*#+}} ymm14 = ymm15[1,3],ymm14[1,3],ymm15[5,7],ymm14[5,7]
; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm15 = ymm4[2,3],mem[2,3]
; AVX1-ONLY-NEXT:    vinsertf128 $1, 416(%rdi), %ymm4, %ymm4
; AVX1-ONLY-NEXT:    vshufps {{.*#+}} ymm8 = ymm10[1,3],ymm8[1,3],ymm10[5,7],ymm8[5,7]
; AVX1-ONLY-NEXT:    vshufps {{.*#+}} ymm10 = ymm4[0,2],ymm15[0,2],ymm4[4,6],ymm15[4,6]
; AVX1-ONLY-NEXT:    vshufps {{.*#+}} ymm4 = ymm4[1,3],ymm15[1,3],ymm4[5,7],ymm15[5,7]
; AVX1-ONLY-NEXT:    vshufps {{.*#+}} ymm15 = ymm6[0,2],ymm13[0,2],ymm6[4,6],ymm13[4,6]
; AVX1-ONLY-NEXT:    vshufps {{.*#+}} ymm6 = ymm6[1,3],ymm13[1,3],ymm6[5,7],ymm13[5,7]
; AVX1-ONLY-NEXT:    vshufps {{.*#+}} ymm13 = ymm9[0,2],ymm11[0,2],ymm9[4,6],ymm11[4,6]
; AVX1-ONLY-NEXT:    vshufps {{.*#+}} ymm9 = ymm9[1,3],ymm11[1,3],ymm9[5,7],ymm11[5,7]
; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm11 = ymm1[2,3],mem[2,3]
; AVX1-ONLY-NEXT:    vinsertf128 $1, 32(%rdi), %ymm1, %ymm1
; AVX1-ONLY-NEXT:    vshufps {{.*#+}} ymm0 = ymm1[0,2],ymm11[0,2],ymm1[4,6],ymm11[4,6]
; AVX1-ONLY-NEXT:    vshufps {{.*#+}} ymm1 = ymm1[1,3],ymm11[1,3],ymm1[5,7],ymm11[5,7]
; AVX1-ONLY-NEXT:    vmovaps %ymm10, 192(%rsi)
; AVX1-ONLY-NEXT:    vmovaps %ymm15, 128(%rsi)
; AVX1-ONLY-NEXT:    vmovaps %ymm13, 64(%rsi)
; AVX1-ONLY-NEXT:    vmovaps %ymm0, (%rsi)
; AVX1-ONLY-NEXT:    vmovaps %ymm5, 160(%rsi)
; AVX1-ONLY-NEXT:    vmovaps %ymm3, 96(%rsi)
; AVX1-ONLY-NEXT:    vmovaps %ymm2, 32(%rsi)
; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-ONLY-NEXT:    vmovaps %ymm0, 224(%rsi)
; AVX1-ONLY-NEXT:    vmovaps %ymm1, (%rdx)
; AVX1-ONLY-NEXT:    vmovaps %ymm9, 64(%rdx)
; AVX1-ONLY-NEXT:    vmovaps %ymm6, 128(%rdx)
; AVX1-ONLY-NEXT:    vmovaps %ymm4, 192(%rdx)
; AVX1-ONLY-NEXT:    vmovaps %ymm8, 224(%rdx)
; AVX1-ONLY-NEXT:    vmovaps %ymm14, 160(%rdx)
; AVX1-ONLY-NEXT:    vmovaps %ymm12, 96(%rdx)
; AVX1-ONLY-NEXT:    vmovaps %ymm7, 32(%rdx)
; AVX1-ONLY-NEXT:    vzeroupper
; AVX1-ONLY-NEXT:    retq
;
; AVX2-ONLY-LABEL: load_i32_stride2_vf64:
; AVX2-ONLY:       # %bb.0:
; AVX2-ONLY-NEXT:    vmovaps (%rdi), %ymm1
; AVX2-ONLY-NEXT:    vmovaps 64(%rdi), %ymm3
; AVX2-ONLY-NEXT:    vmovaps 96(%rdi), %ymm4
; AVX2-ONLY-NEXT:    vmovaps 416(%rdi), %ymm10
; AVX2-ONLY-NEXT:    vmovaps 384(%rdi), %ymm12
; AVX2-ONLY-NEXT:    vmovaps 288(%rdi), %ymm8
; AVX2-ONLY-NEXT:    vmovaps 256(%rdi), %ymm11
; AVX2-ONLY-NEXT:    vmovaps 160(%rdi), %ymm7
; AVX2-ONLY-NEXT:    vmovaps 128(%rdi), %ymm9
; AVX2-ONLY-NEXT:    vmovaps 480(%rdi), %ymm6
; AVX2-ONLY-NEXT:    vmovaps 448(%rdi), %ymm14
; AVX2-ONLY-NEXT:    vmovaps 352(%rdi), %ymm5
; AVX2-ONLY-NEXT:    vmovaps 320(%rdi), %ymm15
; AVX2-ONLY-NEXT:    vmovaps 224(%rdi), %ymm2
; AVX2-ONLY-NEXT:    vmovaps 192(%rdi), %ymm13
; AVX2-ONLY-NEXT:    vshufps {{.*#+}} ymm0 = ymm13[0,2],ymm2[0,2],ymm13[4,6],ymm2[4,6]
; AVX2-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT:    vshufps {{.*#+}} ymm2 = ymm13[1,3],ymm2[1,3],ymm13[5,7],ymm2[5,7]
; AVX2-ONLY-NEXT:    vshufps {{.*#+}} ymm13 = ymm15[0,2],ymm5[0,2],ymm15[4,6],ymm5[4,6]
; AVX2-ONLY-NEXT:    vshufps {{.*#+}} ymm5 = ymm15[1,3],ymm5[1,3],ymm15[5,7],ymm5[5,7]
; AVX2-ONLY-NEXT:    vshufps {{.*#+}} ymm15 = ymm14[0,2],ymm6[0,2],ymm14[4,6],ymm6[4,6]
; AVX2-ONLY-NEXT:    vshufps {{.*#+}} ymm6 = ymm14[1,3],ymm6[1,3],ymm14[5,7],ymm6[5,7]
; AVX2-ONLY-NEXT:    vshufps {{.*#+}} ymm14 = ymm12[0,2],ymm10[0,2],ymm12[4,6],ymm10[4,6]
; AVX2-ONLY-NEXT:    vshufps {{.*#+}} ymm10 = ymm12[1,3],ymm10[1,3],ymm12[5,7],ymm10[5,7]
; AVX2-ONLY-NEXT:    vshufps {{.*#+}} ymm12 = ymm11[0,2],ymm8[0,2],ymm11[4,6],ymm8[4,6]
; AVX2-ONLY-NEXT:    vshufps {{.*#+}} ymm8 = ymm11[1,3],ymm8[1,3],ymm11[5,7],ymm8[5,7]
; AVX2-ONLY-NEXT:    vshufps {{.*#+}} ymm11 = ymm9[0,2],ymm7[0,2],ymm9[4,6],ymm7[4,6]
; AVX2-ONLY-NEXT:    vshufps {{.*#+}} ymm7 = ymm9[1,3],ymm7[1,3],ymm9[5,7],ymm7[5,7]
; AVX2-ONLY-NEXT:    vshufps {{.*#+}} ymm9 = ymm3[0,2],ymm4[0,2],ymm3[4,6],ymm4[4,6]
; AVX2-ONLY-NEXT:    vshufps {{.*#+}} ymm3 = ymm3[1,3],ymm4[1,3],ymm3[5,7],ymm4[5,7]
; AVX2-ONLY-NEXT:    vmovaps 32(%rdi), %ymm4
; AVX2-ONLY-NEXT:    vshufps {{.*#+}} ymm0 = ymm1[0,2],ymm4[0,2],ymm1[4,6],ymm4[4,6]
; AVX2-ONLY-NEXT:    vshufps {{.*#+}} ymm1 = ymm1[1,3],ymm4[1,3],ymm1[5,7],ymm4[5,7]
; AVX2-ONLY-NEXT:    vpermpd {{.*#+}} ymm4 = ymm14[0,2,1,3]
; AVX2-ONLY-NEXT:    vmovaps %ymm4, 192(%rsi)
; AVX2-ONLY-NEXT:    vpermpd {{.*#+}} ymm4 = ymm12[0,2,1,3]
; AVX2-ONLY-NEXT:    vmovaps %ymm4, 128(%rsi)
; AVX2-ONLY-NEXT:    vpermpd {{.*#+}} ymm4 = ymm11[0,2,1,3]
; AVX2-ONLY-NEXT:    vmovaps %ymm4, 64(%rsi)
; AVX2-ONLY-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-ONLY-NEXT:    vmovaps %ymm0, (%rsi)
; AVX2-ONLY-NEXT:    vpermpd {{.*#+}} ymm0 = ymm15[0,2,1,3]
; AVX2-ONLY-NEXT:    vmovaps %ymm0, 224(%rsi)
; AVX2-ONLY-NEXT:    vpermpd {{.*#+}} ymm0 = ymm13[0,2,1,3]
; AVX2-ONLY-NEXT:    vmovaps %ymm0, 160(%rsi)
; AVX2-ONLY-NEXT:    vpermpd $216, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
; AVX2-ONLY-NEXT:    # ymm0 = mem[0,2,1,3]
; AVX2-ONLY-NEXT:    vmovaps %ymm0, 96(%rsi)
; AVX2-ONLY-NEXT:    vpermpd {{.*#+}} ymm0 = ymm9[0,2,1,3]
; AVX2-ONLY-NEXT:    vmovaps %ymm0, 32(%rsi)
; AVX2-ONLY-NEXT:    vpermpd {{.*#+}} ymm0 = ymm1[0,2,1,3]
; AVX2-ONLY-NEXT:    vmovaps %ymm0, (%rdx)
; AVX2-ONLY-NEXT:    vpermpd {{.*#+}} ymm0 = ymm7[0,2,1,3]
; AVX2-ONLY-NEXT:    vmovaps %ymm0, 64(%rdx)
; AVX2-ONLY-NEXT:    vpermpd {{.*#+}} ymm0 = ymm8[0,2,1,3]
; AVX2-ONLY-NEXT:    vmovaps %ymm0, 128(%rdx)
; AVX2-ONLY-NEXT:    vpermpd {{.*#+}} ymm0 = ymm10[0,2,1,3]
; AVX2-ONLY-NEXT:    vmovaps %ymm0, 192(%rdx)
; AVX2-ONLY-NEXT:    vpermpd {{.*#+}} ymm0 = ymm6[0,2,1,3]
; AVX2-ONLY-NEXT:    vmovaps %ymm0, 224(%rdx)
; AVX2-ONLY-NEXT:    vpermpd {{.*#+}} ymm0 = ymm5[0,2,1,3]
; AVX2-ONLY-NEXT:    vmovaps %ymm0, 160(%rdx)
; AVX2-ONLY-NEXT:    vpermpd {{.*#+}} ymm0 = ymm2[0,2,1,3]
; AVX2-ONLY-NEXT:    vmovaps %ymm0, 96(%rdx)
; AVX2-ONLY-NEXT:    vpermpd {{.*#+}} ymm0 = ymm3[0,2,1,3]
; AVX2-ONLY-NEXT:    vmovaps %ymm0, 32(%rdx)
; AVX2-ONLY-NEXT:    vzeroupper
; AVX2-ONLY-NEXT:    retq
;
; AVX512-LABEL: load_i32_stride2_vf64:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vmovdqa64 (%rdi), %zmm0
; AVX512-NEXT:    vmovdqa64 64(%rdi), %zmm1
; AVX512-NEXT:    vmovdqa64 128(%rdi), %zmm2
; AVX512-NEXT:    vmovdqa64 192(%rdi), %zmm3
; AVX512-NEXT:    vmovdqa64 448(%rdi), %zmm4
; AVX512-NEXT:    vmovdqa64 384(%rdi), %zmm5
; AVX512-NEXT:    vmovdqa64 320(%rdi), %zmm6
; AVX512-NEXT:    vmovdqa64 256(%rdi), %zmm7
; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30]
; AVX512-NEXT:    vmovdqa64 %zmm7, %zmm9
; AVX512-NEXT:    vpermt2d %zmm6, %zmm8, %zmm9
; AVX512-NEXT:    vmovdqa64 %zmm5, %zmm10
; AVX512-NEXT:    vpermt2d %zmm4, %zmm8, %zmm10
; AVX512-NEXT:    vmovdqa64 %zmm2, %zmm11
; AVX512-NEXT:    vpermt2d %zmm3, %zmm8, %zmm11
; AVX512-NEXT:    vpermi2d %zmm1, %zmm0, %zmm8
; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm12 = [1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31]
; AVX512-NEXT:    vpermt2d %zmm4, %zmm12, %zmm5
; AVX512-NEXT:    vpermt2d %zmm6, %zmm12, %zmm7
; AVX512-NEXT:    vpermt2d %zmm3, %zmm12, %zmm2
; AVX512-NEXT:    vpermt2d %zmm1, %zmm12, %zmm0
; AVX512-NEXT:    vmovdqa64 %zmm10, 192(%rsi)
; AVX512-NEXT:    vmovdqa64 %zmm8, (%rsi)
; AVX512-NEXT:    vmovdqa64 %zmm11, 64(%rsi)
; AVX512-NEXT:    vmovdqa64 %zmm9, 128(%rsi)
; AVX512-NEXT:    vmovdqa64 %zmm7, 128(%rdx)
; AVX512-NEXT:    vmovdqa64 %zmm5, 192(%rdx)
; AVX512-NEXT:    vmovdqa64 %zmm0, (%rdx)
; AVX512-NEXT:    vmovdqa64 %zmm2, 64(%rdx)
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %wide.vec = load <128 x i32>, ptr %in.vec, align 64
  %strided.vec0 = shufflevector <128 x i32> %wide.vec, <128 x i32> poison, <64 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30, i32 32, i32 34, i32 36, i32 38, i32 40, i32 42, i32 44, i32 46, i32 48, i32 50, i32 52, i32 54, i32 56, i32 58, i32 60, i32 62, i32 64, i32 66, i32 68, i32 70, i32 72, i32 74, i32 76, i32 78, i32 80, i32 82, i32 84, i32 86, i32 88, i32 90, i32 92, i32 94, i32 96, i32 98, i32 100, i32 102, i32 104, i32 106, i32 108, i32 110, i32 112, i32 114, i32 116, i32 118, i32 120, i32 122, i32 124, i32 126>
  %strided.vec1 = shufflevector <128 x i32> %wide.vec, <128 x i32> poison, <64 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31, i32 33, i32 35, i32 37, i32 39, i32 41, i32 43, i32 45, i32 47, i32 49, i32 51, i32 53, i32 55, i32 57, i32 59, i32 61, i32 63, i32 65, i32 67, i32 69, i32 71, i32 73, i32 75, i32 77, i32 79, i32 81, i32 83, i32 85, i32 87, i32 89, i32 91, i32 93, i32 95, i32 97, i32 99, i32 101, i32 103, i32 105, i32 107, i32 109, i32 111, i32 113, i32 115, i32 117, i32 119, i32 121, i32 123, i32 125, i32 127>
  store <64 x i32> %strided.vec0, ptr %out.vec0, align 64
  store <64 x i32> %strided.vec1, ptr %out.vec1, align 64
  ret void
}
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; AVX2: {{.*}}
; AVX2-FAST: {{.*}}
; AVX2-FAST-PERLANE: {{.*}}
; AVX2-SLOW: {{.*}}
; AVX512BW: {{.*}}
; AVX512BW-FAST: {{.*}}
; AVX512BW-ONLY: {{.*}}
; AVX512BW-ONLY-FAST: {{.*}}
; AVX512BW-ONLY-SLOW: {{.*}}
; AVX512BW-SLOW: {{.*}}
; AVX512DQ-FAST: {{.*}}
; AVX512DQ-ONLY: {{.*}}
; AVX512DQ-SLOW: {{.*}}
; AVX512DQBW-FAST: {{.*}}
; AVX512DQBW-ONLY: {{.*}}
; AVX512DQBW-SLOW: {{.*}}
; AVX512F: {{.*}}
; AVX512F-FAST: {{.*}}
; AVX512F-ONLY: {{.*}}
; AVX512F-ONLY-FAST: {{.*}}
; AVX512F-ONLY-SLOW: {{.*}}
; AVX512F-SLOW: {{.*}}