; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2 | FileCheck %s --check-prefixes=SSE
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | FileCheck %s --check-prefixes=AVX
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX2
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX2-FP
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX2-FCP
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX512
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512-FCP
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX512DQ
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512DQ-FCP
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX512BW
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512BW-FCP
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX512DQ-BW
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512DQ-BW-FCP

; These patterns are produced by LoopVectorizer for interleaved loads.
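;
; For reference, the kind of source loop that yields a stride-3 interleave
; group is an array-of-structs traversal; a minimal C sketch (illustrative
; only; the struct and variable names are hypothetical, not part of this test):
;
;   struct S { long a, b, c; };                 // three i64 fields, stride 3
;   void split(struct S *in, long *x, long *y, long *z, int n) {
;     for (int i = 0; i < n; i++) {             // LoopVectorizer widens this
;       x[i] = in[i].a;                         // lanes 0, 3, 6, ...
;       y[i] = in[i].b;                         // lanes 1, 4, 7, ...
;       z[i] = in[i].c;                         // lanes 2, 5, 8, ...
;     }
;   }
;
; The vectorizer replaces the three scalar loads with one wide load of the
; interleave group followed by three shufflevectors whose masks select every
; third element, which is exactly the IR each function below tests.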
define void @load_i64_stride3_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2) nounwind {
; SSE-LABEL: load_i64_stride3_vf2:
; SSE: # %bb.0:
; SSE-NEXT: movapd (%rdi), %xmm0
; SSE-NEXT: movapd 16(%rdi), %xmm1
; SSE-NEXT: movapd 32(%rdi), %xmm2
; SSE-NEXT: movapd %xmm1, %xmm3
; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm0[0],xmm3[1]
; SSE-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1],xmm2[0]
; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
; SSE-NEXT: movapd %xmm3, (%rsi)
; SSE-NEXT: movapd %xmm0, (%rdx)
; SSE-NEXT: movapd %xmm2, (%rcx)
; SSE-NEXT: retq
;
; AVX-LABEL: load_i64_stride3_vf2:
; AVX: # %bb.0:
; AVX-NEXT: vmovdqa (%rdi), %xmm0
; AVX-NEXT: vmovdqa 16(%rdi), %xmm1
; AVX-NEXT: vmovdqa 32(%rdi), %xmm2
; AVX-NEXT: vpblendw {{.*#+}} xmm3 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; AVX-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7]
; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],mem[4,5,6,7]
; AVX-NEXT: vmovdqa %xmm3, (%rsi)
; AVX-NEXT: vmovdqa %xmm0, (%rdx)
; AVX-NEXT: vmovdqa %xmm1, (%rcx)
; AVX-NEXT: retq
;
; AVX2-LABEL: load_i64_stride3_vf2:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %xmm0
; AVX2-NEXT: vmovdqa 16(%rdi), %xmm1
; AVX2-NEXT: vmovdqa 32(%rdi), %xmm2
; AVX2-NEXT: vpblendd {{.*#+}} xmm3 = xmm0[0,1],xmm1[2,3]
; AVX2-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7]
; AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
; AVX2-NEXT: vmovdqa %xmm3, (%rsi)
; AVX2-NEXT: vmovdqa %xmm0, (%rdx)
; AVX2-NEXT: vmovdqa %xmm1, (%rcx)
; AVX2-NEXT: retq
;
; AVX2-FP-LABEL: load_i64_stride3_vf2:
; AVX2-FP: # %bb.0:
; AVX2-FP-NEXT: vmovdqa (%rdi), %xmm0
; AVX2-FP-NEXT: vmovdqa 16(%rdi), %xmm1
; AVX2-FP-NEXT: vmovdqa 32(%rdi), %xmm2
; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm3 = xmm0[0,1],xmm1[2,3]
; AVX2-FP-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7]
; AVX2-FP-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
; AVX2-FP-NEXT: vmovdqa %xmm3, (%rsi)
; AVX2-FP-NEXT: vmovdqa %xmm0, (%rdx)
; AVX2-FP-NEXT: vmovdqa %xmm1, (%rcx)
; AVX2-FP-NEXT: retq
;
; AVX2-FCP-LABEL: load_i64_stride3_vf2:
; AVX2-FCP: # %bb.0:
; AVX2-FCP-NEXT: vmovdqa (%rdi), %xmm0
; AVX2-FCP-NEXT: vmovdqa 16(%rdi), %xmm1
; AVX2-FCP-NEXT: vmovdqa 32(%rdi), %xmm2
; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm3 = xmm0[0,1],xmm1[2,3]
; AVX2-FCP-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7]
; AVX2-FCP-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
; AVX2-FCP-NEXT: vmovdqa %xmm3, (%rsi)
; AVX2-FCP-NEXT: vmovdqa %xmm0, (%rdx)
; AVX2-FCP-NEXT: vmovdqa %xmm1, (%rcx)
; AVX2-FCP-NEXT: retq
;
; AVX512-LABEL: load_i64_stride3_vf2:
; AVX512: # %bb.0:
; AVX512-NEXT: vpermpd {{.*#+}} zmm0 = mem[0,3,2,3,4,7,6,7]
; AVX512-NEXT: vmovdqa 32(%rdi), %xmm1
; AVX512-NEXT: vpalignr {{.*#+}} xmm2 = mem[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
; AVX512-NEXT: vpblendd {{.*#+}} xmm1 = mem[0,1],xmm1[2,3]
; AVX512-NEXT: vmovaps %xmm0, (%rsi)
; AVX512-NEXT: vmovdqa %xmm2, (%rdx)
; AVX512-NEXT: vmovdqa %xmm1, (%rcx)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
;
; AVX512-FCP-LABEL: load_i64_stride3_vf2:
; AVX512-FCP: # %bb.0:
; AVX512-FCP-NEXT: vmovaps {{.*#+}} xmm0 = [1,4]
; AVX512-FCP-NEXT: vmovaps (%rdi), %zmm1
; AVX512-FCP-NEXT: vpermpd %zmm1, %zmm0, %zmm0
; AVX512-FCP-NEXT: vpermpd {{.*#+}} zmm1 = zmm1[0,3,2,3,4,7,6,7]
; AVX512-FCP-NEXT: vmovaps 16(%rdi), %xmm2
; AVX512-FCP-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0,1],mem[2,3]
; AVX512-FCP-NEXT: vmovaps %xmm1, (%rsi)
; AVX512-FCP-NEXT: vmovaps %xmm0, (%rdx)
; AVX512-FCP-NEXT: vmovaps %xmm2, (%rcx)
; AVX512-FCP-NEXT: vzeroupper
; AVX512-FCP-NEXT: retq
;
; AVX512DQ-LABEL: load_i64_stride3_vf2:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpermpd {{.*#+}} zmm0 = mem[0,3,2,3,4,7,6,7]
; AVX512DQ-NEXT: vmovdqa 32(%rdi), %xmm1
; AVX512DQ-NEXT: vpalignr {{.*#+}} xmm2 = mem[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
; AVX512DQ-NEXT: vpblendd {{.*#+}} xmm1 = mem[0,1],xmm1[2,3]
; AVX512DQ-NEXT: vmovaps %xmm0, (%rsi)
; AVX512DQ-NEXT: vmovdqa %xmm2, (%rdx)
; AVX512DQ-NEXT: vmovdqa %xmm1, (%rcx)
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512DQ-FCP-LABEL: load_i64_stride3_vf2:
; AVX512DQ-FCP: # %bb.0:
; AVX512DQ-FCP-NEXT: vmovaps {{.*#+}} xmm0 = [1,4]
; AVX512DQ-FCP-NEXT: vmovaps (%rdi), %zmm1
; AVX512DQ-FCP-NEXT: vpermpd %zmm1, %zmm0, %zmm0
; AVX512DQ-FCP-NEXT: vpermpd {{.*#+}} zmm1 = zmm1[0,3,2,3,4,7,6,7]
; AVX512DQ-FCP-NEXT: vmovaps 16(%rdi), %xmm2
; AVX512DQ-FCP-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0,1],mem[2,3]
; AVX512DQ-FCP-NEXT: vmovaps %xmm1, (%rsi)
; AVX512DQ-FCP-NEXT: vmovaps %xmm0, (%rdx)
; AVX512DQ-FCP-NEXT: vmovaps %xmm2, (%rcx)
; AVX512DQ-FCP-NEXT: vzeroupper
; AVX512DQ-FCP-NEXT: retq
;
; AVX512BW-LABEL: load_i64_stride3_vf2:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpermpd {{.*#+}} zmm0 = mem[0,3,2,3,4,7,6,7]
; AVX512BW-NEXT: vmovdqa 32(%rdi), %xmm1
; AVX512BW-NEXT: vpalignr {{.*#+}} xmm2 = mem[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
; AVX512BW-NEXT: vpblendd {{.*#+}} xmm1 = mem[0,1],xmm1[2,3]
; AVX512BW-NEXT: vmovaps %xmm0, (%rsi)
; AVX512BW-NEXT: vmovdqa %xmm2, (%rdx)
; AVX512BW-NEXT: vmovdqa %xmm1, (%rcx)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BW-FCP-LABEL: load_i64_stride3_vf2:
; AVX512BW-FCP: # %bb.0:
; AVX512BW-FCP-NEXT: vmovaps {{.*#+}} xmm0 = [1,4]
; AVX512BW-FCP-NEXT: vmovaps (%rdi), %zmm1
; AVX512BW-FCP-NEXT: vpermpd %zmm1, %zmm0, %zmm0
; AVX512BW-FCP-NEXT: vpermpd {{.*#+}} zmm1 = zmm1[0,3,2,3,4,7,6,7]
; AVX512BW-FCP-NEXT: vmovaps 16(%rdi), %xmm2
; AVX512BW-FCP-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0,1],mem[2,3]
; AVX512BW-FCP-NEXT: vmovaps %xmm1, (%rsi)
; AVX512BW-FCP-NEXT: vmovaps %xmm0, (%rdx)
; AVX512BW-FCP-NEXT: vmovaps %xmm2, (%rcx)
; AVX512BW-FCP-NEXT: vzeroupper
; AVX512BW-FCP-NEXT: retq
;
; AVX512DQ-BW-LABEL: load_i64_stride3_vf2:
; AVX512DQ-BW: # %bb.0:
; AVX512DQ-BW-NEXT: vpermpd {{.*#+}} zmm0 = mem[0,3,2,3,4,7,6,7]
; AVX512DQ-BW-NEXT: vmovdqa 32(%rdi), %xmm1
; AVX512DQ-BW-NEXT: vpalignr {{.*#+}} xmm2 = mem[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
; AVX512DQ-BW-NEXT: vpblendd {{.*#+}} xmm1 = mem[0,1],xmm1[2,3]
; AVX512DQ-BW-NEXT: vmovaps %xmm0, (%rsi)
; AVX512DQ-BW-NEXT: vmovdqa %xmm2, (%rdx)
; AVX512DQ-BW-NEXT: vmovdqa %xmm1, (%rcx)
; AVX512DQ-BW-NEXT: vzeroupper
; AVX512DQ-BW-NEXT: retq
;
; AVX512DQ-BW-FCP-LABEL: load_i64_stride3_vf2:
; AVX512DQ-BW-FCP: # %bb.0:
; AVX512DQ-BW-FCP-NEXT: vmovaps {{.*#+}} xmm0 = [1,4]
; AVX512DQ-BW-FCP-NEXT: vmovaps (%rdi), %zmm1
; AVX512DQ-BW-FCP-NEXT: vpermpd %zmm1, %zmm0, %zmm0
; AVX512DQ-BW-FCP-NEXT: vpermpd {{.*#+}} zmm1 = zmm1[0,3,2,3,4,7,6,7]
; AVX512DQ-BW-FCP-NEXT: vmovaps 16(%rdi), %xmm2
; AVX512DQ-BW-FCP-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0,1],mem[2,3]
; AVX512DQ-BW-FCP-NEXT: vmovaps %xmm1, (%rsi)
; AVX512DQ-BW-FCP-NEXT: vmovaps %xmm0, (%rdx)
; AVX512DQ-BW-FCP-NEXT: vmovaps %xmm2, (%rcx)
; AVX512DQ-BW-FCP-NEXT: vzeroupper
; AVX512DQ-BW-FCP-NEXT: retq
  %wide.vec = load <6 x i64>, ptr %in.vec, align 64
  %strided.vec0 = shufflevector <6 x i64> %wide.vec, <6 x i64> poison, <2 x i32> <i32 0, i32 3>
  %strided.vec1 = shufflevector <6 x i64> %wide.vec, <6 x i64> poison, <2 x i32> <i32 1, i32 4>
  %strided.vec2 = shufflevector <6 x i64> %wide.vec, <6 x i64> poison, <2 x i32> <i32 2, i32 5>
  store <2 x i64> %strided.vec0, ptr %out.vec0, align 64
  store <2 x i64> %strided.vec1, ptr %out.vec1, align 64
  store <2 x i64> %strided.vec2, ptr %out.vec2, align 64
  ret void
}

define void @load_i64_stride3_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2) nounwind {
; SSE-LABEL: load_i64_stride3_vf4:
; SSE: # %bb.0:
; SSE-NEXT: movapd 80(%rdi), %xmm0
; SSE-NEXT: movapd (%rdi), %xmm1
; SSE-NEXT: movapd 16(%rdi), %xmm2
; SSE-NEXT: movapd 32(%rdi), %xmm3
; SSE-NEXT: movapd 48(%rdi), %xmm4
; SSE-NEXT: movapd 64(%rdi), %xmm5
; SSE-NEXT: movapd %xmm5, %xmm6
; SSE-NEXT: movsd {{.*#+}} xmm6 = xmm4[0],xmm6[1]
; SSE-NEXT: movapd %xmm2, %xmm7
; SSE-NEXT: movsd {{.*#+}} xmm7 = xmm1[0],xmm7[1]
; SSE-NEXT: shufpd {{.*#+}} xmm4 = xmm4[1],xmm0[0]
; SSE-NEXT: shufpd {{.*#+}} xmm1 = xmm1[1],xmm3[0]
; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm5[0],xmm0[1]
; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm2[0],xmm3[1]
; SSE-NEXT: movapd %xmm6, 16(%rsi)
; SSE-NEXT: movapd %xmm7, (%rsi)
; SSE-NEXT: movapd %xmm4, 16(%rdx)
; SSE-NEXT: movapd %xmm1, (%rdx)
; SSE-NEXT: movapd %xmm0, 16(%rcx)
; SSE-NEXT: movapd %xmm3, (%rcx)
; SSE-NEXT: retq
;
; AVX-LABEL: load_i64_stride3_vf4:
; AVX: # %bb.0:
; AVX-NEXT: vmovapd 32(%rdi), %ymm0
; AVX-NEXT: vmovaps 16(%rdi), %xmm1
; AVX-NEXT: vblendpd {{.*#+}} ymm2 = mem[0,1],ymm0[2,3]
; AVX-NEXT: vinsertf128 $1, 64(%rdi), %ymm1, %ymm1
; AVX-NEXT: vblendpd {{.*#+}} ymm3 = ymm2[0],ymm1[1],ymm2[2],ymm1[3]
; AVX-NEXT: vshufpd {{.*#+}} ymm2 = ymm2[1],ymm0[0],ymm2[3],ymm0[3]
; AVX-NEXT: vbroadcastsd 80(%rdi), %ymm4
; AVX-NEXT: vblendpd {{.*#+}} ymm2 = ymm2[0,1,2],ymm4[3]
; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],mem[2,3]
; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3]
; AVX-NEXT: vmovapd %ymm3, (%rsi)
; AVX-NEXT: vmovapd %ymm2, (%rdx)
; AVX-NEXT: vmovapd %ymm0, (%rcx)
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; AVX2-LABEL: load_i64_stride3_vf4:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovaps 32(%rdi), %ymm0
; AVX2-NEXT: vmovaps (%rdi), %ymm1
; AVX2-NEXT: vinsertf128 $1, 64(%rdi), %ymm0, %ymm2
; AVX2-NEXT: vpermpd {{.*#+}} ymm3 = ymm1[0,3,2,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm0[4,5,6,7]
; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1,2,3,4,5],ymm2[6,7]
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; AVX2-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5]
; AVX2-NEXT: vbroadcastsd 80(%rdi), %ymm1
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX2-NEXT: vmovaps 16(%rdi), %xmm1
; AVX2-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm3 = mem[0,1,0,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
; AVX2-NEXT: vmovaps %ymm2, (%rsi)
; AVX2-NEXT: vmovaps %ymm0, (%rdx)
; AVX2-NEXT: vmovaps %ymm1, (%rcx)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX2-FP-LABEL: load_i64_stride3_vf4:
; AVX2-FP: # %bb.0:
; AVX2-FP-NEXT: vmovaps 32(%rdi), %ymm0
; AVX2-FP-NEXT: vmovaps (%rdi), %ymm1
; AVX2-FP-NEXT: vinsertf128 $1, 64(%rdi), %ymm0, %ymm2
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm3 = ymm1[0,3,2,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm0[4,5,6,7]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1,2,3,4,5],ymm2[6,7]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; AVX2-FP-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5]
; AVX2-FP-NEXT: vbroadcastsd 80(%rdi), %ymm1
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FP-NEXT: vmovaps 16(%rdi), %xmm1
; AVX2-FP-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm3 = mem[0,1,0,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
; AVX2-FP-NEXT: vmovaps %ymm2, (%rsi)
; AVX2-FP-NEXT: vmovaps %ymm0, (%rdx)
; AVX2-FP-NEXT: vmovaps %ymm1, (%rcx)
; AVX2-FP-NEXT: vzeroupper
; AVX2-FP-NEXT: retq
;
; AVX2-FCP-LABEL: load_i64_stride3_vf4:
; AVX2-FCP: # %bb.0:
; AVX2-FCP-NEXT: vmovaps 32(%rdi), %ymm0
; AVX2-FCP-NEXT: vmovaps (%rdi), %ymm1
; AVX2-FCP-NEXT: vinsertf128 $1, 64(%rdi), %ymm0, %ymm2
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm3 = ymm1[0,3,2,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm0[4,5,6,7]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1,2,3,4,5],ymm2[6,7]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; AVX2-FCP-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5]
; AVX2-FCP-NEXT: vbroadcastsd 80(%rdi), %ymm1
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FCP-NEXT: vmovaps 16(%rdi), %xmm1
; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm3 = mem[0,1,0,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
; AVX2-FCP-NEXT: vmovaps %ymm2, (%rsi)
; AVX2-FCP-NEXT: vmovaps %ymm0, (%rdx)
; AVX2-FCP-NEXT: vmovaps %ymm1, (%rcx)
; AVX2-FCP-NEXT: vzeroupper
; AVX2-FCP-NEXT: retq
;
; AVX512-LABEL: load_i64_stride3_vf4:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512-NEXT: vmovdqa64 64(%rdi), %zmm1
; AVX512-NEXT: vpmovsxbq {{.*#+}} ymm2 = [0,3,6,9]
; AVX512-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
; AVX512-NEXT: vpmovsxbq {{.*#+}} ymm3 = [1,4,7,10]
; AVX512-NEXT: vpermi2q %zmm1, %zmm0, %zmm3
; AVX512-NEXT: vpmovsxbq {{.*#+}} ymm4 = [2,5,8,11]
; AVX512-NEXT: vpermi2q %zmm1, %zmm0, %zmm4
; AVX512-NEXT: vmovdqa %ymm2, (%rsi)
; AVX512-NEXT: vmovdqa %ymm3, (%rdx)
; AVX512-NEXT: vmovdqa %ymm4, (%rcx)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
;
; AVX512-FCP-LABEL: load_i64_stride3_vf4:
; AVX512-FCP: # %bb.0:
; AVX512-FCP-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512-FCP-NEXT: vmovdqa64 64(%rdi), %zmm1
; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} ymm2 = [0,3,6,9]
; AVX512-FCP-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} ymm3 = [1,4,7,10]
; AVX512-FCP-NEXT: vpermi2q %zmm1, %zmm0, %zmm3
; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} ymm4 = [2,5,8,11]
; AVX512-FCP-NEXT: vpermi2q %zmm1, %zmm0, %zmm4
; AVX512-FCP-NEXT: vmovdqa %ymm2, (%rsi)
; AVX512-FCP-NEXT: vmovdqa %ymm3, (%rdx)
; AVX512-FCP-NEXT: vmovdqa %ymm4, (%rcx)
; AVX512-FCP-NEXT: vzeroupper
; AVX512-FCP-NEXT: retq
;
; AVX512DQ-LABEL: load_i64_stride3_vf4:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512DQ-NEXT: vmovdqa64 64(%rdi), %zmm1
; AVX512DQ-NEXT: vpmovsxbq {{.*#+}} ymm2 = [0,3,6,9]
; AVX512DQ-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
; AVX512DQ-NEXT: vpmovsxbq {{.*#+}} ymm3 = [1,4,7,10]
; AVX512DQ-NEXT: vpermi2q %zmm1, %zmm0, %zmm3
; AVX512DQ-NEXT: vpmovsxbq {{.*#+}} ymm4 = [2,5,8,11]
; AVX512DQ-NEXT: vpermi2q %zmm1, %zmm0, %zmm4
; AVX512DQ-NEXT: vmovdqa %ymm2, (%rsi)
; AVX512DQ-NEXT: vmovdqa %ymm3, (%rdx)
; AVX512DQ-NEXT: vmovdqa %ymm4, (%rcx)
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512DQ-FCP-LABEL: load_i64_stride3_vf4:
; AVX512DQ-FCP: # %bb.0:
; AVX512DQ-FCP-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512DQ-FCP-NEXT: vmovdqa64 64(%rdi), %zmm1
; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} ymm2 = [0,3,6,9]
; AVX512DQ-FCP-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} ymm3 = [1,4,7,10]
; AVX512DQ-FCP-NEXT: vpermi2q %zmm1, %zmm0, %zmm3
; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} ymm4 = [2,5,8,11]
; AVX512DQ-FCP-NEXT: vpermi2q %zmm1, %zmm0, %zmm4
; AVX512DQ-FCP-NEXT: vmovdqa %ymm2, (%rsi)
; AVX512DQ-FCP-NEXT: vmovdqa %ymm3, (%rdx)
; AVX512DQ-FCP-NEXT: vmovdqa %ymm4, (%rcx)
; AVX512DQ-FCP-NEXT: vzeroupper
; AVX512DQ-FCP-NEXT: retq
;
; AVX512BW-LABEL: load_i64_stride3_vf4:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BW-NEXT: vmovdqa64 64(%rdi), %zmm1
; AVX512BW-NEXT: vpmovsxbq {{.*#+}} ymm2 = [0,3,6,9]
; AVX512BW-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
; AVX512BW-NEXT: vpmovsxbq {{.*#+}} ymm3 = [1,4,7,10]
; AVX512BW-NEXT: vpermi2q %zmm1, %zmm0, %zmm3
; AVX512BW-NEXT: vpmovsxbq {{.*#+}} ymm4 = [2,5,8,11]
; AVX512BW-NEXT: vpermi2q %zmm1, %zmm0, %zmm4
; AVX512BW-NEXT: vmovdqa %ymm2, (%rsi)
; AVX512BW-NEXT: vmovdqa %ymm3, (%rdx)
; AVX512BW-NEXT: vmovdqa %ymm4, (%rcx)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BW-FCP-LABEL: load_i64_stride3_vf4:
; AVX512BW-FCP: # %bb.0:
; AVX512BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BW-FCP-NEXT: vmovdqa64 64(%rdi), %zmm1
; AVX512BW-FCP-NEXT: vpmovsxbq {{.*#+}} ymm2 = [0,3,6,9]
; AVX512BW-FCP-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
; AVX512BW-FCP-NEXT: vpmovsxbq {{.*#+}} ymm3 = [1,4,7,10]
; AVX512BW-FCP-NEXT: vpermi2q %zmm1, %zmm0, %zmm3
; AVX512BW-FCP-NEXT: vpmovsxbq {{.*#+}} ymm4 = [2,5,8,11]
; AVX512BW-FCP-NEXT: vpermi2q %zmm1, %zmm0, %zmm4
; AVX512BW-FCP-NEXT: vmovdqa %ymm2, (%rsi)
; AVX512BW-FCP-NEXT: vmovdqa %ymm3, (%rdx)
; AVX512BW-FCP-NEXT: vmovdqa %ymm4, (%rcx)
; AVX512BW-FCP-NEXT: vzeroupper
; AVX512BW-FCP-NEXT: retq
;
; AVX512DQ-BW-LABEL: load_i64_stride3_vf4:
; AVX512DQ-BW: # %bb.0:
; AVX512DQ-BW-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512DQ-BW-NEXT: vmovdqa64 64(%rdi), %zmm1
; AVX512DQ-BW-NEXT: vpmovsxbq {{.*#+}} ymm2 = [0,3,6,9]
; AVX512DQ-BW-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
; AVX512DQ-BW-NEXT: vpmovsxbq {{.*#+}} ymm3 = [1,4,7,10]
; AVX512DQ-BW-NEXT: vpermi2q %zmm1, %zmm0, %zmm3
; AVX512DQ-BW-NEXT: vpmovsxbq {{.*#+}} ymm4 = [2,5,8,11]
; AVX512DQ-BW-NEXT: vpermi2q %zmm1, %zmm0, %zmm4
; AVX512DQ-BW-NEXT: vmovdqa %ymm2, (%rsi)
; AVX512DQ-BW-NEXT: vmovdqa %ymm3, (%rdx)
; AVX512DQ-BW-NEXT: vmovdqa %ymm4, (%rcx)
; AVX512DQ-BW-NEXT: vzeroupper
; AVX512DQ-BW-NEXT: retq
;
; AVX512DQ-BW-FCP-LABEL: load_i64_stride3_vf4:
; AVX512DQ-BW-FCP: # %bb.0:
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 64(%rdi), %zmm1
; AVX512DQ-BW-FCP-NEXT: vpmovsxbq {{.*#+}} ymm2 = [0,3,6,9]
; AVX512DQ-BW-FCP-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
; AVX512DQ-BW-FCP-NEXT: vpmovsxbq {{.*#+}} ymm3 = [1,4,7,10]
; AVX512DQ-BW-FCP-NEXT: vpermi2q %zmm1, %zmm0, %zmm3
; AVX512DQ-BW-FCP-NEXT: vpmovsxbq {{.*#+}} ymm4 = [2,5,8,11]
; AVX512DQ-BW-FCP-NEXT: vpermi2q %zmm1, %zmm0, %zmm4
; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm2, (%rsi)
; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm3, (%rdx)
; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm4, (%rcx)
; AVX512DQ-BW-FCP-NEXT: vzeroupper
; AVX512DQ-BW-FCP-NEXT: retq
  %wide.vec = load <12 x i64>, ptr %in.vec, align 64
  %strided.vec0 = shufflevector <12 x i64> %wide.vec, <12 x i64> poison, <4 x i32> <i32 0, i32 3, i32 6, i32 9>
  %strided.vec1 = shufflevector <12 x i64> %wide.vec, <12 x i64> poison, <4 x i32> <i32 1, i32 4, i32 7, i32 10>
  %strided.vec2 = shufflevector <12 x i64> %wide.vec, <12 x i64> poison, <4 x i32> <i32 2, i32 5, i32 8, i32 11>
  store <4 x i64> %strided.vec0, ptr %out.vec0, align 64
  store <4 x i64> %strided.vec1, ptr %out.vec1, align 64
  store <4 x i64> %strided.vec2, ptr %out.vec2, align 64
  ret void
}

define void @load_i64_stride3_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2) nounwind {
; SSE-LABEL: load_i64_stride3_vf8:
; SSE: # %bb.0:
; SSE-NEXT: movapd 128(%rdi), %xmm2
; SSE-NEXT: movapd 176(%rdi), %xmm1
; SSE-NEXT: movapd 80(%rdi), %xmm0
; SSE-NEXT: movapd 96(%rdi), %xmm3
; SSE-NEXT: movapd 112(%rdi), %xmm8
; SSE-NEXT: movapd 144(%rdi), %xmm5
; SSE-NEXT: movapd 160(%rdi), %xmm9
; SSE-NEXT: movapd (%rdi), %xmm6
; SSE-NEXT: movapd 16(%rdi), %xmm10
; SSE-NEXT: movapd 32(%rdi), %xmm4
; SSE-NEXT: movapd 48(%rdi), %xmm7
; SSE-NEXT: movapd 64(%rdi), %xmm11
; SSE-NEXT: movapd %xmm11, %xmm12
; SSE-NEXT: movsd {{.*#+}} xmm12 = xmm7[0],xmm12[1]
; SSE-NEXT: movapd %xmm9, %xmm13
; SSE-NEXT: movsd {{.*#+}} xmm13 = xmm5[0],xmm13[1]
; SSE-NEXT: movapd %xmm8, %xmm14
; SSE-NEXT: movsd {{.*#+}} xmm14 = xmm3[0],xmm14[1]
; SSE-NEXT: movapd %xmm10, %xmm15
; SSE-NEXT: movsd {{.*#+}} xmm15 = xmm6[0],xmm15[1]
; SSE-NEXT: shufpd {{.*#+}} xmm7 = xmm7[1],xmm0[0]
; SSE-NEXT: shufpd {{.*#+}} xmm5 = xmm5[1],xmm1[0]
; SSE-NEXT: shufpd {{.*#+}} xmm3 = xmm3[1],xmm2[0]
; SSE-NEXT: shufpd {{.*#+}} xmm6 = xmm6[1],xmm4[0]
; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm11[0],xmm0[1]
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm9[0],xmm1[1]
; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm8[0],xmm2[1]
; SSE-NEXT: movsd {{.*#+}} xmm4 = xmm10[0],xmm4[1]
; SSE-NEXT: movapd %xmm14, 32(%rsi)
; SSE-NEXT: movapd %xmm13, 48(%rsi)
; SSE-NEXT: movapd %xmm15, (%rsi)
; SSE-NEXT: movapd %xmm12, 16(%rsi)
; SSE-NEXT: movapd %xmm3, 32(%rdx)
; SSE-NEXT: movapd %xmm5, 48(%rdx)
; SSE-NEXT: movapd %xmm6, (%rdx)
; SSE-NEXT: movapd %xmm7, 16(%rdx)
; SSE-NEXT: movapd %xmm2, 32(%rcx)
; SSE-NEXT: movapd %xmm1, 48(%rcx)
; SSE-NEXT: movapd %xmm4, (%rcx)
; SSE-NEXT: movapd %xmm0, 16(%rcx)
; SSE-NEXT: retq
;
; AVX-LABEL: load_i64_stride3_vf8:
; AVX: # %bb.0:
; AVX-NEXT: vmovapd 128(%rdi), %ymm0
; AVX-NEXT: vmovapd 32(%rdi), %ymm1
; AVX-NEXT: vmovaps 16(%rdi), %xmm2
; AVX-NEXT: vblendpd {{.*#+}} ymm3 = mem[0,1],ymm1[2,3]
; AVX-NEXT: vinsertf128 $1, 64(%rdi), %ymm2, %ymm2
; AVX-NEXT: vblendpd {{.*#+}} ymm4 = ymm3[0],ymm2[1],ymm3[2],ymm2[3]
; AVX-NEXT: vmovaps 112(%rdi), %xmm5
; AVX-NEXT: vblendpd {{.*#+}} ymm6 = mem[0,1],ymm0[2,3]
; AVX-NEXT: vinsertf128 $1, 160(%rdi), %ymm5, %ymm5
; AVX-NEXT: vblendpd {{.*#+}} ymm7 = ymm6[0],ymm5[1],ymm6[2],ymm5[3]
; AVX-NEXT: vshufpd {{.*#+}} ymm3 = ymm3[1],ymm1[0],ymm3[3],ymm1[3]
; AVX-NEXT: vbroadcastsd 80(%rdi), %ymm8
; AVX-NEXT: vblendpd {{.*#+}} ymm3 = ymm3[0,1,2],ymm8[3]
; AVX-NEXT: vshufpd {{.*#+}} ymm6 = ymm6[1],ymm0[0],ymm6[3],ymm0[3]
; AVX-NEXT: vbroadcastsd 176(%rdi), %ymm8
; AVX-NEXT: vblendpd {{.*#+}} ymm6 = ymm6[0,1,2],ymm8[3]
; AVX-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0,1],mem[2,3]
; AVX-NEXT: vblendpd {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2],ymm1[3]
; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],mem[2,3]
; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm5[0],ymm0[1],ymm5[2],ymm0[3]
; AVX-NEXT: vmovapd %ymm7, 32(%rsi)
; AVX-NEXT: vmovapd %ymm4, (%rsi)
; AVX-NEXT: vmovapd %ymm6, 32(%rdx)
; AVX-NEXT: vmovapd %ymm3, (%rdx)
; AVX-NEXT: vmovapd %ymm0, 32(%rcx)
; AVX-NEXT: vmovapd %ymm1, (%rcx)
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; AVX2-LABEL: load_i64_stride3_vf8:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovaps 128(%rdi), %ymm0
; AVX2-NEXT: vmovaps 96(%rdi), %ymm1
; AVX2-NEXT: vmovaps 32(%rdi), %ymm2
; AVX2-NEXT: vmovaps (%rdi), %ymm3
; AVX2-NEXT: vinsertf128 $1, 64(%rdi), %ymm0, %ymm4
; AVX2-NEXT: vpermpd {{.*#+}} ymm5 = ymm3[0,3,2,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm2[4,5,6,7]
; AVX2-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1,2,3,4,5],ymm4[6,7]
; AVX2-NEXT: vinsertf128 $1, 160(%rdi), %ymm0, %ymm5
; AVX2-NEXT: vpermpd {{.*#+}} ymm6 = ymm1[0,3,2,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm0[4,5,6,7]
; AVX2-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1,2,3,4,5],ymm5[6,7]
; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3],ymm2[4,5,6,7]
; AVX2-NEXT: vshufps {{.*#+}} ymm2 = ymm2[2,3,0,1,6,7,4,5]
; AVX2-NEXT: vbroadcastsd 80(%rdi), %ymm3
; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm3[6,7]
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; AVX2-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5]
; AVX2-NEXT: vbroadcastsd 176(%rdi), %ymm1
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX2-NEXT: vmovaps 16(%rdi), %xmm1
; AVX2-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm3 = mem[0,1,0,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
; AVX2-NEXT: vmovaps 112(%rdi), %xmm3
; AVX2-NEXT: vblendps {{.*#+}} xmm3 = xmm3[0,1],mem[2,3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm6 = mem[0,1,0,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm6[4,5,6,7]
; AVX2-NEXT: vmovaps %ymm5, 32(%rsi)
; AVX2-NEXT: vmovaps %ymm4, (%rsi)
; AVX2-NEXT: vmovaps %ymm0, 32(%rdx)
; AVX2-NEXT: vmovaps %ymm2, (%rdx)
; AVX2-NEXT: vmovaps %ymm3, 32(%rcx)
; AVX2-NEXT: vmovaps %ymm1, (%rcx)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX2-FP-LABEL: load_i64_stride3_vf8:
; AVX2-FP: # %bb.0:
; AVX2-FP-NEXT: vmovaps 128(%rdi), %ymm0
; AVX2-FP-NEXT: vmovaps 96(%rdi), %ymm1
; AVX2-FP-NEXT: vmovaps 32(%rdi), %ymm2
; AVX2-FP-NEXT: vmovaps (%rdi), %ymm3
; AVX2-FP-NEXT: vinsertf128 $1, 64(%rdi), %ymm0, %ymm4
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm5 = ymm3[0,3,2,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm2[4,5,6,7]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1,2,3,4,5],ymm4[6,7]
; AVX2-FP-NEXT: vinsertf128 $1, 160(%rdi), %ymm0, %ymm5
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm6 = ymm1[0,3,2,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm0[4,5,6,7]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1,2,3,4,5],ymm5[6,7]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3],ymm2[4,5,6,7]
; AVX2-FP-NEXT: vshufps {{.*#+}} ymm2 = ymm2[2,3,0,1,6,7,4,5]
; AVX2-FP-NEXT: vbroadcastsd 80(%rdi), %ymm3
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm3[6,7]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; AVX2-FP-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5]
; AVX2-FP-NEXT: vbroadcastsd 176(%rdi), %ymm1
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FP-NEXT: vmovaps 16(%rdi), %xmm1
; AVX2-FP-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm3 = mem[0,1,0,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
; AVX2-FP-NEXT: vmovaps 112(%rdi), %xmm3
; AVX2-FP-NEXT: vblendps {{.*#+}} xmm3 = xmm3[0,1],mem[2,3]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm6 = mem[0,1,0,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm6[4,5,6,7]
; AVX2-FP-NEXT: vmovaps %ymm5, 32(%rsi)
; AVX2-FP-NEXT: vmovaps %ymm4, (%rsi)
; AVX2-FP-NEXT: vmovaps %ymm0, 32(%rdx)
; AVX2-FP-NEXT: vmovaps %ymm2, (%rdx)
; AVX2-FP-NEXT: vmovaps %ymm3, 32(%rcx)
; AVX2-FP-NEXT: vmovaps %ymm1, (%rcx)
; AVX2-FP-NEXT: vzeroupper
; AVX2-FP-NEXT: retq
;
; AVX2-FCP-LABEL: load_i64_stride3_vf8:
; AVX2-FCP: # %bb.0:
; AVX2-FCP-NEXT: vmovaps 128(%rdi), %ymm0
; AVX2-FCP-NEXT: vmovaps 96(%rdi), %ymm1
; AVX2-FCP-NEXT: vmovaps 32(%rdi), %ymm2
; AVX2-FCP-NEXT: vmovaps (%rdi), %ymm3
; AVX2-FCP-NEXT: vinsertf128 $1, 64(%rdi), %ymm0, %ymm4
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm5 = ymm3[0,3,2,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm2[4,5,6,7]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1,2,3,4,5],ymm4[6,7]
; AVX2-FCP-NEXT: vinsertf128 $1, 160(%rdi), %ymm0, %ymm5
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm6 = ymm1[0,3,2,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm0[4,5,6,7]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1,2,3,4,5],ymm5[6,7]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3],ymm2[4,5,6,7]
; AVX2-FCP-NEXT: vshufps {{.*#+}} ymm2 = ymm2[2,3,0,1,6,7,4,5]
; AVX2-FCP-NEXT: vbroadcastsd 80(%rdi), %ymm3
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm3[6,7]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; AVX2-FCP-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5]
; AVX2-FCP-NEXT: vbroadcastsd 176(%rdi), %ymm1
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FCP-NEXT: vmovaps 16(%rdi), %xmm1
; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm3 = mem[0,1,0,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
; AVX2-FCP-NEXT: vmovaps 112(%rdi), %xmm3
; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm3 = xmm3[0,1],mem[2,3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm6 = mem[0,1,0,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm6[4,5,6,7]
; AVX2-FCP-NEXT: vmovaps %ymm5, 32(%rsi)
; AVX2-FCP-NEXT: vmovaps %ymm4, (%rsi)
; AVX2-FCP-NEXT: vmovaps %ymm0, 32(%rdx)
; AVX2-FCP-NEXT: vmovaps %ymm2, (%rdx)
; AVX2-FCP-NEXT: vmovaps %ymm3, 32(%rcx)
; AVX2-FCP-NEXT: vmovaps %ymm1, (%rcx)
; AVX2-FCP-NEXT: vzeroupper
; AVX2-FCP-NEXT: retq
;
; AVX512-LABEL: load_i64_stride3_vf8:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512-NEXT: vmovdqa64 64(%rdi), %zmm1
; AVX512-NEXT: vmovdqa64 128(%rdi), %zmm2
; AVX512-NEXT: vpmovsxbq {{.*#+}} zmm3 = [0,3,6,9,12,15,0,0]
; AVX512-NEXT: vpermi2q %zmm1, %zmm0, %zmm3
; AVX512-NEXT: vpmovsxbq {{.*#+}} zmm4 = [0,1,2,3,4,5,10,13]
; AVX512-NEXT: vpermi2q %zmm2, %zmm3, %zmm4
; AVX512-NEXT: vpmovsxbq {{.*#+}} zmm3 = [1,4,7,10,13,0,0,0]
; AVX512-NEXT: vpermi2q %zmm1, %zmm0, %zmm3
; AVX512-NEXT: vpmovsxbq {{.*#+}} zmm5 = [0,1,2,3,4,8,11,14]
; AVX512-NEXT: vpermi2q %zmm2, %zmm3, %zmm5
; AVX512-NEXT: vpmovsxbq {{.*#+}} zmm3 = [10,13,0,3,6,0,0,0]
; AVX512-NEXT: vpermi2q %zmm0, %zmm1, %zmm3
; AVX512-NEXT: vpmovsxbq {{.*#+}} zmm0 = [0,1,2,3,4,9,12,15]
; AVX512-NEXT: vpermi2q %zmm2, %zmm3, %zmm0
; AVX512-NEXT: vmovdqa64 %zmm4, (%rsi)
; AVX512-NEXT: vmovdqa64 %zmm5, (%rdx)
; AVX512-NEXT: vmovdqa64 %zmm0, (%rcx)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
;
; AVX512-FCP-LABEL: load_i64_stride3_vf8:
; AVX512-FCP: # %bb.0:
; AVX512-FCP-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512-FCP-NEXT: vmovdqa64 64(%rdi), %zmm1
; AVX512-FCP-NEXT: vmovdqa64 128(%rdi), %zmm2
; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm3 = [0,3,6,9,12,15,0,0]
; AVX512-FCP-NEXT: vpermi2q %zmm1, %zmm0, %zmm3
; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm4 = [0,1,2,3,4,5,10,13]
; AVX512-FCP-NEXT: vpermi2q %zmm2, %zmm3, %zmm4
; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm3 = [1,4,7,10,13,0,0,0]
; AVX512-FCP-NEXT: vpermi2q %zmm1, %zmm0, %zmm3
; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm5 = [0,1,2,3,4,8,11,14]
; AVX512-FCP-NEXT: vpermi2q %zmm2, %zmm3, %zmm5
; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm3 = [10,13,0,3,6,0,0,0]
; AVX512-FCP-NEXT: vpermi2q %zmm0, %zmm1, %zmm3
; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm0 = [0,1,2,3,4,9,12,15]
; AVX512-FCP-NEXT: vpermi2q %zmm2, %zmm3, %zmm0
; AVX512-FCP-NEXT: vmovdqa64 %zmm4, (%rsi)
; AVX512-FCP-NEXT: vmovdqa64 %zmm5, (%rdx)
; AVX512-FCP-NEXT: vmovdqa64 %zmm0, (%rcx)
; AVX512-FCP-NEXT: vzeroupper
; AVX512-FCP-NEXT: retq
;
; AVX512DQ-LABEL: load_i64_stride3_vf8:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512DQ-NEXT: vmovdqa64 64(%rdi), %zmm1
; AVX512DQ-NEXT: vmovdqa64 128(%rdi), %zmm2
; AVX512DQ-NEXT: vpmovsxbq {{.*#+}} zmm3 = [0,3,6,9,12,15,0,0]
; AVX512DQ-NEXT: vpermi2q %zmm1, %zmm0, %zmm3
; AVX512DQ-NEXT: vpmovsxbq {{.*#+}} zmm4 = [0,1,2,3,4,5,10,13]
; AVX512DQ-NEXT: vpermi2q %zmm2, %zmm3, %zmm4
; AVX512DQ-NEXT: vpmovsxbq {{.*#+}} zmm3 = [1,4,7,10,13,0,0,0]
; AVX512DQ-NEXT: vpermi2q %zmm1, %zmm0, %zmm3
; AVX512DQ-NEXT: vpmovsxbq {{.*#+}} zmm5 = [0,1,2,3,4,8,11,14]
; AVX512DQ-NEXT: vpermi2q %zmm2, %zmm3, %zmm5
; AVX512DQ-NEXT: vpmovsxbq {{.*#+}} zmm3 = [10,13,0,3,6,0,0,0]
; AVX512DQ-NEXT: vpermi2q %zmm0, %zmm1, %zmm3
; AVX512DQ-NEXT: vpmovsxbq {{.*#+}} zmm0 = [0,1,2,3,4,9,12,15]
; AVX512DQ-NEXT: vpermi2q %zmm2, %zmm3, %zmm0
; AVX512DQ-NEXT: vmovdqa64 %zmm4, (%rsi)
; AVX512DQ-NEXT: vmovdqa64 %zmm5, (%rdx)
; AVX512DQ-NEXT: vmovdqa64 %zmm0, (%rcx)
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512DQ-FCP-LABEL: load_i64_stride3_vf8:
; AVX512DQ-FCP: # %bb.0:
; AVX512DQ-FCP-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512DQ-FCP-NEXT: vmovdqa64 64(%rdi), %zmm1
; AVX512DQ-FCP-NEXT: vmovdqa64 128(%rdi), %zmm2
; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm3 = [0,3,6,9,12,15,0,0]
; AVX512DQ-FCP-NEXT: vpermi2q %zmm1, %zmm0, %zmm3
; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm4 = [0,1,2,3,4,5,10,13]
; AVX512DQ-FCP-NEXT: vpermi2q %zmm2, %zmm3, %zmm4
; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm3 = [1,4,7,10,13,0,0,0]
; AVX512DQ-FCP-NEXT: vpermi2q %zmm1, %zmm0, %zmm3
; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm5 = [0,1,2,3,4,8,11,14]
; AVX512DQ-FCP-NEXT: vpermi2q %zmm2, %zmm3, %zmm5
; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm3 = [10,13,0,3,6,0,0,0]
; AVX512DQ-FCP-NEXT: vpermi2q %zmm0, %zmm1, %zmm3
; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm0 = [0,1,2,3,4,9,12,15]
; AVX512DQ-FCP-NEXT: vpermi2q %zmm2, %zmm3, %zmm0
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm4, (%rsi)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm5, (%rdx)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm0, (%rcx)
; AVX512DQ-FCP-NEXT: vzeroupper
; AVX512DQ-FCP-NEXT: retq
;
; AVX512BW-LABEL: load_i64_stride3_vf8:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BW-NEXT: vmovdqa64 64(%rdi), %zmm1
; AVX512BW-NEXT: vmovdqa64 128(%rdi), %zmm2
; AVX512BW-NEXT: vpmovsxbq {{.*#+}} zmm3 = [0,3,6,9,12,15,0,0]
; AVX512BW-NEXT: vpermi2q %zmm1, %zmm0, %zmm3
; AVX512BW-NEXT: vpmovsxbq {{.*#+}} zmm4 = [0,1,2,3,4,5,10,13]
; AVX512BW-NEXT: vpermi2q %zmm2, %zmm3, %zmm4
; AVX512BW-NEXT: vpmovsxbq {{.*#+}} zmm3 = [1,4,7,10,13,0,0,0]
; AVX512BW-NEXT: vpermi2q %zmm1, %zmm0, %zmm3
; AVX512BW-NEXT: vpmovsxbq {{.*#+}} zmm5 = [0,1,2,3,4,8,11,14]
; AVX512BW-NEXT: vpermi2q %zmm2, %zmm3, %zmm5
; AVX512BW-NEXT: vpmovsxbq {{.*#+}} zmm3 = [10,13,0,3,6,0,0,0]
; AVX512BW-NEXT: vpermi2q %zmm0, %zmm1, %zmm3
; AVX512BW-NEXT: vpmovsxbq {{.*#+}} zmm0 = [0,1,2,3,4,9,12,15]
; AVX512BW-NEXT: vpermi2q %zmm2, %zmm3, %zmm0
; AVX512BW-NEXT: vmovdqa64 %zmm4, (%rsi)
; AVX512BW-NEXT: vmovdqa64 %zmm5, (%rdx)
; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BW-FCP-LABEL: load_i64_stride3_vf8:
; AVX512BW-FCP: # %bb.0:
; AVX512BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BW-FCP-NEXT: vmovdqa64 64(%rdi), %zmm1
; AVX512BW-FCP-NEXT: vmovdqa64 128(%rdi), %zmm2
; AVX512BW-FCP-NEXT: vpmovsxbq {{.*#+}} zmm3 = [0,3,6,9,12,15,0,0]
; AVX512BW-FCP-NEXT: vpermi2q %zmm1, %zmm0, %zmm3
; AVX512BW-FCP-NEXT: vpmovsxbq {{.*#+}} zmm4 = [0,1,2,3,4,5,10,13]
; AVX512BW-FCP-NEXT: vpermi2q %zmm2, %zmm3, %zmm4
; AVX512BW-FCP-NEXT: vpmovsxbq {{.*#+}} zmm3 = [1,4,7,10,13,0,0,0]
; AVX512BW-FCP-NEXT: vpermi2q %zmm1, %zmm0, %zmm3
; AVX512BW-FCP-NEXT: vpmovsxbq {{.*#+}} zmm5 = [0,1,2,3,4,8,11,14]
; AVX512BW-FCP-NEXT: vpermi2q %zmm2, %zmm3, %zmm5
; AVX512BW-FCP-NEXT: vpmovsxbq {{.*#+}} zmm3 = [10,13,0,3,6,0,0,0]
; AVX512BW-FCP-NEXT: vpermi2q %zmm0, %zmm1, %zmm3
; AVX512BW-FCP-NEXT: vpmovsxbq {{.*#+}} zmm0 = [0,1,2,3,4,9,12,15]
; AVX512BW-FCP-NEXT: vpermi2q %zmm2, %zmm3, %zmm0
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm4, (%rsi)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm5, (%rdx)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm0, (%rcx)
; AVX512BW-FCP-NEXT: vzeroupper
; AVX512BW-FCP-NEXT: retq
;
; AVX512DQ-BW-LABEL: load_i64_stride3_vf8:
; AVX512DQ-BW: # %bb.0:
; AVX512DQ-BW-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512DQ-BW-NEXT: vmovdqa64 64(%rdi), %zmm1
; AVX512DQ-BW-NEXT: vmovdqa64 128(%rdi), %zmm2
; AVX512DQ-BW-NEXT: vpmovsxbq {{.*#+}} zmm3 = [0,3,6,9,12,15,0,0]
; AVX512DQ-BW-NEXT: vpermi2q %zmm1, %zmm0, %zmm3
; AVX512DQ-BW-NEXT: vpmovsxbq {{.*#+}} zmm4 = [0,1,2,3,4,5,10,13]
; AVX512DQ-BW-NEXT: vpermi2q %zmm2, %zmm3, %zmm4
; AVX512DQ-BW-NEXT: vpmovsxbq {{.*#+}} zmm3 = [1,4,7,10,13,0,0,0]
; AVX512DQ-BW-NEXT: vpermi2q %zmm1, %zmm0, %zmm3
; AVX512DQ-BW-NEXT: vpmovsxbq {{.*#+}} zmm5 = [0,1,2,3,4,8,11,14]
; AVX512DQ-BW-NEXT: vpermi2q %zmm2, %zmm3, %zmm5
; AVX512DQ-BW-NEXT: vpmovsxbq {{.*#+}} zmm3 = [10,13,0,3,6,0,0,0]
; AVX512DQ-BW-NEXT: vpermi2q %zmm0, %zmm1, %zmm3
; AVX512DQ-BW-NEXT: vpmovsxbq {{.*#+}} zmm0 = [0,1,2,3,4,9,12,15]
; AVX512DQ-BW-NEXT: vpermi2q %zmm2, %zmm3, %zmm0
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm4, (%rsi)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm5, (%rdx)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm0, (%rcx)
; AVX512DQ-BW-NEXT: vzeroupper
; AVX512DQ-BW-NEXT: retq
;
; AVX512DQ-BW-FCP-LABEL: load_i64_stride3_vf8:
; AVX512DQ-BW-FCP: # %bb.0:
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 64(%rdi), %zmm1
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 128(%rdi), %zmm2
; AVX512DQ-BW-FCP-NEXT: vpmovsxbq {{.*#+}} zmm3 = [0,3,6,9,12,15,0,0]
; AVX512DQ-BW-FCP-NEXT: vpermi2q %zmm1, %zmm0, %zmm3
; AVX512DQ-BW-FCP-NEXT: vpmovsxbq {{.*#+}} zmm4 = [0,1,2,3,4,5,10,13]
; AVX512DQ-BW-FCP-NEXT: vpermi2q %zmm2, %zmm3, %zmm4
; AVX512DQ-BW-FCP-NEXT: vpmovsxbq {{.*#+}} zmm3 = [1,4,7,10,13,0,0,0]
; AVX512DQ-BW-FCP-NEXT: vpermi2q %zmm1, %zmm0, %zmm3
; AVX512DQ-BW-FCP-NEXT: vpmovsxbq {{.*#+}} zmm5 = [0,1,2,3,4,8,11,14]
; AVX512DQ-BW-FCP-NEXT: vpermi2q %zmm2, %zmm3, %zmm5
; AVX512DQ-BW-FCP-NEXT: vpmovsxbq {{.*#+}} zmm3 = [10,13,0,3,6,0,0,0]
; AVX512DQ-BW-FCP-NEXT: vpermi2q %zmm0, %zmm1, %zmm3
; AVX512DQ-BW-FCP-NEXT: vpmovsxbq {{.*#+}} zmm0 = [0,1,2,3,4,9,12,15]
; AVX512DQ-BW-FCP-NEXT: vpermi2q %zmm2, %zmm3, %zmm0
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm4, (%rsi)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm5, (%rdx)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm0, (%rcx)
; AVX512DQ-BW-FCP-NEXT: vzeroupper
; AVX512DQ-BW-FCP-NEXT: retq
  %wide.vec = load <24 x i64>, ptr %in.vec, align 64
  %strided.vec0 = shufflevector <24 x i64> %wide.vec, <24 x i64> poison, <8 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21>
  %strided.vec1 = shufflevector <24 x i64> %wide.vec, <24 x i64> poison, <8 x i32> <i32 1, i32 4, i32 7, i32 10, i32 13, i32 16, i32 19, i32 22>
  %strided.vec2 = shufflevector <24 x i64> %wide.vec, <24 x i64> poison, <8 x i32> <i32 2, i32 5, i32 8, i32 11, i32 14, i32 17, i32 20, i32 23>
  store <8 x i64> %strided.vec0, ptr %out.vec0, align 64
  store <8 x i64> %strided.vec1, ptr %out.vec1, align 64
  store <8 x i64> %strided.vec2, ptr %out.vec2, align 64
  ret void
}

define void @load_i64_stride3_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2) nounwind {
; SSE-LABEL: load_i64_stride3_vf16:
; SSE: # %bb.0:
; SSE-NEXT: subq $24, %rsp
; SSE-NEXT: movapd 128(%rdi), %xmm0
; SSE-NEXT: movapd 176(%rdi), %xmm1
; SSE-NEXT: movapd 224(%rdi), %xmm4
; SSE-NEXT: movapd 272(%rdi), %xmm3
; SSE-NEXT: movapd 80(%rdi), %xmm2
; SSE-NEXT: movapd 96(%rdi), %xmm5
; SSE-NEXT: movapd 112(%rdi), %xmm12
; SSE-NEXT: movapd 144(%rdi), %xmm6
; SSE-NEXT: movapd 160(%rdi), %xmm14
; SSE-NEXT: movapd 192(%rdi), %xmm7
; SSE-NEXT: movapd 208(%rdi), %xmm11
; SSE-NEXT: movapd 240(%rdi), %xmm10
; SSE-NEXT: movapd 256(%rdi), %xmm13
; SSE-NEXT: movapd 48(%rdi), %xmm9
; SSE-NEXT: movapd 64(%rdi), %xmm15
; SSE-NEXT: movapd %xmm15, %xmm8
; SSE-NEXT: movsd {{.*#+}} xmm8 = xmm9[0],xmm8[1]
; SSE-NEXT: movapd %xmm8, (%rsp) # 16-byte Spill
; SSE-NEXT: shufpd {{.*#+}} xmm9 = xmm9[1],xmm2[0]
; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm15[0],xmm2[1]
; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd %xmm14, %xmm15
; SSE-NEXT: movsd {{.*#+}} xmm15 = xmm6[0],xmm15[1]
; SSE-NEXT: shufpd {{.*#+}} xmm6 = xmm6[1],xmm1[0]
; SSE-NEXT: movapd %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm14[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd %xmm12, %xmm14
; SSE-NEXT: movsd {{.*#+}} xmm14 = xmm5[0],xmm14[1]
; SSE-NEXT: shufpd {{.*#+}} xmm5 = xmm5[1],xmm0[0]
; SSE-NEXT: movapd %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm12[0],xmm0[1]
; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd %xmm13, %xmm12
; SSE-NEXT: movsd {{.*#+}} xmm12 = xmm10[0],xmm12[1]
; SSE-NEXT: shufpd {{.*#+}} xmm10 = xmm10[1],xmm3[0]
; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm13[0],xmm3[1]
; SSE-NEXT: movapd %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd %xmm11, %xmm8
; SSE-NEXT: movsd {{.*#+}} xmm8 = xmm7[0],xmm8[1]
; SSE-NEXT: shufpd {{.*#+}} xmm7 = xmm7[1],xmm4[0]
; SSE-NEXT: movapd %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm4 = xmm11[0],xmm4[1]
; SSE-NEXT: movapd %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 336(%rdi), %xmm13
; SSE-NEXT: movapd 352(%rdi), %xmm0
; SSE-NEXT: movapd %xmm0, %xmm7
; SSE-NEXT: movsd {{.*#+}} xmm7 = xmm13[0],xmm7[1]
; SSE-NEXT: movapd 368(%rdi), %xmm11
; SSE-NEXT: shufpd {{.*#+}} xmm13 = xmm13[1],xmm11[0]
; SSE-NEXT: movsd {{.*#+}} xmm11 = xmm0[0],xmm11[1]
; SSE-NEXT: movapd 288(%rdi), %xmm0
; SSE-NEXT: movapd 304(%rdi), %xmm2
; SSE-NEXT: movapd %xmm2, %xmm1
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT: movapd 320(%rdi), %xmm6
; SSE-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1],xmm6[0]
; SSE-NEXT: movsd {{.*#+}} xmm6 = xmm2[0],xmm6[1]
; SSE-NEXT: movapd (%rdi), %xmm2
; SSE-NEXT: movapd 16(%rdi), %xmm5
; SSE-NEXT: movapd %xmm5, %xmm3
; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm2[0],xmm3[1]
; SSE-NEXT: movapd 32(%rdi), %xmm4
; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm4[0]
; SSE-NEXT: movsd {{.*#+}} xmm4 = xmm5[0],xmm4[1]
; SSE-NEXT: movapd %xmm1, 96(%rsi)
; SSE-NEXT: movapd %xmm14, 32(%rsi)
; SSE-NEXT: movapd %xmm7, 112(%rsi)
; SSE-NEXT: movapd %xmm15, 48(%rsi)
; SSE-NEXT: movapd %xmm8, 64(%rsi)
; SSE-NEXT: movapd %xmm3, (%rsi)
; SSE-NEXT: movapd %xmm12, 80(%rsi)
; SSE-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 16(%rsi)
; SSE-NEXT: movapd %xmm0, 96(%rdx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 32(%rdx)
; SSE-NEXT: movapd %xmm13, 112(%rdx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 48(%rdx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 64(%rdx)
; SSE-NEXT: movapd %xmm2, (%rdx)
; SSE-NEXT: movapd %xmm10, 80(%rdx)
; SSE-NEXT: movapd %xmm9, 16(%rdx)
; SSE-NEXT: movapd %xmm6, 96(%rcx)
; SSE-NEXT: movapd %xmm11, 112(%rcx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 64(%rcx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 80(%rcx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 32(%rcx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 48(%rcx)
; SSE-NEXT: movapd %xmm4, (%rcx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 16(%rcx)
; SSE-NEXT: addq $24, %rsp
; SSE-NEXT: retq
;
; AVX-LABEL: load_i64_stride3_vf16:
; AVX: # %bb.0:
; AVX-NEXT: vmovapd 32(%rdi), %ymm1
; AVX-NEXT: vmovapd 224(%rdi), %ymm0
; AVX-NEXT: vmovapd 320(%rdi), %ymm2
; AVX-NEXT: vmovapd 128(%rdi), %ymm5
; AVX-NEXT: vblendpd {{.*#+}} ymm7 = mem[0,1],ymm5[2,3]
; AVX-NEXT: vblendpd {{.*#+}} ymm8 = mem[0,1],ymm2[2,3]
; AVX-NEXT: vblendpd {{.*#+}} ymm3 = mem[0,1],ymm0[2,3]
; AVX-NEXT: vblendpd {{.*#+}} ymm9 = mem[0,1],ymm1[2,3]
; AVX-NEXT: vshufpd {{.*#+}} ymm4 = ymm7[1],ymm5[0],ymm7[3],ymm5[3]
; AVX-NEXT: vbroadcastsd 176(%rdi), %ymm6
; AVX-NEXT: vblendpd {{.*#+}} ymm4 = ymm4[0,1,2],ymm6[3]
; AVX-NEXT: vshufpd {{.*#+}} ymm6 = ymm8[1],ymm2[0],ymm8[3],ymm2[3]
; AVX-NEXT: vbroadcastsd 368(%rdi), %ymm10
; AVX-NEXT: vblendpd {{.*#+}} ymm6 = ymm6[0,1,2],ymm10[3]
; AVX-NEXT: vshufpd {{.*#+}} ymm10 = ymm9[1],ymm1[0],ymm9[3],ymm1[3]
; AVX-NEXT: vbroadcastsd 80(%rdi), %ymm11
; AVX-NEXT: vblendpd {{.*#+}} ymm10 = ymm10[0,1,2],ymm11[3]
; AVX-NEXT: vshufpd {{.*#+}} ymm11 = ymm3[1],ymm0[0],ymm3[3],ymm0[3]
; AVX-NEXT: vbroadcastsd 272(%rdi), %ymm12
; AVX-NEXT: vblendpd {{.*#+}} ymm11 = ymm11[0,1,2],ymm12[3]
; AVX-NEXT: vmovaps 112(%rdi), %xmm12
; AVX-NEXT: vinsertf128 $1, 160(%rdi), %ymm12, %ymm12
; AVX-NEXT: vblendpd {{.*#+}} ymm7 = ymm7[0],ymm12[1],ymm7[2],ymm12[3]
; AVX-NEXT: vblendpd {{.*#+}} ymm5 = ymm5[0,1],mem[2,3]
; AVX-NEXT: vblendpd {{.*#+}} ymm5 = ymm12[0],ymm5[1],ymm12[2],ymm5[3]
; AVX-NEXT: vmovaps 304(%rdi), %xmm12
; AVX-NEXT: vinsertf128 $1, 352(%rdi), %ymm12, %ymm12
; AVX-NEXT: vblendpd {{.*#+}} ymm8 = ymm8[0],ymm12[1],ymm8[2],ymm12[3]
; AVX-NEXT: vblendpd {{.*#+}} ymm2 = ymm2[0,1],mem[2,3]
; AVX-NEXT: vblendpd {{.*#+}} ymm2 = ymm12[0],ymm2[1],ymm12[2],ymm2[3]
; AVX-NEXT: vmovaps 16(%rdi), %xmm12
; AVX-NEXT: vinsertf128 $1, 64(%rdi), %ymm12, %ymm12
; AVX-NEXT: vblendpd {{.*#+}} ymm9 = ymm9[0],ymm12[1],ymm9[2],ymm12[3]
; AVX-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0,1],mem[2,3]
; AVX-NEXT: vblendpd {{.*#+}} ymm1 = ymm12[0],ymm1[1],ymm12[2],ymm1[3]
; AVX-NEXT: vmovaps 208(%rdi), %xmm12
; AVX-NEXT: vinsertf128 $1, 256(%rdi), %ymm12, %ymm12
; AVX-NEXT: vblendpd {{.*#+}} ymm3 = ymm3[0],ymm12[1],ymm3[2],ymm12[3]
; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],mem[2,3]
; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm12[0],ymm0[1],ymm12[2],ymm0[3]
; AVX-NEXT: vmovapd %ymm9, (%rsi)
; AVX-NEXT: vmovapd %ymm3, 64(%rsi)
; AVX-NEXT: vmovapd %ymm8, 96(%rsi)
; AVX-NEXT: vmovapd %ymm7, 32(%rsi)
; AVX-NEXT: vmovapd %ymm11, 64(%rdx)
; AVX-NEXT: vmovapd %ymm10, (%rdx)
; AVX-NEXT: vmovapd %ymm6, 96(%rdx)
; AVX-NEXT: vmovapd %ymm4, 32(%rdx)
; AVX-NEXT: vmovapd %ymm0, 64(%rcx)
; AVX-NEXT: vmovapd %ymm1, (%rcx)
; AVX-NEXT: vmovapd %ymm2, 96(%rcx)
; AVX-NEXT: vmovapd %ymm5, 32(%rcx)
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; AVX2-LABEL: load_i64_stride3_vf16:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovaps 32(%rdi), %ymm8
; AVX2-NEXT: vmovaps (%rdi), %ymm9
; AVX2-NEXT: vmovaps 224(%rdi), %ymm6
; AVX2-NEXT: vmovaps 192(%rdi), %ymm7
; AVX2-NEXT: vmovaps 320(%rdi), %ymm5
; AVX2-NEXT: vmovaps 288(%rdi), %ymm10
; AVX2-NEXT: vmovaps 128(%rdi), %ymm3
; AVX2-NEXT: vmovaps 96(%rdi), %ymm11
; AVX2-NEXT: vinsertf128 $1, 160(%rdi), %ymm0, %ymm0
; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm11[0,3,2,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-NEXT: vinsertf128 $1, 352(%rdi), %ymm0, %ymm1
; AVX2-NEXT: vpermpd {{.*#+}} ymm2 = ymm10[0,3,2,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm5[4,5,6,7]
; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5],ymm1[6,7]
; AVX2-NEXT: vinsertf128 $1, 256(%rdi), %ymm0, %ymm2
; AVX2-NEXT: vpermpd {{.*#+}} ymm4 = ymm7[0,3,2,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm6[4,5,6,7]
; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm4[0,1,2,3,4,5],ymm2[6,7]
; AVX2-NEXT: vinsertf128 $1, 64(%rdi), %ymm0, %ymm4
; AVX2-NEXT: vpermpd {{.*#+}} ymm12 = ymm9[0,3,2,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm8[4,5,6,7]
; AVX2-NEXT: vblendps {{.*#+}} ymm4 = ymm12[0,1,2,3,4,5],ymm4[6,7]
; AVX2-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm11[2,3],ymm3[4,5,6,7]
; AVX2-NEXT: vshufps {{.*#+}} ymm3 = ymm3[2,3,0,1,6,7,4,5]
; AVX2-NEXT: vbroadcastsd 176(%rdi), %ymm11
; AVX2-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm11[6,7]
; AVX2-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1],ymm10[2,3],ymm5[4,5,6,7]
; AVX2-NEXT: vshufps {{.*#+}} ymm5 = ymm5[2,3,0,1,6,7,4,5]
; AVX2-NEXT: vbroadcastsd 368(%rdi), %ymm10
; AVX2-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5],ymm10[6,7]
; AVX2-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1],ymm9[2,3],ymm8[4,5,6,7]
; AVX2-NEXT: vshufps {{.*#+}} ymm8 = ymm8[2,3,0,1,6,7,4,5]
; AVX2-NEXT: vbroadcastsd 80(%rdi), %ymm9
; AVX2-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5],ymm9[6,7]
; AVX2-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1],ymm7[2,3],ymm6[4,5,6,7]
; AVX2-NEXT: vshufps {{.*#+}} ymm6 = ymm6[2,3,0,1,6,7,4,5]
; AVX2-NEXT: vbroadcastsd 272(%rdi), %ymm7
; AVX2-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5],ymm7[6,7]
; AVX2-NEXT: vmovaps 112(%rdi), %xmm7
; AVX2-NEXT: vblendps {{.*#+}} xmm7 = xmm7[0,1],mem[2,3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm9 = mem[0,1,0,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm9[4,5,6,7]
; AVX2-NEXT: vmovaps 304(%rdi), %xmm9
; AVX2-NEXT: vblendps {{.*#+}} xmm9 = xmm9[0,1],mem[2,3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm10 = mem[0,1,0,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm10[4,5,6,7]
; AVX2-NEXT: vmovaps 16(%rdi), %xmm10
; AVX2-NEXT: vblendps {{.*#+}} xmm10 = xmm10[0,1],mem[2,3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm11 = mem[0,1,0,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm11[4,5,6,7]
; AVX2-NEXT: vmovaps 208(%rdi), %xmm11
; AVX2-NEXT: vblendps {{.*#+}} xmm11 = xmm11[0,1],mem[2,3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm12 = mem[0,1,0,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0,1,2,3],ymm12[4,5,6,7]
; AVX2-NEXT: vmovaps %ymm4, (%rsi)
; AVX2-NEXT: vmovaps %ymm2, 64(%rsi)
; AVX2-NEXT: vmovaps %ymm1, 96(%rsi)
; AVX2-NEXT: vmovaps %ymm0, 32(%rsi)
; AVX2-NEXT: vmovaps %ymm6, 64(%rdx)
; AVX2-NEXT: vmovaps %ymm8, (%rdx)
; AVX2-NEXT: vmovaps %ymm5, 96(%rdx)
; AVX2-NEXT: vmovaps %ymm3, 32(%rdx)
; AVX2-NEXT: vmovaps %ymm11, 64(%rcx)
; AVX2-NEXT: vmovaps %ymm10, (%rcx)
; AVX2-NEXT: vmovaps %ymm9, 96(%rcx)
; AVX2-NEXT: vmovaps %ymm7, 32(%rcx)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX2-FP-LABEL: load_i64_stride3_vf16:
; AVX2-FP: # %bb.0:
; AVX2-FP-NEXT: vmovaps 32(%rdi), %ymm8
; AVX2-FP-NEXT: vmovaps (%rdi), %ymm9
; AVX2-FP-NEXT: vmovaps 224(%rdi), %ymm6
; AVX2-FP-NEXT: vmovaps 192(%rdi), %ymm7
; AVX2-FP-NEXT: vmovaps 320(%rdi), %ymm5
; AVX2-FP-NEXT: vmovaps 288(%rdi), %ymm10
; AVX2-FP-NEXT: vmovaps 128(%rdi), %ymm3
; AVX2-FP-NEXT: vmovaps 96(%rdi), %ymm11
; AVX2-FP-NEXT: vinsertf128 $1, 160(%rdi), %ymm0, %ymm0
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm1 = ymm11[0,3,2,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FP-NEXT: vinsertf128 $1, 352(%rdi), %ymm0, %ymm1
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm2 = ymm10[0,3,2,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm5[4,5,6,7]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FP-NEXT: vinsertf128 $1, 256(%rdi), %ymm0, %ymm2
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm4 = ymm7[0,3,2,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm6[4,5,6,7]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm2 = ymm4[0,1,2,3,4,5],ymm2[6,7]
; AVX2-FP-NEXT: vinsertf128 $1, 64(%rdi), %ymm0, %ymm4
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm12 = ymm9[0,3,2,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm8[4,5,6,7]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm4 = ymm12[0,1,2,3,4,5],ymm4[6,7]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm11[2,3],ymm3[4,5,6,7]
; AVX2-FP-NEXT: vshufps {{.*#+}} ymm3 = ymm3[2,3,0,1,6,7,4,5]
; AVX2-FP-NEXT: vbroadcastsd 176(%rdi), %ymm11
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm11[6,7]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1],ymm10[2,3],ymm5[4,5,6,7]
; AVX2-FP-NEXT: vshufps {{.*#+}} ymm5 = ymm5[2,3,0,1,6,7,4,5]
; AVX2-FP-NEXT: vbroadcastsd 368(%rdi), %ymm10
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5],ymm10[6,7]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1],ymm9[2,3],ymm8[4,5,6,7]
; AVX2-FP-NEXT: vshufps {{.*#+}} ymm8 = ymm8[2,3,0,1,6,7,4,5]
; AVX2-FP-NEXT: vbroadcastsd 80(%rdi), %ymm9
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5],ymm9[6,7]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1],ymm7[2,3],ymm6[4,5,6,7]
; AVX2-FP-NEXT: vshufps {{.*#+}} ymm6 = ymm6[2,3,0,1,6,7,4,5]
; AVX2-FP-NEXT: vbroadcastsd 272(%rdi), %ymm7
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5],ymm7[6,7]
; AVX2-FP-NEXT: vmovaps 112(%rdi), %xmm7
; AVX2-FP-NEXT: vblendps {{.*#+}} xmm7 = xmm7[0,1],mem[2,3]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm9 = mem[0,1,0,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm9[4,5,6,7]
; AVX2-FP-NEXT: vmovaps 304(%rdi), %xmm9
; AVX2-FP-NEXT: vblendps {{.*#+}} xmm9 = xmm9[0,1],mem[2,3]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm10 = mem[0,1,0,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm10[4,5,6,7]
; AVX2-FP-NEXT: vmovaps 16(%rdi), %xmm10
; AVX2-FP-NEXT: vblendps {{.*#+}} xmm10 = xmm10[0,1],mem[2,3]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm11 = mem[0,1,0,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm11[4,5,6,7]
; AVX2-FP-NEXT: vmovaps 208(%rdi), %xmm11
; AVX2-FP-NEXT: vblendps {{.*#+}} xmm11 = xmm11[0,1],mem[2,3]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm12 = mem[0,1,0,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0,1,2,3],ymm12[4,5,6,7]
; AVX2-FP-NEXT: vmovaps %ymm4, (%rsi)
; AVX2-FP-NEXT: vmovaps %ymm2, 64(%rsi)
; AVX2-FP-NEXT: vmovaps %ymm1, 96(%rsi)
; AVX2-FP-NEXT: vmovaps %ymm0, 32(%rsi)
; AVX2-FP-NEXT: vmovaps %ymm6, 64(%rdx)
; AVX2-FP-NEXT: vmovaps %ymm8, (%rdx)
; AVX2-FP-NEXT: vmovaps %ymm5, 96(%rdx)
; AVX2-FP-NEXT: vmovaps %ymm3, 32(%rdx)
; AVX2-FP-NEXT: vmovaps %ymm11, 64(%rcx)
; AVX2-FP-NEXT: vmovaps %ymm10, (%rcx)
; AVX2-FP-NEXT: vmovaps %ymm9, 96(%rcx)
; AVX2-FP-NEXT: vmovaps %ymm7, 32(%rcx)
; AVX2-FP-NEXT: vzeroupper
; AVX2-FP-NEXT: retq
;
; AVX2-FCP-LABEL: load_i64_stride3_vf16:
; AVX2-FCP: # %bb.0:
; AVX2-FCP-NEXT: vmovaps 32(%rdi), %ymm8
; AVX2-FCP-NEXT: vmovaps (%rdi), %ymm9
; AVX2-FCP-NEXT: vmovaps 224(%rdi), %ymm6
; AVX2-FCP-NEXT: vmovaps 192(%rdi), %ymm7
; AVX2-FCP-NEXT: vmovaps 320(%rdi), %ymm5
; AVX2-FCP-NEXT: vmovaps 288(%rdi), %ymm10
; AVX2-FCP-NEXT: vmovaps 128(%rdi), %ymm3
; AVX2-FCP-NEXT: vmovaps 96(%rdi), %ymm11
; AVX2-FCP-NEXT: vinsertf128 $1, 160(%rdi), %ymm0, %ymm0
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm1 = ymm11[0,3,2,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FCP-NEXT: vinsertf128 $1, 352(%rdi), %ymm0, %ymm1
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm2 = ymm10[0,3,2,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm5[4,5,6,7]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FCP-NEXT: vinsertf128 $1, 256(%rdi), %ymm0, %ymm2
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm4 = ymm7[0,3,2,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm6[4,5,6,7]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm2 = ymm4[0,1,2,3,4,5],ymm2[6,7]
; AVX2-FCP-NEXT: vinsertf128 $1, 64(%rdi), %ymm0, %ymm4
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm12 = ymm9[0,3,2,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm8[4,5,6,7]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm4 = ymm12[0,1,2,3,4,5],ymm4[6,7]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm11[2,3],ymm3[4,5,6,7]
; AVX2-FCP-NEXT: vshufps {{.*#+}} ymm3 = ymm3[2,3,0,1,6,7,4,5]
; AVX2-FCP-NEXT: vbroadcastsd 176(%rdi), %ymm11
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm11[6,7]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1],ymm10[2,3],ymm5[4,5,6,7]
; AVX2-FCP-NEXT: vshufps {{.*#+}} ymm5 = ymm5[2,3,0,1,6,7,4,5]
; AVX2-FCP-NEXT: vbroadcastsd 368(%rdi), %ymm10
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5],ymm10[6,7]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1],ymm9[2,3],ymm8[4,5,6,7]
; AVX2-FCP-NEXT: vshufps {{.*#+}} ymm8 = ymm8[2,3,0,1,6,7,4,5]
; AVX2-FCP-NEXT: vbroadcastsd 80(%rdi), %ymm9
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5],ymm9[6,7]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1],ymm7[2,3],ymm6[4,5,6,7]
; AVX2-FCP-NEXT: vshufps {{.*#+}} ymm6 = ymm6[2,3,0,1,6,7,4,5]
; AVX2-FCP-NEXT: vbroadcastsd 272(%rdi), %ymm7
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5],ymm7[6,7]
; AVX2-FCP-NEXT: vmovaps 112(%rdi), %xmm7
; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm7 = xmm7[0,1],mem[2,3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm9 = mem[0,1,0,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm9[4,5,6,7]
; AVX2-FCP-NEXT: vmovaps 304(%rdi), %xmm9
; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm9 = xmm9[0,1],mem[2,3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm10 = mem[0,1,0,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm10[4,5,6,7]
; AVX2-FCP-NEXT: vmovaps 16(%rdi), %xmm10
; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm10 = xmm10[0,1],mem[2,3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm11 = mem[0,1,0,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm11[4,5,6,7]
; AVX2-FCP-NEXT: vmovaps 208(%rdi), %xmm11
; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm11 = xmm11[0,1],mem[2,3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm12 = mem[0,1,0,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0,1,2,3],ymm12[4,5,6,7]
; AVX2-FCP-NEXT: vmovaps %ymm4, (%rsi)
; AVX2-FCP-NEXT: vmovaps %ymm2, 64(%rsi)
; AVX2-FCP-NEXT: vmovaps %ymm1, 96(%rsi)
; AVX2-FCP-NEXT: vmovaps %ymm0, 32(%rsi)
; AVX2-FCP-NEXT: vmovaps %ymm6, 64(%rdx)
; AVX2-FCP-NEXT: vmovaps %ymm8, (%rdx)
; AVX2-FCP-NEXT: vmovaps %ymm5, 96(%rdx)
; AVX2-FCP-NEXT: vmovaps %ymm3, 32(%rdx)
; AVX2-FCP-NEXT: vmovaps %ymm11, 64(%rcx)
; AVX2-FCP-NEXT: vmovaps %ymm10, (%rcx)
; AVX2-FCP-NEXT: vmovaps %ymm9, 96(%rcx)
; AVX2-FCP-NEXT: vmovaps %ymm7, 32(%rcx)
; AVX2-FCP-NEXT: vzeroupper
; AVX2-FCP-NEXT: retq
;
1210 ;
1211 ; AVX512-LABEL: load_i64_stride3_vf16:
1212 ; AVX512: # %bb.0:
1213 ; AVX512-NEXT: vmovdqa64 320(%rdi), %zmm0
1214 ; AVX512-NEXT: vmovdqa64 256(%rdi), %zmm1
1215 ; AVX512-NEXT: vmovdqa64 (%rdi), %zmm2
1216 ; AVX512-NEXT: vmovdqa64 64(%rdi), %zmm3
1217 ; AVX512-NEXT: vmovdqa64 128(%rdi), %zmm4
1218 ; AVX512-NEXT: vmovdqa64 192(%rdi), %zmm5
1219 ; AVX512-NEXT: vpmovsxbq {{.*#+}} zmm6 = [0,3,6,9,12,15,0,0]
1220 ; AVX512-NEXT: vmovdqa64 %zmm5, %zmm7
1221 ; AVX512-NEXT: vpermt2q %zmm1, %zmm6, %zmm7
1222 ; AVX512-NEXT: vpmovsxbq {{.*#+}} zmm8 = [0,1,2,3,4,5,10,13]
1223 ; AVX512-NEXT: vpermt2q %zmm0, %zmm8, %zmm7
1224 ; AVX512-NEXT: vpermi2q %zmm3, %zmm2, %zmm6
1225 ; AVX512-NEXT: vpermt2q %zmm4, %zmm8, %zmm6
1226 ; AVX512-NEXT: vpmovsxbq {{.*#+}} zmm8 = [1,4,7,10,13,0,0,0]
1227 ; AVX512-NEXT: vmovdqa64 %zmm5, %zmm9
1228 ; AVX512-NEXT: vpermt2q %zmm1, %zmm8, %zmm9
1229 ; AVX512-NEXT: vpmovsxbq {{.*#+}} zmm10 = [0,1,2,3,4,8,11,14]
1230 ; AVX512-NEXT: vpermt2q %zmm0, %zmm10, %zmm9
1231 ; AVX512-NEXT: vpermi2q %zmm3, %zmm2, %zmm8
1232 ; AVX512-NEXT: vpermt2q %zmm4, %zmm10, %zmm8
1233 ; AVX512-NEXT: vpmovsxbq {{.*#+}} zmm10 = [10,13,0,3,6,0,0,0]
1234 ; AVX512-NEXT: vpermt2q %zmm5, %zmm10, %zmm1
1235 ; AVX512-NEXT: vpmovsxbq {{.*#+}} zmm5 = [0,1,2,3,4,9,12,15]
1236 ; AVX512-NEXT: vpermt2q %zmm0, %zmm5, %zmm1
1237 ; AVX512-NEXT: vpermt2q %zmm2, %zmm10, %zmm3
1238 ; AVX512-NEXT: vpermt2q %zmm4, %zmm5, %zmm3
1239 ; AVX512-NEXT: vmovdqa64 %zmm7, 64(%rsi)
1240 ; AVX512-NEXT: vmovdqa64 %zmm6, (%rsi)
1241 ; AVX512-NEXT: vmovdqa64 %zmm9, 64(%rdx)
1242 ; AVX512-NEXT: vmovdqa64 %zmm8, (%rdx)
1243 ; AVX512-NEXT: vmovdqa64 %zmm1, 64(%rcx)
1244 ; AVX512-NEXT: vmovdqa64 %zmm3, (%rcx)
1245 ; AVX512-NEXT: vzeroupper
1246 ; AVX512-NEXT: retq
1247 ;
1248 ; AVX512-FCP-LABEL: load_i64_stride3_vf16:
1249 ; AVX512-FCP: # %bb.0:
1250 ; AVX512-FCP-NEXT: vmovdqa64 320(%rdi), %zmm0
1251 ; AVX512-FCP-NEXT: vmovdqa64 256(%rdi), %zmm1
1252 ; AVX512-FCP-NEXT: vmovdqa64 (%rdi), %zmm2
1253 ; AVX512-FCP-NEXT: vmovdqa64 64(%rdi), %zmm3
1254 ; AVX512-FCP-NEXT: vmovdqa64 128(%rdi), %zmm4
1255 ; AVX512-FCP-NEXT: vmovdqa64 192(%rdi), %zmm5
1256 ; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm6 = [0,3,6,9,12,15,0,0]
1257 ; AVX512-FCP-NEXT: vmovdqa64 %zmm5, %zmm7
1258 ; AVX512-FCP-NEXT: vpermt2q %zmm1, %zmm6, %zmm7
1259 ; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm8 = [0,1,2,3,4,5,10,13]
1260 ; AVX512-FCP-NEXT: vpermt2q %zmm0, %zmm8, %zmm7
1261 ; AVX512-FCP-NEXT: vpermi2q %zmm3, %zmm2, %zmm6
1262 ; AVX512-FCP-NEXT: vpermt2q %zmm4, %zmm8, %zmm6
1263 ; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm8 = [1,4,7,10,13,0,0,0]
1264 ; AVX512-FCP-NEXT: vmovdqa64 %zmm5, %zmm9
1265 ; AVX512-FCP-NEXT: vpermt2q %zmm1, %zmm8, %zmm9
1266 ; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm10 = [0,1,2,3,4,8,11,14]
1267 ; AVX512-FCP-NEXT: vpermt2q %zmm0, %zmm10, %zmm9
1268 ; AVX512-FCP-NEXT: vpermi2q %zmm3, %zmm2, %zmm8
1269 ; AVX512-FCP-NEXT: vpermt2q %zmm4, %zmm10, %zmm8
1270 ; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm10 = [10,13,0,3,6,0,0,0]
1271 ; AVX512-FCP-NEXT: vpermt2q %zmm5, %zmm10, %zmm1
1272 ; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm5 = [0,1,2,3,4,9,12,15]
1273 ; AVX512-FCP-NEXT: vpermt2q %zmm0, %zmm5, %zmm1
1274 ; AVX512-FCP-NEXT: vpermt2q %zmm2, %zmm10, %zmm3
1275 ; AVX512-FCP-NEXT: vpermt2q %zmm4, %zmm5, %zmm3
1276 ; AVX512-FCP-NEXT: vmovdqa64 %zmm7, 64(%rsi)
1277 ; AVX512-FCP-NEXT: vmovdqa64 %zmm6, (%rsi)
1278 ; AVX512-FCP-NEXT: vmovdqa64 %zmm9, 64(%rdx)
1279 ; AVX512-FCP-NEXT: vmovdqa64 %zmm8, (%rdx)
1280 ; AVX512-FCP-NEXT: vmovdqa64 %zmm1, 64(%rcx)
1281 ; AVX512-FCP-NEXT: vmovdqa64 %zmm3, (%rcx)
1282 ; AVX512-FCP-NEXT: vzeroupper
1283 ; AVX512-FCP-NEXT: retq
1284 ;
1285 ; AVX512DQ-LABEL: load_i64_stride3_vf16:
1286 ; AVX512DQ: # %bb.0:
1287 ; AVX512DQ-NEXT: vmovdqa64 320(%rdi), %zmm0
1288 ; AVX512DQ-NEXT: vmovdqa64 256(%rdi), %zmm1
1289 ; AVX512DQ-NEXT: vmovdqa64 (%rdi), %zmm2
1290 ; AVX512DQ-NEXT: vmovdqa64 64(%rdi), %zmm3
1291 ; AVX512DQ-NEXT: vmovdqa64 128(%rdi), %zmm4
1292 ; AVX512DQ-NEXT: vmovdqa64 192(%rdi), %zmm5
1293 ; AVX512DQ-NEXT: vpmovsxbq {{.*#+}} zmm6 = [0,3,6,9,12,15,0,0]
1294 ; AVX512DQ-NEXT: vmovdqa64 %zmm5, %zmm7
1295 ; AVX512DQ-NEXT: vpermt2q %zmm1, %zmm6, %zmm7
1296 ; AVX512DQ-NEXT: vpmovsxbq {{.*#+}} zmm8 = [0,1,2,3,4,5,10,13]
1297 ; AVX512DQ-NEXT: vpermt2q %zmm0, %zmm8, %zmm7
1298 ; AVX512DQ-NEXT: vpermi2q %zmm3, %zmm2, %zmm6
1299 ; AVX512DQ-NEXT: vpermt2q %zmm4, %zmm8, %zmm6
1300 ; AVX512DQ-NEXT: vpmovsxbq {{.*#+}} zmm8 = [1,4,7,10,13,0,0,0]
1301 ; AVX512DQ-NEXT: vmovdqa64 %zmm5, %zmm9
1302 ; AVX512DQ-NEXT: vpermt2q %zmm1, %zmm8, %zmm9
1303 ; AVX512DQ-NEXT: vpmovsxbq {{.*#+}} zmm10 = [0,1,2,3,4,8,11,14]
1304 ; AVX512DQ-NEXT: vpermt2q %zmm0, %zmm10, %zmm9
1305 ; AVX512DQ-NEXT: vpermi2q %zmm3, %zmm2, %zmm8
1306 ; AVX512DQ-NEXT: vpermt2q %zmm4, %zmm10, %zmm8
1307 ; AVX512DQ-NEXT: vpmovsxbq {{.*#+}} zmm10 = [10,13,0,3,6,0,0,0]
1308 ; AVX512DQ-NEXT: vpermt2q %zmm5, %zmm10, %zmm1
1309 ; AVX512DQ-NEXT: vpmovsxbq {{.*#+}} zmm5 = [0,1,2,3,4,9,12,15]
1310 ; AVX512DQ-NEXT: vpermt2q %zmm0, %zmm5, %zmm1
1311 ; AVX512DQ-NEXT: vpermt2q %zmm2, %zmm10, %zmm3
1312 ; AVX512DQ-NEXT: vpermt2q %zmm4, %zmm5, %zmm3
1313 ; AVX512DQ-NEXT: vmovdqa64 %zmm7, 64(%rsi)
1314 ; AVX512DQ-NEXT: vmovdqa64 %zmm6, (%rsi)
1315 ; AVX512DQ-NEXT: vmovdqa64 %zmm9, 64(%rdx)
1316 ; AVX512DQ-NEXT: vmovdqa64 %zmm8, (%rdx)
1317 ; AVX512DQ-NEXT: vmovdqa64 %zmm1, 64(%rcx)
1318 ; AVX512DQ-NEXT: vmovdqa64 %zmm3, (%rcx)
1319 ; AVX512DQ-NEXT: vzeroupper
1320 ; AVX512DQ-NEXT: retq
1321 ;
1322 ; AVX512DQ-FCP-LABEL: load_i64_stride3_vf16:
1323 ; AVX512DQ-FCP: # %bb.0:
1324 ; AVX512DQ-FCP-NEXT: vmovdqa64 320(%rdi), %zmm0
1325 ; AVX512DQ-FCP-NEXT: vmovdqa64 256(%rdi), %zmm1
1326 ; AVX512DQ-FCP-NEXT: vmovdqa64 (%rdi), %zmm2
1327 ; AVX512DQ-FCP-NEXT: vmovdqa64 64(%rdi), %zmm3
1328 ; AVX512DQ-FCP-NEXT: vmovdqa64 128(%rdi), %zmm4
1329 ; AVX512DQ-FCP-NEXT: vmovdqa64 192(%rdi), %zmm5
1330 ; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm6 = [0,3,6,9,12,15,0,0]
1331 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm5, %zmm7
1332 ; AVX512DQ-FCP-NEXT: vpermt2q %zmm1, %zmm6, %zmm7
1333 ; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm8 = [0,1,2,3,4,5,10,13]
1334 ; AVX512DQ-FCP-NEXT: vpermt2q %zmm0, %zmm8, %zmm7
1335 ; AVX512DQ-FCP-NEXT: vpermi2q %zmm3, %zmm2, %zmm6
1336 ; AVX512DQ-FCP-NEXT: vpermt2q %zmm4, %zmm8, %zmm6
1337 ; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm8 = [1,4,7,10,13,0,0,0]
1338 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm5, %zmm9
1339 ; AVX512DQ-FCP-NEXT: vpermt2q %zmm1, %zmm8, %zmm9
1340 ; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm10 = [0,1,2,3,4,8,11,14]
1341 ; AVX512DQ-FCP-NEXT: vpermt2q %zmm0, %zmm10, %zmm9
1342 ; AVX512DQ-FCP-NEXT: vpermi2q %zmm3, %zmm2, %zmm8
1343 ; AVX512DQ-FCP-NEXT: vpermt2q %zmm4, %zmm10, %zmm8
1344 ; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm10 = [10,13,0,3,6,0,0,0]
1345 ; AVX512DQ-FCP-NEXT: vpermt2q %zmm5, %zmm10, %zmm1
1346 ; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm5 = [0,1,2,3,4,9,12,15]
1347 ; AVX512DQ-FCP-NEXT: vpermt2q %zmm0, %zmm5, %zmm1
1348 ; AVX512DQ-FCP-NEXT: vpermt2q %zmm2, %zmm10, %zmm3
1349 ; AVX512DQ-FCP-NEXT: vpermt2q %zmm4, %zmm5, %zmm3
1350 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm7, 64(%rsi)
1351 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm6, (%rsi)
1352 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm9, 64(%rdx)
1353 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm8, (%rdx)
1354 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm1, 64(%rcx)
1355 ; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm3, (%rcx)
1356 ; AVX512DQ-FCP-NEXT: vzeroupper
1357 ; AVX512DQ-FCP-NEXT: retq
1358 ;
1359 ; AVX512BW-LABEL: load_i64_stride3_vf16:
1360 ; AVX512BW: # %bb.0:
1361 ; AVX512BW-NEXT: vmovdqa64 320(%rdi), %zmm0
1362 ; AVX512BW-NEXT: vmovdqa64 256(%rdi), %zmm1
1363 ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm2
1364 ; AVX512BW-NEXT: vmovdqa64 64(%rdi), %zmm3
1365 ; AVX512BW-NEXT: vmovdqa64 128(%rdi), %zmm4
1366 ; AVX512BW-NEXT: vmovdqa64 192(%rdi), %zmm5
1367 ; AVX512BW-NEXT: vpmovsxbq {{.*#+}} zmm6 = [0,3,6,9,12,15,0,0]
1368 ; AVX512BW-NEXT: vmovdqa64 %zmm5, %zmm7
1369 ; AVX512BW-NEXT: vpermt2q %zmm1, %zmm6, %zmm7
1370 ; AVX512BW-NEXT: vpmovsxbq {{.*#+}} zmm8 = [0,1,2,3,4,5,10,13]
1371 ; AVX512BW-NEXT: vpermt2q %zmm0, %zmm8, %zmm7
1372 ; AVX512BW-NEXT: vpermi2q %zmm3, %zmm2, %zmm6
1373 ; AVX512BW-NEXT: vpermt2q %zmm4, %zmm8, %zmm6
1374 ; AVX512BW-NEXT: vpmovsxbq {{.*#+}} zmm8 = [1,4,7,10,13,0,0,0]
1375 ; AVX512BW-NEXT: vmovdqa64 %zmm5, %zmm9
1376 ; AVX512BW-NEXT: vpermt2q %zmm1, %zmm8, %zmm9
1377 ; AVX512BW-NEXT: vpmovsxbq {{.*#+}} zmm10 = [0,1,2,3,4,8,11,14]
1378 ; AVX512BW-NEXT: vpermt2q %zmm0, %zmm10, %zmm9
1379 ; AVX512BW-NEXT: vpermi2q %zmm3, %zmm2, %zmm8
1380 ; AVX512BW-NEXT: vpermt2q %zmm4, %zmm10, %zmm8
1381 ; AVX512BW-NEXT: vpmovsxbq {{.*#+}} zmm10 = [10,13,0,3,6,0,0,0]
1382 ; AVX512BW-NEXT: vpermt2q %zmm5, %zmm10, %zmm1
1383 ; AVX512BW-NEXT: vpmovsxbq {{.*#+}} zmm5 = [0,1,2,3,4,9,12,15]
1384 ; AVX512BW-NEXT: vpermt2q %zmm0, %zmm5, %zmm1
1385 ; AVX512BW-NEXT: vpermt2q %zmm2, %zmm10, %zmm3
1386 ; AVX512BW-NEXT: vpermt2q %zmm4, %zmm5, %zmm3
1387 ; AVX512BW-NEXT: vmovdqa64 %zmm7, 64(%rsi)
1388 ; AVX512BW-NEXT: vmovdqa64 %zmm6, (%rsi)
1389 ; AVX512BW-NEXT: vmovdqa64 %zmm9, 64(%rdx)
1390 ; AVX512BW-NEXT: vmovdqa64 %zmm8, (%rdx)
1391 ; AVX512BW-NEXT: vmovdqa64 %zmm1, 64(%rcx)
1392 ; AVX512BW-NEXT: vmovdqa64 %zmm3, (%rcx)
1393 ; AVX512BW-NEXT: vzeroupper
1394 ; AVX512BW-NEXT: retq
1395 ;
1396 ; AVX512BW-FCP-LABEL: load_i64_stride3_vf16:
1397 ; AVX512BW-FCP: # %bb.0:
1398 ; AVX512BW-FCP-NEXT: vmovdqa64 320(%rdi), %zmm0
1399 ; AVX512BW-FCP-NEXT: vmovdqa64 256(%rdi), %zmm1
1400 ; AVX512BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm2
1401 ; AVX512BW-FCP-NEXT: vmovdqa64 64(%rdi), %zmm3
1402 ; AVX512BW-FCP-NEXT: vmovdqa64 128(%rdi), %zmm4
1403 ; AVX512BW-FCP-NEXT: vmovdqa64 192(%rdi), %zmm5
1404 ; AVX512BW-FCP-NEXT: vpmovsxbq {{.*#+}} zmm6 = [0,3,6,9,12,15,0,0]
1405 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm5, %zmm7
1406 ; AVX512BW-FCP-NEXT: vpermt2q %zmm1, %zmm6, %zmm7
1407 ; AVX512BW-FCP-NEXT: vpmovsxbq {{.*#+}} zmm8 = [0,1,2,3,4,5,10,13]
1408 ; AVX512BW-FCP-NEXT: vpermt2q %zmm0, %zmm8, %zmm7
1409 ; AVX512BW-FCP-NEXT: vpermi2q %zmm3, %zmm2, %zmm6
1410 ; AVX512BW-FCP-NEXT: vpermt2q %zmm4, %zmm8, %zmm6
1411 ; AVX512BW-FCP-NEXT: vpmovsxbq {{.*#+}} zmm8 = [1,4,7,10,13,0,0,0]
1412 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm5, %zmm9
1413 ; AVX512BW-FCP-NEXT: vpermt2q %zmm1, %zmm8, %zmm9
1414 ; AVX512BW-FCP-NEXT: vpmovsxbq {{.*#+}} zmm10 = [0,1,2,3,4,8,11,14]
1415 ; AVX512BW-FCP-NEXT: vpermt2q %zmm0, %zmm10, %zmm9
1416 ; AVX512BW-FCP-NEXT: vpermi2q %zmm3, %zmm2, %zmm8
1417 ; AVX512BW-FCP-NEXT: vpermt2q %zmm4, %zmm10, %zmm8
1418 ; AVX512BW-FCP-NEXT: vpmovsxbq {{.*#+}} zmm10 = [10,13,0,3,6,0,0,0]
1419 ; AVX512BW-FCP-NEXT: vpermt2q %zmm5, %zmm10, %zmm1
1420 ; AVX512BW-FCP-NEXT: vpmovsxbq {{.*#+}} zmm5 = [0,1,2,3,4,9,12,15]
1421 ; AVX512BW-FCP-NEXT: vpermt2q %zmm0, %zmm5, %zmm1
1422 ; AVX512BW-FCP-NEXT: vpermt2q %zmm2, %zmm10, %zmm3
1423 ; AVX512BW-FCP-NEXT: vpermt2q %zmm4, %zmm5, %zmm3
1424 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm7, 64(%rsi)
1425 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm6, (%rsi)
1426 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm9, 64(%rdx)
1427 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm8, (%rdx)
1428 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm1, 64(%rcx)
1429 ; AVX512BW-FCP-NEXT: vmovdqa64 %zmm3, (%rcx)
1430 ; AVX512BW-FCP-NEXT: vzeroupper
1431 ; AVX512BW-FCP-NEXT: retq
1432 ;
1433 ; AVX512DQ-BW-LABEL: load_i64_stride3_vf16:
1434 ; AVX512DQ-BW: # %bb.0:
1435 ; AVX512DQ-BW-NEXT: vmovdqa64 320(%rdi), %zmm0
1436 ; AVX512DQ-BW-NEXT: vmovdqa64 256(%rdi), %zmm1
1437 ; AVX512DQ-BW-NEXT: vmovdqa64 (%rdi), %zmm2
1438 ; AVX512DQ-BW-NEXT: vmovdqa64 64(%rdi), %zmm3
1439 ; AVX512DQ-BW-NEXT: vmovdqa64 128(%rdi), %zmm4
1440 ; AVX512DQ-BW-NEXT: vmovdqa64 192(%rdi), %zmm5
1441 ; AVX512DQ-BW-NEXT: vpmovsxbq {{.*#+}} zmm6 = [0,3,6,9,12,15,0,0]
1442 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm5, %zmm7
1443 ; AVX512DQ-BW-NEXT: vpermt2q %zmm1, %zmm6, %zmm7
1444 ; AVX512DQ-BW-NEXT: vpmovsxbq {{.*#+}} zmm8 = [0,1,2,3,4,5,10,13]
1445 ; AVX512DQ-BW-NEXT: vpermt2q %zmm0, %zmm8, %zmm7
1446 ; AVX512DQ-BW-NEXT: vpermi2q %zmm3, %zmm2, %zmm6
1447 ; AVX512DQ-BW-NEXT: vpermt2q %zmm4, %zmm8, %zmm6
1448 ; AVX512DQ-BW-NEXT: vpmovsxbq {{.*#+}} zmm8 = [1,4,7,10,13,0,0,0]
1449 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm5, %zmm9
1450 ; AVX512DQ-BW-NEXT: vpermt2q %zmm1, %zmm8, %zmm9
1451 ; AVX512DQ-BW-NEXT: vpmovsxbq {{.*#+}} zmm10 = [0,1,2,3,4,8,11,14]
1452 ; AVX512DQ-BW-NEXT: vpermt2q %zmm0, %zmm10, %zmm9
1453 ; AVX512DQ-BW-NEXT: vpermi2q %zmm3, %zmm2, %zmm8
1454 ; AVX512DQ-BW-NEXT: vpermt2q %zmm4, %zmm10, %zmm8
1455 ; AVX512DQ-BW-NEXT: vpmovsxbq {{.*#+}} zmm10 = [10,13,0,3,6,0,0,0]
1456 ; AVX512DQ-BW-NEXT: vpermt2q %zmm5, %zmm10, %zmm1
1457 ; AVX512DQ-BW-NEXT: vpmovsxbq {{.*#+}} zmm5 = [0,1,2,3,4,9,12,15]
1458 ; AVX512DQ-BW-NEXT: vpermt2q %zmm0, %zmm5, %zmm1
1459 ; AVX512DQ-BW-NEXT: vpermt2q %zmm2, %zmm10, %zmm3
1460 ; AVX512DQ-BW-NEXT: vpermt2q %zmm4, %zmm5, %zmm3
1461 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm7, 64(%rsi)
1462 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm6, (%rsi)
1463 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm9, 64(%rdx)
1464 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm8, (%rdx)
1465 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm1, 64(%rcx)
1466 ; AVX512DQ-BW-NEXT: vmovdqa64 %zmm3, (%rcx)
1467 ; AVX512DQ-BW-NEXT: vzeroupper
1468 ; AVX512DQ-BW-NEXT: retq
1469 ;
1470 ; AVX512DQ-BW-FCP-LABEL: load_i64_stride3_vf16:
1471 ; AVX512DQ-BW-FCP: # %bb.0:
1472 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 320(%rdi), %zmm0
1473 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 256(%rdi), %zmm1
1474 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm2
1475 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 64(%rdi), %zmm3
1476 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 128(%rdi), %zmm4
1477 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 192(%rdi), %zmm5
1478 ; AVX512DQ-BW-FCP-NEXT: vpmovsxbq {{.*#+}} zmm6 = [0,3,6,9,12,15,0,0]
1479 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm5, %zmm7
1480 ; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm1, %zmm6, %zmm7
1481 ; AVX512DQ-BW-FCP-NEXT: vpmovsxbq {{.*#+}} zmm8 = [0,1,2,3,4,5,10,13]
1482 ; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm0, %zmm8, %zmm7
1483 ; AVX512DQ-BW-FCP-NEXT: vpermi2q %zmm3, %zmm2, %zmm6
1484 ; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm4, %zmm8, %zmm6
1485 ; AVX512DQ-BW-FCP-NEXT: vpmovsxbq {{.*#+}} zmm8 = [1,4,7,10,13,0,0,0]
1486 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm5, %zmm9
1487 ; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm1, %zmm8, %zmm9
1488 ; AVX512DQ-BW-FCP-NEXT: vpmovsxbq {{.*#+}} zmm10 = [0,1,2,3,4,8,11,14]
1489 ; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm0, %zmm10, %zmm9
1490 ; AVX512DQ-BW-FCP-NEXT: vpermi2q %zmm3, %zmm2, %zmm8
1491 ; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm4, %zmm10, %zmm8
1492 ; AVX512DQ-BW-FCP-NEXT: vpmovsxbq {{.*#+}} zmm10 = [10,13,0,3,6,0,0,0]
1493 ; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm5, %zmm10, %zmm1
1494 ; AVX512DQ-BW-FCP-NEXT: vpmovsxbq {{.*#+}} zmm5 = [0,1,2,3,4,9,12,15]
1495 ; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm0, %zmm5, %zmm1
1496 ; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm2, %zmm10, %zmm3
1497 ; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm4, %zmm5, %zmm3
1498 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm7, 64(%rsi)
1499 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm6, (%rsi)
1500 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm9, 64(%rdx)
1501 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm8, (%rdx)
1502 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm1, 64(%rcx)
1503 ; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm3, (%rcx)
1504 ; AVX512DQ-BW-FCP-NEXT: vzeroupper
1505 ; AVX512DQ-BW-FCP-NEXT: retq
1506 %wide.vec = load <48 x i64>, ptr %in.vec, align 64
1507 %strided.vec0 = shufflevector <48 x i64> %wide.vec, <48 x i64> poison, <16 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21, i32 24, i32 27, i32 30, i32 33, i32 36, i32 39, i32 42, i32 45>
1508 %strided.vec1 = shufflevector <48 x i64> %wide.vec, <48 x i64> poison, <16 x i32> <i32 1, i32 4, i32 7, i32 10, i32 13, i32 16, i32 19, i32 22, i32 25, i32 28, i32 31, i32 34, i32 37, i32 40, i32 43, i32 46>
1509 %strided.vec2 = shufflevector <48 x i64> %wide.vec, <48 x i64> poison, <16 x i32> <i32 2, i32 5, i32 8, i32 11, i32 14, i32 17, i32 20, i32 23, i32 26, i32 29, i32 32, i32 35, i32 38, i32 41, i32 44, i32 47>
1510 store <16 x i64> %strided.vec0, ptr %out.vec0, align 64
1511 store <16 x i64> %strided.vec1, ptr %out.vec1, align 64
1512 store <16 x i64> %strided.vec2, ptr %out.vec2, align 64
1513 ret void
1514 }
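; Note (editorial, not autogenerated): result k of the stride-3 deinterleave
; above gathers lanes k, k+3, k+6, ..., k+45 of the single wide <48 x i64>
; load; vf32 below widens the same pattern to a <96 x i64> load split into
; three <32 x i64> results.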
1516 define void @load_i64_stride3_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2) nounwind {
1517 ; SSE-LABEL: load_i64_stride3_vf32:
1518 ; SSE: # %bb.0:
1519 ; SSE-NEXT: subq $408, %rsp # imm = 0x198
1520 ; SSE-NEXT: movapd 224(%rdi), %xmm6
1521 ; SSE-NEXT: movapd 272(%rdi), %xmm9
1522 ; SSE-NEXT: movapd 128(%rdi), %xmm5
1523 ; SSE-NEXT: movapd 176(%rdi), %xmm8
1524 ; SSE-NEXT: movapd 80(%rdi), %xmm7
1525 ; SSE-NEXT: movapd 96(%rdi), %xmm10
1526 ; SSE-NEXT: movapd 112(%rdi), %xmm0
1527 ; SSE-NEXT: movapd 144(%rdi), %xmm11
1528 ; SSE-NEXT: movapd 160(%rdi), %xmm1
1529 ; SSE-NEXT: movapd 192(%rdi), %xmm12
1530 ; SSE-NEXT: movapd 208(%rdi), %xmm2
1531 ; SSE-NEXT: movapd 240(%rdi), %xmm13
1532 ; SSE-NEXT: movapd 256(%rdi), %xmm3
1533 ; SSE-NEXT: movapd 48(%rdi), %xmm14
1534 ; SSE-NEXT: movapd 64(%rdi), %xmm4
1535 ; SSE-NEXT: movapd %xmm4, %xmm15
1536 ; SSE-NEXT: movsd {{.*#+}} xmm15 = xmm14[0],xmm15[1]
1537 ; SSE-NEXT: movapd %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1538 ; SSE-NEXT: shufpd {{.*#+}} xmm14 = xmm14[1],xmm7[0]
1539 ; SSE-NEXT: movapd %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1540 ; SSE-NEXT: movsd {{.*#+}} xmm7 = xmm4[0],xmm7[1]
1541 ; SSE-NEXT: movapd %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1542 ; SSE-NEXT: movapd %xmm0, %xmm4
1543 ; SSE-NEXT: movsd {{.*#+}} xmm4 = xmm10[0],xmm4[1]
1544 ; SSE-NEXT: movapd %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1545 ; SSE-NEXT: shufpd {{.*#+}} xmm10 = xmm10[1],xmm5[0]
1546 ; SSE-NEXT: movapd %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1547 ; SSE-NEXT: movsd {{.*#+}} xmm5 = xmm0[0],xmm5[1]
1548 ; SSE-NEXT: movapd %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1549 ; SSE-NEXT: movapd %xmm1, %xmm0
1550 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm11[0],xmm0[1]
1551 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1552 ; SSE-NEXT: shufpd {{.*#+}} xmm11 = xmm11[1],xmm8[0]
1553 ; SSE-NEXT: movapd %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1554 ; SSE-NEXT: movsd {{.*#+}} xmm8 = xmm1[0],xmm8[1]
1555 ; SSE-NEXT: movapd %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1556 ; SSE-NEXT: movapd %xmm2, %xmm0
1557 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm12[0],xmm0[1]
1558 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1559 ; SSE-NEXT: shufpd {{.*#+}} xmm12 = xmm12[1],xmm6[0]
1560 ; SSE-NEXT: movapd %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1561 ; SSE-NEXT: movsd {{.*#+}} xmm6 = xmm2[0],xmm6[1]
1562 ; SSE-NEXT: movapd %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1563 ; SSE-NEXT: movapd %xmm3, %xmm0
1564 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm13[0],xmm0[1]
1565 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1566 ; SSE-NEXT: shufpd {{.*#+}} xmm13 = xmm13[1],xmm9[0]
1567 ; SSE-NEXT: movapd %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1568 ; SSE-NEXT: movsd {{.*#+}} xmm9 = xmm3[0],xmm9[1]
1569 ; SSE-NEXT: movapd %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1570 ; SSE-NEXT: movapd 288(%rdi), %xmm2
1571 ; SSE-NEXT: movapd 304(%rdi), %xmm0
1572 ; SSE-NEXT: movapd %xmm0, %xmm1
1573 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
1574 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1575 ; SSE-NEXT: movapd 320(%rdi), %xmm1
1576 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
1577 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1578 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
1579 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1580 ; SSE-NEXT: movapd 336(%rdi), %xmm2
1581 ; SSE-NEXT: movapd 352(%rdi), %xmm0
1582 ; SSE-NEXT: movapd %xmm0, %xmm1
1583 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
1584 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1585 ; SSE-NEXT: movapd 368(%rdi), %xmm1
1586 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
1587 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1588 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
1589 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1590 ; SSE-NEXT: movapd 384(%rdi), %xmm2
1591 ; SSE-NEXT: movapd 400(%rdi), %xmm0
1592 ; SSE-NEXT: movapd %xmm0, %xmm1
1593 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
1594 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1595 ; SSE-NEXT: movapd 416(%rdi), %xmm1
1596 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
1597 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1598 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
1599 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1600 ; SSE-NEXT: movapd 432(%rdi), %xmm2
1601 ; SSE-NEXT: movapd 448(%rdi), %xmm0
1602 ; SSE-NEXT: movapd %xmm0, %xmm1
1603 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
1604 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1605 ; SSE-NEXT: movapd 464(%rdi), %xmm1
1606 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
1607 ; SSE-NEXT: movapd %xmm2, (%rsp) # 16-byte Spill
1608 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
1609 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1610 ; SSE-NEXT: movapd 480(%rdi), %xmm2
1611 ; SSE-NEXT: movapd 496(%rdi), %xmm0
1612 ; SSE-NEXT: movapd %xmm0, %xmm1
1613 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
1614 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1615 ; SSE-NEXT: movapd 512(%rdi), %xmm1
1616 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
1617 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1618 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
1619 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1620 ; SSE-NEXT: movapd 528(%rdi), %xmm15
1621 ; SSE-NEXT: movapd 544(%rdi), %xmm0
1622 ; SSE-NEXT: movapd %xmm0, %xmm1
1623 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm15[0],xmm1[1]
1624 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1625 ; SSE-NEXT: movapd 560(%rdi), %xmm1
1626 ; SSE-NEXT: shufpd {{.*#+}} xmm15 = xmm15[1],xmm1[0]
1627 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
1628 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1629 ; SSE-NEXT: movapd 576(%rdi), %xmm12
1630 ; SSE-NEXT: movapd 592(%rdi), %xmm0
1631 ; SSE-NEXT: movapd %xmm0, %xmm14
1632 ; SSE-NEXT: movsd {{.*#+}} xmm14 = xmm12[0],xmm14[1]
1633 ; SSE-NEXT: movapd 608(%rdi), %xmm1
1634 ; SSE-NEXT: shufpd {{.*#+}} xmm12 = xmm12[1],xmm1[0]
1635 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
1636 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1637 ; SSE-NEXT: movapd 624(%rdi), %xmm8
1638 ; SSE-NEXT: movapd 640(%rdi), %xmm0
1639 ; SSE-NEXT: movapd %xmm0, %xmm11
1640 ; SSE-NEXT: movsd {{.*#+}} xmm11 = xmm8[0],xmm11[1]
1641 ; SSE-NEXT: movapd 656(%rdi), %xmm13
1642 ; SSE-NEXT: shufpd {{.*#+}} xmm8 = xmm8[1],xmm13[0]
1643 ; SSE-NEXT: movsd {{.*#+}} xmm13 = xmm0[0],xmm13[1]
1644 ; SSE-NEXT: movapd 672(%rdi), %xmm6
1645 ; SSE-NEXT: movapd 688(%rdi), %xmm0
1646 ; SSE-NEXT: movapd %xmm0, %xmm5
1647 ; SSE-NEXT: movsd {{.*#+}} xmm5 = xmm6[0],xmm5[1]
1648 ; SSE-NEXT: movapd 704(%rdi), %xmm10
1649 ; SSE-NEXT: shufpd {{.*#+}} xmm6 = xmm6[1],xmm10[0]
1650 ; SSE-NEXT: movsd {{.*#+}} xmm10 = xmm0[0],xmm10[1]
1651 ; SSE-NEXT: movapd 720(%rdi), %xmm4
1652 ; SSE-NEXT: movapd 736(%rdi), %xmm2
1653 ; SSE-NEXT: movapd %xmm2, %xmm3
1654 ; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm4[0],xmm3[1]
1655 ; SSE-NEXT: movapd 752(%rdi), %xmm7
1656 ; SSE-NEXT: shufpd {{.*#+}} xmm4 = xmm4[1],xmm7[0]
1657 ; SSE-NEXT: movsd {{.*#+}} xmm7 = xmm2[0],xmm7[1]
1658 ; SSE-NEXT: movapd (%rdi), %xmm2
1659 ; SSE-NEXT: movapd 16(%rdi), %xmm0
1660 ; SSE-NEXT: movapd %xmm0, %xmm1
1661 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
1662 ; SSE-NEXT: movapd 32(%rdi), %xmm9
1663 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm9[0]
1664 ; SSE-NEXT: movsd {{.*#+}} xmm9 = xmm0[0],xmm9[1]
1665 ; SSE-NEXT: movapd %xmm5, 224(%rsi)
1666 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1667 ; SSE-NEXT: movaps %xmm0, 160(%rsi)
1668 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1669 ; SSE-NEXT: movaps %xmm0, 96(%rsi)
1670 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1671 ; SSE-NEXT: movaps %xmm0, 32(%rsi)
1672 ; SSE-NEXT: movapd %xmm3, 240(%rsi)
1673 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1674 ; SSE-NEXT: movaps %xmm0, 176(%rsi)
1675 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1676 ; SSE-NEXT: movaps %xmm0, 112(%rsi)
1677 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1678 ; SSE-NEXT: movaps %xmm0, 48(%rsi)
1679 ; SSE-NEXT: movapd %xmm14, 192(%rsi)
1680 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1681 ; SSE-NEXT: movaps %xmm0, 128(%rsi)
1682 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1683 ; SSE-NEXT: movaps %xmm0, 64(%rsi)
1684 ; SSE-NEXT: movapd %xmm1, (%rsi)
1685 ; SSE-NEXT: movapd %xmm11, 208(%rsi)
1686 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1687 ; SSE-NEXT: movaps %xmm0, 144(%rsi)
1688 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1689 ; SSE-NEXT: movaps %xmm0, 80(%rsi)
1690 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1691 ; SSE-NEXT: movaps %xmm0, 16(%rsi)
1692 ; SSE-NEXT: movapd %xmm6, 224(%rdx)
1693 ; SSE-NEXT: movapd %xmm4, 240(%rdx)
1694 ; SSE-NEXT: movapd %xmm12, 192(%rdx)
1695 ; SSE-NEXT: movapd %xmm8, 208(%rdx)
1696 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1697 ; SSE-NEXT: movaps %xmm0, 160(%rdx)
1698 ; SSE-NEXT: movapd %xmm15, 176(%rdx)
1699 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1700 ; SSE-NEXT: movaps %xmm0, 128(%rdx)
1701 ; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
1702 ; SSE-NEXT: movaps %xmm0, 144(%rdx)
1703 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1704 ; SSE-NEXT: movaps %xmm0, 96(%rdx)
1705 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1706 ; SSE-NEXT: movaps %xmm0, 112(%rdx)
1707 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1708 ; SSE-NEXT: movaps %xmm0, 64(%rdx)
1709 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1710 ; SSE-NEXT: movaps %xmm0, 80(%rdx)
1711 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1712 ; SSE-NEXT: movaps %xmm0, 32(%rdx)
1713 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1714 ; SSE-NEXT: movaps %xmm0, 48(%rdx)
1715 ; SSE-NEXT: movapd %xmm2, (%rdx)
1716 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1717 ; SSE-NEXT: movaps %xmm0, 16(%rdx)
1718 ; SSE-NEXT: movapd %xmm7, 240(%rcx)
1719 ; SSE-NEXT: movapd %xmm10, 224(%rcx)
1720 ; SSE-NEXT: movapd %xmm13, 208(%rcx)
1721 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1722 ; SSE-NEXT: movaps %xmm0, 192(%rcx)
1723 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1724 ; SSE-NEXT: movaps %xmm0, 176(%rcx)
1725 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1726 ; SSE-NEXT: movaps %xmm0, 160(%rcx)
1727 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1728 ; SSE-NEXT: movaps %xmm0, 144(%rcx)
1729 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1730 ; SSE-NEXT: movaps %xmm0, 128(%rcx)
1731 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1732 ; SSE-NEXT: movaps %xmm0, 112(%rcx)
1733 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1734 ; SSE-NEXT: movaps %xmm0, 96(%rcx)
1735 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1736 ; SSE-NEXT: movaps %xmm0, 80(%rcx)
1737 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1738 ; SSE-NEXT: movaps %xmm0, 64(%rcx)
1739 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1740 ; SSE-NEXT: movaps %xmm0, 48(%rcx)
1741 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1742 ; SSE-NEXT: movaps %xmm0, 32(%rcx)
1743 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1744 ; SSE-NEXT: movaps %xmm0, 16(%rcx)
1745 ; SSE-NEXT: movapd %xmm9, (%rcx)
1746 ; SSE-NEXT: addq $408, %rsp # imm = 0x198
1747 ; SSE-NEXT: retq
1748 ;
1749 ; AVX-LABEL: load_i64_stride3_vf32:
1750 ; AVX: # %bb.0:
1751 ; AVX-NEXT: subq $232, %rsp
1752 ; AVX-NEXT: vmovapd 32(%rdi), %ymm7
1753 ; AVX-NEXT: vmovapd 704(%rdi), %ymm13
1754 ; AVX-NEXT: vmovapd 512(%rdi), %ymm9
1755 ; AVX-NEXT: vmovapd 320(%rdi), %ymm8
1756 ; AVX-NEXT: vmovapd 128(%rdi), %ymm10
1757 ; AVX-NEXT: vblendpd {{.*#+}} ymm12 = mem[0,1],ymm10[2,3]
1758 ; AVX-NEXT: vblendpd {{.*#+}} ymm11 = mem[0,1],ymm8[2,3]
1759 ; AVX-NEXT: vblendpd {{.*#+}} ymm3 = mem[0,1],ymm9[2,3]
1760 ; AVX-NEXT: vmovupd %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1761 ; AVX-NEXT: vblendpd {{.*#+}} ymm2 = mem[0,1],ymm13[2,3]
1762 ; AVX-NEXT: vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1763 ; AVX-NEXT: vblendpd {{.*#+}} ymm15 = mem[0,1],ymm7[2,3]
1764 ; AVX-NEXT: vshufpd {{.*#+}} ymm0 = ymm12[1],ymm10[0],ymm12[3],ymm10[3]
1765 ; AVX-NEXT: vbroadcastsd 176(%rdi), %ymm1
1766 ; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3]
1767 ; AVX-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1768 ; AVX-NEXT: vshufpd {{.*#+}} ymm0 = ymm11[1],ymm8[0],ymm11[3],ymm8[3]
1769 ; AVX-NEXT: vbroadcastsd 368(%rdi), %ymm1
1770 ; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3]
1771 ; AVX-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1772 ; AVX-NEXT: vshufpd {{.*#+}} ymm0 = ymm3[1],ymm9[0],ymm3[3],ymm9[3]
1773 ; AVX-NEXT: vbroadcastsd 560(%rdi), %ymm1
1774 ; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3]
1775 ; AVX-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1776 ; AVX-NEXT: vshufpd {{.*#+}} ymm0 = ymm2[1],ymm13[0],ymm2[3],ymm13[3]
1777 ; AVX-NEXT: vbroadcastsd 752(%rdi), %ymm1
1778 ; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3]
1779 ; AVX-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1780 ; AVX-NEXT: vshufpd {{.*#+}} ymm0 = ymm15[1],ymm7[0],ymm15[3],ymm7[3]
1781 ; AVX-NEXT: vbroadcastsd 80(%rdi), %ymm1
1782 ; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3]
1783 ; AVX-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1784 ; AVX-NEXT: vmovapd 224(%rdi), %ymm3
1785 ; AVX-NEXT: vblendpd {{.*#+}} ymm4 = mem[0,1],ymm3[2,3]
1786 ; AVX-NEXT: vshufpd {{.*#+}} ymm1 = ymm4[1],ymm3[0],ymm4[3],ymm3[3]
1787 ; AVX-NEXT: vbroadcastsd 272(%rdi), %ymm2
1788 ; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1,2],ymm2[3]
1789 ; AVX-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1790 ; AVX-NEXT: vmovapd 416(%rdi), %ymm1
1791 ; AVX-NEXT: vblendpd {{.*#+}} ymm6 = mem[0,1],ymm1[2,3]
1792 ; AVX-NEXT: vshufpd {{.*#+}} ymm2 = ymm6[1],ymm1[0],ymm6[3],ymm1[3]
1793 ; AVX-NEXT: vbroadcastsd 464(%rdi), %ymm5
1794 ; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm2[0,1,2],ymm5[3]
1795 ; AVX-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
1796 ; AVX-NEXT: vmovapd 608(%rdi), %ymm2
1797 ; AVX-NEXT: vblendpd {{.*#+}} ymm5 = mem[0,1],ymm2[2,3]
1798 ; AVX-NEXT: vshufpd {{.*#+}} ymm0 = ymm5[1],ymm2[0],ymm5[3],ymm2[3]
1799 ; AVX-NEXT: vbroadcastsd 656(%rdi), %ymm14
1800 ; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm14[3]
1801 ; AVX-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1802 ; AVX-NEXT: vmovaps 112(%rdi), %xmm0
1803 ; AVX-NEXT: vinsertf128 $1, 160(%rdi), %ymm0, %ymm0
1804 ; AVX-NEXT: vblendpd {{.*#+}} ymm12 = ymm12[0],ymm0[1],ymm12[2],ymm0[3]
1805 ; AVX-NEXT: vblendpd {{.*#+}} ymm10 = ymm10[0,1],mem[2,3]
1806 ; AVX-NEXT: vblendpd {{.*#+}} ymm14 = ymm0[0],ymm10[1],ymm0[2],ymm10[3]
1807 ; AVX-NEXT: vmovaps 16(%rdi), %xmm0
1808 ; AVX-NEXT: vinsertf128 $1, 64(%rdi), %ymm0, %ymm0
1809 ; AVX-NEXT: vblendpd {{.*#+}} ymm15 = ymm15[0],ymm0[1],ymm15[2],ymm0[3]
1810 ; AVX-NEXT: vblendpd {{.*#+}} ymm7 = ymm7[0,1],mem[2,3]
1811 ; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm7[1],ymm0[2],ymm7[3]
1812 ; AVX-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1813 ; AVX-NEXT: vmovaps 304(%rdi), %xmm0
1814 ; AVX-NEXT: vinsertf128 $1, 352(%rdi), %ymm0, %ymm0
1815 ; AVX-NEXT: vblendpd {{.*#+}} ymm11 = ymm11[0],ymm0[1],ymm11[2],ymm0[3]
1816 ; AVX-NEXT: vblendpd {{.*#+}} ymm7 = ymm8[0,1],mem[2,3]
1817 ; AVX-NEXT: vblendpd {{.*#+}} ymm10 = ymm0[0],ymm7[1],ymm0[2],ymm7[3]
1818 ; AVX-NEXT: vmovaps 208(%rdi), %xmm0
1819 ; AVX-NEXT: vinsertf128 $1, 256(%rdi), %ymm0, %ymm0
1820 ; AVX-NEXT: vblendpd {{.*#+}} ymm4 = ymm4[0],ymm0[1],ymm4[2],ymm0[3]
1821 ; AVX-NEXT: vblendpd {{.*#+}} ymm3 = ymm3[0,1],mem[2,3]
1822 ; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm3[1],ymm0[2],ymm3[3]
1823 ; AVX-NEXT: vmovaps 496(%rdi), %xmm3
1824 ; AVX-NEXT: vinsertf128 $1, 544(%rdi), %ymm3, %ymm3
1825 ; AVX-NEXT: vblendpd $5, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm7 # 32-byte Folded Reload
1826 ; AVX-NEXT: # ymm7 = mem[0],ymm3[1],mem[2],ymm3[3]
1827 ; AVX-NEXT: vblendpd {{.*#+}} ymm8 = ymm9[0,1],mem[2,3]
1828 ; AVX-NEXT: vblendpd {{.*#+}} ymm3 = ymm3[0],ymm8[1],ymm3[2],ymm8[3]
1829 ; AVX-NEXT: vmovaps 400(%rdi), %xmm8
1830 ; AVX-NEXT: vinsertf128 $1, 448(%rdi), %ymm8, %ymm8
1831 ; AVX-NEXT: vblendpd {{.*#+}} ymm6 = ymm6[0],ymm8[1],ymm6[2],ymm8[3]
1832 ; AVX-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0,1],mem[2,3]
1833 ; AVX-NEXT: vblendpd {{.*#+}} ymm1 = ymm8[0],ymm1[1],ymm8[2],ymm1[3]
1834 ; AVX-NEXT: vmovaps 688(%rdi), %xmm8
1835 ; AVX-NEXT: vinsertf128 $1, 736(%rdi), %ymm8, %ymm8
1836 ; AVX-NEXT: vblendpd $5, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm9 # 32-byte Folded Reload
1837 ; AVX-NEXT: # ymm9 = mem[0],ymm8[1],mem[2],ymm8[3]
1838 ; AVX-NEXT: vblendpd {{.*#+}} ymm13 = ymm13[0,1],mem[2,3]
1839 ; AVX-NEXT: vblendpd {{.*#+}} ymm8 = ymm8[0],ymm13[1],ymm8[2],ymm13[3]
1840 ; AVX-NEXT: vmovaps 592(%rdi), %xmm13
1841 ; AVX-NEXT: vinsertf128 $1, 640(%rdi), %ymm13, %ymm13
1842 ; AVX-NEXT: vblendpd {{.*#+}} ymm5 = ymm5[0],ymm13[1],ymm5[2],ymm13[3]
1843 ; AVX-NEXT: vblendpd {{.*#+}} ymm2 = ymm2[0,1],mem[2,3]
1844 ; AVX-NEXT: vblendpd {{.*#+}} ymm2 = ymm13[0],ymm2[1],ymm13[2],ymm2[3]
1845 ; AVX-NEXT: vmovapd %ymm5, 192(%rsi)
1846 ; AVX-NEXT: vmovapd %ymm6, 128(%rsi)
1847 ; AVX-NEXT: vmovapd %ymm4, 64(%rsi)
1848 ; AVX-NEXT: vmovapd %ymm15, (%rsi)
1849 ; AVX-NEXT: vmovapd %ymm9, 224(%rsi)
1850 ; AVX-NEXT: vmovapd %ymm7, 160(%rsi)
1851 ; AVX-NEXT: vmovapd %ymm11, 96(%rsi)
1852 ; AVX-NEXT: vmovapd %ymm12, 32(%rsi)
1853 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
1854 ; AVX-NEXT: vmovaps %ymm4, 192(%rdx)
1855 ; AVX-NEXT: vmovups (%rsp), %ymm4 # 32-byte Reload
1856 ; AVX-NEXT: vmovaps %ymm4, 128(%rdx)
1857 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
1858 ; AVX-NEXT: vmovaps %ymm4, 64(%rdx)
1859 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
1860 ; AVX-NEXT: vmovaps %ymm4, (%rdx)
1861 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
1862 ; AVX-NEXT: vmovaps %ymm4, 224(%rdx)
1863 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
1864 ; AVX-NEXT: vmovaps %ymm4, 160(%rdx)
1865 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
1866 ; AVX-NEXT: vmovaps %ymm4, 96(%rdx)
1867 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
1868 ; AVX-NEXT: vmovaps %ymm4, 32(%rdx)
1869 ; AVX-NEXT: vmovapd %ymm2, 192(%rcx)
1870 ; AVX-NEXT: vmovapd %ymm8, 224(%rcx)
1871 ; AVX-NEXT: vmovapd %ymm1, 128(%rcx)
1872 ; AVX-NEXT: vmovapd %ymm3, 160(%rcx)
1873 ; AVX-NEXT: vmovapd %ymm0, 64(%rcx)
1874 ; AVX-NEXT: vmovapd %ymm10, 96(%rcx)
1875 ; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1876 ; AVX-NEXT: vmovaps %ymm0, (%rcx)
1877 ; AVX-NEXT: vmovapd %ymm14, 32(%rcx)
1878 ; AVX-NEXT: addq $232, %rsp
1879 ; AVX-NEXT: vzeroupper
1880 ; AVX-NEXT: retq
1881 ;
1882 ; AVX2-LABEL: load_i64_stride3_vf32:
1883 ; AVX2: # %bb.0:
1884 ; AVX2-NEXT: subq $232, %rsp
1885 ; AVX2-NEXT: vmovaps 32(%rdi), %ymm2
1886 ; AVX2-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1887 ; AVX2-NEXT: vmovaps (%rdi), %ymm3
1888 ; AVX2-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1889 ; AVX2-NEXT: vmovaps 704(%rdi), %ymm11
1890 ; AVX2-NEXT: vmovaps 672(%rdi), %ymm14
1891 ; AVX2-NEXT: vmovaps 512(%rdi), %ymm10
1892 ; AVX2-NEXT: vmovaps 480(%rdi), %ymm15
1893 ; AVX2-NEXT: vmovaps 320(%rdi), %ymm9
1894 ; AVX2-NEXT: vmovaps 288(%rdi), %ymm8
1895 ; AVX2-NEXT: vmovaps 128(%rdi), %ymm7
1896 ; AVX2-NEXT: vmovaps 96(%rdi), %ymm6
1897 ; AVX2-NEXT: vinsertf128 $1, 160(%rdi), %ymm0, %ymm0
1898 ; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm6[0,3,2,3]
1899 ; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm7[4,5,6,7]
1900 ; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
1901 ; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1902 ; AVX2-NEXT: vinsertf128 $1, 352(%rdi), %ymm0, %ymm0
1903 ; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm8[0,3,2,3]
1904 ; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm9[4,5,6,7]
1905 ; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
1906 ; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1907 ; AVX2-NEXT: vinsertf128 $1, 544(%rdi), %ymm0, %ymm0
1908 ; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm15[0,3,2,3]
1909 ; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm10[4,5,6,7]
1910 ; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
1911 ; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1912 ; AVX2-NEXT: vinsertf128 $1, 736(%rdi), %ymm0, %ymm0
1913 ; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm14[0,3,2,3]
1914 ; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm11[4,5,6,7]
1915 ; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
1916 ; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1917 ; AVX2-NEXT: vinsertf128 $1, 64(%rdi), %ymm0, %ymm0
1918 ; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm3[0,3,2,3]
1919 ; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
1920 ; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
1921 ; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1922 ; AVX2-NEXT: vmovaps 224(%rdi), %ymm5
1923 ; AVX2-NEXT: vmovaps 192(%rdi), %ymm4
1924 ; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm4[0,3,2,3]
1925 ; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm5[4,5,6,7]
1926 ; AVX2-NEXT: vinsertf128 $1, 256(%rdi), %ymm0, %ymm1
1927 ; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
1928 ; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1929 ; AVX2-NEXT: vmovaps 416(%rdi), %ymm3
1930 ; AVX2-NEXT: vmovaps 384(%rdi), %ymm2
1931 ; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm2[0,3,2,3]
1932 ; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5,6,7]
1933 ; AVX2-NEXT: vinsertf128 $1, 448(%rdi), %ymm0, %ymm1
1934 ; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
1935 ; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1936 ; AVX2-NEXT: vmovaps 608(%rdi), %ymm1
1937 ; AVX2-NEXT: vmovaps 576(%rdi), %ymm0
1938 ; AVX2-NEXT: vpermpd {{.*#+}} ymm12 = ymm0[0,3,2,3]
1939 ; AVX2-NEXT: vblendps {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm1[4,5,6,7]
1940 ; AVX2-NEXT: vinsertf128 $1, 640(%rdi), %ymm0, %ymm13
1941 ; AVX2-NEXT: vblendps {{.*#+}} ymm12 = ymm12[0,1,2,3,4,5],ymm13[6,7]
1942 ; AVX2-NEXT: vmovups %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1943 ; AVX2-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,3],ymm7[4,5,6,7]
1944 ; AVX2-NEXT: vshufps {{.*#+}} ymm6 = ymm6[2,3,0,1,6,7,4,5]
1945 ; AVX2-NEXT: vbroadcastsd 176(%rdi), %ymm7
1946 ; AVX2-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5],ymm7[6,7]
1947 ; AVX2-NEXT: vmovups %ymm6, (%rsp) # 32-byte Spill
1948 ; AVX2-NEXT: vblendps {{.*#+}} ymm7 = ymm9[0,1],ymm8[2,3],ymm9[4,5,6,7]
1949 ; AVX2-NEXT: vshufps {{.*#+}} ymm7 = ymm7[2,3,0,1,6,7,4,5]
1950 ; AVX2-NEXT: vbroadcastsd 368(%rdi), %ymm8
1951 ; AVX2-NEXT: vblendps {{.*#+}} ymm8 = ymm7[0,1,2,3,4,5],ymm8[6,7]
1952 ; AVX2-NEXT: vblendps {{.*#+}} ymm7 = ymm10[0,1],ymm15[2,3],ymm10[4,5,6,7]
1953 ; AVX2-NEXT: vshufps {{.*#+}} ymm7 = ymm7[2,3,0,1,6,7,4,5]
1954 ; AVX2-NEXT: vbroadcastsd 560(%rdi), %ymm9
1955 ; AVX2-NEXT: vblendps {{.*#+}} ymm9 = ymm7[0,1,2,3,4,5],ymm9[6,7]
1956 ; AVX2-NEXT: vblendps {{.*#+}} ymm7 = ymm11[0,1],ymm14[2,3],ymm11[4,5,6,7]
1957 ; AVX2-NEXT: vshufps {{.*#+}} ymm7 = ymm7[2,3,0,1,6,7,4,5]
1958 ; AVX2-NEXT: vbroadcastsd 752(%rdi), %ymm10
1959 ; AVX2-NEXT: vblendps {{.*#+}} ymm11 = ymm7[0,1,2,3,4,5],ymm10[6,7]
1960 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
1961 ; AVX2-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm7 # 32-byte Folded Reload
1962 ; AVX2-NEXT: # ymm7 = ymm6[0,1],mem[2,3],ymm6[4,5,6,7]
1963 ; AVX2-NEXT: vshufps {{.*#+}} ymm7 = ymm7[2,3,0,1,6,7,4,5]
1964 ; AVX2-NEXT: vbroadcastsd 80(%rdi), %ymm10
1965 ; AVX2-NEXT: vblendps {{.*#+}} ymm12 = ymm7[0,1,2,3,4,5],ymm10[6,7]
1966 ; AVX2-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1],ymm4[2,3],ymm5[4,5,6,7]
1967 ; AVX2-NEXT: vshufps {{.*#+}} ymm4 = ymm4[2,3,0,1,6,7,4,5]
1968 ; AVX2-NEXT: vbroadcastsd 272(%rdi), %ymm5
1969 ; AVX2-NEXT: vblendps {{.*#+}} ymm13 = ymm4[0,1,2,3,4,5],ymm5[6,7]
1970 ; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3],ymm3[4,5,6,7]
1971 ; AVX2-NEXT: vshufps {{.*#+}} ymm2 = ymm2[2,3,0,1,6,7,4,5]
1972 ; AVX2-NEXT: vbroadcastsd 464(%rdi), %ymm3
1973 ; AVX2-NEXT: vblendps {{.*#+}} ymm15 = ymm2[0,1,2,3,4,5],ymm3[6,7]
1974 ; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7]
1975 ; AVX2-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5]
1976 ; AVX2-NEXT: vbroadcastsd 656(%rdi), %ymm1
1977 ; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm0[0,1,2,3,4,5],ymm1[6,7]
1978 ; AVX2-NEXT: vmovaps 112(%rdi), %xmm0
1979 ; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
1980 ; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
1981 ; AVX2-NEXT: vblendps {{.*#+}} ymm14 = ymm0[0,1,2,3],ymm1[4,5,6,7]
1982 ; AVX2-NEXT: vmovaps 16(%rdi), %xmm0
1983 ; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
1984 ; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
1985 ; AVX2-NEXT: vblendps {{.*#+}} ymm3 = ymm0[0,1,2,3],ymm1[4,5,6,7]
1986 ; AVX2-NEXT: vmovaps 304(%rdi), %xmm0
1987 ; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
1988 ; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
1989 ; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm0[0,1,2,3],ymm1[4,5,6,7]
1990 ; AVX2-NEXT: vmovaps 208(%rdi), %xmm0
1991 ; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
1992 ; AVX2-NEXT: vpermpd {{.*#+}} ymm4 = mem[0,1,0,3]
1993 ; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm4[4,5,6,7]
1994 ; AVX2-NEXT: vmovaps 496(%rdi), %xmm4
1995 ; AVX2-NEXT: vblendps {{.*#+}} xmm4 = xmm4[0,1],mem[2,3]
1996 ; AVX2-NEXT: vpermpd {{.*#+}} ymm5 = mem[0,1,0,3]
1997 ; AVX2-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5,6,7]
1998 ; AVX2-NEXT: vmovaps 400(%rdi), %xmm5
1999 ; AVX2-NEXT: vblendps {{.*#+}} xmm5 = xmm5[0,1],mem[2,3]
2000 ; AVX2-NEXT: vpermpd {{.*#+}} ymm7 = mem[0,1,0,3]
2001 ; AVX2-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm7[4,5,6,7]
2002 ; AVX2-NEXT: vmovaps 688(%rdi), %xmm7
2003 ; AVX2-NEXT: vblendps {{.*#+}} xmm7 = xmm7[0,1],mem[2,3]
2004 ; AVX2-NEXT: vpermpd {{.*#+}} ymm10 = mem[0,1,0,3]
2005 ; AVX2-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm10[4,5,6,7]
2006 ; AVX2-NEXT: vmovaps 592(%rdi), %xmm10
2007 ; AVX2-NEXT: vblendps {{.*#+}} xmm10 = xmm10[0,1],mem[2,3]
2008 ; AVX2-NEXT: vpermpd {{.*#+}} ymm6 = mem[0,1,0,3]
2009 ; AVX2-NEXT: vblendps {{.*#+}} ymm6 = ymm10[0,1,2,3],ymm6[4,5,6,7]
2010 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
2011 ; AVX2-NEXT: vmovaps %ymm10, 192(%rsi)
2012 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
2013 ; AVX2-NEXT: vmovaps %ymm10, 128(%rsi)
2014 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
2015 ; AVX2-NEXT: vmovaps %ymm10, 64(%rsi)
2016 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
2017 ; AVX2-NEXT: vmovaps %ymm10, (%rsi)
2018 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
2019 ; AVX2-NEXT: vmovaps %ymm10, 224(%rsi)
2020 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
2021 ; AVX2-NEXT: vmovaps %ymm10, 160(%rsi)
2022 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
2023 ; AVX2-NEXT: vmovaps %ymm10, 96(%rsi)
2024 ; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
2025 ; AVX2-NEXT: vmovaps %ymm10, 32(%rsi)
2026 ; AVX2-NEXT: vmovaps %ymm2, 192(%rdx)
2027 ; AVX2-NEXT: vmovaps %ymm15, 128(%rdx)
2028 ; AVX2-NEXT: vmovaps %ymm13, 64(%rdx)
2029 ; AVX2-NEXT: vmovaps %ymm12, (%rdx)
2030 ; AVX2-NEXT: vmovaps %ymm11, 224(%rdx)
2031 ; AVX2-NEXT: vmovaps %ymm9, 160(%rdx)
2032 ; AVX2-NEXT: vmovaps %ymm8, 96(%rdx)
2033 ; AVX2-NEXT: vmovups (%rsp), %ymm2 # 32-byte Reload
2034 ; AVX2-NEXT: vmovaps %ymm2, 32(%rdx)
2035 ; AVX2-NEXT: vmovaps %ymm6, 192(%rcx)
2036 ; AVX2-NEXT: vmovaps %ymm7, 224(%rcx)
2037 ; AVX2-NEXT: vmovaps %ymm5, 128(%rcx)
2038 ; AVX2-NEXT: vmovaps %ymm4, 160(%rcx)
2039 ; AVX2-NEXT: vmovaps %ymm0, 64(%rcx)
2040 ; AVX2-NEXT: vmovaps %ymm1, 96(%rcx)
2041 ; AVX2-NEXT: vmovaps %ymm3, (%rcx)
2042 ; AVX2-NEXT: vmovaps %ymm14, 32(%rcx)
2043 ; AVX2-NEXT: addq $232, %rsp
2044 ; AVX2-NEXT: vzeroupper
2045 ; AVX2-NEXT: retq
2046 ;
2047 ; AVX2-FP-LABEL: load_i64_stride3_vf32:
2048 ; AVX2-FP: # %bb.0:
2049 ; AVX2-FP-NEXT: subq $232, %rsp
2050 ; AVX2-FP-NEXT: vmovaps 32(%rdi), %ymm2
2051 ; AVX2-FP-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2052 ; AVX2-FP-NEXT: vmovaps (%rdi), %ymm3
2053 ; AVX2-FP-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2054 ; AVX2-FP-NEXT: vmovaps 704(%rdi), %ymm11
2055 ; AVX2-FP-NEXT: vmovaps 672(%rdi), %ymm14
2056 ; AVX2-FP-NEXT: vmovaps 512(%rdi), %ymm10
2057 ; AVX2-FP-NEXT: vmovaps 480(%rdi), %ymm15
2058 ; AVX2-FP-NEXT: vmovaps 320(%rdi), %ymm9
2059 ; AVX2-FP-NEXT: vmovaps 288(%rdi), %ymm8
2060 ; AVX2-FP-NEXT: vmovaps 128(%rdi), %ymm7
2061 ; AVX2-FP-NEXT: vmovaps 96(%rdi), %ymm6
2062 ; AVX2-FP-NEXT: vinsertf128 $1, 160(%rdi), %ymm0, %ymm0
2063 ; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm1 = ymm6[0,3,2,3]
2064 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm7[4,5,6,7]
2065 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
2066 ; AVX2-FP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2067 ; AVX2-FP-NEXT: vinsertf128 $1, 352(%rdi), %ymm0, %ymm0
2068 ; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm1 = ymm8[0,3,2,3]
2069 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm9[4,5,6,7]
2070 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
2071 ; AVX2-FP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2072 ; AVX2-FP-NEXT: vinsertf128 $1, 544(%rdi), %ymm0, %ymm0
2073 ; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm1 = ymm15[0,3,2,3]
2074 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm10[4,5,6,7]
2075 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
2076 ; AVX2-FP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2077 ; AVX2-FP-NEXT: vinsertf128 $1, 736(%rdi), %ymm0, %ymm0
2078 ; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm1 = ymm14[0,3,2,3]
2079 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm11[4,5,6,7]
2080 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
2081 ; AVX2-FP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2082 ; AVX2-FP-NEXT: vinsertf128 $1, 64(%rdi), %ymm0, %ymm0
2083 ; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm1 = ymm3[0,3,2,3]
2084 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
2085 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
2086 ; AVX2-FP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2087 ; AVX2-FP-NEXT: vmovaps 224(%rdi), %ymm5
2088 ; AVX2-FP-NEXT: vmovaps 192(%rdi), %ymm4
2089 ; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm0 = ymm4[0,3,2,3]
2090 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm5[4,5,6,7]
2091 ; AVX2-FP-NEXT: vinsertf128 $1, 256(%rdi), %ymm0, %ymm1
2092 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
2093 ; AVX2-FP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2094 ; AVX2-FP-NEXT: vmovaps 416(%rdi), %ymm3
2095 ; AVX2-FP-NEXT: vmovaps 384(%rdi), %ymm2
2096 ; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm0 = ymm2[0,3,2,3]
2097 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5,6,7]
2098 ; AVX2-FP-NEXT: vinsertf128 $1, 448(%rdi), %ymm0, %ymm1
2099 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
2100 ; AVX2-FP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2101 ; AVX2-FP-NEXT: vmovaps 608(%rdi), %ymm1
2102 ; AVX2-FP-NEXT: vmovaps 576(%rdi), %ymm0
2103 ; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm12 = ymm0[0,3,2,3]
2104 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm1[4,5,6,7]
2105 ; AVX2-FP-NEXT: vinsertf128 $1, 640(%rdi), %ymm0, %ymm13
2106 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm12 = ymm12[0,1,2,3,4,5],ymm13[6,7]
2107 ; AVX2-FP-NEXT: vmovups %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2108 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,3],ymm7[4,5,6,7]
2109 ; AVX2-FP-NEXT: vshufps {{.*#+}} ymm6 = ymm6[2,3,0,1,6,7,4,5]
2110 ; AVX2-FP-NEXT: vbroadcastsd 176(%rdi), %ymm7
2111 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5],ymm7[6,7]
2112 ; AVX2-FP-NEXT: vmovups %ymm6, (%rsp) # 32-byte Spill
2113 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm7 = ymm9[0,1],ymm8[2,3],ymm9[4,5,6,7]
2114 ; AVX2-FP-NEXT: vshufps {{.*#+}} ymm7 = ymm7[2,3,0,1,6,7,4,5]
2115 ; AVX2-FP-NEXT: vbroadcastsd 368(%rdi), %ymm8
2116 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm8 = ymm7[0,1,2,3,4,5],ymm8[6,7]
2117 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm7 = ymm10[0,1],ymm15[2,3],ymm10[4,5,6,7]
2118 ; AVX2-FP-NEXT: vshufps {{.*#+}} ymm7 = ymm7[2,3,0,1,6,7,4,5]
2119 ; AVX2-FP-NEXT: vbroadcastsd 560(%rdi), %ymm9
2120 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm9 = ymm7[0,1,2,3,4,5],ymm9[6,7]
2121 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm7 = ymm11[0,1],ymm14[2,3],ymm11[4,5,6,7]
2122 ; AVX2-FP-NEXT: vshufps {{.*#+}} ymm7 = ymm7[2,3,0,1,6,7,4,5]
2123 ; AVX2-FP-NEXT: vbroadcastsd 752(%rdi), %ymm10
2124 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm11 = ymm7[0,1,2,3,4,5],ymm10[6,7]
2125 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
2126 ; AVX2-FP-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm7 # 32-byte Folded Reload
2127 ; AVX2-FP-NEXT: # ymm7 = ymm6[0,1],mem[2,3],ymm6[4,5,6,7]
; AVX2-FP-NEXT: vshufps {{.*#+}} ymm7 = ymm7[2,3,0,1,6,7,4,5]
; AVX2-FP-NEXT: vbroadcastsd 80(%rdi), %ymm10
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm12 = ymm7[0,1,2,3,4,5],ymm10[6,7]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1],ymm4[2,3],ymm5[4,5,6,7]
; AVX2-FP-NEXT: vshufps {{.*#+}} ymm4 = ymm4[2,3,0,1,6,7,4,5]
; AVX2-FP-NEXT: vbroadcastsd 272(%rdi), %ymm5
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm13 = ymm4[0,1,2,3,4,5],ymm5[6,7]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3],ymm3[4,5,6,7]
; AVX2-FP-NEXT: vshufps {{.*#+}} ymm2 = ymm2[2,3,0,1,6,7,4,5]
; AVX2-FP-NEXT: vbroadcastsd 464(%rdi), %ymm3
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm15 = ymm2[0,1,2,3,4,5],ymm3[6,7]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5]
; AVX2-FP-NEXT: vbroadcastsd 656(%rdi), %ymm1
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm2 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FP-NEXT: vmovaps 112(%rdi), %xmm0
; AVX2-FP-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm14 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT: vmovaps 16(%rdi), %xmm0
; AVX2-FP-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm3 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT: vmovaps 304(%rdi), %xmm0
; AVX2-FP-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm1 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT: vmovaps 208(%rdi), %xmm0
; AVX2-FP-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm4 = mem[0,1,0,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm4[4,5,6,7]
; AVX2-FP-NEXT: vmovaps 496(%rdi), %xmm4
; AVX2-FP-NEXT: vblendps {{.*#+}} xmm4 = xmm4[0,1],mem[2,3]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm5 = mem[0,1,0,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5,6,7]
; AVX2-FP-NEXT: vmovaps 400(%rdi), %xmm5
; AVX2-FP-NEXT: vblendps {{.*#+}} xmm5 = xmm5[0,1],mem[2,3]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm7 = mem[0,1,0,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm7[4,5,6,7]
; AVX2-FP-NEXT: vmovaps 688(%rdi), %xmm7
; AVX2-FP-NEXT: vblendps {{.*#+}} xmm7 = xmm7[0,1],mem[2,3]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm10 = mem[0,1,0,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm10[4,5,6,7]
; AVX2-FP-NEXT: vmovaps 592(%rdi), %xmm10
; AVX2-FP-NEXT: vblendps {{.*#+}} xmm10 = xmm10[0,1],mem[2,3]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm6 = mem[0,1,0,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm6 = ymm10[0,1,2,3],ymm6[4,5,6,7]
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm10, 192(%rsi)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm10, 128(%rsi)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm10, 64(%rsi)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm10, (%rsi)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm10, 224(%rsi)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm10, 160(%rsi)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm10, 96(%rsi)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm10, 32(%rsi)
; AVX2-FP-NEXT: vmovaps %ymm2, 192(%rdx)
; AVX2-FP-NEXT: vmovaps %ymm15, 128(%rdx)
; AVX2-FP-NEXT: vmovaps %ymm13, 64(%rdx)
; AVX2-FP-NEXT: vmovaps %ymm12, (%rdx)
; AVX2-FP-NEXT: vmovaps %ymm11, 224(%rdx)
; AVX2-FP-NEXT: vmovaps %ymm9, 160(%rdx)
; AVX2-FP-NEXT: vmovaps %ymm8, 96(%rdx)
; AVX2-FP-NEXT: vmovups (%rsp), %ymm2 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm2, 32(%rdx)
; AVX2-FP-NEXT: vmovaps %ymm6, 192(%rcx)
; AVX2-FP-NEXT: vmovaps %ymm7, 224(%rcx)
; AVX2-FP-NEXT: vmovaps %ymm5, 128(%rcx)
; AVX2-FP-NEXT: vmovaps %ymm4, 160(%rcx)
; AVX2-FP-NEXT: vmovaps %ymm0, 64(%rcx)
; AVX2-FP-NEXT: vmovaps %ymm1, 96(%rcx)
; AVX2-FP-NEXT: vmovaps %ymm3, (%rcx)
; AVX2-FP-NEXT: vmovaps %ymm14, 32(%rcx)
; AVX2-FP-NEXT: addq $232, %rsp
; AVX2-FP-NEXT: vzeroupper
; AVX2-FP-NEXT: retq
;
; AVX2-FCP-LABEL: load_i64_stride3_vf32:
; AVX2-FCP: # %bb.0:
; AVX2-FCP-NEXT: subq $232, %rsp
; AVX2-FCP-NEXT: vmovaps 32(%rdi), %ymm2
; AVX2-FCP-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovaps (%rdi), %ymm3
; AVX2-FCP-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovaps 704(%rdi), %ymm11
; AVX2-FCP-NEXT: vmovaps 672(%rdi), %ymm14
; AVX2-FCP-NEXT: vmovaps 512(%rdi), %ymm10
; AVX2-FCP-NEXT: vmovaps 480(%rdi), %ymm15
; AVX2-FCP-NEXT: vmovaps 320(%rdi), %ymm9
; AVX2-FCP-NEXT: vmovaps 288(%rdi), %ymm8
; AVX2-FCP-NEXT: vmovaps 128(%rdi), %ymm7
; AVX2-FCP-NEXT: vmovaps 96(%rdi), %ymm6
; AVX2-FCP-NEXT: vinsertf128 $1, 160(%rdi), %ymm0, %ymm0
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm1 = ymm6[0,3,2,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm7[4,5,6,7]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FCP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vinsertf128 $1, 352(%rdi), %ymm0, %ymm0
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm1 = ymm8[0,3,2,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm9[4,5,6,7]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FCP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vinsertf128 $1, 544(%rdi), %ymm0, %ymm0
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm1 = ymm15[0,3,2,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm10[4,5,6,7]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FCP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vinsertf128 $1, 736(%rdi), %ymm0, %ymm0
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm1 = ymm14[0,3,2,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm11[4,5,6,7]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FCP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vinsertf128 $1, 64(%rdi), %ymm0, %ymm0
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm1 = ymm3[0,3,2,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FCP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovaps 224(%rdi), %ymm5
; AVX2-FCP-NEXT: vmovaps 192(%rdi), %ymm4
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm0 = ymm4[0,3,2,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm5[4,5,6,7]
; AVX2-FCP-NEXT: vinsertf128 $1, 256(%rdi), %ymm0, %ymm1
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FCP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovaps 416(%rdi), %ymm3
; AVX2-FCP-NEXT: vmovaps 384(%rdi), %ymm2
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm0 = ymm2[0,3,2,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5,6,7]
; AVX2-FCP-NEXT: vinsertf128 $1, 448(%rdi), %ymm0, %ymm1
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FCP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovaps 608(%rdi), %ymm1
; AVX2-FCP-NEXT: vmovaps 576(%rdi), %ymm0
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm12 = ymm0[0,3,2,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT: vinsertf128 $1, 640(%rdi), %ymm0, %ymm13
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm12 = ymm12[0,1,2,3,4,5],ymm13[6,7]
; AVX2-FCP-NEXT: vmovups %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,3],ymm7[4,5,6,7]
; AVX2-FCP-NEXT: vshufps {{.*#+}} ymm6 = ymm6[2,3,0,1,6,7,4,5]
; AVX2-FCP-NEXT: vbroadcastsd 176(%rdi), %ymm7
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5],ymm7[6,7]
; AVX2-FCP-NEXT: vmovups %ymm6, (%rsp) # 32-byte Spill
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm7 = ymm9[0,1],ymm8[2,3],ymm9[4,5,6,7]
; AVX2-FCP-NEXT: vshufps {{.*#+}} ymm7 = ymm7[2,3,0,1,6,7,4,5]
; AVX2-FCP-NEXT: vbroadcastsd 368(%rdi), %ymm8
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm8 = ymm7[0,1,2,3,4,5],ymm8[6,7]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm7 = ymm10[0,1],ymm15[2,3],ymm10[4,5,6,7]
; AVX2-FCP-NEXT: vshufps {{.*#+}} ymm7 = ymm7[2,3,0,1,6,7,4,5]
; AVX2-FCP-NEXT: vbroadcastsd 560(%rdi), %ymm9
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm9 = ymm7[0,1,2,3,4,5],ymm9[6,7]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm7 = ymm11[0,1],ymm14[2,3],ymm11[4,5,6,7]
; AVX2-FCP-NEXT: vshufps {{.*#+}} ymm7 = ymm7[2,3,0,1,6,7,4,5]
; AVX2-FCP-NEXT: vbroadcastsd 752(%rdi), %ymm10
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm11 = ymm7[0,1,2,3,4,5],ymm10[6,7]
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
; AVX2-FCP-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm7 # 32-byte Folded Reload
; AVX2-FCP-NEXT: # ymm7 = ymm6[0,1],mem[2,3],ymm6[4,5,6,7]
; AVX2-FCP-NEXT: vshufps {{.*#+}} ymm7 = ymm7[2,3,0,1,6,7,4,5]
; AVX2-FCP-NEXT: vbroadcastsd 80(%rdi), %ymm10
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm12 = ymm7[0,1,2,3,4,5],ymm10[6,7]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1],ymm4[2,3],ymm5[4,5,6,7]
; AVX2-FCP-NEXT: vshufps {{.*#+}} ymm4 = ymm4[2,3,0,1,6,7,4,5]
; AVX2-FCP-NEXT: vbroadcastsd 272(%rdi), %ymm5
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm13 = ymm4[0,1,2,3,4,5],ymm5[6,7]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3],ymm3[4,5,6,7]
; AVX2-FCP-NEXT: vshufps {{.*#+}} ymm2 = ymm2[2,3,0,1,6,7,4,5]
; AVX2-FCP-NEXT: vbroadcastsd 464(%rdi), %ymm3
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm15 = ymm2[0,1,2,3,4,5],ymm3[6,7]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5]
; AVX2-FCP-NEXT: vbroadcastsd 656(%rdi), %ymm1
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm2 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FCP-NEXT: vmovaps 112(%rdi), %xmm0
; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm14 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT: vmovaps 16(%rdi), %xmm0
; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm3 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT: vmovaps 304(%rdi), %xmm0
; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm1 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT: vmovaps 208(%rdi), %xmm0
; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm4 = mem[0,1,0,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm4[4,5,6,7]
; AVX2-FCP-NEXT: vmovaps 496(%rdi), %xmm4
; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm4 = xmm4[0,1],mem[2,3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm5 = mem[0,1,0,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5,6,7]
; AVX2-FCP-NEXT: vmovaps 400(%rdi), %xmm5
; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm5 = xmm5[0,1],mem[2,3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm7 = mem[0,1,0,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm7[4,5,6,7]
; AVX2-FCP-NEXT: vmovaps 688(%rdi), %xmm7
; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm7 = xmm7[0,1],mem[2,3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm10 = mem[0,1,0,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm10[4,5,6,7]
; AVX2-FCP-NEXT: vmovaps 592(%rdi), %xmm10
; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm10 = xmm10[0,1],mem[2,3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm6 = mem[0,1,0,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm6 = ymm10[0,1,2,3],ymm6[4,5,6,7]
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm10, 192(%rsi)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm10, 128(%rsi)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm10, 64(%rsi)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm10, (%rsi)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm10, 224(%rsi)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm10, 160(%rsi)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm10, 96(%rsi)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm10, 32(%rsi)
; AVX2-FCP-NEXT: vmovaps %ymm2, 192(%rdx)
; AVX2-FCP-NEXT: vmovaps %ymm15, 128(%rdx)
; AVX2-FCP-NEXT: vmovaps %ymm13, 64(%rdx)
; AVX2-FCP-NEXT: vmovaps %ymm12, (%rdx)
; AVX2-FCP-NEXT: vmovaps %ymm11, 224(%rdx)
; AVX2-FCP-NEXT: vmovaps %ymm9, 160(%rdx)
; AVX2-FCP-NEXT: vmovaps %ymm8, 96(%rdx)
; AVX2-FCP-NEXT: vmovups (%rsp), %ymm2 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm2, 32(%rdx)
; AVX2-FCP-NEXT: vmovaps %ymm6, 192(%rcx)
; AVX2-FCP-NEXT: vmovaps %ymm7, 224(%rcx)
; AVX2-FCP-NEXT: vmovaps %ymm5, 128(%rcx)
; AVX2-FCP-NEXT: vmovaps %ymm4, 160(%rcx)
; AVX2-FCP-NEXT: vmovaps %ymm0, 64(%rcx)
; AVX2-FCP-NEXT: vmovaps %ymm1, 96(%rcx)
; AVX2-FCP-NEXT: vmovaps %ymm3, (%rcx)
; AVX2-FCP-NEXT: vmovaps %ymm14, 32(%rcx)
; AVX2-FCP-NEXT: addq $232, %rsp
; AVX2-FCP-NEXT: vzeroupper
; AVX2-FCP-NEXT: retq
;
; AVX512-LABEL: load_i64_stride3_vf32:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovdqa64 704(%rdi), %zmm4
; AVX512-NEXT: vmovdqa64 640(%rdi), %zmm0
; AVX512-NEXT: vmovdqa64 576(%rdi), %zmm5
; AVX512-NEXT: vmovdqa64 512(%rdi), %zmm6
; AVX512-NEXT: vmovdqa64 448(%rdi), %zmm2
; AVX512-NEXT: vmovdqa64 384(%rdi), %zmm7
; AVX512-NEXT: vmovdqa64 320(%rdi), %zmm8
; AVX512-NEXT: vmovdqa64 256(%rdi), %zmm1
; AVX512-NEXT: vmovdqa64 (%rdi), %zmm9
; AVX512-NEXT: vmovdqa64 64(%rdi), %zmm3
; AVX512-NEXT: vmovdqa64 128(%rdi), %zmm10
; AVX512-NEXT: vmovdqa64 192(%rdi), %zmm11
; AVX512-NEXT: vpmovsxbq {{.*#+}} zmm12 = [0,3,6,9,12,15,0,0]
; AVX512-NEXT: vmovdqa64 %zmm11, %zmm13
; AVX512-NEXT: vpermt2q %zmm1, %zmm12, %zmm13
; AVX512-NEXT: vpmovsxbq {{.*#+}} zmm14 = [0,1,2,3,4,5,10,13]
; AVX512-NEXT: vpermt2q %zmm8, %zmm14, %zmm13
; AVX512-NEXT: vmovdqa64 %zmm7, %zmm15
; AVX512-NEXT: vpermt2q %zmm2, %zmm12, %zmm15
; AVX512-NEXT: vpermt2q %zmm6, %zmm14, %zmm15
; AVX512-NEXT: vmovdqa64 %zmm5, %zmm16
; AVX512-NEXT: vpermt2q %zmm0, %zmm12, %zmm16
; AVX512-NEXT: vpermt2q %zmm4, %zmm14, %zmm16
; AVX512-NEXT: vpermi2q %zmm3, %zmm9, %zmm12
; AVX512-NEXT: vpermt2q %zmm10, %zmm14, %zmm12
; AVX512-NEXT: vpmovsxbq {{.*#+}} zmm14 = [1,4,7,10,13,0,0,0]
; AVX512-NEXT: vmovdqa64 %zmm7, %zmm17
; AVX512-NEXT: vpermt2q %zmm2, %zmm14, %zmm17
; AVX512-NEXT: vpmovsxbq {{.*#+}} zmm18 = [0,1,2,3,4,8,11,14]
; AVX512-NEXT: vpermt2q %zmm6, %zmm18, %zmm17
; AVX512-NEXT: vmovdqa64 %zmm11, %zmm19
; AVX512-NEXT: vpermt2q %zmm1, %zmm14, %zmm19
; AVX512-NEXT: vpermt2q %zmm8, %zmm18, %zmm19
; AVX512-NEXT: vmovdqa64 %zmm5, %zmm20
; AVX512-NEXT: vpermt2q %zmm0, %zmm14, %zmm20
; AVX512-NEXT: vpermt2q %zmm4, %zmm18, %zmm20
; AVX512-NEXT: vpermi2q %zmm3, %zmm9, %zmm14
; AVX512-NEXT: vpermt2q %zmm10, %zmm18, %zmm14
; AVX512-NEXT: vpmovsxbq {{.*#+}} zmm18 = [10,13,0,3,6,0,0,0]
; AVX512-NEXT: vpermt2q %zmm11, %zmm18, %zmm1
; AVX512-NEXT: vpmovsxbq {{.*#+}} zmm11 = [0,1,2,3,4,9,12,15]
; AVX512-NEXT: vpermt2q %zmm8, %zmm11, %zmm1
; AVX512-NEXT: vpermt2q %zmm5, %zmm18, %zmm0
; AVX512-NEXT: vpermt2q %zmm4, %zmm11, %zmm0
; AVX512-NEXT: vpermt2q %zmm7, %zmm18, %zmm2
; AVX512-NEXT: vpermt2q %zmm6, %zmm11, %zmm2
; AVX512-NEXT: vpermt2q %zmm9, %zmm18, %zmm3
; AVX512-NEXT: vpermt2q %zmm10, %zmm11, %zmm3
; AVX512-NEXT: vmovdqa64 %zmm16, 192(%rsi)
; AVX512-NEXT: vmovdqa64 %zmm15, 128(%rsi)
; AVX512-NEXT: vmovdqa64 %zmm13, 64(%rsi)
; AVX512-NEXT: vmovdqa64 %zmm12, (%rsi)
; AVX512-NEXT: vmovdqa64 %zmm20, 192(%rdx)
; AVX512-NEXT: vmovdqa64 %zmm14, (%rdx)
; AVX512-NEXT: vmovdqa64 %zmm19, 64(%rdx)
; AVX512-NEXT: vmovdqa64 %zmm17, 128(%rdx)
; AVX512-NEXT: vmovdqa64 %zmm2, 128(%rcx)
; AVX512-NEXT: vmovdqa64 %zmm0, 192(%rcx)
; AVX512-NEXT: vmovdqa64 %zmm3, (%rcx)
; AVX512-NEXT: vmovdqa64 %zmm1, 64(%rcx)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
;
; AVX512-FCP-LABEL: load_i64_stride3_vf32:
; AVX512-FCP: # %bb.0:
; AVX512-FCP-NEXT: vmovdqa64 704(%rdi), %zmm4
; AVX512-FCP-NEXT: vmovdqa64 640(%rdi), %zmm0
; AVX512-FCP-NEXT: vmovdqa64 576(%rdi), %zmm5
; AVX512-FCP-NEXT: vmovdqa64 512(%rdi), %zmm6
; AVX512-FCP-NEXT: vmovdqa64 448(%rdi), %zmm2
; AVX512-FCP-NEXT: vmovdqa64 384(%rdi), %zmm7
; AVX512-FCP-NEXT: vmovdqa64 320(%rdi), %zmm8
; AVX512-FCP-NEXT: vmovdqa64 256(%rdi), %zmm1
; AVX512-FCP-NEXT: vmovdqa64 (%rdi), %zmm9
; AVX512-FCP-NEXT: vmovdqa64 64(%rdi), %zmm3
; AVX512-FCP-NEXT: vmovdqa64 128(%rdi), %zmm10
; AVX512-FCP-NEXT: vmovdqa64 192(%rdi), %zmm11
; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm12 = [0,3,6,9,12,15,0,0]
; AVX512-FCP-NEXT: vmovdqa64 %zmm11, %zmm13
; AVX512-FCP-NEXT: vpermt2q %zmm1, %zmm12, %zmm13
; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm14 = [0,1,2,3,4,5,10,13]
; AVX512-FCP-NEXT: vpermt2q %zmm8, %zmm14, %zmm13
; AVX512-FCP-NEXT: vmovdqa64 %zmm7, %zmm15
; AVX512-FCP-NEXT: vpermt2q %zmm2, %zmm12, %zmm15
; AVX512-FCP-NEXT: vpermt2q %zmm6, %zmm14, %zmm15
; AVX512-FCP-NEXT: vmovdqa64 %zmm5, %zmm16
; AVX512-FCP-NEXT: vpermt2q %zmm0, %zmm12, %zmm16
; AVX512-FCP-NEXT: vpermt2q %zmm4, %zmm14, %zmm16
; AVX512-FCP-NEXT: vpermi2q %zmm3, %zmm9, %zmm12
; AVX512-FCP-NEXT: vpermt2q %zmm10, %zmm14, %zmm12
; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm14 = [1,4,7,10,13,0,0,0]
; AVX512-FCP-NEXT: vmovdqa64 %zmm7, %zmm17
; AVX512-FCP-NEXT: vpermt2q %zmm2, %zmm14, %zmm17
; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm18 = [0,1,2,3,4,8,11,14]
; AVX512-FCP-NEXT: vpermt2q %zmm6, %zmm18, %zmm17
; AVX512-FCP-NEXT: vmovdqa64 %zmm11, %zmm19
; AVX512-FCP-NEXT: vpermt2q %zmm1, %zmm14, %zmm19
; AVX512-FCP-NEXT: vpermt2q %zmm8, %zmm18, %zmm19
; AVX512-FCP-NEXT: vmovdqa64 %zmm5, %zmm20
; AVX512-FCP-NEXT: vpermt2q %zmm0, %zmm14, %zmm20
; AVX512-FCP-NEXT: vpermt2q %zmm4, %zmm18, %zmm20
; AVX512-FCP-NEXT: vpermi2q %zmm3, %zmm9, %zmm14
; AVX512-FCP-NEXT: vpermt2q %zmm10, %zmm18, %zmm14
; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm18 = [10,13,0,3,6,0,0,0]
; AVX512-FCP-NEXT: vpermt2q %zmm11, %zmm18, %zmm1
; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm11 = [0,1,2,3,4,9,12,15]
; AVX512-FCP-NEXT: vpermt2q %zmm8, %zmm11, %zmm1
; AVX512-FCP-NEXT: vpermt2q %zmm5, %zmm18, %zmm0
; AVX512-FCP-NEXT: vpermt2q %zmm4, %zmm11, %zmm0
; AVX512-FCP-NEXT: vpermt2q %zmm7, %zmm18, %zmm2
; AVX512-FCP-NEXT: vpermt2q %zmm6, %zmm11, %zmm2
; AVX512-FCP-NEXT: vpermt2q %zmm9, %zmm18, %zmm3
; AVX512-FCP-NEXT: vpermt2q %zmm10, %zmm11, %zmm3
; AVX512-FCP-NEXT: vmovdqa64 %zmm16, 192(%rsi)
; AVX512-FCP-NEXT: vmovdqa64 %zmm15, 128(%rsi)
; AVX512-FCP-NEXT: vmovdqa64 %zmm13, 64(%rsi)
; AVX512-FCP-NEXT: vmovdqa64 %zmm12, (%rsi)
; AVX512-FCP-NEXT: vmovdqa64 %zmm20, 192(%rdx)
; AVX512-FCP-NEXT: vmovdqa64 %zmm14, (%rdx)
; AVX512-FCP-NEXT: vmovdqa64 %zmm19, 64(%rdx)
; AVX512-FCP-NEXT: vmovdqa64 %zmm17, 128(%rdx)
; AVX512-FCP-NEXT: vmovdqa64 %zmm2, 128(%rcx)
; AVX512-FCP-NEXT: vmovdqa64 %zmm0, 192(%rcx)
; AVX512-FCP-NEXT: vmovdqa64 %zmm3, (%rcx)
; AVX512-FCP-NEXT: vmovdqa64 %zmm1, 64(%rcx)
; AVX512-FCP-NEXT: vzeroupper
; AVX512-FCP-NEXT: retq
;
; AVX512DQ-LABEL: load_i64_stride3_vf32:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovdqa64 704(%rdi), %zmm4
; AVX512DQ-NEXT: vmovdqa64 640(%rdi), %zmm0
; AVX512DQ-NEXT: vmovdqa64 576(%rdi), %zmm5
; AVX512DQ-NEXT: vmovdqa64 512(%rdi), %zmm6
; AVX512DQ-NEXT: vmovdqa64 448(%rdi), %zmm2
; AVX512DQ-NEXT: vmovdqa64 384(%rdi), %zmm7
; AVX512DQ-NEXT: vmovdqa64 320(%rdi), %zmm8
; AVX512DQ-NEXT: vmovdqa64 256(%rdi), %zmm1
; AVX512DQ-NEXT: vmovdqa64 (%rdi), %zmm9
; AVX512DQ-NEXT: vmovdqa64 64(%rdi), %zmm3
; AVX512DQ-NEXT: vmovdqa64 128(%rdi), %zmm10
; AVX512DQ-NEXT: vmovdqa64 192(%rdi), %zmm11
; AVX512DQ-NEXT: vpmovsxbq {{.*#+}} zmm12 = [0,3,6,9,12,15,0,0]
; AVX512DQ-NEXT: vmovdqa64 %zmm11, %zmm13
; AVX512DQ-NEXT: vpermt2q %zmm1, %zmm12, %zmm13
; AVX512DQ-NEXT: vpmovsxbq {{.*#+}} zmm14 = [0,1,2,3,4,5,10,13]
; AVX512DQ-NEXT: vpermt2q %zmm8, %zmm14, %zmm13
; AVX512DQ-NEXT: vmovdqa64 %zmm7, %zmm15
; AVX512DQ-NEXT: vpermt2q %zmm2, %zmm12, %zmm15
; AVX512DQ-NEXT: vpermt2q %zmm6, %zmm14, %zmm15
; AVX512DQ-NEXT: vmovdqa64 %zmm5, %zmm16
; AVX512DQ-NEXT: vpermt2q %zmm0, %zmm12, %zmm16
; AVX512DQ-NEXT: vpermt2q %zmm4, %zmm14, %zmm16
; AVX512DQ-NEXT: vpermi2q %zmm3, %zmm9, %zmm12
; AVX512DQ-NEXT: vpermt2q %zmm10, %zmm14, %zmm12
; AVX512DQ-NEXT: vpmovsxbq {{.*#+}} zmm14 = [1,4,7,10,13,0,0,0]
; AVX512DQ-NEXT: vmovdqa64 %zmm7, %zmm17
; AVX512DQ-NEXT: vpermt2q %zmm2, %zmm14, %zmm17
; AVX512DQ-NEXT: vpmovsxbq {{.*#+}} zmm18 = [0,1,2,3,4,8,11,14]
; AVX512DQ-NEXT: vpermt2q %zmm6, %zmm18, %zmm17
; AVX512DQ-NEXT: vmovdqa64 %zmm11, %zmm19
; AVX512DQ-NEXT: vpermt2q %zmm1, %zmm14, %zmm19
; AVX512DQ-NEXT: vpermt2q %zmm8, %zmm18, %zmm19
; AVX512DQ-NEXT: vmovdqa64 %zmm5, %zmm20
; AVX512DQ-NEXT: vpermt2q %zmm0, %zmm14, %zmm20
; AVX512DQ-NEXT: vpermt2q %zmm4, %zmm18, %zmm20
; AVX512DQ-NEXT: vpermi2q %zmm3, %zmm9, %zmm14
; AVX512DQ-NEXT: vpermt2q %zmm10, %zmm18, %zmm14
; AVX512DQ-NEXT: vpmovsxbq {{.*#+}} zmm18 = [10,13,0,3,6,0,0,0]
; AVX512DQ-NEXT: vpermt2q %zmm11, %zmm18, %zmm1
; AVX512DQ-NEXT: vpmovsxbq {{.*#+}} zmm11 = [0,1,2,3,4,9,12,15]
; AVX512DQ-NEXT: vpermt2q %zmm8, %zmm11, %zmm1
; AVX512DQ-NEXT: vpermt2q %zmm5, %zmm18, %zmm0
; AVX512DQ-NEXT: vpermt2q %zmm4, %zmm11, %zmm0
; AVX512DQ-NEXT: vpermt2q %zmm7, %zmm18, %zmm2
; AVX512DQ-NEXT: vpermt2q %zmm6, %zmm11, %zmm2
; AVX512DQ-NEXT: vpermt2q %zmm9, %zmm18, %zmm3
; AVX512DQ-NEXT: vpermt2q %zmm10, %zmm11, %zmm3
; AVX512DQ-NEXT: vmovdqa64 %zmm16, 192(%rsi)
; AVX512DQ-NEXT: vmovdqa64 %zmm15, 128(%rsi)
; AVX512DQ-NEXT: vmovdqa64 %zmm13, 64(%rsi)
; AVX512DQ-NEXT: vmovdqa64 %zmm12, (%rsi)
; AVX512DQ-NEXT: vmovdqa64 %zmm20, 192(%rdx)
; AVX512DQ-NEXT: vmovdqa64 %zmm14, (%rdx)
; AVX512DQ-NEXT: vmovdqa64 %zmm19, 64(%rdx)
; AVX512DQ-NEXT: vmovdqa64 %zmm17, 128(%rdx)
; AVX512DQ-NEXT: vmovdqa64 %zmm2, 128(%rcx)
; AVX512DQ-NEXT: vmovdqa64 %zmm0, 192(%rcx)
; AVX512DQ-NEXT: vmovdqa64 %zmm3, (%rcx)
; AVX512DQ-NEXT: vmovdqa64 %zmm1, 64(%rcx)
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512DQ-FCP-LABEL: load_i64_stride3_vf32:
; AVX512DQ-FCP: # %bb.0:
; AVX512DQ-FCP-NEXT: vmovdqa64 704(%rdi), %zmm4
; AVX512DQ-FCP-NEXT: vmovdqa64 640(%rdi), %zmm0
; AVX512DQ-FCP-NEXT: vmovdqa64 576(%rdi), %zmm5
; AVX512DQ-FCP-NEXT: vmovdqa64 512(%rdi), %zmm6
; AVX512DQ-FCP-NEXT: vmovdqa64 448(%rdi), %zmm2
; AVX512DQ-FCP-NEXT: vmovdqa64 384(%rdi), %zmm7
; AVX512DQ-FCP-NEXT: vmovdqa64 320(%rdi), %zmm8
; AVX512DQ-FCP-NEXT: vmovdqa64 256(%rdi), %zmm1
; AVX512DQ-FCP-NEXT: vmovdqa64 (%rdi), %zmm9
; AVX512DQ-FCP-NEXT: vmovdqa64 64(%rdi), %zmm3
; AVX512DQ-FCP-NEXT: vmovdqa64 128(%rdi), %zmm10
; AVX512DQ-FCP-NEXT: vmovdqa64 192(%rdi), %zmm11
; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm12 = [0,3,6,9,12,15,0,0]
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm11, %zmm13
; AVX512DQ-FCP-NEXT: vpermt2q %zmm1, %zmm12, %zmm13
; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm14 = [0,1,2,3,4,5,10,13]
; AVX512DQ-FCP-NEXT: vpermt2q %zmm8, %zmm14, %zmm13
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm7, %zmm15
; AVX512DQ-FCP-NEXT: vpermt2q %zmm2, %zmm12, %zmm15
; AVX512DQ-FCP-NEXT: vpermt2q %zmm6, %zmm14, %zmm15
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm5, %zmm16
; AVX512DQ-FCP-NEXT: vpermt2q %zmm0, %zmm12, %zmm16
; AVX512DQ-FCP-NEXT: vpermt2q %zmm4, %zmm14, %zmm16
; AVX512DQ-FCP-NEXT: vpermi2q %zmm3, %zmm9, %zmm12
; AVX512DQ-FCP-NEXT: vpermt2q %zmm10, %zmm14, %zmm12
; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm14 = [1,4,7,10,13,0,0,0]
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm7, %zmm17
; AVX512DQ-FCP-NEXT: vpermt2q %zmm2, %zmm14, %zmm17
; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm18 = [0,1,2,3,4,8,11,14]
; AVX512DQ-FCP-NEXT: vpermt2q %zmm6, %zmm18, %zmm17
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm11, %zmm19
; AVX512DQ-FCP-NEXT: vpermt2q %zmm1, %zmm14, %zmm19
; AVX512DQ-FCP-NEXT: vpermt2q %zmm8, %zmm18, %zmm19
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm5, %zmm20
; AVX512DQ-FCP-NEXT: vpermt2q %zmm0, %zmm14, %zmm20
; AVX512DQ-FCP-NEXT: vpermt2q %zmm4, %zmm18, %zmm20
; AVX512DQ-FCP-NEXT: vpermi2q %zmm3, %zmm9, %zmm14
; AVX512DQ-FCP-NEXT: vpermt2q %zmm10, %zmm18, %zmm14
; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm18 = [10,13,0,3,6,0,0,0]
; AVX512DQ-FCP-NEXT: vpermt2q %zmm11, %zmm18, %zmm1
; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm11 = [0,1,2,3,4,9,12,15]
; AVX512DQ-FCP-NEXT: vpermt2q %zmm8, %zmm11, %zmm1
; AVX512DQ-FCP-NEXT: vpermt2q %zmm5, %zmm18, %zmm0
; AVX512DQ-FCP-NEXT: vpermt2q %zmm4, %zmm11, %zmm0
; AVX512DQ-FCP-NEXT: vpermt2q %zmm7, %zmm18, %zmm2
; AVX512DQ-FCP-NEXT: vpermt2q %zmm6, %zmm11, %zmm2
; AVX512DQ-FCP-NEXT: vpermt2q %zmm9, %zmm18, %zmm3
; AVX512DQ-FCP-NEXT: vpermt2q %zmm10, %zmm11, %zmm3
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm16, 192(%rsi)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm15, 128(%rsi)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm13, 64(%rsi)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm12, (%rsi)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm20, 192(%rdx)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm14, (%rdx)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm19, 64(%rdx)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm17, 128(%rdx)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm2, 128(%rcx)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm0, 192(%rcx)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm3, (%rcx)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm1, 64(%rcx)
; AVX512DQ-FCP-NEXT: vzeroupper
; AVX512DQ-FCP-NEXT: retq
;
; AVX512BW-LABEL: load_i64_stride3_vf32:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 704(%rdi), %zmm4
; AVX512BW-NEXT: vmovdqa64 640(%rdi), %zmm0
; AVX512BW-NEXT: vmovdqa64 576(%rdi), %zmm5
; AVX512BW-NEXT: vmovdqa64 512(%rdi), %zmm6
; AVX512BW-NEXT: vmovdqa64 448(%rdi), %zmm2
; AVX512BW-NEXT: vmovdqa64 384(%rdi), %zmm7
; AVX512BW-NEXT: vmovdqa64 320(%rdi), %zmm8
; AVX512BW-NEXT: vmovdqa64 256(%rdi), %zmm1
; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm9
; AVX512BW-NEXT: vmovdqa64 64(%rdi), %zmm3
; AVX512BW-NEXT: vmovdqa64 128(%rdi), %zmm10
; AVX512BW-NEXT: vmovdqa64 192(%rdi), %zmm11
; AVX512BW-NEXT: vpmovsxbq {{.*#+}} zmm12 = [0,3,6,9,12,15,0,0]
; AVX512BW-NEXT: vmovdqa64 %zmm11, %zmm13
; AVX512BW-NEXT: vpermt2q %zmm1, %zmm12, %zmm13
; AVX512BW-NEXT: vpmovsxbq {{.*#+}} zmm14 = [0,1,2,3,4,5,10,13]
; AVX512BW-NEXT: vpermt2q %zmm8, %zmm14, %zmm13
; AVX512BW-NEXT: vmovdqa64 %zmm7, %zmm15
; AVX512BW-NEXT: vpermt2q %zmm2, %zmm12, %zmm15
; AVX512BW-NEXT: vpermt2q %zmm6, %zmm14, %zmm15
; AVX512BW-NEXT: vmovdqa64 %zmm5, %zmm16
; AVX512BW-NEXT: vpermt2q %zmm0, %zmm12, %zmm16
; AVX512BW-NEXT: vpermt2q %zmm4, %zmm14, %zmm16
; AVX512BW-NEXT: vpermi2q %zmm3, %zmm9, %zmm12
; AVX512BW-NEXT: vpermt2q %zmm10, %zmm14, %zmm12
; AVX512BW-NEXT: vpmovsxbq {{.*#+}} zmm14 = [1,4,7,10,13,0,0,0]
; AVX512BW-NEXT: vmovdqa64 %zmm7, %zmm17
; AVX512BW-NEXT: vpermt2q %zmm2, %zmm14, %zmm17
; AVX512BW-NEXT: vpmovsxbq {{.*#+}} zmm18 = [0,1,2,3,4,8,11,14]
; AVX512BW-NEXT: vpermt2q %zmm6, %zmm18, %zmm17
; AVX512BW-NEXT: vmovdqa64 %zmm11, %zmm19
; AVX512BW-NEXT: vpermt2q %zmm1, %zmm14, %zmm19
; AVX512BW-NEXT: vpermt2q %zmm8, %zmm18, %zmm19
; AVX512BW-NEXT: vmovdqa64 %zmm5, %zmm20
; AVX512BW-NEXT: vpermt2q %zmm0, %zmm14, %zmm20
; AVX512BW-NEXT: vpermt2q %zmm4, %zmm18, %zmm20
; AVX512BW-NEXT: vpermi2q %zmm3, %zmm9, %zmm14
; AVX512BW-NEXT: vpermt2q %zmm10, %zmm18, %zmm14
; AVX512BW-NEXT: vpmovsxbq {{.*#+}} zmm18 = [10,13,0,3,6,0,0,0]
; AVX512BW-NEXT: vpermt2q %zmm11, %zmm18, %zmm1
; AVX512BW-NEXT: vpmovsxbq {{.*#+}} zmm11 = [0,1,2,3,4,9,12,15]
; AVX512BW-NEXT: vpermt2q %zmm8, %zmm11, %zmm1
; AVX512BW-NEXT: vpermt2q %zmm5, %zmm18, %zmm0
; AVX512BW-NEXT: vpermt2q %zmm4, %zmm11, %zmm0
; AVX512BW-NEXT: vpermt2q %zmm7, %zmm18, %zmm2
; AVX512BW-NEXT: vpermt2q %zmm6, %zmm11, %zmm2
; AVX512BW-NEXT: vpermt2q %zmm9, %zmm18, %zmm3
; AVX512BW-NEXT: vpermt2q %zmm10, %zmm11, %zmm3
; AVX512BW-NEXT: vmovdqa64 %zmm16, 192(%rsi)
; AVX512BW-NEXT: vmovdqa64 %zmm15, 128(%rsi)
; AVX512BW-NEXT: vmovdqa64 %zmm13, 64(%rsi)
; AVX512BW-NEXT: vmovdqa64 %zmm12, (%rsi)
; AVX512BW-NEXT: vmovdqa64 %zmm20, 192(%rdx)
; AVX512BW-NEXT: vmovdqa64 %zmm14, (%rdx)
; AVX512BW-NEXT: vmovdqa64 %zmm19, 64(%rdx)
; AVX512BW-NEXT: vmovdqa64 %zmm17, 128(%rdx)
; AVX512BW-NEXT: vmovdqa64 %zmm2, 128(%rcx)
; AVX512BW-NEXT: vmovdqa64 %zmm0, 192(%rcx)
; AVX512BW-NEXT: vmovdqa64 %zmm3, (%rcx)
; AVX512BW-NEXT: vmovdqa64 %zmm1, 64(%rcx)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BW-FCP-LABEL: load_i64_stride3_vf32:
; AVX512BW-FCP: # %bb.0:
; AVX512BW-FCP-NEXT: vmovdqa64 704(%rdi), %zmm4
; AVX512BW-FCP-NEXT: vmovdqa64 640(%rdi), %zmm0
; AVX512BW-FCP-NEXT: vmovdqa64 576(%rdi), %zmm5
; AVX512BW-FCP-NEXT: vmovdqa64 512(%rdi), %zmm6
; AVX512BW-FCP-NEXT: vmovdqa64 448(%rdi), %zmm2
; AVX512BW-FCP-NEXT: vmovdqa64 384(%rdi), %zmm7
; AVX512BW-FCP-NEXT: vmovdqa64 320(%rdi), %zmm8
; AVX512BW-FCP-NEXT: vmovdqa64 256(%rdi), %zmm1
; AVX512BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm9
; AVX512BW-FCP-NEXT: vmovdqa64 64(%rdi), %zmm3
; AVX512BW-FCP-NEXT: vmovdqa64 128(%rdi), %zmm10
; AVX512BW-FCP-NEXT: vmovdqa64 192(%rdi), %zmm11
; AVX512BW-FCP-NEXT: vpmovsxbq {{.*#+}} zmm12 = [0,3,6,9,12,15,0,0]
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm11, %zmm13
; AVX512BW-FCP-NEXT: vpermt2q %zmm1, %zmm12, %zmm13
; AVX512BW-FCP-NEXT: vpmovsxbq {{.*#+}} zmm14 = [0,1,2,3,4,5,10,13]
; AVX512BW-FCP-NEXT: vpermt2q %zmm8, %zmm14, %zmm13
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm7, %zmm15
; AVX512BW-FCP-NEXT: vpermt2q %zmm2, %zmm12, %zmm15
; AVX512BW-FCP-NEXT: vpermt2q %zmm6, %zmm14, %zmm15
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm5, %zmm16
; AVX512BW-FCP-NEXT: vpermt2q %zmm0, %zmm12, %zmm16
; AVX512BW-FCP-NEXT: vpermt2q %zmm4, %zmm14, %zmm16
; AVX512BW-FCP-NEXT: vpermi2q %zmm3, %zmm9, %zmm12
; AVX512BW-FCP-NEXT: vpermt2q %zmm10, %zmm14, %zmm12
; AVX512BW-FCP-NEXT: vpmovsxbq {{.*#+}} zmm14 = [1,4,7,10,13,0,0,0]
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm7, %zmm17
; AVX512BW-FCP-NEXT: vpermt2q %zmm2, %zmm14, %zmm17
; AVX512BW-FCP-NEXT: vpmovsxbq {{.*#+}} zmm18 = [0,1,2,3,4,8,11,14]
; AVX512BW-FCP-NEXT: vpermt2q %zmm6, %zmm18, %zmm17
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm11, %zmm19
; AVX512BW-FCP-NEXT: vpermt2q %zmm1, %zmm14, %zmm19
; AVX512BW-FCP-NEXT: vpermt2q %zmm8, %zmm18, %zmm19
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm5, %zmm20
; AVX512BW-FCP-NEXT: vpermt2q %zmm0, %zmm14, %zmm20
; AVX512BW-FCP-NEXT: vpermt2q %zmm4, %zmm18, %zmm20
; AVX512BW-FCP-NEXT: vpermi2q %zmm3, %zmm9, %zmm14
; AVX512BW-FCP-NEXT: vpermt2q %zmm10, %zmm18, %zmm14
; AVX512BW-FCP-NEXT: vpmovsxbq {{.*#+}} zmm18 = [10,13,0,3,6,0,0,0]
; AVX512BW-FCP-NEXT: vpermt2q %zmm11, %zmm18, %zmm1
; AVX512BW-FCP-NEXT: vpmovsxbq {{.*#+}} zmm11 = [0,1,2,3,4,9,12,15]
; AVX512BW-FCP-NEXT: vpermt2q %zmm8, %zmm11, %zmm1
; AVX512BW-FCP-NEXT: vpermt2q %zmm5, %zmm18, %zmm0
; AVX512BW-FCP-NEXT: vpermt2q %zmm4, %zmm11, %zmm0
; AVX512BW-FCP-NEXT: vpermt2q %zmm7, %zmm18, %zmm2
; AVX512BW-FCP-NEXT: vpermt2q %zmm6, %zmm11, %zmm2
; AVX512BW-FCP-NEXT: vpermt2q %zmm9, %zmm18, %zmm3
; AVX512BW-FCP-NEXT: vpermt2q %zmm10, %zmm11, %zmm3
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm16, 192(%rsi)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm15, 128(%rsi)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm13, 64(%rsi)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm12, (%rsi)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm20, 192(%rdx)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm14, (%rdx)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm19, 64(%rdx)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm17, 128(%rdx)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm2, 128(%rcx)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm0, 192(%rcx)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm3, (%rcx)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm1, 64(%rcx)
; AVX512BW-FCP-NEXT: vzeroupper
; AVX512BW-FCP-NEXT: retq
;
; AVX512DQ-BW-LABEL: load_i64_stride3_vf32:
; AVX512DQ-BW: # %bb.0:
; AVX512DQ-BW-NEXT: vmovdqa64 704(%rdi), %zmm4
; AVX512DQ-BW-NEXT: vmovdqa64 640(%rdi), %zmm0
; AVX512DQ-BW-NEXT: vmovdqa64 576(%rdi), %zmm5
; AVX512DQ-BW-NEXT: vmovdqa64 512(%rdi), %zmm6
; AVX512DQ-BW-NEXT: vmovdqa64 448(%rdi), %zmm2
; AVX512DQ-BW-NEXT: vmovdqa64 384(%rdi), %zmm7
; AVX512DQ-BW-NEXT: vmovdqa64 320(%rdi), %zmm8
; AVX512DQ-BW-NEXT: vmovdqa64 256(%rdi), %zmm1
; AVX512DQ-BW-NEXT: vmovdqa64 (%rdi), %zmm9
; AVX512DQ-BW-NEXT: vmovdqa64 64(%rdi), %zmm3
; AVX512DQ-BW-NEXT: vmovdqa64 128(%rdi), %zmm10
; AVX512DQ-BW-NEXT: vmovdqa64 192(%rdi), %zmm11
; AVX512DQ-BW-NEXT: vpmovsxbq {{.*#+}} zmm12 = [0,3,6,9,12,15,0,0]
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm11, %zmm13
; AVX512DQ-BW-NEXT: vpermt2q %zmm1, %zmm12, %zmm13
; AVX512DQ-BW-NEXT: vpmovsxbq {{.*#+}} zmm14 = [0,1,2,3,4,5,10,13]
; AVX512DQ-BW-NEXT: vpermt2q %zmm8, %zmm14, %zmm13
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm7, %zmm15
; AVX512DQ-BW-NEXT: vpermt2q %zmm2, %zmm12, %zmm15
; AVX512DQ-BW-NEXT: vpermt2q %zmm6, %zmm14, %zmm15
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm5, %zmm16
; AVX512DQ-BW-NEXT: vpermt2q %zmm0, %zmm12, %zmm16
; AVX512DQ-BW-NEXT: vpermt2q %zmm4, %zmm14, %zmm16
; AVX512DQ-BW-NEXT: vpermi2q %zmm3, %zmm9, %zmm12
; AVX512DQ-BW-NEXT: vpermt2q %zmm10, %zmm14, %zmm12
; AVX512DQ-BW-NEXT: vpmovsxbq {{.*#+}} zmm14 = [1,4,7,10,13,0,0,0]
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm7, %zmm17
; AVX512DQ-BW-NEXT: vpermt2q %zmm2, %zmm14, %zmm17
; AVX512DQ-BW-NEXT: vpmovsxbq {{.*#+}} zmm18 = [0,1,2,3,4,8,11,14]
; AVX512DQ-BW-NEXT: vpermt2q %zmm6, %zmm18, %zmm17
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm11, %zmm19
; AVX512DQ-BW-NEXT: vpermt2q %zmm1, %zmm14, %zmm19
; AVX512DQ-BW-NEXT: vpermt2q %zmm8, %zmm18, %zmm19
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm5, %zmm20
; AVX512DQ-BW-NEXT: vpermt2q %zmm0, %zmm14, %zmm20
; AVX512DQ-BW-NEXT: vpermt2q %zmm4, %zmm18, %zmm20
; AVX512DQ-BW-NEXT: vpermi2q %zmm3, %zmm9, %zmm14
; AVX512DQ-BW-NEXT: vpermt2q %zmm10, %zmm18, %zmm14
; AVX512DQ-BW-NEXT: vpmovsxbq {{.*#+}} zmm18 = [10,13,0,3,6,0,0,0]
; AVX512DQ-BW-NEXT: vpermt2q %zmm11, %zmm18, %zmm1
; AVX512DQ-BW-NEXT: vpmovsxbq {{.*#+}} zmm11 = [0,1,2,3,4,9,12,15]
; AVX512DQ-BW-NEXT: vpermt2q %zmm8, %zmm11, %zmm1
; AVX512DQ-BW-NEXT: vpermt2q %zmm5, %zmm18, %zmm0
; AVX512DQ-BW-NEXT: vpermt2q %zmm4, %zmm11, %zmm0
; AVX512DQ-BW-NEXT: vpermt2q %zmm7, %zmm18, %zmm2
; AVX512DQ-BW-NEXT: vpermt2q %zmm6, %zmm11, %zmm2
; AVX512DQ-BW-NEXT: vpermt2q %zmm9, %zmm18, %zmm3
; AVX512DQ-BW-NEXT: vpermt2q %zmm10, %zmm11, %zmm3
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm16, 192(%rsi)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm15, 128(%rsi)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm13, 64(%rsi)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm12, (%rsi)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm20, 192(%rdx)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm14, (%rdx)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm19, 64(%rdx)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm17, 128(%rdx)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm2, 128(%rcx)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm0, 192(%rcx)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm3, (%rcx)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm1, 64(%rcx)
; AVX512DQ-BW-NEXT: vzeroupper
; AVX512DQ-BW-NEXT: retq
;
; AVX512DQ-BW-FCP-LABEL: load_i64_stride3_vf32:
; AVX512DQ-BW-FCP: # %bb.0:
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 704(%rdi), %zmm4
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 640(%rdi), %zmm0
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 576(%rdi), %zmm5
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 512(%rdi), %zmm6
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 448(%rdi), %zmm2
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 384(%rdi), %zmm7
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 320(%rdi), %zmm8
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 256(%rdi), %zmm1
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm9
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 64(%rdi), %zmm3
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 128(%rdi), %zmm10
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 192(%rdi), %zmm11
; AVX512DQ-BW-FCP-NEXT: vpmovsxbq {{.*#+}} zmm12 = [0,3,6,9,12,15,0,0]
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm11, %zmm13
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm1, %zmm12, %zmm13
; AVX512DQ-BW-FCP-NEXT: vpmovsxbq {{.*#+}} zmm14 = [0,1,2,3,4,5,10,13]
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm8, %zmm14, %zmm13
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm7, %zmm15
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm2, %zmm12, %zmm15
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm6, %zmm14, %zmm15
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm5, %zmm16
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm0, %zmm12, %zmm16
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm4, %zmm14, %zmm16
; AVX512DQ-BW-FCP-NEXT: vpermi2q %zmm3, %zmm9, %zmm12
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm10, %zmm14, %zmm12
; AVX512DQ-BW-FCP-NEXT: vpmovsxbq {{.*#+}} zmm14 = [1,4,7,10,13,0,0,0]
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm7, %zmm17
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm2, %zmm14, %zmm17
; AVX512DQ-BW-FCP-NEXT: vpmovsxbq {{.*#+}} zmm18 = [0,1,2,3,4,8,11,14]
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm6, %zmm18, %zmm17
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm11, %zmm19
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm1, %zmm14, %zmm19
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm8, %zmm18, %zmm19
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm5, %zmm20
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm0, %zmm14, %zmm20
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm4, %zmm18, %zmm20
; AVX512DQ-BW-FCP-NEXT: vpermi2q %zmm3, %zmm9, %zmm14
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm10, %zmm18, %zmm14
; AVX512DQ-BW-FCP-NEXT: vpmovsxbq {{.*#+}} zmm18 = [10,13,0,3,6,0,0,0]
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm11, %zmm18, %zmm1
; AVX512DQ-BW-FCP-NEXT: vpmovsxbq {{.*#+}} zmm11 = [0,1,2,3,4,9,12,15]
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm8, %zmm11, %zmm1
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm5, %zmm18, %zmm0
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm4, %zmm11, %zmm0
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm7, %zmm18, %zmm2
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm6, %zmm11, %zmm2
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm9, %zmm18, %zmm3
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm10, %zmm11, %zmm3
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm16, 192(%rsi)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm15, 128(%rsi)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm13, 64(%rsi)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm12, (%rsi)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm20, 192(%rdx)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm14, (%rdx)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm19, 64(%rdx)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm17, 128(%rdx)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm2, 128(%rcx)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm0, 192(%rcx)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm3, (%rcx)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm1, 64(%rcx)
; AVX512DQ-BW-FCP-NEXT: vzeroupper
; AVX512DQ-BW-FCP-NEXT: retq
  %wide.vec = load <96 x i64>, ptr %in.vec, align 64
  %strided.vec0 = shufflevector <96 x i64> %wide.vec, <96 x i64> poison, <32 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21, i32 24, i32 27, i32 30, i32 33, i32 36, i32 39, i32 42, i32 45, i32 48, i32 51, i32 54, i32 57, i32 60, i32 63, i32 66, i32 69, i32 72, i32 75, i32 78, i32 81, i32 84, i32 87, i32 90, i32 93>
  %strided.vec1 = shufflevector <96 x i64> %wide.vec, <96 x i64> poison, <32 x i32> <i32 1, i32 4, i32 7, i32 10, i32 13, i32 16, i32 19, i32 22, i32 25, i32 28, i32 31, i32 34, i32 37, i32 40, i32 43, i32 46, i32 49, i32 52, i32 55, i32 58, i32 61, i32 64, i32 67, i32 70, i32 73, i32 76, i32 79, i32 82, i32 85, i32 88, i32 91, i32 94>
  %strided.vec2 = shufflevector <96 x i64> %wide.vec, <96 x i64> poison, <32 x i32> <i32 2, i32 5, i32 8, i32 11, i32 14, i32 17, i32 20, i32 23, i32 26, i32 29, i32 32, i32 35, i32 38, i32 41, i32 44, i32 47, i32 50, i32 53, i32 56, i32 59, i32 62, i32 65, i32 68, i32 71, i32 74, i32 77, i32 80, i32 83, i32 86, i32 89, i32 92, i32 95>
  store <32 x i64> %strided.vec0, ptr %out.vec0, align 64
  store <32 x i64> %strided.vec1, ptr %out.vec1, align 64
  store <32 x i64> %strided.vec2, ptr %out.vec2, align 64
  ret void
}

define void @load_i64_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2) nounwind {
; SSE-LABEL: load_i64_stride3_vf64:
; SSE: # %bb.0:
; SSE-NEXT: subq $1176, %rsp # imm = 0x498
; SSE-NEXT: movapd 272(%rdi), %xmm9
; SSE-NEXT: movapd 224(%rdi), %xmm8
; SSE-NEXT: movapd 176(%rdi), %xmm7
; SSE-NEXT: movapd 128(%rdi), %xmm6
; SSE-NEXT: movapd 80(%rdi), %xmm5
; SSE-NEXT: movapd 240(%rdi), %xmm10
; SSE-NEXT: movapd 256(%rdi), %xmm0
; SSE-NEXT: movapd 192(%rdi), %xmm11
; SSE-NEXT: movapd 208(%rdi), %xmm1
; SSE-NEXT: movapd 144(%rdi), %xmm12
; SSE-NEXT: movapd 160(%rdi), %xmm2
; SSE-NEXT: movapd 96(%rdi), %xmm13
; SSE-NEXT: movapd 112(%rdi), %xmm3
; SSE-NEXT: movapd 48(%rdi), %xmm14
; SSE-NEXT: movapd 64(%rdi), %xmm4
; SSE-NEXT: movapd %xmm4, %xmm15
; SSE-NEXT: movsd {{.*#+}} xmm15 = xmm14[0],xmm15[1]
; SSE-NEXT: movapd %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: shufpd {{.*#+}} xmm14 = xmm14[1],xmm5[0]
; SSE-NEXT: movapd %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm5 = xmm4[0],xmm5[1]
; SSE-NEXT: movapd %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd %xmm3, %xmm4
; SSE-NEXT: movsd {{.*#+}} xmm4 = xmm13[0],xmm4[1]
; SSE-NEXT: movapd %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: shufpd {{.*#+}} xmm13 = xmm13[1],xmm6[0]
; SSE-NEXT: movapd %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm6 = xmm3[0],xmm6[1]
; SSE-NEXT: movapd %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd %xmm2, %xmm3
; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm12[0],xmm3[1]
; SSE-NEXT: movapd %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: shufpd {{.*#+}} xmm12 = xmm12[1],xmm7[0]
; SSE-NEXT: movapd %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm7 = xmm2[0],xmm7[1]
; SSE-NEXT: movapd %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd %xmm1, %xmm2
; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm11[0],xmm2[1]
; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: shufpd {{.*#+}} xmm11 = xmm11[1],xmm8[0]
; SSE-NEXT: movapd %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm8 = xmm1[0],xmm8[1]
; SSE-NEXT: movapd %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm10[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: shufpd {{.*#+}} xmm10 = xmm10[1],xmm9[0]
; SSE-NEXT: movapd %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm9 = xmm0[0],xmm9[1]
; SSE-NEXT: movapd %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 288(%rdi), %xmm2
; SSE-NEXT: movapd 304(%rdi), %xmm0
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 320(%rdi), %xmm1
; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 336(%rdi), %xmm2
; SSE-NEXT: movapd 352(%rdi), %xmm0
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 368(%rdi), %xmm1
; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 384(%rdi), %xmm2
; SSE-NEXT: movapd 400(%rdi), %xmm0
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 416(%rdi), %xmm1
; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 432(%rdi), %xmm2
; SSE-NEXT: movapd 448(%rdi), %xmm0
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 464(%rdi), %xmm1
; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 480(%rdi), %xmm2
; SSE-NEXT: movapd 496(%rdi), %xmm0
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 512(%rdi), %xmm1
; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 528(%rdi), %xmm2
; SSE-NEXT: movapd 544(%rdi), %xmm0
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 560(%rdi), %xmm1
; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 576(%rdi), %xmm2
; SSE-NEXT: movapd 592(%rdi), %xmm0
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 608(%rdi), %xmm1
; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 624(%rdi), %xmm2
; SSE-NEXT: movapd 640(%rdi), %xmm0
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 656(%rdi), %xmm1
; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 672(%rdi), %xmm2
; SSE-NEXT: movapd 688(%rdi), %xmm0
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 704(%rdi), %xmm1
; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 720(%rdi), %xmm2
; SSE-NEXT: movapd 736(%rdi), %xmm0
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 752(%rdi), %xmm1
; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 768(%rdi), %xmm2
; SSE-NEXT: movapd 784(%rdi), %xmm0
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 800(%rdi), %xmm1
; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 816(%rdi), %xmm2
; SSE-NEXT: movapd 832(%rdi), %xmm0
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 848(%rdi), %xmm1
; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 864(%rdi), %xmm2
; SSE-NEXT: movapd 880(%rdi), %xmm0
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 896(%rdi), %xmm1
; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 912(%rdi), %xmm2
; SSE-NEXT: movapd 928(%rdi), %xmm0
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 944(%rdi), %xmm1
; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 960(%rdi), %xmm2
; SSE-NEXT: movapd 976(%rdi), %xmm0
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 992(%rdi), %xmm1
; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 1008(%rdi), %xmm2
; SSE-NEXT: movapd 1024(%rdi), %xmm0
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 1040(%rdi), %xmm1
; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 1056(%rdi), %xmm2
; SSE-NEXT: movapd 1072(%rdi), %xmm0
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 1088(%rdi), %xmm1
; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 1104(%rdi), %xmm2
; SSE-NEXT: movapd 1120(%rdi), %xmm0
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 1136(%rdi), %xmm1
; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 1152(%rdi), %xmm2
; SSE-NEXT: movapd 1168(%rdi), %xmm0
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 1184(%rdi), %xmm1
; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 1200(%rdi), %xmm2
; SSE-NEXT: movapd 1216(%rdi), %xmm0
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 1232(%rdi), %xmm1
; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 1248(%rdi), %xmm2
; SSE-NEXT: movapd 1264(%rdi), %xmm0
; SSE-NEXT: movapd %xmm0, %xmm14
; SSE-NEXT: movsd {{.*#+}} xmm14 = xmm2[0],xmm14[1]
; SSE-NEXT: movapd 1280(%rdi), %xmm1
; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 1296(%rdi), %xmm15
; SSE-NEXT: movapd 1312(%rdi), %xmm0
; SSE-NEXT: movapd %xmm0, %xmm11
; SSE-NEXT: movsd {{.*#+}} xmm11 = xmm15[0],xmm11[1]
; SSE-NEXT: movapd 1328(%rdi), %xmm1
; SSE-NEXT: shufpd {{.*#+}} xmm15 = xmm15[1],xmm1[0]
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 1344(%rdi), %xmm12
; SSE-NEXT: movapd 1360(%rdi), %xmm0
; SSE-NEXT: movapd %xmm0, %xmm7
; SSE-NEXT: movsd {{.*#+}} xmm7 = xmm12[0],xmm7[1]
; SSE-NEXT: movapd 1376(%rdi), %xmm1
; SSE-NEXT: shufpd {{.*#+}} xmm12 = xmm12[1],xmm1[0]
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, (%rsp) # 16-byte Spill
; SSE-NEXT: movapd 1392(%rdi), %xmm10
; SSE-NEXT: movapd 1408(%rdi), %xmm0
; SSE-NEXT: movapd %xmm0, %xmm2
; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm10[0],xmm2[1]
; SSE-NEXT: movapd 1424(%rdi), %xmm1
; SSE-NEXT: shufpd {{.*#+}} xmm10 = xmm10[1],xmm1[0]
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 1440(%rdi), %xmm9
; SSE-NEXT: movapd 1456(%rdi), %xmm0
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm9[0],xmm1[1]
; SSE-NEXT: movapd 1472(%rdi), %xmm3
; SSE-NEXT: shufpd {{.*#+}} xmm9 = xmm9[1],xmm3[0]
; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm0[0],xmm3[1]
; SSE-NEXT: movapd %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movapd 1488(%rdi), %xmm0
; SSE-NEXT: movapd 1504(%rdi), %xmm8
; SSE-NEXT: movapd %xmm8, %xmm3
; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm0[0],xmm3[1]
; SSE-NEXT: movapd 1520(%rdi), %xmm13
; SSE-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1],xmm13[0]
; SSE-NEXT: movsd {{.*#+}} xmm13 = xmm8[0],xmm13[1]
; SSE-NEXT: movapd (%rdi), %xmm8
; SSE-NEXT: movapd 16(%rdi), %xmm5
; SSE-NEXT: movapd %xmm5, %xmm6
; SSE-NEXT: movsd {{.*#+}} xmm6 = xmm8[0],xmm6[1]
; SSE-NEXT: movapd 32(%rdi), %xmm4
; SSE-NEXT: shufpd {{.*#+}} xmm8 = xmm8[1],xmm4[0]
; SSE-NEXT: movsd {{.*#+}} xmm4 = xmm5[0],xmm4[1]
; SSE-NEXT: movapd %xmm3, 496(%rsi)
; SSE-NEXT: movapd %xmm1, 480(%rsi)
; SSE-NEXT: movapd %xmm2, 464(%rsi)
; SSE-NEXT: movapd %xmm7, 448(%rsi)
; SSE-NEXT: movapd %xmm11, 432(%rsi)
; SSE-NEXT: movapd %xmm14, 416(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 400(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 384(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 368(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 352(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 336(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 320(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 304(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 288(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 272(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 256(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 240(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 224(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 208(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 192(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 176(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 160(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 144(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 128(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 112(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 96(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 80(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 64(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 48(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 32(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 16(%rsi)
; SSE-NEXT: movapd %xmm6, (%rsi)
; SSE-NEXT: movapd %xmm0, 496(%rdx)
; SSE-NEXT: movapd %xmm9, 480(%rdx)
; SSE-NEXT: movapd %xmm10, 464(%rdx)
; SSE-NEXT: movapd %xmm12, 448(%rdx)
; SSE-NEXT: movapd %xmm15, 432(%rdx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 416(%rdx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 400(%rdx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 384(%rdx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 368(%rdx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 352(%rdx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 336(%rdx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 320(%rdx)
3291 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3292 ; SSE-NEXT: movaps %xmm0, 304(%rdx)
3293 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3294 ; SSE-NEXT: movaps %xmm0, 288(%rdx)
3295 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3296 ; SSE-NEXT: movaps %xmm0, 272(%rdx)
3297 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3298 ; SSE-NEXT: movaps %xmm0, 256(%rdx)
3299 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3300 ; SSE-NEXT: movaps %xmm0, 240(%rdx)
3301 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3302 ; SSE-NEXT: movaps %xmm0, 224(%rdx)
3303 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3304 ; SSE-NEXT: movaps %xmm0, 208(%rdx)
3305 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3306 ; SSE-NEXT: movaps %xmm0, 192(%rdx)
3307 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3308 ; SSE-NEXT: movaps %xmm0, 176(%rdx)
3309 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3310 ; SSE-NEXT: movaps %xmm0, 160(%rdx)
3311 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3312 ; SSE-NEXT: movaps %xmm0, 144(%rdx)
3313 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3314 ; SSE-NEXT: movaps %xmm0, 128(%rdx)
3315 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3316 ; SSE-NEXT: movaps %xmm0, 112(%rdx)
3317 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3318 ; SSE-NEXT: movaps %xmm0, 96(%rdx)
3319 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3320 ; SSE-NEXT: movaps %xmm0, 80(%rdx)
3321 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3322 ; SSE-NEXT: movaps %xmm0, 64(%rdx)
3323 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3324 ; SSE-NEXT: movaps %xmm0, 48(%rdx)
3325 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3326 ; SSE-NEXT: movaps %xmm0, 32(%rdx)
3327 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3328 ; SSE-NEXT: movaps %xmm0, 16(%rdx)
3329 ; SSE-NEXT: movapd %xmm8, (%rdx)
3330 ; SSE-NEXT: movapd %xmm13, 496(%rcx)
3331 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3332 ; SSE-NEXT: movaps %xmm0, 480(%rcx)
3333 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3334 ; SSE-NEXT: movaps %xmm0, 464(%rcx)
3335 ; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
3336 ; SSE-NEXT: movaps %xmm0, 448(%rcx)
3337 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3338 ; SSE-NEXT: movaps %xmm0, 432(%rcx)
3339 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3340 ; SSE-NEXT: movaps %xmm0, 416(%rcx)
3341 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3342 ; SSE-NEXT: movaps %xmm0, 400(%rcx)
3343 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3344 ; SSE-NEXT: movaps %xmm0, 384(%rcx)
3345 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3346 ; SSE-NEXT: movaps %xmm0, 368(%rcx)
3347 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3348 ; SSE-NEXT: movaps %xmm0, 352(%rcx)
3349 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3350 ; SSE-NEXT: movaps %xmm0, 336(%rcx)
3351 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3352 ; SSE-NEXT: movaps %xmm0, 320(%rcx)
3353 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3354 ; SSE-NEXT: movaps %xmm0, 304(%rcx)
3355 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3356 ; SSE-NEXT: movaps %xmm0, 288(%rcx)
3357 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3358 ; SSE-NEXT: movaps %xmm0, 272(%rcx)
3359 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3360 ; SSE-NEXT: movaps %xmm0, 256(%rcx)
3361 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3362 ; SSE-NEXT: movaps %xmm0, 240(%rcx)
3363 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3364 ; SSE-NEXT: movaps %xmm0, 224(%rcx)
3365 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3366 ; SSE-NEXT: movaps %xmm0, 208(%rcx)
3367 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3368 ; SSE-NEXT: movaps %xmm0, 192(%rcx)
3369 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3370 ; SSE-NEXT: movaps %xmm0, 176(%rcx)
3371 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3372 ; SSE-NEXT: movaps %xmm0, 160(%rcx)
3373 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3374 ; SSE-NEXT: movaps %xmm0, 144(%rcx)
3375 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3376 ; SSE-NEXT: movaps %xmm0, 128(%rcx)
3377 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3378 ; SSE-NEXT: movaps %xmm0, 112(%rcx)
3379 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3380 ; SSE-NEXT: movaps %xmm0, 96(%rcx)
3381 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3382 ; SSE-NEXT: movaps %xmm0, 80(%rcx)
3383 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3384 ; SSE-NEXT: movaps %xmm0, 64(%rcx)
3385 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3386 ; SSE-NEXT: movaps %xmm0, 48(%rcx)
3387 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3388 ; SSE-NEXT: movaps %xmm0, 32(%rcx)
3389 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3390 ; SSE-NEXT: movaps %xmm0, 16(%rcx)
3391 ; SSE-NEXT: movapd %xmm4, (%rcx)
; SSE-NEXT: addq $1176, %rsp # imm = 0x498
; SSE-NEXT: retq
;
; AVX-LABEL: load_i64_stride3_vf64:
; AVX: # %bb.0:
; AVX-NEXT: subq $1096, %rsp # imm = 0x448
; AVX-NEXT: vmovapd 896(%rdi), %ymm0
; AVX-NEXT: vmovapd 704(%rdi), %ymm1
; AVX-NEXT: vmovapd 512(%rdi), %ymm2
; AVX-NEXT: vmovapd 320(%rdi), %ymm5
; AVX-NEXT: vmovapd 128(%rdi), %ymm6
; AVX-NEXT: vblendpd {{.*#+}} ymm7 = mem[0,1],ymm6[2,3]
; AVX-NEXT: vmovupd %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovupd %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vblendpd {{.*#+}} ymm8 = mem[0,1],ymm5[2,3]
; AVX-NEXT: vmovupd %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovupd %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vblendpd {{.*#+}} ymm9 = mem[0,1],ymm2[2,3]
; AVX-NEXT: vmovupd %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovapd %ymm2, %ymm4
; AVX-NEXT: vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vblendpd {{.*#+}} ymm10 = mem[0,1],ymm1[2,3]
; AVX-NEXT: vmovupd %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovapd %ymm1, %ymm3
; AVX-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vblendpd {{.*#+}} ymm11 = mem[0,1],ymm0[2,3]
; AVX-NEXT: vmovupd %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovapd %ymm0, %ymm2
; AVX-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vshufpd {{.*#+}} ymm0 = ymm7[1],ymm6[0],ymm7[3],ymm6[3]
; AVX-NEXT: vbroadcastsd 176(%rdi), %ymm1
; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3]
; AVX-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vshufpd {{.*#+}} ymm0 = ymm8[1],ymm5[0],ymm8[3],ymm5[3]
; AVX-NEXT: vbroadcastsd 368(%rdi), %ymm1
; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3]
; AVX-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vshufpd {{.*#+}} ymm0 = ymm9[1],ymm4[0],ymm9[3],ymm4[3]
; AVX-NEXT: vbroadcastsd 560(%rdi), %ymm1
; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3]
; AVX-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vshufpd {{.*#+}} ymm0 = ymm10[1],ymm3[0],ymm10[3],ymm3[3]
; AVX-NEXT: vbroadcastsd 752(%rdi), %ymm1
; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3]
; AVX-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vshufpd {{.*#+}} ymm0 = ymm11[1],ymm2[0],ymm11[3],ymm2[3]
; AVX-NEXT: vbroadcastsd 944(%rdi), %ymm1
; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3]
; AVX-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovapd 1088(%rdi), %ymm9
; AVX-NEXT: vblendpd {{.*#+}} ymm0 = mem[0,1],ymm9[2,3]
; AVX-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[1],ymm9[0],ymm0[3],ymm9[3]
; AVX-NEXT: vbroadcastsd 1136(%rdi), %ymm1
; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3]
; AVX-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovapd 1280(%rdi), %ymm8
; AVX-NEXT: vblendpd {{.*#+}} ymm0 = mem[0,1],ymm8[2,3]
; AVX-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[1],ymm8[0],ymm0[3],ymm8[3]
; AVX-NEXT: vbroadcastsd 1328(%rdi), %ymm1
; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3]
; AVX-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovapd 1472(%rdi), %ymm7
; AVX-NEXT: vblendpd {{.*#+}} ymm0 = mem[0,1],ymm7[2,3]
; AVX-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[1],ymm7[0],ymm0[3],ymm7[3]
; AVX-NEXT: vbroadcastsd 1520(%rdi), %ymm1
; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3]
; AVX-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovapd 32(%rdi), %ymm0
; AVX-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vblendpd {{.*#+}} ymm1 = mem[0,1],ymm0[2,3]
; AVX-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vshufpd {{.*#+}} ymm0 = ymm1[1],ymm0[0],ymm1[3],ymm0[3]
; AVX-NEXT: vbroadcastsd 80(%rdi), %ymm1
; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3]
; AVX-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovapd 224(%rdi), %ymm13
; AVX-NEXT: vblendpd {{.*#+}} ymm0 = mem[0,1],ymm13[2,3]
; AVX-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[1],ymm13[0],ymm0[3],ymm13[3]
; AVX-NEXT: vbroadcastsd 272(%rdi), %ymm1
; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3]
; AVX-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovapd 416(%rdi), %ymm12
; AVX-NEXT: vblendpd {{.*#+}} ymm0 = mem[0,1],ymm12[2,3]
; AVX-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[1],ymm12[0],ymm0[3],ymm12[3]
; AVX-NEXT: vbroadcastsd 464(%rdi), %ymm1
; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3]
; AVX-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovapd 608(%rdi), %ymm10
; AVX-NEXT: vblendpd {{.*#+}} ymm0 = mem[0,1],ymm10[2,3]
; AVX-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[1],ymm10[0],ymm0[3],ymm10[3]
; AVX-NEXT: vbroadcastsd 656(%rdi), %ymm1
; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3]
; AVX-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovapd 800(%rdi), %ymm5
; AVX-NEXT: vblendpd {{.*#+}} ymm14 = mem[0,1],ymm5[2,3]
; AVX-NEXT: vshufpd {{.*#+}} ymm0 = ymm14[1],ymm5[0],ymm14[3],ymm5[3]
; AVX-NEXT: vbroadcastsd 848(%rdi), %ymm1
; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3]
; AVX-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovapd 992(%rdi), %ymm4
; AVX-NEXT: vblendpd {{.*#+}} ymm11 = mem[0,1],ymm4[2,3]
; AVX-NEXT: vshufpd {{.*#+}} ymm0 = ymm11[1],ymm4[0],ymm11[3],ymm4[3]
; AVX-NEXT: vbroadcastsd 1040(%rdi), %ymm1
; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3]
; AVX-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovapd 1184(%rdi), %ymm2
; AVX-NEXT: vblendpd {{.*#+}} ymm6 = mem[0,1],ymm2[2,3]
; AVX-NEXT: vshufpd {{.*#+}} ymm0 = ymm6[1],ymm2[0],ymm6[3],ymm2[3]
; AVX-NEXT: vbroadcastsd 1232(%rdi), %ymm3
; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm3[3]
; AVX-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovapd 1376(%rdi), %ymm1
; AVX-NEXT: vblendpd {{.*#+}} ymm3 = mem[0,1],ymm1[2,3]
; AVX-NEXT: vshufpd {{.*#+}} ymm0 = ymm3[1],ymm1[0],ymm3[3],ymm1[3]
; AVX-NEXT: vbroadcastsd 1424(%rdi), %ymm15
; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm15[3]
; AVX-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps 112(%rdi), %xmm0
; AVX-NEXT: vinsertf128 $1, 160(%rdi), %ymm0, %ymm0
; AVX-NEXT: vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
; AVX-NEXT: # ymm15 = mem[0,1],ymm0[2,3],mem[4,5],ymm0[6,7]
; AVX-NEXT: vmovups %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
; AVX-NEXT: vblendps {{.*#+}} ymm15 = ymm15[0,1,2,3],mem[4,5,6,7]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm15[2,3],ymm0[4,5],ymm15[6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps 304(%rdi), %xmm0
; AVX-NEXT: vinsertf128 $1, 352(%rdi), %ymm0, %ymm0
; AVX-NEXT: vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
; AVX-NEXT: # ymm15 = mem[0,1],ymm0[2,3],mem[4,5],ymm0[6,7]
; AVX-NEXT: vmovups %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
; AVX-NEXT: vblendps {{.*#+}} ymm15 = ymm15[0,1,2,3],mem[4,5,6,7]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm15[2,3],ymm0[4,5],ymm15[6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps 496(%rdi), %xmm0
; AVX-NEXT: vinsertf128 $1, 544(%rdi), %ymm0, %ymm0
; AVX-NEXT: vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
; AVX-NEXT: # ymm15 = mem[0,1],ymm0[2,3],mem[4,5],ymm0[6,7]
; AVX-NEXT: vmovups %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
; AVX-NEXT: vblendps {{.*#+}} ymm15 = ymm15[0,1,2,3],mem[4,5,6,7]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm15[2,3],ymm0[4,5],ymm15[6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps 688(%rdi), %xmm0
; AVX-NEXT: vinsertf128 $1, 736(%rdi), %ymm0, %ymm0
; AVX-NEXT: vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
; AVX-NEXT: # ymm15 = mem[0,1],ymm0[2,3],mem[4,5],ymm0[6,7]
; AVX-NEXT: vmovups %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
; AVX-NEXT: vblendps {{.*#+}} ymm15 = ymm15[0,1,2,3],mem[4,5,6,7]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm15[2,3],ymm0[4,5],ymm15[6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps 880(%rdi), %xmm0
; AVX-NEXT: vinsertf128 $1, 928(%rdi), %ymm0, %ymm0
; AVX-NEXT: vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
; AVX-NEXT: # ymm15 = mem[0,1],ymm0[2,3],mem[4,5],ymm0[6,7]
; AVX-NEXT: vmovups %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
; AVX-NEXT: vblendps {{.*#+}} ymm15 = ymm15[0,1,2,3],mem[4,5,6,7]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm15[2,3],ymm0[4,5],ymm15[6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps 1072(%rdi), %xmm0
; AVX-NEXT: vinsertf128 $1, 1120(%rdi), %ymm0, %ymm0
; AVX-NEXT: vblendpd $5, (%rsp), %ymm0, %ymm15 # 32-byte Folded Reload
; AVX-NEXT: # ymm15 = mem[0],ymm0[1],mem[2],ymm0[3]
; AVX-NEXT: vmovupd %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vblendpd {{.*#+}} ymm9 = ymm9[0,1],mem[2,3]
; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm9[1],ymm0[2],ymm9[3]
; AVX-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps 1264(%rdi), %xmm0
; AVX-NEXT: vinsertf128 $1, 1312(%rdi), %ymm0, %ymm0
; AVX-NEXT: vblendpd $5, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
; AVX-NEXT: # ymm15 = mem[0],ymm0[1],mem[2],ymm0[3]
; AVX-NEXT: vblendpd {{.*#+}} ymm8 = ymm8[0,1],mem[2,3]
; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm8[1],ymm0[2],ymm8[3]
; AVX-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX-NEXT: vmovaps 1456(%rdi), %xmm0
; AVX-NEXT: vinsertf128 $1, 1504(%rdi), %ymm0, %ymm0
; AVX-NEXT: vblendpd $5, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm8 # 32-byte Folded Reload
; AVX-NEXT: # ymm8 = mem[0],ymm0[1],mem[2],ymm0[3]
; AVX-NEXT: vmovupd %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vblendpd {{.*#+}} ymm7 = ymm7[0,1],mem[2,3]
; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm7[1],ymm0[2],ymm7[3]
; AVX-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps 1360(%rdi), %xmm0
; AVX-NEXT: vinsertf128 $1, 1408(%rdi), %ymm0, %ymm0
; AVX-NEXT: vblendpd {{.*#+}} ymm7 = ymm3[0],ymm0[1],ymm3[2],ymm0[3]
; AVX-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0,1],mem[2,3]
; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3]
; AVX-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps 1168(%rdi), %xmm0
; AVX-NEXT: vinsertf128 $1, 1216(%rdi), %ymm0, %ymm1
; AVX-NEXT: vblendpd {{.*#+}} ymm3 = ymm6[0],ymm1[1],ymm6[2],ymm1[3]
; AVX-NEXT: vblendpd {{.*#+}} ymm2 = ymm2[0,1],mem[2,3]
; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0],ymm2[1],ymm1[2],ymm2[3]
; AVX-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps 976(%rdi), %xmm1
; AVX-NEXT: vinsertf128 $1, 1024(%rdi), %ymm1, %ymm1
; AVX-NEXT: vblendpd {{.*#+}} ymm2 = ymm11[0],ymm1[1],ymm11[2],ymm1[3]
; AVX-NEXT: vblendpd {{.*#+}} ymm4 = ymm4[0,1],mem[2,3]
; AVX-NEXT: vblendpd {{.*#+}} ymm6 = ymm1[0],ymm4[1],ymm1[2],ymm4[3]
; AVX-NEXT: vmovaps 784(%rdi), %xmm1
; AVX-NEXT: vinsertf128 $1, 832(%rdi), %ymm1, %ymm1
; AVX-NEXT: vblendpd {{.*#+}} ymm14 = ymm14[0],ymm1[1],ymm14[2],ymm1[3]
; AVX-NEXT: vblendpd {{.*#+}} ymm4 = ymm5[0,1],mem[2,3]
; AVX-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0],ymm4[1],ymm1[2],ymm4[3]
; AVX-NEXT: vmovaps 592(%rdi), %xmm4
; AVX-NEXT: vinsertf128 $1, 640(%rdi), %ymm4, %ymm4
; AVX-NEXT: vblendpd $5, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm5 # 32-byte Folded Reload
; AVX-NEXT: # ymm5 = mem[0],ymm4[1],mem[2],ymm4[3]
; AVX-NEXT: vblendpd {{.*#+}} ymm9 = ymm10[0,1],mem[2,3]
; AVX-NEXT: vblendpd {{.*#+}} ymm10 = ymm4[0],ymm9[1],ymm4[2],ymm9[3]
; AVX-NEXT: vmovaps 400(%rdi), %xmm4
; AVX-NEXT: vinsertf128 $1, 448(%rdi), %ymm4, %ymm4
; AVX-NEXT: vblendpd $5, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm9 # 32-byte Folded Reload
; AVX-NEXT: # ymm9 = mem[0],ymm4[1],mem[2],ymm4[3]
; AVX-NEXT: vblendpd {{.*#+}} ymm11 = ymm12[0,1],mem[2,3]
; AVX-NEXT: vblendpd {{.*#+}} ymm4 = ymm4[0],ymm11[1],ymm4[2],ymm11[3]
; AVX-NEXT: vmovaps 208(%rdi), %xmm11
; AVX-NEXT: vinsertf128 $1, 256(%rdi), %ymm11, %ymm11
; AVX-NEXT: vblendpd $5, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm12 # 32-byte Folded Reload
; AVX-NEXT: # ymm12 = mem[0],ymm11[1],mem[2],ymm11[3]
; AVX-NEXT: vblendpd {{.*#+}} ymm13 = ymm13[0,1],mem[2,3]
; AVX-NEXT: vblendpd {{.*#+}} ymm13 = ymm11[0],ymm13[1],ymm11[2],ymm13[3]
; AVX-NEXT: vmovaps 16(%rdi), %xmm11
; AVX-NEXT: vinsertf128 $1, 64(%rdi), %ymm11, %ymm11
; AVX-NEXT: vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm0 # 32-byte Folded Reload
; AVX-NEXT: # ymm0 = mem[0,1],ymm11[2,3],mem[4,5],ymm11[6,7]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
; AVX-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3],mem[4,5,6,7]
; AVX-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0,1],ymm8[2,3],ymm11[4,5],ymm8[6,7]
; AVX-NEXT: vmovapd %ymm7, 448(%rsi)
; AVX-NEXT: vmovapd %ymm3, 384(%rsi)
; AVX-NEXT: vmovapd %ymm2, 320(%rsi)
; AVX-NEXT: vmovapd %ymm14, 256(%rsi)
; AVX-NEXT: vmovapd %ymm5, 192(%rsi)
; AVX-NEXT: vmovapd %ymm9, 128(%rsi)
; AVX-NEXT: vmovapd %ymm12, 64(%rsi)
; AVX-NEXT: vmovaps %ymm0, (%rsi)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 480(%rsi)
; AVX-NEXT: vmovapd %ymm15, 416(%rsi)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 352(%rsi)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 288(%rsi)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 224(%rsi)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 160(%rsi)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 96(%rsi)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 32(%rsi)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 448(%rdx)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 384(%rdx)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 320(%rdx)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 256(%rdx)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 192(%rdx)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 128(%rdx)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 64(%rdx)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, (%rdx)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 480(%rdx)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 416(%rdx)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 352(%rdx)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 288(%rdx)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 224(%rdx)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 160(%rdx)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 96(%rdx)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 32(%rdx)
; AVX-NEXT: vmovaps %ymm11, (%rcx)
; AVX-NEXT: vmovapd %ymm13, 64(%rcx)
; AVX-NEXT: vmovapd %ymm4, 128(%rcx)
; AVX-NEXT: vmovapd %ymm10, 192(%rcx)
; AVX-NEXT: vmovapd %ymm1, 256(%rcx)
; AVX-NEXT: vmovapd %ymm6, 320(%rcx)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 384(%rcx)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 448(%rcx)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 480(%rcx)
; AVX-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 416(%rcx)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 352(%rcx)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 288(%rcx)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 224(%rcx)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 160(%rcx)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 96(%rcx)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 32(%rcx)
; AVX-NEXT: addq $1096, %rsp # imm = 0x448
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; AVX2-LABEL: load_i64_stride3_vf64:
; AVX2: # %bb.0:
; AVX2-NEXT: subq $968, %rsp # imm = 0x3C8
; AVX2-NEXT: vmovaps 896(%rdi), %ymm2
; AVX2-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovaps 864(%rdi), %ymm3
; AVX2-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovaps 704(%rdi), %ymm4
; AVX2-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovaps 672(%rdi), %ymm5
; AVX2-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovaps 512(%rdi), %ymm6
; AVX2-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovaps 480(%rdi), %ymm7
; AVX2-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovaps 320(%rdi), %ymm8
; AVX2-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovaps 288(%rdi), %ymm9
; AVX2-NEXT: vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovaps 128(%rdi), %ymm10
; AVX2-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovaps 96(%rdi), %ymm1
; AVX2-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vinsertf128 $1, 160(%rdi), %ymm0, %ymm0
; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,3,2,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm10[4,5,6,7]
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vinsertf128 $1, 352(%rdi), %ymm0, %ymm0
; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm9[0,3,2,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm8[4,5,6,7]
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vinsertf128 $1, 544(%rdi), %ymm0, %ymm0
; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm7[0,3,2,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm6[4,5,6,7]
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vinsertf128 $1, 736(%rdi), %ymm0, %ymm0
; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm5[0,3,2,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm4[4,5,6,7]
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vinsertf128 $1, 928(%rdi), %ymm0, %ymm0
; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm3[0,3,2,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovaps 1088(%rdi), %ymm1
; AVX2-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovaps 1056(%rdi), %ymm0
; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT: vinsertf128 $1, 1120(%rdi), %ymm0, %ymm1
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovaps 1280(%rdi), %ymm1
; AVX2-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovaps 1248(%rdi), %ymm0
; AVX2-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT: vinsertf128 $1, 1312(%rdi), %ymm0, %ymm1
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovaps 1472(%rdi), %ymm1
; AVX2-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovaps 1440(%rdi), %ymm0
; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT: vinsertf128 $1, 1504(%rdi), %ymm0, %ymm1
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovaps 32(%rdi), %ymm1
; AVX2-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovaps (%rdi), %ymm0
; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT: vinsertf128 $1, 64(%rdi), %ymm0, %ymm1
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovaps 224(%rdi), %ymm13
; AVX2-NEXT: vmovaps 192(%rdi), %ymm12
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm12[0,3,2,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm13[4,5,6,7]
; AVX2-NEXT: vinsertf128 $1, 256(%rdi), %ymm0, %ymm1
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovaps 416(%rdi), %ymm11
; AVX2-NEXT: vmovaps 384(%rdi), %ymm10
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm10[0,3,2,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm11[4,5,6,7]
; AVX2-NEXT: vinsertf128 $1, 448(%rdi), %ymm0, %ymm1
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovaps 608(%rdi), %ymm9
; AVX2-NEXT: vmovaps 576(%rdi), %ymm8
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm8[0,3,2,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm9[4,5,6,7]
; AVX2-NEXT: vinsertf128 $1, 640(%rdi), %ymm0, %ymm1
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovaps 800(%rdi), %ymm7
; AVX2-NEXT: vmovaps 768(%rdi), %ymm6
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm6[0,3,2,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm7[4,5,6,7]
; AVX2-NEXT: vinsertf128 $1, 832(%rdi), %ymm0, %ymm1
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovaps 992(%rdi), %ymm5
; AVX2-NEXT: vmovaps 960(%rdi), %ymm4
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm4[0,3,2,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm5[4,5,6,7]
; AVX2-NEXT: vinsertf128 $1, 1024(%rdi), %ymm0, %ymm1
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovaps 1184(%rdi), %ymm3
; AVX2-NEXT: vmovaps 1152(%rdi), %ymm2
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm2[0,3,2,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5,6,7]
; AVX2-NEXT: vinsertf128 $1, 1216(%rdi), %ymm0, %ymm1
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovaps 1376(%rdi), %ymm1
; AVX2-NEXT: vmovaps 1344(%rdi), %ymm0
; AVX2-NEXT: vpermpd {{.*#+}} ymm15 = ymm0[0,3,2,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm15 = ymm15[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT: vinsertf128 $1, 1408(%rdi), %ymm0, %ymm14
; AVX2-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0,1,2,3,4,5],ymm14[6,7]
; AVX2-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
; AVX2-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
; AVX2-NEXT: # ymm14 = ymm14[0,1],mem[2,3],ymm14[4,5,6,7]
; AVX2-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,3,0,1,6,7,4,5]
; AVX2-NEXT: vbroadcastsd 176(%rdi), %ymm15
; AVX2-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5],ymm15[6,7]
; AVX2-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
; AVX2-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
; AVX2-NEXT: # ymm14 = ymm14[0,1],mem[2,3],ymm14[4,5,6,7]
; AVX2-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,3,0,1,6,7,4,5]
; AVX2-NEXT: vbroadcastsd 368(%rdi), %ymm15
; AVX2-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5],ymm15[6,7]
; AVX2-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
; AVX2-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
; AVX2-NEXT: # ymm14 = ymm14[0,1],mem[2,3],ymm14[4,5,6,7]
; AVX2-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,3,0,1,6,7,4,5]
; AVX2-NEXT: vbroadcastsd 560(%rdi), %ymm15
; AVX2-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5],ymm15[6,7]
; AVX2-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
; AVX2-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
; AVX2-NEXT: # ymm14 = ymm14[0,1],mem[2,3],ymm14[4,5,6,7]
; AVX2-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,3,0,1,6,7,4,5]
; AVX2-NEXT: vbroadcastsd 752(%rdi), %ymm15
; AVX2-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5],ymm15[6,7]
; AVX2-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
; AVX2-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
; AVX2-NEXT: # ymm14 = ymm14[0,1],mem[2,3],ymm14[4,5,6,7]
; AVX2-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,3,0,1,6,7,4,5]
; AVX2-NEXT: vbroadcastsd 944(%rdi), %ymm15
; AVX2-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5],ymm15[6,7]
; AVX2-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
; AVX2-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
; AVX2-NEXT: # ymm14 = ymm14[0,1],mem[2,3],ymm14[4,5,6,7]
; AVX2-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,3,0,1,6,7,4,5]
; AVX2-NEXT: vbroadcastsd 1136(%rdi), %ymm15
; AVX2-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5],ymm15[6,7]
; AVX2-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
; AVX2-NEXT: vblendps $12, (%rsp), %ymm14, %ymm14 # 32-byte Folded Reload
; AVX2-NEXT: # ymm14 = ymm14[0,1],mem[2,3],ymm14[4,5,6,7]
; AVX2-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,3,0,1,6,7,4,5]
; AVX2-NEXT: vbroadcastsd 1328(%rdi), %ymm15
; AVX2-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5],ymm15[6,7]
; AVX2-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
; AVX2-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
; AVX2-NEXT: # ymm14 = ymm14[0,1],mem[2,3],ymm14[4,5,6,7]
; AVX2-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,3,0,1,6,7,4,5]
; AVX2-NEXT: vbroadcastsd 1520(%rdi), %ymm15
; AVX2-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5],ymm15[6,7]
; AVX2-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
; AVX2-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
; AVX2-NEXT: # ymm14 = ymm14[0,1],mem[2,3],ymm14[4,5,6,7]
; AVX2-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,3,0,1,6,7,4,5]
; AVX2-NEXT: vbroadcastsd 80(%rdi), %ymm15
; AVX2-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5],ymm15[6,7]
; AVX2-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vblendps {{.*#+}} ymm12 = ymm13[0,1],ymm12[2,3],ymm13[4,5,6,7]
; AVX2-NEXT: vshufps {{.*#+}} ymm12 = ymm12[2,3,0,1,6,7,4,5]
; AVX2-NEXT: vbroadcastsd 272(%rdi), %ymm13
; AVX2-NEXT: vblendps {{.*#+}} ymm12 = ymm12[0,1,2,3,4,5],ymm13[6,7]
; AVX2-NEXT: vmovups %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vblendps {{.*#+}} ymm10 = ymm11[0,1],ymm10[2,3],ymm11[4,5,6,7]
; AVX2-NEXT: vshufps {{.*#+}} ymm10 = ymm10[2,3,0,1,6,7,4,5]
; AVX2-NEXT: vbroadcastsd 464(%rdi), %ymm11
; AVX2-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3,4,5],ymm11[6,7]
; AVX2-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vblendps {{.*#+}} ymm8 = ymm9[0,1],ymm8[2,3],ymm9[4,5,6,7]
; AVX2-NEXT: vshufps {{.*#+}} ymm8 = ymm8[2,3,0,1,6,7,4,5]
; AVX2-NEXT: vbroadcastsd 656(%rdi), %ymm9
; AVX2-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5],ymm9[6,7]
; AVX2-NEXT: vmovups %ymm8, (%rsp) # 32-byte Spill
; AVX2-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,3],ymm7[4,5,6,7]
; AVX2-NEXT: vshufps {{.*#+}} ymm6 = ymm6[2,3,0,1,6,7,4,5]
; AVX2-NEXT: vbroadcastsd 848(%rdi), %ymm7
; AVX2-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5],ymm7[6,7]
; AVX2-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1],ymm4[2,3],ymm5[4,5,6,7]
; AVX2-NEXT: vshufps {{.*#+}} ymm4 = ymm4[2,3,0,1,6,7,4,5]
; AVX2-NEXT: vbroadcastsd 1040(%rdi), %ymm5
; AVX2-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5],ymm5[6,7]
; AVX2-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3],ymm3[4,5,6,7]
; AVX2-NEXT: vshufps {{.*#+}} ymm2 = ymm2[2,3,0,1,6,7,4,5]
; AVX2-NEXT: vbroadcastsd 1232(%rdi), %ymm3
; AVX2-NEXT: vblendps {{.*#+}} ymm13 = ymm2[0,1,2,3,4,5],ymm3[6,7]
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7]
; AVX2-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5]
; AVX2-NEXT: vbroadcastsd 1424(%rdi), %ymm1
; AVX2-NEXT: vblendps {{.*#+}} ymm11 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX2-NEXT: vmovaps 112(%rdi), %xmm0
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovaps 304(%rdi), %xmm0
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovaps 496(%rdi), %xmm0
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovaps 688(%rdi), %xmm0
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm14 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT: vmovaps 880(%rdi), %xmm0
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm12 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT: vmovaps 1072(%rdi), %xmm0
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm10 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT: vmovaps 1264(%rdi), %xmm0
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm9 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT: vmovaps 1456(%rdi), %xmm0
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm8 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT: vmovaps 1360(%rdi), %xmm0
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm7 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT: vmovaps 1168(%rdi), %xmm0
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm6 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT: vmovaps 976(%rdi), %xmm0
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm5 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT: vmovaps 784(%rdi), %xmm0
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm4 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT: vmovaps 592(%rdi), %xmm0
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm3 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT: vmovaps 400(%rdi), %xmm0
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT: vmovaps 208(%rdi), %xmm0
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT: vmovaps 16(%rdi), %xmm0
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm15 = mem[0,1,0,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm15[4,5,6,7]
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm15, 448(%rsi)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm15, 384(%rsi)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm15, 320(%rsi)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm15, 256(%rsi)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm15, 192(%rsi)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm15, 128(%rsi)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm15, 64(%rsi)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm15, (%rsi)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm15, 480(%rsi)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm15, 416(%rsi)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm15, 352(%rsi)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm15, 288(%rsi)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm15, 224(%rsi)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm15, 160(%rsi)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm15, 96(%rsi)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm15, 32(%rsi)
; AVX2-NEXT: vmovaps %ymm11, 448(%rdx)
; AVX2-NEXT: vmovaps %ymm13, 384(%rdx)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm11, 320(%rdx)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm11, 256(%rdx)
; AVX2-NEXT: vmovups (%rsp), %ymm11 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm11, 192(%rdx)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm11, 128(%rdx)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm11, 64(%rdx)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm11, (%rdx)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm11, 480(%rdx)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm11, 416(%rdx)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm11, 352(%rdx)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm11, 288(%rdx)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm11, 224(%rdx)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm11, 160(%rdx)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm11, 96(%rdx)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm11, 32(%rdx)
; AVX2-NEXT: vmovaps %ymm0, (%rcx)
; AVX2-NEXT: vmovaps %ymm1, 64(%rcx)
; AVX2-NEXT: vmovaps %ymm2, 128(%rcx)
; AVX2-NEXT: vmovaps %ymm3, 192(%rcx)
; AVX2-NEXT: vmovaps %ymm4, 256(%rcx)
; AVX2-NEXT: vmovaps %ymm5, 320(%rcx)
; AVX2-NEXT: vmovaps %ymm6, 384(%rcx)
; AVX2-NEXT: vmovaps %ymm7, 448(%rcx)
; AVX2-NEXT: vmovaps %ymm8, 480(%rcx)
; AVX2-NEXT: vmovaps %ymm9, 416(%rcx)
; AVX2-NEXT: vmovaps %ymm10, 352(%rcx)
; AVX2-NEXT: vmovaps %ymm12, 288(%rcx)
; AVX2-NEXT: vmovaps %ymm14, 224(%rcx)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm0, 160(%rcx)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm0, 96(%rcx)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm0, 32(%rcx)
; AVX2-NEXT: addq $968, %rsp # imm = 0x3C8
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX2-FP-LABEL: load_i64_stride3_vf64:
; AVX2-FP: # %bb.0:
; AVX2-FP-NEXT: subq $968, %rsp # imm = 0x3C8
; AVX2-FP-NEXT: vmovaps 896(%rdi), %ymm2
; AVX2-FP-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vmovaps 864(%rdi), %ymm3
; AVX2-FP-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vmovaps 704(%rdi), %ymm4
; AVX2-FP-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vmovaps 672(%rdi), %ymm5
; AVX2-FP-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vmovaps 512(%rdi), %ymm6
; AVX2-FP-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vmovaps 480(%rdi), %ymm7
; AVX2-FP-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vmovaps 320(%rdi), %ymm8
; AVX2-FP-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vmovaps 288(%rdi), %ymm9
; AVX2-FP-NEXT: vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vmovaps 128(%rdi), %ymm10
; AVX2-FP-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vmovaps 96(%rdi), %ymm1
; AVX2-FP-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vinsertf128 $1, 160(%rdi), %ymm0, %ymm0
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,3,2,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm10[4,5,6,7]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vinsertf128 $1, 352(%rdi), %ymm0, %ymm0
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm1 = ymm9[0,3,2,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm8[4,5,6,7]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vinsertf128 $1, 544(%rdi), %ymm0, %ymm0
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm1 = ymm7[0,3,2,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm6[4,5,6,7]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vinsertf128 $1, 736(%rdi), %ymm0, %ymm0
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm1 = ymm5[0,3,2,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm4[4,5,6,7]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vinsertf128 $1, 928(%rdi), %ymm0, %ymm0
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm1 = ymm3[0,3,2,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vmovaps 1088(%rdi), %ymm1
; AVX2-FP-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vmovaps 1056(%rdi), %ymm0
; AVX2-FP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT: vinsertf128 $1, 1120(%rdi), %ymm0, %ymm1
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vmovaps 1280(%rdi), %ymm1
; AVX2-FP-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vmovaps 1248(%rdi), %ymm0
; AVX2-FP-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT: vinsertf128 $1, 1312(%rdi), %ymm0, %ymm1
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vmovaps 1472(%rdi), %ymm1
; AVX2-FP-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vmovaps 1440(%rdi), %ymm0
; AVX2-FP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT: vinsertf128 $1, 1504(%rdi), %ymm0, %ymm1
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vmovaps 32(%rdi), %ymm1
; AVX2-FP-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vmovaps (%rdi), %ymm0
; AVX2-FP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT: vinsertf128 $1, 64(%rdi), %ymm0, %ymm1
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vmovaps 224(%rdi), %ymm13
; AVX2-FP-NEXT: vmovaps 192(%rdi), %ymm12
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm0 = ymm12[0,3,2,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm13[4,5,6,7]
; AVX2-FP-NEXT: vinsertf128 $1, 256(%rdi), %ymm0, %ymm1
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vmovaps 416(%rdi), %ymm11
; AVX2-FP-NEXT: vmovaps 384(%rdi), %ymm10
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm0 = ymm10[0,3,2,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm11[4,5,6,7]
; AVX2-FP-NEXT: vinsertf128 $1, 448(%rdi), %ymm0, %ymm1
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vmovaps 608(%rdi), %ymm9
; AVX2-FP-NEXT: vmovaps 576(%rdi), %ymm8
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm0 = ymm8[0,3,2,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm9[4,5,6,7]
; AVX2-FP-NEXT: vinsertf128 $1, 640(%rdi), %ymm0, %ymm1
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vmovaps 800(%rdi), %ymm7
; AVX2-FP-NEXT: vmovaps 768(%rdi), %ymm6
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm0 = ymm6[0,3,2,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm7[4,5,6,7]
; AVX2-FP-NEXT: vinsertf128 $1, 832(%rdi), %ymm0, %ymm1
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vmovaps 992(%rdi), %ymm5
; AVX2-FP-NEXT: vmovaps 960(%rdi), %ymm4
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm0 = ymm4[0,3,2,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm5[4,5,6,7]
; AVX2-FP-NEXT: vinsertf128 $1, 1024(%rdi), %ymm0, %ymm1
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vmovaps 1184(%rdi), %ymm3
; AVX2-FP-NEXT: vmovaps 1152(%rdi), %ymm2
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm0 = ymm2[0,3,2,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5,6,7]
; AVX2-FP-NEXT: vinsertf128 $1, 1216(%rdi), %ymm0, %ymm1
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vmovaps 1376(%rdi), %ymm1
; AVX2-FP-NEXT: vmovaps 1344(%rdi), %ymm0
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm15 = ymm0[0,3,2,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm15 = ymm15[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT: vinsertf128 $1, 1408(%rdi), %ymm0, %ymm14
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0,1,2,3,4,5],ymm14[6,7]
4228 ; AVX2-FP-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4229 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
4230 ; AVX2-FP-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
4231 ; AVX2-FP-NEXT: # ymm14 = ymm14[0,1],mem[2,3],ymm14[4,5,6,7]
4232 ; AVX2-FP-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,3,0,1,6,7,4,5]
4233 ; AVX2-FP-NEXT: vbroadcastsd 176(%rdi), %ymm15
4234 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5],ymm15[6,7]
4235 ; AVX2-FP-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4236 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
4237 ; AVX2-FP-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
4238 ; AVX2-FP-NEXT: # ymm14 = ymm14[0,1],mem[2,3],ymm14[4,5,6,7]
4239 ; AVX2-FP-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,3,0,1,6,7,4,5]
4240 ; AVX2-FP-NEXT: vbroadcastsd 368(%rdi), %ymm15
4241 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5],ymm15[6,7]
4242 ; AVX2-FP-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4243 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
4244 ; AVX2-FP-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
4245 ; AVX2-FP-NEXT: # ymm14 = ymm14[0,1],mem[2,3],ymm14[4,5,6,7]
4246 ; AVX2-FP-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,3,0,1,6,7,4,5]
4247 ; AVX2-FP-NEXT: vbroadcastsd 560(%rdi), %ymm15
4248 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5],ymm15[6,7]
4249 ; AVX2-FP-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4250 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
4251 ; AVX2-FP-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
4252 ; AVX2-FP-NEXT: # ymm14 = ymm14[0,1],mem[2,3],ymm14[4,5,6,7]
4253 ; AVX2-FP-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,3,0,1,6,7,4,5]
4254 ; AVX2-FP-NEXT: vbroadcastsd 752(%rdi), %ymm15
4255 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5],ymm15[6,7]
4256 ; AVX2-FP-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4257 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
4258 ; AVX2-FP-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
4259 ; AVX2-FP-NEXT: # ymm14 = ymm14[0,1],mem[2,3],ymm14[4,5,6,7]
4260 ; AVX2-FP-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,3,0,1,6,7,4,5]
4261 ; AVX2-FP-NEXT: vbroadcastsd 944(%rdi), %ymm15
4262 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5],ymm15[6,7]
4263 ; AVX2-FP-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4264 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
4265 ; AVX2-FP-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
4266 ; AVX2-FP-NEXT: # ymm14 = ymm14[0,1],mem[2,3],ymm14[4,5,6,7]
4267 ; AVX2-FP-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,3,0,1,6,7,4,5]
4268 ; AVX2-FP-NEXT: vbroadcastsd 1136(%rdi), %ymm15
4269 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5],ymm15[6,7]
4270 ; AVX2-FP-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4271 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
4272 ; AVX2-FP-NEXT: vblendps $12, (%rsp), %ymm14, %ymm14 # 32-byte Folded Reload
4273 ; AVX2-FP-NEXT: # ymm14 = ymm14[0,1],mem[2,3],ymm14[4,5,6,7]
4274 ; AVX2-FP-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,3,0,1,6,7,4,5]
4275 ; AVX2-FP-NEXT: vbroadcastsd 1328(%rdi), %ymm15
4276 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5],ymm15[6,7]
4277 ; AVX2-FP-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4278 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
4279 ; AVX2-FP-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
4280 ; AVX2-FP-NEXT: # ymm14 = ymm14[0,1],mem[2,3],ymm14[4,5,6,7]
4281 ; AVX2-FP-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,3,0,1,6,7,4,5]
4282 ; AVX2-FP-NEXT: vbroadcastsd 1520(%rdi), %ymm15
4283 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5],ymm15[6,7]
4284 ; AVX2-FP-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4285 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
4286 ; AVX2-FP-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
4287 ; AVX2-FP-NEXT: # ymm14 = ymm14[0,1],mem[2,3],ymm14[4,5,6,7]
4288 ; AVX2-FP-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,3,0,1,6,7,4,5]
4289 ; AVX2-FP-NEXT: vbroadcastsd 80(%rdi), %ymm15
4290 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5],ymm15[6,7]
4291 ; AVX2-FP-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4292 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm12 = ymm13[0,1],ymm12[2,3],ymm13[4,5,6,7]
4293 ; AVX2-FP-NEXT: vshufps {{.*#+}} ymm12 = ymm12[2,3,0,1,6,7,4,5]
4294 ; AVX2-FP-NEXT: vbroadcastsd 272(%rdi), %ymm13
4295 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm12 = ymm12[0,1,2,3,4,5],ymm13[6,7]
4296 ; AVX2-FP-NEXT: vmovups %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4297 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm10 = ymm11[0,1],ymm10[2,3],ymm11[4,5,6,7]
4298 ; AVX2-FP-NEXT: vshufps {{.*#+}} ymm10 = ymm10[2,3,0,1,6,7,4,5]
4299 ; AVX2-FP-NEXT: vbroadcastsd 464(%rdi), %ymm11
4300 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3,4,5],ymm11[6,7]
4301 ; AVX2-FP-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4302 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm8 = ymm9[0,1],ymm8[2,3],ymm9[4,5,6,7]
4303 ; AVX2-FP-NEXT: vshufps {{.*#+}} ymm8 = ymm8[2,3,0,1,6,7,4,5]
4304 ; AVX2-FP-NEXT: vbroadcastsd 656(%rdi), %ymm9
4305 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5],ymm9[6,7]
4306 ; AVX2-FP-NEXT: vmovups %ymm8, (%rsp) # 32-byte Spill
4307 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,3],ymm7[4,5,6,7]
4308 ; AVX2-FP-NEXT: vshufps {{.*#+}} ymm6 = ymm6[2,3,0,1,6,7,4,5]
4309 ; AVX2-FP-NEXT: vbroadcastsd 848(%rdi), %ymm7
4310 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5],ymm7[6,7]
4311 ; AVX2-FP-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4312 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1],ymm4[2,3],ymm5[4,5,6,7]
4313 ; AVX2-FP-NEXT: vshufps {{.*#+}} ymm4 = ymm4[2,3,0,1,6,7,4,5]
4314 ; AVX2-FP-NEXT: vbroadcastsd 1040(%rdi), %ymm5
4315 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5],ymm5[6,7]
4316 ; AVX2-FP-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4317 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3],ymm3[4,5,6,7]
4318 ; AVX2-FP-NEXT: vshufps {{.*#+}} ymm2 = ymm2[2,3,0,1,6,7,4,5]
4319 ; AVX2-FP-NEXT: vbroadcastsd 1232(%rdi), %ymm3
4320 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm13 = ymm2[0,1,2,3,4,5],ymm3[6,7]
4321 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7]
4322 ; AVX2-FP-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5]
4323 ; AVX2-FP-NEXT: vbroadcastsd 1424(%rdi), %ymm1
4324 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm11 = ymm0[0,1,2,3,4,5],ymm1[6,7]
4325 ; AVX2-FP-NEXT: vmovaps 112(%rdi), %xmm0
4326 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
4327 ; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
4328 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
4329 ; AVX2-FP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4330 ; AVX2-FP-NEXT: vmovaps 304(%rdi), %xmm0
4331 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
4332 ; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
4333 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
4334 ; AVX2-FP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4335 ; AVX2-FP-NEXT: vmovaps 496(%rdi), %xmm0
4336 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
4337 ; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
4338 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
4339 ; AVX2-FP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4340 ; AVX2-FP-NEXT: vmovaps 688(%rdi), %xmm0
4341 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
4342 ; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
4343 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm14 = ymm0[0,1,2,3],ymm1[4,5,6,7]
4344 ; AVX2-FP-NEXT: vmovaps 880(%rdi), %xmm0
4345 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
4346 ; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
4347 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm12 = ymm0[0,1,2,3],ymm1[4,5,6,7]
4348 ; AVX2-FP-NEXT: vmovaps 1072(%rdi), %xmm0
4349 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
4350 ; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
4351 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm10 = ymm0[0,1,2,3],ymm1[4,5,6,7]
4352 ; AVX2-FP-NEXT: vmovaps 1264(%rdi), %xmm0
4353 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
4354 ; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
4355 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm9 = ymm0[0,1,2,3],ymm1[4,5,6,7]
4356 ; AVX2-FP-NEXT: vmovaps 1456(%rdi), %xmm0
4357 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
4358 ; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
4359 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm8 = ymm0[0,1,2,3],ymm1[4,5,6,7]
4360 ; AVX2-FP-NEXT: vmovaps 1360(%rdi), %xmm0
4361 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
4362 ; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
4363 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm7 = ymm0[0,1,2,3],ymm1[4,5,6,7]
4364 ; AVX2-FP-NEXT: vmovaps 1168(%rdi), %xmm0
4365 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
4366 ; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
4367 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm6 = ymm0[0,1,2,3],ymm1[4,5,6,7]
4368 ; AVX2-FP-NEXT: vmovaps 976(%rdi), %xmm0
4369 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
4370 ; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
4371 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm5 = ymm0[0,1,2,3],ymm1[4,5,6,7]
4372 ; AVX2-FP-NEXT: vmovaps 784(%rdi), %xmm0
4373 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
4374 ; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
4375 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm4 = ymm0[0,1,2,3],ymm1[4,5,6,7]
4376 ; AVX2-FP-NEXT: vmovaps 592(%rdi), %xmm0
4377 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
4378 ; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
4379 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm3 = ymm0[0,1,2,3],ymm1[4,5,6,7]
4380 ; AVX2-FP-NEXT: vmovaps 400(%rdi), %xmm0
4381 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
4382 ; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
4383 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm2 = ymm0[0,1,2,3],ymm1[4,5,6,7]
4384 ; AVX2-FP-NEXT: vmovaps 208(%rdi), %xmm0
4385 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
4386 ; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
4387 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm1 = ymm0[0,1,2,3],ymm1[4,5,6,7]
4388 ; AVX2-FP-NEXT: vmovaps 16(%rdi), %xmm0
4389 ; AVX2-FP-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
4390 ; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm15 = mem[0,1,0,3]
4391 ; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm15[4,5,6,7]
4392 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
4393 ; AVX2-FP-NEXT: vmovaps %ymm15, 448(%rsi)
4394 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
4395 ; AVX2-FP-NEXT: vmovaps %ymm15, 384(%rsi)
4396 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
4397 ; AVX2-FP-NEXT: vmovaps %ymm15, 320(%rsi)
4398 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
4399 ; AVX2-FP-NEXT: vmovaps %ymm15, 256(%rsi)
4400 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
4401 ; AVX2-FP-NEXT: vmovaps %ymm15, 192(%rsi)
4402 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
4403 ; AVX2-FP-NEXT: vmovaps %ymm15, 128(%rsi)
4404 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
4405 ; AVX2-FP-NEXT: vmovaps %ymm15, 64(%rsi)
4406 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
4407 ; AVX2-FP-NEXT: vmovaps %ymm15, (%rsi)
4408 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
4409 ; AVX2-FP-NEXT: vmovaps %ymm15, 480(%rsi)
4410 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
4411 ; AVX2-FP-NEXT: vmovaps %ymm15, 416(%rsi)
4412 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
4413 ; AVX2-FP-NEXT: vmovaps %ymm15, 352(%rsi)
4414 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
4415 ; AVX2-FP-NEXT: vmovaps %ymm15, 288(%rsi)
4416 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
4417 ; AVX2-FP-NEXT: vmovaps %ymm15, 224(%rsi)
4418 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
4419 ; AVX2-FP-NEXT: vmovaps %ymm15, 160(%rsi)
4420 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
4421 ; AVX2-FP-NEXT: vmovaps %ymm15, 96(%rsi)
4422 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
4423 ; AVX2-FP-NEXT: vmovaps %ymm15, 32(%rsi)
4424 ; AVX2-FP-NEXT: vmovaps %ymm11, 448(%rdx)
4425 ; AVX2-FP-NEXT: vmovaps %ymm13, 384(%rdx)
4426 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
4427 ; AVX2-FP-NEXT: vmovaps %ymm11, 320(%rdx)
4428 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
4429 ; AVX2-FP-NEXT: vmovaps %ymm11, 256(%rdx)
4430 ; AVX2-FP-NEXT: vmovups (%rsp), %ymm11 # 32-byte Reload
4431 ; AVX2-FP-NEXT: vmovaps %ymm11, 192(%rdx)
4432 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
4433 ; AVX2-FP-NEXT: vmovaps %ymm11, 128(%rdx)
4434 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
4435 ; AVX2-FP-NEXT: vmovaps %ymm11, 64(%rdx)
4436 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
4437 ; AVX2-FP-NEXT: vmovaps %ymm11, (%rdx)
4438 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
4439 ; AVX2-FP-NEXT: vmovaps %ymm11, 480(%rdx)
4440 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
4441 ; AVX2-FP-NEXT: vmovaps %ymm11, 416(%rdx)
4442 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
4443 ; AVX2-FP-NEXT: vmovaps %ymm11, 352(%rdx)
4444 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
4445 ; AVX2-FP-NEXT: vmovaps %ymm11, 288(%rdx)
4446 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
4447 ; AVX2-FP-NEXT: vmovaps %ymm11, 224(%rdx)
4448 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
4449 ; AVX2-FP-NEXT: vmovaps %ymm11, 160(%rdx)
4450 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
4451 ; AVX2-FP-NEXT: vmovaps %ymm11, 96(%rdx)
4452 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
4453 ; AVX2-FP-NEXT: vmovaps %ymm11, 32(%rdx)
4454 ; AVX2-FP-NEXT: vmovaps %ymm0, (%rcx)
4455 ; AVX2-FP-NEXT: vmovaps %ymm1, 64(%rcx)
4456 ; AVX2-FP-NEXT: vmovaps %ymm2, 128(%rcx)
4457 ; AVX2-FP-NEXT: vmovaps %ymm3, 192(%rcx)
4458 ; AVX2-FP-NEXT: vmovaps %ymm4, 256(%rcx)
4459 ; AVX2-FP-NEXT: vmovaps %ymm5, 320(%rcx)
4460 ; AVX2-FP-NEXT: vmovaps %ymm6, 384(%rcx)
4461 ; AVX2-FP-NEXT: vmovaps %ymm7, 448(%rcx)
4462 ; AVX2-FP-NEXT: vmovaps %ymm8, 480(%rcx)
4463 ; AVX2-FP-NEXT: vmovaps %ymm9, 416(%rcx)
4464 ; AVX2-FP-NEXT: vmovaps %ymm10, 352(%rcx)
4465 ; AVX2-FP-NEXT: vmovaps %ymm12, 288(%rcx)
4466 ; AVX2-FP-NEXT: vmovaps %ymm14, 224(%rcx)
4467 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4468 ; AVX2-FP-NEXT: vmovaps %ymm0, 160(%rcx)
4469 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4470 ; AVX2-FP-NEXT: vmovaps %ymm0, 96(%rcx)
4471 ; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4472 ; AVX2-FP-NEXT: vmovaps %ymm0, 32(%rcx)
4473 ; AVX2-FP-NEXT: addq $968, %rsp # imm = 0x3C8
4474 ; AVX2-FP-NEXT: vzeroupper
4475 ; AVX2-FP-NEXT: retq
;
; AVX2-FCP-LABEL: load_i64_stride3_vf64:
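; Same instruction sequence as the AVX2-FP version above; the fast variable-shuffle tunings do not change this lowering.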
; AVX2-FCP: # %bb.0:
; AVX2-FCP-NEXT: subq $968, %rsp # imm = 0x3C8
; AVX2-FCP-NEXT: vmovaps 896(%rdi), %ymm2
; AVX2-FCP-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovaps 864(%rdi), %ymm3
; AVX2-FCP-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovaps 704(%rdi), %ymm4
; AVX2-FCP-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovaps 672(%rdi), %ymm5
; AVX2-FCP-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovaps 512(%rdi), %ymm6
; AVX2-FCP-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovaps 480(%rdi), %ymm7
; AVX2-FCP-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovaps 320(%rdi), %ymm8
; AVX2-FCP-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovaps 288(%rdi), %ymm9
; AVX2-FCP-NEXT: vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovaps 128(%rdi), %ymm10
; AVX2-FCP-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovaps 96(%rdi), %ymm1
; AVX2-FCP-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vinsertf128 $1, 160(%rdi), %ymm0, %ymm0
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,3,2,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm10[4,5,6,7]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FCP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vinsertf128 $1, 352(%rdi), %ymm0, %ymm0
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm1 = ymm9[0,3,2,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm8[4,5,6,7]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FCP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vinsertf128 $1, 544(%rdi), %ymm0, %ymm0
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm1 = ymm7[0,3,2,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm6[4,5,6,7]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FCP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vinsertf128 $1, 736(%rdi), %ymm0, %ymm0
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm1 = ymm5[0,3,2,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm4[4,5,6,7]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FCP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vinsertf128 $1, 928(%rdi), %ymm0, %ymm0
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm1 = ymm3[0,3,2,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FCP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovaps 1088(%rdi), %ymm1
; AVX2-FCP-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovaps 1056(%rdi), %ymm0
; AVX2-FCP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT: vinsertf128 $1, 1120(%rdi), %ymm0, %ymm1
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FCP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovaps 1280(%rdi), %ymm1
; AVX2-FCP-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovaps 1248(%rdi), %ymm0
; AVX2-FCP-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT: vinsertf128 $1, 1312(%rdi), %ymm0, %ymm1
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FCP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovaps 1472(%rdi), %ymm1
; AVX2-FCP-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovaps 1440(%rdi), %ymm0
; AVX2-FCP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT: vinsertf128 $1, 1504(%rdi), %ymm0, %ymm1
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FCP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovaps 32(%rdi), %ymm1
; AVX2-FCP-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovaps (%rdi), %ymm0
; AVX2-FCP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT: vinsertf128 $1, 64(%rdi), %ymm0, %ymm1
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FCP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovaps 224(%rdi), %ymm13
; AVX2-FCP-NEXT: vmovaps 192(%rdi), %ymm12
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm0 = ymm12[0,3,2,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm13[4,5,6,7]
; AVX2-FCP-NEXT: vinsertf128 $1, 256(%rdi), %ymm0, %ymm1
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FCP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovaps 416(%rdi), %ymm11
; AVX2-FCP-NEXT: vmovaps 384(%rdi), %ymm10
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm0 = ymm10[0,3,2,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm11[4,5,6,7]
; AVX2-FCP-NEXT: vinsertf128 $1, 448(%rdi), %ymm0, %ymm1
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FCP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovaps 608(%rdi), %ymm9
; AVX2-FCP-NEXT: vmovaps 576(%rdi), %ymm8
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm0 = ymm8[0,3,2,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm9[4,5,6,7]
; AVX2-FCP-NEXT: vinsertf128 $1, 640(%rdi), %ymm0, %ymm1
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FCP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovaps 800(%rdi), %ymm7
; AVX2-FCP-NEXT: vmovaps 768(%rdi), %ymm6
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm0 = ymm6[0,3,2,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm7[4,5,6,7]
; AVX2-FCP-NEXT: vinsertf128 $1, 832(%rdi), %ymm0, %ymm1
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FCP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovaps 992(%rdi), %ymm5
; AVX2-FCP-NEXT: vmovaps 960(%rdi), %ymm4
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm0 = ymm4[0,3,2,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm5[4,5,6,7]
; AVX2-FCP-NEXT: vinsertf128 $1, 1024(%rdi), %ymm0, %ymm1
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FCP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovaps 1184(%rdi), %ymm3
; AVX2-FCP-NEXT: vmovaps 1152(%rdi), %ymm2
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm0 = ymm2[0,3,2,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5,6,7]
; AVX2-FCP-NEXT: vinsertf128 $1, 1216(%rdi), %ymm0, %ymm1
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FCP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovaps 1376(%rdi), %ymm1
; AVX2-FCP-NEXT: vmovaps 1344(%rdi), %ymm0
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm15 = ymm0[0,3,2,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm15 = ymm15[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT: vinsertf128 $1, 1408(%rdi), %ymm0, %ymm14
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0,1,2,3,4,5],ymm14[6,7]
; AVX2-FCP-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
; AVX2-FCP-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
; AVX2-FCP-NEXT: # ymm14 = ymm14[0,1],mem[2,3],ymm14[4,5,6,7]
; AVX2-FCP-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,3,0,1,6,7,4,5]
; AVX2-FCP-NEXT: vbroadcastsd 176(%rdi), %ymm15
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5],ymm15[6,7]
; AVX2-FCP-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
; AVX2-FCP-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
; AVX2-FCP-NEXT: # ymm14 = ymm14[0,1],mem[2,3],ymm14[4,5,6,7]
; AVX2-FCP-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,3,0,1,6,7,4,5]
; AVX2-FCP-NEXT: vbroadcastsd 368(%rdi), %ymm15
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5],ymm15[6,7]
; AVX2-FCP-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
; AVX2-FCP-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
; AVX2-FCP-NEXT: # ymm14 = ymm14[0,1],mem[2,3],ymm14[4,5,6,7]
; AVX2-FCP-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,3,0,1,6,7,4,5]
; AVX2-FCP-NEXT: vbroadcastsd 560(%rdi), %ymm15
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5],ymm15[6,7]
; AVX2-FCP-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
; AVX2-FCP-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
; AVX2-FCP-NEXT: # ymm14 = ymm14[0,1],mem[2,3],ymm14[4,5,6,7]
; AVX2-FCP-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,3,0,1,6,7,4,5]
; AVX2-FCP-NEXT: vbroadcastsd 752(%rdi), %ymm15
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5],ymm15[6,7]
; AVX2-FCP-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
; AVX2-FCP-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
; AVX2-FCP-NEXT: # ymm14 = ymm14[0,1],mem[2,3],ymm14[4,5,6,7]
; AVX2-FCP-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,3,0,1,6,7,4,5]
; AVX2-FCP-NEXT: vbroadcastsd 944(%rdi), %ymm15
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5],ymm15[6,7]
; AVX2-FCP-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
; AVX2-FCP-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
; AVX2-FCP-NEXT: # ymm14 = ymm14[0,1],mem[2,3],ymm14[4,5,6,7]
; AVX2-FCP-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,3,0,1,6,7,4,5]
; AVX2-FCP-NEXT: vbroadcastsd 1136(%rdi), %ymm15
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5],ymm15[6,7]
; AVX2-FCP-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
; AVX2-FCP-NEXT: vblendps $12, (%rsp), %ymm14, %ymm14 # 32-byte Folded Reload
; AVX2-FCP-NEXT: # ymm14 = ymm14[0,1],mem[2,3],ymm14[4,5,6,7]
; AVX2-FCP-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,3,0,1,6,7,4,5]
; AVX2-FCP-NEXT: vbroadcastsd 1328(%rdi), %ymm15
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5],ymm15[6,7]
; AVX2-FCP-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
; AVX2-FCP-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
; AVX2-FCP-NEXT: # ymm14 = ymm14[0,1],mem[2,3],ymm14[4,5,6,7]
; AVX2-FCP-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,3,0,1,6,7,4,5]
; AVX2-FCP-NEXT: vbroadcastsd 1520(%rdi), %ymm15
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5],ymm15[6,7]
; AVX2-FCP-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
; AVX2-FCP-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
; AVX2-FCP-NEXT: # ymm14 = ymm14[0,1],mem[2,3],ymm14[4,5,6,7]
; AVX2-FCP-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,3,0,1,6,7,4,5]
; AVX2-FCP-NEXT: vbroadcastsd 80(%rdi), %ymm15
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5],ymm15[6,7]
; AVX2-FCP-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm12 = ymm13[0,1],ymm12[2,3],ymm13[4,5,6,7]
; AVX2-FCP-NEXT: vshufps {{.*#+}} ymm12 = ymm12[2,3,0,1,6,7,4,5]
; AVX2-FCP-NEXT: vbroadcastsd 272(%rdi), %ymm13
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm12 = ymm12[0,1,2,3,4,5],ymm13[6,7]
; AVX2-FCP-NEXT: vmovups %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm10 = ymm11[0,1],ymm10[2,3],ymm11[4,5,6,7]
; AVX2-FCP-NEXT: vshufps {{.*#+}} ymm10 = ymm10[2,3,0,1,6,7,4,5]
; AVX2-FCP-NEXT: vbroadcastsd 464(%rdi), %ymm11
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3,4,5],ymm11[6,7]
; AVX2-FCP-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm8 = ymm9[0,1],ymm8[2,3],ymm9[4,5,6,7]
; AVX2-FCP-NEXT: vshufps {{.*#+}} ymm8 = ymm8[2,3,0,1,6,7,4,5]
; AVX2-FCP-NEXT: vbroadcastsd 656(%rdi), %ymm9
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5],ymm9[6,7]
; AVX2-FCP-NEXT: vmovups %ymm8, (%rsp) # 32-byte Spill
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,3],ymm7[4,5,6,7]
; AVX2-FCP-NEXT: vshufps {{.*#+}} ymm6 = ymm6[2,3,0,1,6,7,4,5]
; AVX2-FCP-NEXT: vbroadcastsd 848(%rdi), %ymm7
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5],ymm7[6,7]
; AVX2-FCP-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1],ymm4[2,3],ymm5[4,5,6,7]
; AVX2-FCP-NEXT: vshufps {{.*#+}} ymm4 = ymm4[2,3,0,1,6,7,4,5]
; AVX2-FCP-NEXT: vbroadcastsd 1040(%rdi), %ymm5
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5],ymm5[6,7]
; AVX2-FCP-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3],ymm3[4,5,6,7]
; AVX2-FCP-NEXT: vshufps {{.*#+}} ymm2 = ymm2[2,3,0,1,6,7,4,5]
; AVX2-FCP-NEXT: vbroadcastsd 1232(%rdi), %ymm3
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm13 = ymm2[0,1,2,3,4,5],ymm3[6,7]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5]
; AVX2-FCP-NEXT: vbroadcastsd 1424(%rdi), %ymm1
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm11 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FCP-NEXT: vmovaps 112(%rdi), %xmm0
; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovaps 304(%rdi), %xmm0
; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovaps 496(%rdi), %xmm0
; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovaps 688(%rdi), %xmm0
; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm14 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT: vmovaps 880(%rdi), %xmm0
; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm12 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT: vmovaps 1072(%rdi), %xmm0
; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm10 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT: vmovaps 1264(%rdi), %xmm0
; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm9 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT: vmovaps 1456(%rdi), %xmm0
; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm8 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT: vmovaps 1360(%rdi), %xmm0
; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm7 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT: vmovaps 1168(%rdi), %xmm0
; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm6 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT: vmovaps 976(%rdi), %xmm0
; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm5 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT: vmovaps 784(%rdi), %xmm0
; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm4 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT: vmovaps 592(%rdi), %xmm0
; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm3 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT: vmovaps 400(%rdi), %xmm0
; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm2 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT: vmovaps 208(%rdi), %xmm0
; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm1 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT: vmovaps 16(%rdi), %xmm0
; AVX2-FCP-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm15 = mem[0,1,0,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm15[4,5,6,7]
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm15, 448(%rsi)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm15, 384(%rsi)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm15, 320(%rsi)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm15, 256(%rsi)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm15, 192(%rsi)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm15, 128(%rsi)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm15, 64(%rsi)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm15, (%rsi)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm15, 480(%rsi)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm15, 416(%rsi)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm15, 352(%rsi)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm15, 288(%rsi)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm15, 224(%rsi)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm15, 160(%rsi)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm15, 96(%rsi)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm15, 32(%rsi)
; AVX2-FCP-NEXT: vmovaps %ymm11, 448(%rdx)
; AVX2-FCP-NEXT: vmovaps %ymm13, 384(%rdx)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm11, 320(%rdx)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm11, 256(%rdx)
; AVX2-FCP-NEXT: vmovups (%rsp), %ymm11 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm11, 192(%rdx)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm11, 128(%rdx)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm11, 64(%rdx)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm11, (%rdx)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm11, 480(%rdx)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm11, 416(%rdx)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm11, 352(%rdx)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm11, 288(%rdx)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm11, 224(%rdx)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm11, 160(%rdx)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm11, 96(%rdx)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm11, 32(%rdx)
; AVX2-FCP-NEXT: vmovaps %ymm0, (%rcx)
; AVX2-FCP-NEXT: vmovaps %ymm1, 64(%rcx)
; AVX2-FCP-NEXT: vmovaps %ymm2, 128(%rcx)
; AVX2-FCP-NEXT: vmovaps %ymm3, 192(%rcx)
; AVX2-FCP-NEXT: vmovaps %ymm4, 256(%rcx)
; AVX2-FCP-NEXT: vmovaps %ymm5, 320(%rcx)
; AVX2-FCP-NEXT: vmovaps %ymm6, 384(%rcx)
; AVX2-FCP-NEXT: vmovaps %ymm7, 448(%rcx)
; AVX2-FCP-NEXT: vmovaps %ymm8, 480(%rcx)
; AVX2-FCP-NEXT: vmovaps %ymm9, 416(%rcx)
; AVX2-FCP-NEXT: vmovaps %ymm10, 352(%rcx)
; AVX2-FCP-NEXT: vmovaps %ymm12, 288(%rcx)
; AVX2-FCP-NEXT: vmovaps %ymm14, 224(%rcx)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm0, 160(%rcx)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm0, 96(%rcx)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm0, 32(%rcx)
; AVX2-FCP-NEXT: addq $968, %rsp # imm = 0x3C8
; AVX2-FCP-NEXT: vzeroupper
; AVX2-FCP-NEXT: retq
;
; AVX512-LABEL: load_i64_stride3_vf64:
; AVX512: # %bb.0:
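; With AVX512 each 512-bit result is gathered by a vpermt2q using a constant stride-3 index vector (e.g. [0,3,6,9,12,15,...]) across two source registers, then a second permute merges the remaining lanes from a third register, so only one value needs a stack spill.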
; AVX512-NEXT: vmovdqa64 1472(%rdi), %zmm0
; AVX512-NEXT: vmovdqa64 1408(%rdi), %zmm14
; AVX512-NEXT: vmovdqa64 1344(%rdi), %zmm22
; AVX512-NEXT: vmovdqa64 1280(%rdi), %zmm1
; AVX512-NEXT: vmovdqa64 1216(%rdi), %zmm13
; AVX512-NEXT: vmovdqa64 1152(%rdi), %zmm19
; AVX512-NEXT: vmovdqa64 1088(%rdi), %zmm2
; AVX512-NEXT: vmovdqa64 1024(%rdi), %zmm12
; AVX512-NEXT: vmovdqa64 960(%rdi), %zmm27
; AVX512-NEXT: vmovdqa64 896(%rdi), %zmm3
; AVX512-NEXT: vmovdqa64 832(%rdi), %zmm10
; AVX512-NEXT: vmovdqa64 768(%rdi), %zmm26
; AVX512-NEXT: vmovdqa64 704(%rdi), %zmm4
; AVX512-NEXT: vmovdqa64 640(%rdi), %zmm9
; AVX512-NEXT: vmovdqa64 576(%rdi), %zmm29
; AVX512-NEXT: vmovdqa64 512(%rdi), %zmm5
; AVX512-NEXT: vmovdqa64 448(%rdi), %zmm8
; AVX512-NEXT: vmovdqa64 384(%rdi), %zmm28
; AVX512-NEXT: vmovdqa64 256(%rdi), %zmm7
; AVX512-NEXT: vmovdqa64 (%rdi), %zmm25
; AVX512-NEXT: vmovdqa64 192(%rdi), %zmm30
; AVX512-NEXT: vpmovsxbq {{.*#+}} zmm11 = [0,3,6,9,12,15,0,0]
; AVX512-NEXT: vmovdqa64 %zmm30, %zmm6
; AVX512-NEXT: vpermt2q %zmm7, %zmm11, %zmm6
; AVX512-NEXT: vmovdqa64 %zmm28, %zmm15
; AVX512-NEXT: vpermt2q %zmm8, %zmm11, %zmm15
; AVX512-NEXT: vmovdqa64 %zmm29, %zmm16
; AVX512-NEXT: vpermt2q %zmm9, %zmm11, %zmm16
; AVX512-NEXT: vmovdqa64 %zmm26, %zmm17
; AVX512-NEXT: vpermt2q %zmm10, %zmm11, %zmm17
; AVX512-NEXT: vmovdqa64 %zmm27, %zmm18
; AVX512-NEXT: vpermt2q %zmm12, %zmm11, %zmm18
; AVX512-NEXT: vmovdqa64 %zmm19, %zmm20
; AVX512-NEXT: vpermt2q %zmm13, %zmm11, %zmm20
; AVX512-NEXT: vmovdqa64 %zmm22, %zmm23
; AVX512-NEXT: vpermt2q %zmm14, %zmm11, %zmm23
; AVX512-NEXT: vpmovsxbq {{.*#+}} zmm21 = [1,4,7,10,13,0,0,0]
; AVX512-NEXT: vmovdqa64 %zmm30, %zmm24
; AVX512-NEXT: vpermt2q %zmm7, %zmm21, %zmm24
; AVX512-NEXT: vpmovsxbq {{.*#+}} zmm31 = [10,13,0,3,6,0,0,0]
; AVX512-NEXT: vpermt2q %zmm30, %zmm31, %zmm7
; AVX512-NEXT: vmovdqa64 %zmm29, %zmm30
; AVX512-NEXT: vpermt2q %zmm9, %zmm21, %zmm30
; AVX512-NEXT: vpermt2q %zmm29, %zmm31, %zmm9
; AVX512-NEXT: vmovdqa64 %zmm28, %zmm29
; AVX512-NEXT: vpermt2q %zmm8, %zmm21, %zmm29
; AVX512-NEXT: vpermt2q %zmm28, %zmm31, %zmm8
; AVX512-NEXT: vmovdqa64 %zmm27, %zmm28
; AVX512-NEXT: vpermt2q %zmm12, %zmm21, %zmm28
; AVX512-NEXT: vpermt2q %zmm27, %zmm31, %zmm12
; AVX512-NEXT: vmovdqa64 %zmm26, %zmm27
; AVX512-NEXT: vpermt2q %zmm10, %zmm21, %zmm27
; AVX512-NEXT: vpermt2q %zmm26, %zmm31, %zmm10
; AVX512-NEXT: vmovdqa64 %zmm22, %zmm26
; AVX512-NEXT: vpermt2q %zmm14, %zmm21, %zmm26
; AVX512-NEXT: vpermt2q %zmm22, %zmm31, %zmm14
; AVX512-NEXT: vmovdqa64 %zmm19, %zmm22
; AVX512-NEXT: vpermt2q %zmm13, %zmm21, %zmm22
; AVX512-NEXT: vpermt2q %zmm19, %zmm31, %zmm13
; AVX512-NEXT: vmovdqa64 64(%rdi), %zmm19
; AVX512-NEXT: vpermi2q %zmm19, %zmm25, %zmm11
; AVX512-NEXT: vpermi2q %zmm19, %zmm25, %zmm21
; AVX512-NEXT: vpermt2q %zmm25, %zmm31, %zmm19
; AVX512-NEXT: vmovdqa64 320(%rdi), %zmm25
; AVX512-NEXT: vpmovsxbq {{.*#+}} zmm31 = [0,1,2,3,4,5,10,13]
; AVX512-NEXT: vpermt2q %zmm25, %zmm31, %zmm6
; AVX512-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT: vpermt2q %zmm5, %zmm31, %zmm15
; AVX512-NEXT: vpermt2q %zmm4, %zmm31, %zmm16
; AVX512-NEXT: vpermt2q %zmm3, %zmm31, %zmm17
; AVX512-NEXT: vpermt2q %zmm2, %zmm31, %zmm18
; AVX512-NEXT: vpermt2q %zmm1, %zmm31, %zmm20
; AVX512-NEXT: vpermt2q %zmm0, %zmm31, %zmm23
; AVX512-NEXT: vmovdqa64 128(%rdi), %zmm6
; AVX512-NEXT: vpermt2q %zmm6, %zmm31, %zmm11
; AVX512-NEXT: vpmovsxbq {{.*#+}} zmm31 = [0,1,2,3,4,8,11,14]
; AVX512-NEXT: vpermt2q %zmm1, %zmm31, %zmm22
; AVX512-NEXT: vpermt2q %zmm25, %zmm31, %zmm24
; AVX512-NEXT: vpermt2q %zmm4, %zmm31, %zmm30
; AVX512-NEXT: vpermt2q %zmm5, %zmm31, %zmm29
; AVX512-NEXT: vpermt2q %zmm2, %zmm31, %zmm28
; AVX512-NEXT: vpermt2q %zmm3, %zmm31, %zmm27
; AVX512-NEXT: vpermt2q %zmm0, %zmm31, %zmm26
; AVX512-NEXT: vpermt2q %zmm6, %zmm31, %zmm21
; AVX512-NEXT: vpmovsxbq {{.*#+}} zmm31 = [0,1,2,3,4,9,12,15]
; AVX512-NEXT: vpermt2q %zmm25, %zmm31, %zmm7
; AVX512-NEXT: vpermt2q %zmm4, %zmm31, %zmm9
; AVX512-NEXT: vpermt2q %zmm5, %zmm31, %zmm8
; AVX512-NEXT: vpermt2q %zmm2, %zmm31, %zmm12
; AVX512-NEXT: vpermt2q %zmm3, %zmm31, %zmm10
; AVX512-NEXT: vpermt2q %zmm0, %zmm31, %zmm14
; AVX512-NEXT: vpermt2q %zmm1, %zmm31, %zmm13
; AVX512-NEXT: vpermt2q %zmm6, %zmm31, %zmm19
; AVX512-NEXT: vmovdqa64 %zmm23, 448(%rsi)
; AVX512-NEXT: vmovdqa64 %zmm20, 384(%rsi)
; AVX512-NEXT: vmovdqa64 %zmm18, 320(%rsi)
; AVX512-NEXT: vmovdqa64 %zmm17, 256(%rsi)
; AVX512-NEXT: vmovdqa64 %zmm16, 192(%rsi)
; AVX512-NEXT: vmovdqa64 %zmm15, 128(%rsi)
; AVX512-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-NEXT: vmovaps %zmm0, 64(%rsi)
; AVX512-NEXT: vmovdqa64 %zmm11, (%rsi)
; AVX512-NEXT: vmovdqa64 %zmm26, 448(%rdx)
; AVX512-NEXT: vmovdqa64 %zmm27, 256(%rdx)
; AVX512-NEXT: vmovdqa64 %zmm28, 320(%rdx)
; AVX512-NEXT: vmovdqa64 %zmm29, 128(%rdx)
; AVX512-NEXT: vmovdqa64 %zmm30, 192(%rdx)
; AVX512-NEXT: vmovdqa64 %zmm21, (%rdx)
; AVX512-NEXT: vmovdqa64 %zmm24, 64(%rdx)
; AVX512-NEXT: vmovdqa64 %zmm22, 384(%rdx)
; AVX512-NEXT: vmovdqa64 %zmm13, 384(%rcx)
; AVX512-NEXT: vmovdqa64 %zmm14, 448(%rcx)
; AVX512-NEXT: vmovdqa64 %zmm10, 256(%rcx)
; AVX512-NEXT: vmovdqa64 %zmm12, 320(%rcx)
; AVX512-NEXT: vmovdqa64 %zmm8, 128(%rcx)
; AVX512-NEXT: vmovdqa64 %zmm9, 192(%rcx)
; AVX512-NEXT: vmovdqa64 %zmm19, (%rcx)
; AVX512-NEXT: vmovdqa64 %zmm7, 64(%rcx)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
;
; AVX512-FCP-LABEL: load_i64_stride3_vf64:
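; Same vpermt2q-based sequence as the AVX512 version above.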
; AVX512-FCP: # %bb.0:
; AVX512-FCP-NEXT: vmovdqa64 1472(%rdi), %zmm0
; AVX512-FCP-NEXT: vmovdqa64 1408(%rdi), %zmm14
; AVX512-FCP-NEXT: vmovdqa64 1344(%rdi), %zmm22
; AVX512-FCP-NEXT: vmovdqa64 1280(%rdi), %zmm1
; AVX512-FCP-NEXT: vmovdqa64 1216(%rdi), %zmm13
; AVX512-FCP-NEXT: vmovdqa64 1152(%rdi), %zmm19
; AVX512-FCP-NEXT: vmovdqa64 1088(%rdi), %zmm2
; AVX512-FCP-NEXT: vmovdqa64 1024(%rdi), %zmm12
; AVX512-FCP-NEXT: vmovdqa64 960(%rdi), %zmm27
; AVX512-FCP-NEXT: vmovdqa64 896(%rdi), %zmm3
; AVX512-FCP-NEXT: vmovdqa64 832(%rdi), %zmm10
; AVX512-FCP-NEXT: vmovdqa64 768(%rdi), %zmm26
; AVX512-FCP-NEXT: vmovdqa64 704(%rdi), %zmm4
; AVX512-FCP-NEXT: vmovdqa64 640(%rdi), %zmm9
; AVX512-FCP-NEXT: vmovdqa64 576(%rdi), %zmm29
; AVX512-FCP-NEXT: vmovdqa64 512(%rdi), %zmm5
; AVX512-FCP-NEXT: vmovdqa64 448(%rdi), %zmm8
; AVX512-FCP-NEXT: vmovdqa64 384(%rdi), %zmm28
; AVX512-FCP-NEXT: vmovdqa64 256(%rdi), %zmm7
; AVX512-FCP-NEXT: vmovdqa64 (%rdi), %zmm25
; AVX512-FCP-NEXT: vmovdqa64 192(%rdi), %zmm30
; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm11 = [0,3,6,9,12,15,0,0]
; AVX512-FCP-NEXT: vmovdqa64 %zmm30, %zmm6
; AVX512-FCP-NEXT: vpermt2q %zmm7, %zmm11, %zmm6
; AVX512-FCP-NEXT: vmovdqa64 %zmm28, %zmm15
; AVX512-FCP-NEXT: vpermt2q %zmm8, %zmm11, %zmm15
; AVX512-FCP-NEXT: vmovdqa64 %zmm29, %zmm16
; AVX512-FCP-NEXT: vpermt2q %zmm9, %zmm11, %zmm16
; AVX512-FCP-NEXT: vmovdqa64 %zmm26, %zmm17
; AVX512-FCP-NEXT: vpermt2q %zmm10, %zmm11, %zmm17
; AVX512-FCP-NEXT: vmovdqa64 %zmm27, %zmm18
; AVX512-FCP-NEXT: vpermt2q %zmm12, %zmm11, %zmm18
; AVX512-FCP-NEXT: vmovdqa64 %zmm19, %zmm20
; AVX512-FCP-NEXT: vpermt2q %zmm13, %zmm11, %zmm20
; AVX512-FCP-NEXT: vmovdqa64 %zmm22, %zmm23
; AVX512-FCP-NEXT: vpermt2q %zmm14, %zmm11, %zmm23
; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm21 = [1,4,7,10,13,0,0,0]
; AVX512-FCP-NEXT: vmovdqa64 %zmm30, %zmm24
; AVX512-FCP-NEXT: vpermt2q %zmm7, %zmm21, %zmm24
; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm31 = [10,13,0,3,6,0,0,0]
; AVX512-FCP-NEXT: vpermt2q %zmm30, %zmm31, %zmm7
; AVX512-FCP-NEXT: vmovdqa64 %zmm29, %zmm30
; AVX512-FCP-NEXT: vpermt2q %zmm9, %zmm21, %zmm30
; AVX512-FCP-NEXT: vpermt2q %zmm29, %zmm31, %zmm9
; AVX512-FCP-NEXT: vmovdqa64 %zmm28, %zmm29
; AVX512-FCP-NEXT: vpermt2q %zmm8, %zmm21, %zmm29
; AVX512-FCP-NEXT: vpermt2q %zmm28, %zmm31, %zmm8
; AVX512-FCP-NEXT: vmovdqa64 %zmm27, %zmm28
; AVX512-FCP-NEXT: vpermt2q %zmm12, %zmm21, %zmm28
; AVX512-FCP-NEXT: vpermt2q %zmm27, %zmm31, %zmm12
; AVX512-FCP-NEXT: vmovdqa64 %zmm26, %zmm27
; AVX512-FCP-NEXT: vpermt2q %zmm10, %zmm21, %zmm27
; AVX512-FCP-NEXT: vpermt2q %zmm26, %zmm31, %zmm10
; AVX512-FCP-NEXT: vmovdqa64 %zmm22, %zmm26
; AVX512-FCP-NEXT: vpermt2q %zmm14, %zmm21, %zmm26
; AVX512-FCP-NEXT: vpermt2q %zmm22, %zmm31, %zmm14
; AVX512-FCP-NEXT: vmovdqa64 %zmm19, %zmm22
; AVX512-FCP-NEXT: vpermt2q %zmm13, %zmm21, %zmm22
; AVX512-FCP-NEXT: vpermt2q %zmm19, %zmm31, %zmm13
; AVX512-FCP-NEXT: vmovdqa64 64(%rdi), %zmm19
; AVX512-FCP-NEXT: vpermi2q %zmm19, %zmm25, %zmm11
; AVX512-FCP-NEXT: vpermi2q %zmm19, %zmm25, %zmm21
; AVX512-FCP-NEXT: vpermt2q %zmm25, %zmm31, %zmm19
; AVX512-FCP-NEXT: vmovdqa64 320(%rdi), %zmm25
; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm31 = [0,1,2,3,4,5,10,13]
; AVX512-FCP-NEXT: vpermt2q %zmm25, %zmm31, %zmm6
; AVX512-FCP-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT: vpermt2q %zmm5, %zmm31, %zmm15
; AVX512-FCP-NEXT: vpermt2q %zmm4, %zmm31, %zmm16
; AVX512-FCP-NEXT: vpermt2q %zmm3, %zmm31, %zmm17
; AVX512-FCP-NEXT: vpermt2q %zmm2, %zmm31, %zmm18
; AVX512-FCP-NEXT: vpermt2q %zmm1, %zmm31, %zmm20
; AVX512-FCP-NEXT: vpermt2q %zmm0, %zmm31, %zmm23
; AVX512-FCP-NEXT: vmovdqa64 128(%rdi), %zmm6
; AVX512-FCP-NEXT: vpermt2q %zmm6, %zmm31, %zmm11
; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm31 = [0,1,2,3,4,8,11,14]
; AVX512-FCP-NEXT: vpermt2q %zmm1, %zmm31, %zmm22
; AVX512-FCP-NEXT: vpermt2q %zmm25, %zmm31, %zmm24
; AVX512-FCP-NEXT: vpermt2q %zmm4, %zmm31, %zmm30
; AVX512-FCP-NEXT: vpermt2q %zmm5, %zmm31, %zmm29
; AVX512-FCP-NEXT: vpermt2q %zmm2, %zmm31, %zmm28
; AVX512-FCP-NEXT: vpermt2q %zmm3, %zmm31, %zmm27
; AVX512-FCP-NEXT: vpermt2q %zmm0, %zmm31, %zmm26
; AVX512-FCP-NEXT: vpermt2q %zmm6, %zmm31, %zmm21
; AVX512-FCP-NEXT: vpmovsxbq {{.*#+}} zmm31 = [0,1,2,3,4,9,12,15]
; AVX512-FCP-NEXT: vpermt2q %zmm25, %zmm31, %zmm7
; AVX512-FCP-NEXT: vpermt2q %zmm4, %zmm31, %zmm9
; AVX512-FCP-NEXT: vpermt2q %zmm5, %zmm31, %zmm8
; AVX512-FCP-NEXT: vpermt2q %zmm2, %zmm31, %zmm12
; AVX512-FCP-NEXT: vpermt2q %zmm3, %zmm31, %zmm10
; AVX512-FCP-NEXT: vpermt2q %zmm0, %zmm31, %zmm14
; AVX512-FCP-NEXT: vpermt2q %zmm1, %zmm31, %zmm13
; AVX512-FCP-NEXT: vpermt2q %zmm6, %zmm31, %zmm19
; AVX512-FCP-NEXT: vmovdqa64 %zmm23, 448(%rsi)
; AVX512-FCP-NEXT: vmovdqa64 %zmm20, 384(%rsi)
; AVX512-FCP-NEXT: vmovdqa64 %zmm18, 320(%rsi)
; AVX512-FCP-NEXT: vmovdqa64 %zmm17, 256(%rsi)
; AVX512-FCP-NEXT: vmovdqa64 %zmm16, 192(%rsi)
; AVX512-FCP-NEXT: vmovdqa64 %zmm15, 128(%rsi)
; AVX512-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-FCP-NEXT: vmovaps %zmm0, 64(%rsi)
; AVX512-FCP-NEXT: vmovdqa64 %zmm11, (%rsi)
; AVX512-FCP-NEXT: vmovdqa64 %zmm26, 448(%rdx)
; AVX512-FCP-NEXT: vmovdqa64 %zmm27, 256(%rdx)
; AVX512-FCP-NEXT: vmovdqa64 %zmm28, 320(%rdx)
; AVX512-FCP-NEXT: vmovdqa64 %zmm29, 128(%rdx)
; AVX512-FCP-NEXT: vmovdqa64 %zmm30, 192(%rdx)
; AVX512-FCP-NEXT: vmovdqa64 %zmm21, (%rdx)
; AVX512-FCP-NEXT: vmovdqa64 %zmm24, 64(%rdx)
; AVX512-FCP-NEXT: vmovdqa64 %zmm22, 384(%rdx)
; AVX512-FCP-NEXT: vmovdqa64 %zmm13, 384(%rcx)
; AVX512-FCP-NEXT: vmovdqa64 %zmm14, 448(%rcx)
; AVX512-FCP-NEXT: vmovdqa64 %zmm10, 256(%rcx)
; AVX512-FCP-NEXT: vmovdqa64 %zmm12, 320(%rcx)
; AVX512-FCP-NEXT: vmovdqa64 %zmm8, 128(%rcx)
; AVX512-FCP-NEXT: vmovdqa64 %zmm9, 192(%rcx)
; AVX512-FCP-NEXT: vmovdqa64 %zmm19, (%rcx)
; AVX512-FCP-NEXT: vmovdqa64 %zmm7, 64(%rcx)
; AVX512-FCP-NEXT: vzeroupper
; AVX512-FCP-NEXT: retq
; AVX512DQ-LABEL: load_i64_stride3_vf64:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovdqa64 1472(%rdi), %zmm0
; AVX512DQ-NEXT: vmovdqa64 1408(%rdi), %zmm14
; AVX512DQ-NEXT: vmovdqa64 1344(%rdi), %zmm22
; AVX512DQ-NEXT: vmovdqa64 1280(%rdi), %zmm1
; AVX512DQ-NEXT: vmovdqa64 1216(%rdi), %zmm13
; AVX512DQ-NEXT: vmovdqa64 1152(%rdi), %zmm19
; AVX512DQ-NEXT: vmovdqa64 1088(%rdi), %zmm2
; AVX512DQ-NEXT: vmovdqa64 1024(%rdi), %zmm12
; AVX512DQ-NEXT: vmovdqa64 960(%rdi), %zmm27
; AVX512DQ-NEXT: vmovdqa64 896(%rdi), %zmm3
; AVX512DQ-NEXT: vmovdqa64 832(%rdi), %zmm10
; AVX512DQ-NEXT: vmovdqa64 768(%rdi), %zmm26
; AVX512DQ-NEXT: vmovdqa64 704(%rdi), %zmm4
; AVX512DQ-NEXT: vmovdqa64 640(%rdi), %zmm9
; AVX512DQ-NEXT: vmovdqa64 576(%rdi), %zmm29
; AVX512DQ-NEXT: vmovdqa64 512(%rdi), %zmm5
; AVX512DQ-NEXT: vmovdqa64 448(%rdi), %zmm8
; AVX512DQ-NEXT: vmovdqa64 384(%rdi), %zmm28
; AVX512DQ-NEXT: vmovdqa64 256(%rdi), %zmm7
; AVX512DQ-NEXT: vmovdqa64 (%rdi), %zmm25
; AVX512DQ-NEXT: vmovdqa64 192(%rdi), %zmm30
; AVX512DQ-NEXT: vpmovsxbq {{.*#+}} zmm11 = [0,3,6,9,12,15,0,0]
; AVX512DQ-NEXT: vmovdqa64 %zmm30, %zmm6
; AVX512DQ-NEXT: vpermt2q %zmm7, %zmm11, %zmm6
; AVX512DQ-NEXT: vmovdqa64 %zmm28, %zmm15
; AVX512DQ-NEXT: vpermt2q %zmm8, %zmm11, %zmm15
; AVX512DQ-NEXT: vmovdqa64 %zmm29, %zmm16
; AVX512DQ-NEXT: vpermt2q %zmm9, %zmm11, %zmm16
; AVX512DQ-NEXT: vmovdqa64 %zmm26, %zmm17
; AVX512DQ-NEXT: vpermt2q %zmm10, %zmm11, %zmm17
; AVX512DQ-NEXT: vmovdqa64 %zmm27, %zmm18
; AVX512DQ-NEXT: vpermt2q %zmm12, %zmm11, %zmm18
; AVX512DQ-NEXT: vmovdqa64 %zmm19, %zmm20
; AVX512DQ-NEXT: vpermt2q %zmm13, %zmm11, %zmm20
; AVX512DQ-NEXT: vmovdqa64 %zmm22, %zmm23
; AVX512DQ-NEXT: vpermt2q %zmm14, %zmm11, %zmm23
; AVX512DQ-NEXT: vpmovsxbq {{.*#+}} zmm21 = [1,4,7,10,13,0,0,0]
; AVX512DQ-NEXT: vmovdqa64 %zmm30, %zmm24
; AVX512DQ-NEXT: vpermt2q %zmm7, %zmm21, %zmm24
; AVX512DQ-NEXT: vpmovsxbq {{.*#+}} zmm31 = [10,13,0,3,6,0,0,0]
; AVX512DQ-NEXT: vpermt2q %zmm30, %zmm31, %zmm7
; AVX512DQ-NEXT: vmovdqa64 %zmm29, %zmm30
; AVX512DQ-NEXT: vpermt2q %zmm9, %zmm21, %zmm30
; AVX512DQ-NEXT: vpermt2q %zmm29, %zmm31, %zmm9
; AVX512DQ-NEXT: vmovdqa64 %zmm28, %zmm29
; AVX512DQ-NEXT: vpermt2q %zmm8, %zmm21, %zmm29
; AVX512DQ-NEXT: vpermt2q %zmm28, %zmm31, %zmm8
; AVX512DQ-NEXT: vmovdqa64 %zmm27, %zmm28
; AVX512DQ-NEXT: vpermt2q %zmm12, %zmm21, %zmm28
; AVX512DQ-NEXT: vpermt2q %zmm27, %zmm31, %zmm12
; AVX512DQ-NEXT: vmovdqa64 %zmm26, %zmm27
; AVX512DQ-NEXT: vpermt2q %zmm10, %zmm21, %zmm27
; AVX512DQ-NEXT: vpermt2q %zmm26, %zmm31, %zmm10
; AVX512DQ-NEXT: vmovdqa64 %zmm22, %zmm26
; AVX512DQ-NEXT: vpermt2q %zmm14, %zmm21, %zmm26
; AVX512DQ-NEXT: vpermt2q %zmm22, %zmm31, %zmm14
; AVX512DQ-NEXT: vmovdqa64 %zmm19, %zmm22
; AVX512DQ-NEXT: vpermt2q %zmm13, %zmm21, %zmm22
; AVX512DQ-NEXT: vpermt2q %zmm19, %zmm31, %zmm13
; AVX512DQ-NEXT: vmovdqa64 64(%rdi), %zmm19
; AVX512DQ-NEXT: vpermi2q %zmm19, %zmm25, %zmm11
; AVX512DQ-NEXT: vpermi2q %zmm19, %zmm25, %zmm21
; AVX512DQ-NEXT: vpermt2q %zmm25, %zmm31, %zmm19
; AVX512DQ-NEXT: vmovdqa64 320(%rdi), %zmm25
; AVX512DQ-NEXT: vpmovsxbq {{.*#+}} zmm31 = [0,1,2,3,4,5,10,13]
; AVX512DQ-NEXT: vpermt2q %zmm25, %zmm31, %zmm6
; AVX512DQ-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT: vpermt2q %zmm5, %zmm31, %zmm15
; AVX512DQ-NEXT: vpermt2q %zmm4, %zmm31, %zmm16
; AVX512DQ-NEXT: vpermt2q %zmm3, %zmm31, %zmm17
; AVX512DQ-NEXT: vpermt2q %zmm2, %zmm31, %zmm18
; AVX512DQ-NEXT: vpermt2q %zmm1, %zmm31, %zmm20
; AVX512DQ-NEXT: vpermt2q %zmm0, %zmm31, %zmm23
; AVX512DQ-NEXT: vmovdqa64 128(%rdi), %zmm6
; AVX512DQ-NEXT: vpermt2q %zmm6, %zmm31, %zmm11
; AVX512DQ-NEXT: vpmovsxbq {{.*#+}} zmm31 = [0,1,2,3,4,8,11,14]
; AVX512DQ-NEXT: vpermt2q %zmm1, %zmm31, %zmm22
; AVX512DQ-NEXT: vpermt2q %zmm25, %zmm31, %zmm24
; AVX512DQ-NEXT: vpermt2q %zmm4, %zmm31, %zmm30
; AVX512DQ-NEXT: vpermt2q %zmm5, %zmm31, %zmm29
; AVX512DQ-NEXT: vpermt2q %zmm2, %zmm31, %zmm28
; AVX512DQ-NEXT: vpermt2q %zmm3, %zmm31, %zmm27
; AVX512DQ-NEXT: vpermt2q %zmm0, %zmm31, %zmm26
; AVX512DQ-NEXT: vpermt2q %zmm6, %zmm31, %zmm21
; AVX512DQ-NEXT: vpmovsxbq {{.*#+}} zmm31 = [0,1,2,3,4,9,12,15]
; AVX512DQ-NEXT: vpermt2q %zmm25, %zmm31, %zmm7
; AVX512DQ-NEXT: vpermt2q %zmm4, %zmm31, %zmm9
; AVX512DQ-NEXT: vpermt2q %zmm5, %zmm31, %zmm8
; AVX512DQ-NEXT: vpermt2q %zmm2, %zmm31, %zmm12
; AVX512DQ-NEXT: vpermt2q %zmm3, %zmm31, %zmm10
; AVX512DQ-NEXT: vpermt2q %zmm0, %zmm31, %zmm14
; AVX512DQ-NEXT: vpermt2q %zmm1, %zmm31, %zmm13
; AVX512DQ-NEXT: vpermt2q %zmm6, %zmm31, %zmm19
; AVX512DQ-NEXT: vmovdqa64 %zmm23, 448(%rsi)
; AVX512DQ-NEXT: vmovdqa64 %zmm20, 384(%rsi)
; AVX512DQ-NEXT: vmovdqa64 %zmm18, 320(%rsi)
; AVX512DQ-NEXT: vmovdqa64 %zmm17, 256(%rsi)
; AVX512DQ-NEXT: vmovdqa64 %zmm16, 192(%rsi)
; AVX512DQ-NEXT: vmovdqa64 %zmm15, 128(%rsi)
; AVX512DQ-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-NEXT: vmovaps %zmm0, 64(%rsi)
; AVX512DQ-NEXT: vmovdqa64 %zmm11, (%rsi)
; AVX512DQ-NEXT: vmovdqa64 %zmm26, 448(%rdx)
; AVX512DQ-NEXT: vmovdqa64 %zmm27, 256(%rdx)
; AVX512DQ-NEXT: vmovdqa64 %zmm28, 320(%rdx)
; AVX512DQ-NEXT: vmovdqa64 %zmm29, 128(%rdx)
; AVX512DQ-NEXT: vmovdqa64 %zmm30, 192(%rdx)
; AVX512DQ-NEXT: vmovdqa64 %zmm21, (%rdx)
; AVX512DQ-NEXT: vmovdqa64 %zmm24, 64(%rdx)
; AVX512DQ-NEXT: vmovdqa64 %zmm22, 384(%rdx)
; AVX512DQ-NEXT: vmovdqa64 %zmm13, 384(%rcx)
; AVX512DQ-NEXT: vmovdqa64 %zmm14, 448(%rcx)
; AVX512DQ-NEXT: vmovdqa64 %zmm10, 256(%rcx)
; AVX512DQ-NEXT: vmovdqa64 %zmm12, 320(%rcx)
; AVX512DQ-NEXT: vmovdqa64 %zmm8, 128(%rcx)
; AVX512DQ-NEXT: vmovdqa64 %zmm9, 192(%rcx)
; AVX512DQ-NEXT: vmovdqa64 %zmm19, (%rcx)
; AVX512DQ-NEXT: vmovdqa64 %zmm7, 64(%rcx)
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512DQ-FCP-LABEL: load_i64_stride3_vf64:
; AVX512DQ-FCP: # %bb.0:
; AVX512DQ-FCP-NEXT: vmovdqa64 1472(%rdi), %zmm0
; AVX512DQ-FCP-NEXT: vmovdqa64 1408(%rdi), %zmm14
; AVX512DQ-FCP-NEXT: vmovdqa64 1344(%rdi), %zmm22
; AVX512DQ-FCP-NEXT: vmovdqa64 1280(%rdi), %zmm1
; AVX512DQ-FCP-NEXT: vmovdqa64 1216(%rdi), %zmm13
; AVX512DQ-FCP-NEXT: vmovdqa64 1152(%rdi), %zmm19
; AVX512DQ-FCP-NEXT: vmovdqa64 1088(%rdi), %zmm2
; AVX512DQ-FCP-NEXT: vmovdqa64 1024(%rdi), %zmm12
; AVX512DQ-FCP-NEXT: vmovdqa64 960(%rdi), %zmm27
; AVX512DQ-FCP-NEXT: vmovdqa64 896(%rdi), %zmm3
; AVX512DQ-FCP-NEXT: vmovdqa64 832(%rdi), %zmm10
; AVX512DQ-FCP-NEXT: vmovdqa64 768(%rdi), %zmm26
; AVX512DQ-FCP-NEXT: vmovdqa64 704(%rdi), %zmm4
; AVX512DQ-FCP-NEXT: vmovdqa64 640(%rdi), %zmm9
; AVX512DQ-FCP-NEXT: vmovdqa64 576(%rdi), %zmm29
; AVX512DQ-FCP-NEXT: vmovdqa64 512(%rdi), %zmm5
; AVX512DQ-FCP-NEXT: vmovdqa64 448(%rdi), %zmm8
; AVX512DQ-FCP-NEXT: vmovdqa64 384(%rdi), %zmm28
; AVX512DQ-FCP-NEXT: vmovdqa64 256(%rdi), %zmm7
; AVX512DQ-FCP-NEXT: vmovdqa64 (%rdi), %zmm25
; AVX512DQ-FCP-NEXT: vmovdqa64 192(%rdi), %zmm30
; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm11 = [0,3,6,9,12,15,0,0]
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm30, %zmm6
; AVX512DQ-FCP-NEXT: vpermt2q %zmm7, %zmm11, %zmm6
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm28, %zmm15
; AVX512DQ-FCP-NEXT: vpermt2q %zmm8, %zmm11, %zmm15
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm29, %zmm16
; AVX512DQ-FCP-NEXT: vpermt2q %zmm9, %zmm11, %zmm16
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm26, %zmm17
; AVX512DQ-FCP-NEXT: vpermt2q %zmm10, %zmm11, %zmm17
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm27, %zmm18
; AVX512DQ-FCP-NEXT: vpermt2q %zmm12, %zmm11, %zmm18
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm19, %zmm20
; AVX512DQ-FCP-NEXT: vpermt2q %zmm13, %zmm11, %zmm20
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm22, %zmm23
; AVX512DQ-FCP-NEXT: vpermt2q %zmm14, %zmm11, %zmm23
; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm21 = [1,4,7,10,13,0,0,0]
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm30, %zmm24
; AVX512DQ-FCP-NEXT: vpermt2q %zmm7, %zmm21, %zmm24
; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm31 = [10,13,0,3,6,0,0,0]
; AVX512DQ-FCP-NEXT: vpermt2q %zmm30, %zmm31, %zmm7
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm29, %zmm30
; AVX512DQ-FCP-NEXT: vpermt2q %zmm9, %zmm21, %zmm30
; AVX512DQ-FCP-NEXT: vpermt2q %zmm29, %zmm31, %zmm9
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm28, %zmm29
; AVX512DQ-FCP-NEXT: vpermt2q %zmm8, %zmm21, %zmm29
; AVX512DQ-FCP-NEXT: vpermt2q %zmm28, %zmm31, %zmm8
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm27, %zmm28
; AVX512DQ-FCP-NEXT: vpermt2q %zmm12, %zmm21, %zmm28
; AVX512DQ-FCP-NEXT: vpermt2q %zmm27, %zmm31, %zmm12
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm26, %zmm27
; AVX512DQ-FCP-NEXT: vpermt2q %zmm10, %zmm21, %zmm27
; AVX512DQ-FCP-NEXT: vpermt2q %zmm26, %zmm31, %zmm10
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm22, %zmm26
; AVX512DQ-FCP-NEXT: vpermt2q %zmm14, %zmm21, %zmm26
; AVX512DQ-FCP-NEXT: vpermt2q %zmm22, %zmm31, %zmm14
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm19, %zmm22
; AVX512DQ-FCP-NEXT: vpermt2q %zmm13, %zmm21, %zmm22
; AVX512DQ-FCP-NEXT: vpermt2q %zmm19, %zmm31, %zmm13
; AVX512DQ-FCP-NEXT: vmovdqa64 64(%rdi), %zmm19
; AVX512DQ-FCP-NEXT: vpermi2q %zmm19, %zmm25, %zmm11
; AVX512DQ-FCP-NEXT: vpermi2q %zmm19, %zmm25, %zmm21
; AVX512DQ-FCP-NEXT: vpermt2q %zmm25, %zmm31, %zmm19
; AVX512DQ-FCP-NEXT: vmovdqa64 320(%rdi), %zmm25
; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm31 = [0,1,2,3,4,5,10,13]
; AVX512DQ-FCP-NEXT: vpermt2q %zmm25, %zmm31, %zmm6
; AVX512DQ-FCP-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT: vpermt2q %zmm5, %zmm31, %zmm15
; AVX512DQ-FCP-NEXT: vpermt2q %zmm4, %zmm31, %zmm16
; AVX512DQ-FCP-NEXT: vpermt2q %zmm3, %zmm31, %zmm17
; AVX512DQ-FCP-NEXT: vpermt2q %zmm2, %zmm31, %zmm18
; AVX512DQ-FCP-NEXT: vpermt2q %zmm1, %zmm31, %zmm20
; AVX512DQ-FCP-NEXT: vpermt2q %zmm0, %zmm31, %zmm23
; AVX512DQ-FCP-NEXT: vmovdqa64 128(%rdi), %zmm6
; AVX512DQ-FCP-NEXT: vpermt2q %zmm6, %zmm31, %zmm11
; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm31 = [0,1,2,3,4,8,11,14]
; AVX512DQ-FCP-NEXT: vpermt2q %zmm1, %zmm31, %zmm22
; AVX512DQ-FCP-NEXT: vpermt2q %zmm25, %zmm31, %zmm24
; AVX512DQ-FCP-NEXT: vpermt2q %zmm4, %zmm31, %zmm30
; AVX512DQ-FCP-NEXT: vpermt2q %zmm5, %zmm31, %zmm29
; AVX512DQ-FCP-NEXT: vpermt2q %zmm2, %zmm31, %zmm28
; AVX512DQ-FCP-NEXT: vpermt2q %zmm3, %zmm31, %zmm27
; AVX512DQ-FCP-NEXT: vpermt2q %zmm0, %zmm31, %zmm26
; AVX512DQ-FCP-NEXT: vpermt2q %zmm6, %zmm31, %zmm21
; AVX512DQ-FCP-NEXT: vpmovsxbq {{.*#+}} zmm31 = [0,1,2,3,4,9,12,15]
; AVX512DQ-FCP-NEXT: vpermt2q %zmm25, %zmm31, %zmm7
; AVX512DQ-FCP-NEXT: vpermt2q %zmm4, %zmm31, %zmm9
; AVX512DQ-FCP-NEXT: vpermt2q %zmm5, %zmm31, %zmm8
; AVX512DQ-FCP-NEXT: vpermt2q %zmm2, %zmm31, %zmm12
; AVX512DQ-FCP-NEXT: vpermt2q %zmm3, %zmm31, %zmm10
; AVX512DQ-FCP-NEXT: vpermt2q %zmm0, %zmm31, %zmm14
; AVX512DQ-FCP-NEXT: vpermt2q %zmm1, %zmm31, %zmm13
; AVX512DQ-FCP-NEXT: vpermt2q %zmm6, %zmm31, %zmm19
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm23, 448(%rsi)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm20, 384(%rsi)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm18, 320(%rsi)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm17, 256(%rsi)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm16, 192(%rsi)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm15, 128(%rsi)
; AVX512DQ-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-FCP-NEXT: vmovaps %zmm0, 64(%rsi)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm11, (%rsi)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm26, 448(%rdx)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm27, 256(%rdx)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm28, 320(%rdx)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm29, 128(%rdx)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm30, 192(%rdx)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm21, (%rdx)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm24, 64(%rdx)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm22, 384(%rdx)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm13, 384(%rcx)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm14, 448(%rcx)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm10, 256(%rcx)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm12, 320(%rcx)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm8, 128(%rcx)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm9, 192(%rcx)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm19, (%rcx)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm7, 64(%rcx)
; AVX512DQ-FCP-NEXT: vzeroupper
; AVX512DQ-FCP-NEXT: retq
;
; AVX512BW-LABEL: load_i64_stride3_vf64:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 1472(%rdi), %zmm0
; AVX512BW-NEXT: vmovdqa64 1408(%rdi), %zmm14
; AVX512BW-NEXT: vmovdqa64 1344(%rdi), %zmm22
; AVX512BW-NEXT: vmovdqa64 1280(%rdi), %zmm1
; AVX512BW-NEXT: vmovdqa64 1216(%rdi), %zmm13
; AVX512BW-NEXT: vmovdqa64 1152(%rdi), %zmm19
; AVX512BW-NEXT: vmovdqa64 1088(%rdi), %zmm2
; AVX512BW-NEXT: vmovdqa64 1024(%rdi), %zmm12
; AVX512BW-NEXT: vmovdqa64 960(%rdi), %zmm27
; AVX512BW-NEXT: vmovdqa64 896(%rdi), %zmm3
; AVX512BW-NEXT: vmovdqa64 832(%rdi), %zmm10
; AVX512BW-NEXT: vmovdqa64 768(%rdi), %zmm26
; AVX512BW-NEXT: vmovdqa64 704(%rdi), %zmm4
; AVX512BW-NEXT: vmovdqa64 640(%rdi), %zmm9
; AVX512BW-NEXT: vmovdqa64 576(%rdi), %zmm29
; AVX512BW-NEXT: vmovdqa64 512(%rdi), %zmm5
; AVX512BW-NEXT: vmovdqa64 448(%rdi), %zmm8
; AVX512BW-NEXT: vmovdqa64 384(%rdi), %zmm28
; AVX512BW-NEXT: vmovdqa64 256(%rdi), %zmm7
; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm25
; AVX512BW-NEXT: vmovdqa64 192(%rdi), %zmm30
; AVX512BW-NEXT: vpmovsxbq {{.*#+}} zmm11 = [0,3,6,9,12,15,0,0]
; AVX512BW-NEXT: vmovdqa64 %zmm30, %zmm6
; AVX512BW-NEXT: vpermt2q %zmm7, %zmm11, %zmm6
; AVX512BW-NEXT: vmovdqa64 %zmm28, %zmm15
; AVX512BW-NEXT: vpermt2q %zmm8, %zmm11, %zmm15
; AVX512BW-NEXT: vmovdqa64 %zmm29, %zmm16
; AVX512BW-NEXT: vpermt2q %zmm9, %zmm11, %zmm16
; AVX512BW-NEXT: vmovdqa64 %zmm26, %zmm17
; AVX512BW-NEXT: vpermt2q %zmm10, %zmm11, %zmm17
; AVX512BW-NEXT: vmovdqa64 %zmm27, %zmm18
; AVX512BW-NEXT: vpermt2q %zmm12, %zmm11, %zmm18
; AVX512BW-NEXT: vmovdqa64 %zmm19, %zmm20
; AVX512BW-NEXT: vpermt2q %zmm13, %zmm11, %zmm20
; AVX512BW-NEXT: vmovdqa64 %zmm22, %zmm23
; AVX512BW-NEXT: vpermt2q %zmm14, %zmm11, %zmm23
; AVX512BW-NEXT: vpmovsxbq {{.*#+}} zmm21 = [1,4,7,10,13,0,0,0]
; AVX512BW-NEXT: vmovdqa64 %zmm30, %zmm24
; AVX512BW-NEXT: vpermt2q %zmm7, %zmm21, %zmm24
; AVX512BW-NEXT: vpmovsxbq {{.*#+}} zmm31 = [10,13,0,3,6,0,0,0]
; AVX512BW-NEXT: vpermt2q %zmm30, %zmm31, %zmm7
; AVX512BW-NEXT: vmovdqa64 %zmm29, %zmm30
; AVX512BW-NEXT: vpermt2q %zmm9, %zmm21, %zmm30
; AVX512BW-NEXT: vpermt2q %zmm29, %zmm31, %zmm9
; AVX512BW-NEXT: vmovdqa64 %zmm28, %zmm29
; AVX512BW-NEXT: vpermt2q %zmm8, %zmm21, %zmm29
; AVX512BW-NEXT: vpermt2q %zmm28, %zmm31, %zmm8
; AVX512BW-NEXT: vmovdqa64 %zmm27, %zmm28
; AVX512BW-NEXT: vpermt2q %zmm12, %zmm21, %zmm28
; AVX512BW-NEXT: vpermt2q %zmm27, %zmm31, %zmm12
; AVX512BW-NEXT: vmovdqa64 %zmm26, %zmm27
; AVX512BW-NEXT: vpermt2q %zmm10, %zmm21, %zmm27
; AVX512BW-NEXT: vpermt2q %zmm26, %zmm31, %zmm10
; AVX512BW-NEXT: vmovdqa64 %zmm22, %zmm26
; AVX512BW-NEXT: vpermt2q %zmm14, %zmm21, %zmm26
; AVX512BW-NEXT: vpermt2q %zmm22, %zmm31, %zmm14
; AVX512BW-NEXT: vmovdqa64 %zmm19, %zmm22
; AVX512BW-NEXT: vpermt2q %zmm13, %zmm21, %zmm22
; AVX512BW-NEXT: vpermt2q %zmm19, %zmm31, %zmm13
; AVX512BW-NEXT: vmovdqa64 64(%rdi), %zmm19
; AVX512BW-NEXT: vpermi2q %zmm19, %zmm25, %zmm11
; AVX512BW-NEXT: vpermi2q %zmm19, %zmm25, %zmm21
; AVX512BW-NEXT: vpermt2q %zmm25, %zmm31, %zmm19
; AVX512BW-NEXT: vmovdqa64 320(%rdi), %zmm25
; AVX512BW-NEXT: vpmovsxbq {{.*#+}} zmm31 = [0,1,2,3,4,5,10,13]
; AVX512BW-NEXT: vpermt2q %zmm25, %zmm31, %zmm6
; AVX512BW-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT: vpermt2q %zmm5, %zmm31, %zmm15
; AVX512BW-NEXT: vpermt2q %zmm4, %zmm31, %zmm16
; AVX512BW-NEXT: vpermt2q %zmm3, %zmm31, %zmm17
; AVX512BW-NEXT: vpermt2q %zmm2, %zmm31, %zmm18
; AVX512BW-NEXT: vpermt2q %zmm1, %zmm31, %zmm20
; AVX512BW-NEXT: vpermt2q %zmm0, %zmm31, %zmm23
; AVX512BW-NEXT: vmovdqa64 128(%rdi), %zmm6
; AVX512BW-NEXT: vpermt2q %zmm6, %zmm31, %zmm11
; AVX512BW-NEXT: vpmovsxbq {{.*#+}} zmm31 = [0,1,2,3,4,8,11,14]
; AVX512BW-NEXT: vpermt2q %zmm1, %zmm31, %zmm22
; AVX512BW-NEXT: vpermt2q %zmm25, %zmm31, %zmm24
; AVX512BW-NEXT: vpermt2q %zmm4, %zmm31, %zmm30
; AVX512BW-NEXT: vpermt2q %zmm5, %zmm31, %zmm29
; AVX512BW-NEXT: vpermt2q %zmm2, %zmm31, %zmm28
; AVX512BW-NEXT: vpermt2q %zmm3, %zmm31, %zmm27
; AVX512BW-NEXT: vpermt2q %zmm0, %zmm31, %zmm26
; AVX512BW-NEXT: vpermt2q %zmm6, %zmm31, %zmm21
; AVX512BW-NEXT: vpmovsxbq {{.*#+}} zmm31 = [0,1,2,3,4,9,12,15]
; AVX512BW-NEXT: vpermt2q %zmm25, %zmm31, %zmm7
; AVX512BW-NEXT: vpermt2q %zmm4, %zmm31, %zmm9
; AVX512BW-NEXT: vpermt2q %zmm5, %zmm31, %zmm8
; AVX512BW-NEXT: vpermt2q %zmm2, %zmm31, %zmm12
; AVX512BW-NEXT: vpermt2q %zmm3, %zmm31, %zmm10
; AVX512BW-NEXT: vpermt2q %zmm0, %zmm31, %zmm14
; AVX512BW-NEXT: vpermt2q %zmm1, %zmm31, %zmm13
; AVX512BW-NEXT: vpermt2q %zmm6, %zmm31, %zmm19
; AVX512BW-NEXT: vmovdqa64 %zmm23, 448(%rsi)
; AVX512BW-NEXT: vmovdqa64 %zmm20, 384(%rsi)
; AVX512BW-NEXT: vmovdqa64 %zmm18, 320(%rsi)
; AVX512BW-NEXT: vmovdqa64 %zmm17, 256(%rsi)
; AVX512BW-NEXT: vmovdqa64 %zmm16, 192(%rsi)
; AVX512BW-NEXT: vmovdqa64 %zmm15, 128(%rsi)
; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT: vmovaps %zmm0, 64(%rsi)
; AVX512BW-NEXT: vmovdqa64 %zmm11, (%rsi)
; AVX512BW-NEXT: vmovdqa64 %zmm26, 448(%rdx)
; AVX512BW-NEXT: vmovdqa64 %zmm27, 256(%rdx)
; AVX512BW-NEXT: vmovdqa64 %zmm28, 320(%rdx)
; AVX512BW-NEXT: vmovdqa64 %zmm29, 128(%rdx)
; AVX512BW-NEXT: vmovdqa64 %zmm30, 192(%rdx)
; AVX512BW-NEXT: vmovdqa64 %zmm21, (%rdx)
; AVX512BW-NEXT: vmovdqa64 %zmm24, 64(%rdx)
; AVX512BW-NEXT: vmovdqa64 %zmm22, 384(%rdx)
; AVX512BW-NEXT: vmovdqa64 %zmm13, 384(%rcx)
; AVX512BW-NEXT: vmovdqa64 %zmm14, 448(%rcx)
; AVX512BW-NEXT: vmovdqa64 %zmm10, 256(%rcx)
; AVX512BW-NEXT: vmovdqa64 %zmm12, 320(%rcx)
; AVX512BW-NEXT: vmovdqa64 %zmm8, 128(%rcx)
; AVX512BW-NEXT: vmovdqa64 %zmm9, 192(%rcx)
; AVX512BW-NEXT: vmovdqa64 %zmm19, (%rcx)
; AVX512BW-NEXT: vmovdqa64 %zmm7, 64(%rcx)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BW-FCP-LABEL: load_i64_stride3_vf64:
; AVX512BW-FCP: # %bb.0:
; AVX512BW-FCP-NEXT: vmovdqa64 1472(%rdi), %zmm0
; AVX512BW-FCP-NEXT: vmovdqa64 1408(%rdi), %zmm14
; AVX512BW-FCP-NEXT: vmovdqa64 1344(%rdi), %zmm22
; AVX512BW-FCP-NEXT: vmovdqa64 1280(%rdi), %zmm1
; AVX512BW-FCP-NEXT: vmovdqa64 1216(%rdi), %zmm13
; AVX512BW-FCP-NEXT: vmovdqa64 1152(%rdi), %zmm19
; AVX512BW-FCP-NEXT: vmovdqa64 1088(%rdi), %zmm2
; AVX512BW-FCP-NEXT: vmovdqa64 1024(%rdi), %zmm12
; AVX512BW-FCP-NEXT: vmovdqa64 960(%rdi), %zmm27
; AVX512BW-FCP-NEXT: vmovdqa64 896(%rdi), %zmm3
; AVX512BW-FCP-NEXT: vmovdqa64 832(%rdi), %zmm10
; AVX512BW-FCP-NEXT: vmovdqa64 768(%rdi), %zmm26
; AVX512BW-FCP-NEXT: vmovdqa64 704(%rdi), %zmm4
; AVX512BW-FCP-NEXT: vmovdqa64 640(%rdi), %zmm9
; AVX512BW-FCP-NEXT: vmovdqa64 576(%rdi), %zmm29
; AVX512BW-FCP-NEXT: vmovdqa64 512(%rdi), %zmm5
; AVX512BW-FCP-NEXT: vmovdqa64 448(%rdi), %zmm8
; AVX512BW-FCP-NEXT: vmovdqa64 384(%rdi), %zmm28
; AVX512BW-FCP-NEXT: vmovdqa64 256(%rdi), %zmm7
; AVX512BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm25
; AVX512BW-FCP-NEXT: vmovdqa64 192(%rdi), %zmm30
; AVX512BW-FCP-NEXT: vpmovsxbq {{.*#+}} zmm11 = [0,3,6,9,12,15,0,0]
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm30, %zmm6
; AVX512BW-FCP-NEXT: vpermt2q %zmm7, %zmm11, %zmm6
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm28, %zmm15
; AVX512BW-FCP-NEXT: vpermt2q %zmm8, %zmm11, %zmm15
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm29, %zmm16
; AVX512BW-FCP-NEXT: vpermt2q %zmm9, %zmm11, %zmm16
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm26, %zmm17
; AVX512BW-FCP-NEXT: vpermt2q %zmm10, %zmm11, %zmm17
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm27, %zmm18
; AVX512BW-FCP-NEXT: vpermt2q %zmm12, %zmm11, %zmm18
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm19, %zmm20
; AVX512BW-FCP-NEXT: vpermt2q %zmm13, %zmm11, %zmm20
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm22, %zmm23
; AVX512BW-FCP-NEXT: vpermt2q %zmm14, %zmm11, %zmm23
; AVX512BW-FCP-NEXT: vpmovsxbq {{.*#+}} zmm21 = [1,4,7,10,13,0,0,0]
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm30, %zmm24
; AVX512BW-FCP-NEXT: vpermt2q %zmm7, %zmm21, %zmm24
; AVX512BW-FCP-NEXT: vpmovsxbq {{.*#+}} zmm31 = [10,13,0,3,6,0,0,0]
; AVX512BW-FCP-NEXT: vpermt2q %zmm30, %zmm31, %zmm7
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm29, %zmm30
; AVX512BW-FCP-NEXT: vpermt2q %zmm9, %zmm21, %zmm30
; AVX512BW-FCP-NEXT: vpermt2q %zmm29, %zmm31, %zmm9
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm28, %zmm29
; AVX512BW-FCP-NEXT: vpermt2q %zmm8, %zmm21, %zmm29
; AVX512BW-FCP-NEXT: vpermt2q %zmm28, %zmm31, %zmm8
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm27, %zmm28
; AVX512BW-FCP-NEXT: vpermt2q %zmm12, %zmm21, %zmm28
; AVX512BW-FCP-NEXT: vpermt2q %zmm27, %zmm31, %zmm12
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm26, %zmm27
; AVX512BW-FCP-NEXT: vpermt2q %zmm10, %zmm21, %zmm27
; AVX512BW-FCP-NEXT: vpermt2q %zmm26, %zmm31, %zmm10
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm22, %zmm26
; AVX512BW-FCP-NEXT: vpermt2q %zmm14, %zmm21, %zmm26
; AVX512BW-FCP-NEXT: vpermt2q %zmm22, %zmm31, %zmm14
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm19, %zmm22
; AVX512BW-FCP-NEXT: vpermt2q %zmm13, %zmm21, %zmm22
; AVX512BW-FCP-NEXT: vpermt2q %zmm19, %zmm31, %zmm13
; AVX512BW-FCP-NEXT: vmovdqa64 64(%rdi), %zmm19
; AVX512BW-FCP-NEXT: vpermi2q %zmm19, %zmm25, %zmm11
; AVX512BW-FCP-NEXT: vpermi2q %zmm19, %zmm25, %zmm21
; AVX512BW-FCP-NEXT: vpermt2q %zmm25, %zmm31, %zmm19
; AVX512BW-FCP-NEXT: vmovdqa64 320(%rdi), %zmm25
; AVX512BW-FCP-NEXT: vpmovsxbq {{.*#+}} zmm31 = [0,1,2,3,4,5,10,13]
; AVX512BW-FCP-NEXT: vpermt2q %zmm25, %zmm31, %zmm6
; AVX512BW-FCP-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT: vpermt2q %zmm5, %zmm31, %zmm15
; AVX512BW-FCP-NEXT: vpermt2q %zmm4, %zmm31, %zmm16
; AVX512BW-FCP-NEXT: vpermt2q %zmm3, %zmm31, %zmm17
; AVX512BW-FCP-NEXT: vpermt2q %zmm2, %zmm31, %zmm18
; AVX512BW-FCP-NEXT: vpermt2q %zmm1, %zmm31, %zmm20
; AVX512BW-FCP-NEXT: vpermt2q %zmm0, %zmm31, %zmm23
; AVX512BW-FCP-NEXT: vmovdqa64 128(%rdi), %zmm6
; AVX512BW-FCP-NEXT: vpermt2q %zmm6, %zmm31, %zmm11
; AVX512BW-FCP-NEXT: vpmovsxbq {{.*#+}} zmm31 = [0,1,2,3,4,8,11,14]
; AVX512BW-FCP-NEXT: vpermt2q %zmm1, %zmm31, %zmm22
; AVX512BW-FCP-NEXT: vpermt2q %zmm25, %zmm31, %zmm24
; AVX512BW-FCP-NEXT: vpermt2q %zmm4, %zmm31, %zmm30
; AVX512BW-FCP-NEXT: vpermt2q %zmm5, %zmm31, %zmm29
; AVX512BW-FCP-NEXT: vpermt2q %zmm2, %zmm31, %zmm28
; AVX512BW-FCP-NEXT: vpermt2q %zmm3, %zmm31, %zmm27
; AVX512BW-FCP-NEXT: vpermt2q %zmm0, %zmm31, %zmm26
; AVX512BW-FCP-NEXT: vpermt2q %zmm6, %zmm31, %zmm21
; AVX512BW-FCP-NEXT: vpmovsxbq {{.*#+}} zmm31 = [0,1,2,3,4,9,12,15]
; AVX512BW-FCP-NEXT: vpermt2q %zmm25, %zmm31, %zmm7
; AVX512BW-FCP-NEXT: vpermt2q %zmm4, %zmm31, %zmm9
; AVX512BW-FCP-NEXT: vpermt2q %zmm5, %zmm31, %zmm8
; AVX512BW-FCP-NEXT: vpermt2q %zmm2, %zmm31, %zmm12
; AVX512BW-FCP-NEXT: vpermt2q %zmm3, %zmm31, %zmm10
; AVX512BW-FCP-NEXT: vpermt2q %zmm0, %zmm31, %zmm14
; AVX512BW-FCP-NEXT: vpermt2q %zmm1, %zmm31, %zmm13
; AVX512BW-FCP-NEXT: vpermt2q %zmm6, %zmm31, %zmm19
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm23, 448(%rsi)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm20, 384(%rsi)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm18, 320(%rsi)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm17, 256(%rsi)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm16, 192(%rsi)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm15, 128(%rsi)
; AVX512BW-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-FCP-NEXT: vmovaps %zmm0, 64(%rsi)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm11, (%rsi)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm26, 448(%rdx)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm27, 256(%rdx)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm28, 320(%rdx)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm29, 128(%rdx)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm30, 192(%rdx)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm21, (%rdx)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm24, 64(%rdx)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm22, 384(%rdx)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm13, 384(%rcx)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm14, 448(%rcx)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm10, 256(%rcx)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm12, 320(%rcx)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm8, 128(%rcx)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm9, 192(%rcx)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm19, (%rcx)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm7, 64(%rcx)
; AVX512BW-FCP-NEXT: vzeroupper
; AVX512BW-FCP-NEXT: retq
;
; AVX512DQ-BW-LABEL: load_i64_stride3_vf64:
; AVX512DQ-BW: # %bb.0:
; AVX512DQ-BW-NEXT: vmovdqa64 1472(%rdi), %zmm0
; AVX512DQ-BW-NEXT: vmovdqa64 1408(%rdi), %zmm14
; AVX512DQ-BW-NEXT: vmovdqa64 1344(%rdi), %zmm22
; AVX512DQ-BW-NEXT: vmovdqa64 1280(%rdi), %zmm1
; AVX512DQ-BW-NEXT: vmovdqa64 1216(%rdi), %zmm13
; AVX512DQ-BW-NEXT: vmovdqa64 1152(%rdi), %zmm19
; AVX512DQ-BW-NEXT: vmovdqa64 1088(%rdi), %zmm2
; AVX512DQ-BW-NEXT: vmovdqa64 1024(%rdi), %zmm12
; AVX512DQ-BW-NEXT: vmovdqa64 960(%rdi), %zmm27
; AVX512DQ-BW-NEXT: vmovdqa64 896(%rdi), %zmm3
; AVX512DQ-BW-NEXT: vmovdqa64 832(%rdi), %zmm10
; AVX512DQ-BW-NEXT: vmovdqa64 768(%rdi), %zmm26
; AVX512DQ-BW-NEXT: vmovdqa64 704(%rdi), %zmm4
; AVX512DQ-BW-NEXT: vmovdqa64 640(%rdi), %zmm9
; AVX512DQ-BW-NEXT: vmovdqa64 576(%rdi), %zmm29
; AVX512DQ-BW-NEXT: vmovdqa64 512(%rdi), %zmm5
; AVX512DQ-BW-NEXT: vmovdqa64 448(%rdi), %zmm8
; AVX512DQ-BW-NEXT: vmovdqa64 384(%rdi), %zmm28
; AVX512DQ-BW-NEXT: vmovdqa64 256(%rdi), %zmm7
; AVX512DQ-BW-NEXT: vmovdqa64 (%rdi), %zmm25
; AVX512DQ-BW-NEXT: vmovdqa64 192(%rdi), %zmm30
; AVX512DQ-BW-NEXT: vpmovsxbq {{.*#+}} zmm11 = [0,3,6,9,12,15,0,0]
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm30, %zmm6
; AVX512DQ-BW-NEXT: vpermt2q %zmm7, %zmm11, %zmm6
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm28, %zmm15
; AVX512DQ-BW-NEXT: vpermt2q %zmm8, %zmm11, %zmm15
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm29, %zmm16
; AVX512DQ-BW-NEXT: vpermt2q %zmm9, %zmm11, %zmm16
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm26, %zmm17
; AVX512DQ-BW-NEXT: vpermt2q %zmm10, %zmm11, %zmm17
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm27, %zmm18
; AVX512DQ-BW-NEXT: vpermt2q %zmm12, %zmm11, %zmm18
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm19, %zmm20
; AVX512DQ-BW-NEXT: vpermt2q %zmm13, %zmm11, %zmm20
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm22, %zmm23
; AVX512DQ-BW-NEXT: vpermt2q %zmm14, %zmm11, %zmm23
; AVX512DQ-BW-NEXT: vpmovsxbq {{.*#+}} zmm21 = [1,4,7,10,13,0,0,0]
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm30, %zmm24
; AVX512DQ-BW-NEXT: vpermt2q %zmm7, %zmm21, %zmm24
; AVX512DQ-BW-NEXT: vpmovsxbq {{.*#+}} zmm31 = [10,13,0,3,6,0,0,0]
; AVX512DQ-BW-NEXT: vpermt2q %zmm30, %zmm31, %zmm7
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm29, %zmm30
; AVX512DQ-BW-NEXT: vpermt2q %zmm9, %zmm21, %zmm30
; AVX512DQ-BW-NEXT: vpermt2q %zmm29, %zmm31, %zmm9
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm28, %zmm29
; AVX512DQ-BW-NEXT: vpermt2q %zmm8, %zmm21, %zmm29
; AVX512DQ-BW-NEXT: vpermt2q %zmm28, %zmm31, %zmm8
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm27, %zmm28
; AVX512DQ-BW-NEXT: vpermt2q %zmm12, %zmm21, %zmm28
; AVX512DQ-BW-NEXT: vpermt2q %zmm27, %zmm31, %zmm12
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm26, %zmm27
; AVX512DQ-BW-NEXT: vpermt2q %zmm10, %zmm21, %zmm27
; AVX512DQ-BW-NEXT: vpermt2q %zmm26, %zmm31, %zmm10
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm22, %zmm26
; AVX512DQ-BW-NEXT: vpermt2q %zmm14, %zmm21, %zmm26
; AVX512DQ-BW-NEXT: vpermt2q %zmm22, %zmm31, %zmm14
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm19, %zmm22
; AVX512DQ-BW-NEXT: vpermt2q %zmm13, %zmm21, %zmm22
; AVX512DQ-BW-NEXT: vpermt2q %zmm19, %zmm31, %zmm13
; AVX512DQ-BW-NEXT: vmovdqa64 64(%rdi), %zmm19
; AVX512DQ-BW-NEXT: vpermi2q %zmm19, %zmm25, %zmm11
; AVX512DQ-BW-NEXT: vpermi2q %zmm19, %zmm25, %zmm21
; AVX512DQ-BW-NEXT: vpermt2q %zmm25, %zmm31, %zmm19
; AVX512DQ-BW-NEXT: vmovdqa64 320(%rdi), %zmm25
; AVX512DQ-BW-NEXT: vpmovsxbq {{.*#+}} zmm31 = [0,1,2,3,4,5,10,13]
; AVX512DQ-BW-NEXT: vpermt2q %zmm25, %zmm31, %zmm6
; AVX512DQ-BW-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT: vpermt2q %zmm5, %zmm31, %zmm15
; AVX512DQ-BW-NEXT: vpermt2q %zmm4, %zmm31, %zmm16
; AVX512DQ-BW-NEXT: vpermt2q %zmm3, %zmm31, %zmm17
; AVX512DQ-BW-NEXT: vpermt2q %zmm2, %zmm31, %zmm18
; AVX512DQ-BW-NEXT: vpermt2q %zmm1, %zmm31, %zmm20
; AVX512DQ-BW-NEXT: vpermt2q %zmm0, %zmm31, %zmm23
; AVX512DQ-BW-NEXT: vmovdqa64 128(%rdi), %zmm6
; AVX512DQ-BW-NEXT: vpermt2q %zmm6, %zmm31, %zmm11
; AVX512DQ-BW-NEXT: vpmovsxbq {{.*#+}} zmm31 = [0,1,2,3,4,8,11,14]
; AVX512DQ-BW-NEXT: vpermt2q %zmm1, %zmm31, %zmm22
; AVX512DQ-BW-NEXT: vpermt2q %zmm25, %zmm31, %zmm24
; AVX512DQ-BW-NEXT: vpermt2q %zmm4, %zmm31, %zmm30
; AVX512DQ-BW-NEXT: vpermt2q %zmm5, %zmm31, %zmm29
; AVX512DQ-BW-NEXT: vpermt2q %zmm2, %zmm31, %zmm28
; AVX512DQ-BW-NEXT: vpermt2q %zmm3, %zmm31, %zmm27
; AVX512DQ-BW-NEXT: vpermt2q %zmm0, %zmm31, %zmm26
; AVX512DQ-BW-NEXT: vpermt2q %zmm6, %zmm31, %zmm21
; AVX512DQ-BW-NEXT: vpmovsxbq {{.*#+}} zmm31 = [0,1,2,3,4,9,12,15]
; AVX512DQ-BW-NEXT: vpermt2q %zmm25, %zmm31, %zmm7
; AVX512DQ-BW-NEXT: vpermt2q %zmm4, %zmm31, %zmm9
; AVX512DQ-BW-NEXT: vpermt2q %zmm5, %zmm31, %zmm8
; AVX512DQ-BW-NEXT: vpermt2q %zmm2, %zmm31, %zmm12
; AVX512DQ-BW-NEXT: vpermt2q %zmm3, %zmm31, %zmm10
; AVX512DQ-BW-NEXT: vpermt2q %zmm0, %zmm31, %zmm14
; AVX512DQ-BW-NEXT: vpermt2q %zmm1, %zmm31, %zmm13
; AVX512DQ-BW-NEXT: vpermt2q %zmm6, %zmm31, %zmm19
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm23, 448(%rsi)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm20, 384(%rsi)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm18, 320(%rsi)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm17, 256(%rsi)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm16, 192(%rsi)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm15, 128(%rsi)
; AVX512DQ-BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT: vmovaps %zmm0, 64(%rsi)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm11, (%rsi)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm26, 448(%rdx)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm27, 256(%rdx)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm28, 320(%rdx)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm29, 128(%rdx)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm30, 192(%rdx)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm21, (%rdx)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm24, 64(%rdx)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm22, 384(%rdx)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm13, 384(%rcx)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm14, 448(%rcx)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm10, 256(%rcx)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm12, 320(%rcx)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm8, 128(%rcx)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm9, 192(%rcx)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm19, (%rcx)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm7, 64(%rcx)
; AVX512DQ-BW-NEXT: vzeroupper
; AVX512DQ-BW-NEXT: retq
;
; AVX512DQ-BW-FCP-LABEL: load_i64_stride3_vf64:
; AVX512DQ-BW-FCP: # %bb.0:
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 1472(%rdi), %zmm0
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 1408(%rdi), %zmm14
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 1344(%rdi), %zmm22
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 1280(%rdi), %zmm1
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 1216(%rdi), %zmm13
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 1152(%rdi), %zmm19
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 1088(%rdi), %zmm2
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 1024(%rdi), %zmm12
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 960(%rdi), %zmm27
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 896(%rdi), %zmm3
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 832(%rdi), %zmm10
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 768(%rdi), %zmm26
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 704(%rdi), %zmm4
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 640(%rdi), %zmm9
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 576(%rdi), %zmm29
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 512(%rdi), %zmm5
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 448(%rdi), %zmm8
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 384(%rdi), %zmm28
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 256(%rdi), %zmm7
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm25
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 192(%rdi), %zmm30
; AVX512DQ-BW-FCP-NEXT: vpmovsxbq {{.*#+}} zmm11 = [0,3,6,9,12,15,0,0]
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm30, %zmm6
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm7, %zmm11, %zmm6
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm28, %zmm15
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm8, %zmm11, %zmm15
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm29, %zmm16
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm9, %zmm11, %zmm16
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm26, %zmm17
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm10, %zmm11, %zmm17
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm27, %zmm18
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm12, %zmm11, %zmm18
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm19, %zmm20
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm13, %zmm11, %zmm20
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm22, %zmm23
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm14, %zmm11, %zmm23
; AVX512DQ-BW-FCP-NEXT: vpmovsxbq {{.*#+}} zmm21 = [1,4,7,10,13,0,0,0]
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm30, %zmm24
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm7, %zmm21, %zmm24
; AVX512DQ-BW-FCP-NEXT: vpmovsxbq {{.*#+}} zmm31 = [10,13,0,3,6,0,0,0]
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm30, %zmm31, %zmm7
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm29, %zmm30
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm9, %zmm21, %zmm30
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm29, %zmm31, %zmm9
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm28, %zmm29
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm8, %zmm21, %zmm29
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm28, %zmm31, %zmm8
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm27, %zmm28
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm12, %zmm21, %zmm28
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm27, %zmm31, %zmm12
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm26, %zmm27
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm10, %zmm21, %zmm27
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm26, %zmm31, %zmm10
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm22, %zmm26
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm14, %zmm21, %zmm26
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm22, %zmm31, %zmm14
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm19, %zmm22
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm13, %zmm21, %zmm22
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm19, %zmm31, %zmm13
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 64(%rdi), %zmm19
; AVX512DQ-BW-FCP-NEXT: vpermi2q %zmm19, %zmm25, %zmm11
; AVX512DQ-BW-FCP-NEXT: vpermi2q %zmm19, %zmm25, %zmm21
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm25, %zmm31, %zmm19
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 320(%rdi), %zmm25
; AVX512DQ-BW-FCP-NEXT: vpmovsxbq {{.*#+}} zmm31 = [0,1,2,3,4,5,10,13]
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm25, %zmm31, %zmm6
; AVX512DQ-BW-FCP-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm5, %zmm31, %zmm15
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm4, %zmm31, %zmm16
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm3, %zmm31, %zmm17
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm2, %zmm31, %zmm18
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm1, %zmm31, %zmm20
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm0, %zmm31, %zmm23
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 128(%rdi), %zmm6
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm6, %zmm31, %zmm11
; AVX512DQ-BW-FCP-NEXT: vpmovsxbq {{.*#+}} zmm31 = [0,1,2,3,4,8,11,14]
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm1, %zmm31, %zmm22
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm25, %zmm31, %zmm24
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm4, %zmm31, %zmm30
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm5, %zmm31, %zmm29
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm2, %zmm31, %zmm28
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm3, %zmm31, %zmm27
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm0, %zmm31, %zmm26
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm6, %zmm31, %zmm21
; AVX512DQ-BW-FCP-NEXT: vpmovsxbq {{.*#+}} zmm31 = [0,1,2,3,4,9,12,15]
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm25, %zmm31, %zmm7
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm4, %zmm31, %zmm9
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm5, %zmm31, %zmm8
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm2, %zmm31, %zmm12
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm3, %zmm31, %zmm10
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm0, %zmm31, %zmm14
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm1, %zmm31, %zmm13
; AVX512DQ-BW-FCP-NEXT: vpermt2q %zmm6, %zmm31, %zmm19
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm23, 448(%rsi)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm20, 384(%rsi)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm18, 320(%rsi)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm17, 256(%rsi)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm16, 192(%rsi)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm15, 128(%rsi)
; AVX512DQ-BW-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT: vmovaps %zmm0, 64(%rsi)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm11, (%rsi)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm26, 448(%rdx)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm27, 256(%rdx)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm28, 320(%rdx)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm29, 128(%rdx)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm30, 192(%rdx)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm21, (%rdx)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm24, 64(%rdx)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm22, 384(%rdx)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm13, 384(%rcx)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm14, 448(%rcx)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm10, 256(%rcx)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm12, 320(%rcx)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm8, 128(%rcx)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm9, 192(%rcx)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm19, (%rcx)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm7, 64(%rcx)
; AVX512DQ-BW-FCP-NEXT: vzeroupper
; AVX512DQ-BW-FCP-NEXT: retq
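; The IR below is the test input those checks were generated from: one wide
; <192 x i64> load that three shufflevector masks deinterleave at stride 3
; (element indices 0,3,6,..., then 1,4,7,..., then 2,5,8,...) into the three
; <64 x i64> results stored to %out.vec0, %out.vec1, and %out.vec2.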
  %wide.vec = load <192 x i64>, ptr %in.vec, align 64
  %strided.vec0 = shufflevector <192 x i64> %wide.vec, <192 x i64> poison, <64 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21, i32 24, i32 27, i32 30, i32 33, i32 36, i32 39, i32 42, i32 45, i32 48, i32 51, i32 54, i32 57, i32 60, i32 63, i32 66, i32 69, i32 72, i32 75, i32 78, i32 81, i32 84, i32 87, i32 90, i32 93, i32 96, i32 99, i32 102, i32 105, i32 108, i32 111, i32 114, i32 117, i32 120, i32 123, i32 126, i32 129, i32 132, i32 135, i32 138, i32 141, i32 144, i32 147, i32 150, i32 153, i32 156, i32 159, i32 162, i32 165, i32 168, i32 171, i32 174, i32 177, i32 180, i32 183, i32 186, i32 189>
  %strided.vec1 = shufflevector <192 x i64> %wide.vec, <192 x i64> poison, <64 x i32> <i32 1, i32 4, i32 7, i32 10, i32 13, i32 16, i32 19, i32 22, i32 25, i32 28, i32 31, i32 34, i32 37, i32 40, i32 43, i32 46, i32 49, i32 52, i32 55, i32 58, i32 61, i32 64, i32 67, i32 70, i32 73, i32 76, i32 79, i32 82, i32 85, i32 88, i32 91, i32 94, i32 97, i32 100, i32 103, i32 106, i32 109, i32 112, i32 115, i32 118, i32 121, i32 124, i32 127, i32 130, i32 133, i32 136, i32 139, i32 142, i32 145, i32 148, i32 151, i32 154, i32 157, i32 160, i32 163, i32 166, i32 169, i32 172, i32 175, i32 178, i32 181, i32 184, i32 187, i32 190>
  %strided.vec2 = shufflevector <192 x i64> %wide.vec, <192 x i64> poison, <64 x i32> <i32 2, i32 5, i32 8, i32 11, i32 14, i32 17, i32 20, i32 23, i32 26, i32 29, i32 32, i32 35, i32 38, i32 41, i32 44, i32 47, i32 50, i32 53, i32 56, i32 59, i32 62, i32 65, i32 68, i32 71, i32 74, i32 77, i32 80, i32 83, i32 86, i32 89, i32 92, i32 95, i32 98, i32 101, i32 104, i32 107, i32 110, i32 113, i32 116, i32 119, i32 122, i32 125, i32 128, i32 131, i32 134, i32 137, i32 140, i32 143, i32 146, i32 149, i32 152, i32 155, i32 158, i32 161, i32 164, i32 167, i32 170, i32 173, i32 176, i32 179, i32 182, i32 185, i32 188, i32 191>
  store <64 x i64> %strided.vec0, ptr %out.vec0, align 64
  store <64 x i64> %strided.vec1, ptr %out.vec1, align 64
  store <64 x i64> %strided.vec2, ptr %out.vec2, align 64