1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,FALLBACK0
3 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1,AVX1-ONLY,FALLBACK1
4 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
5 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
6 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
7 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
8 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
9 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
10 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
11 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
12 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
13 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
14 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
16 ; These patterns are produced by LoopVectorizer for interleaved loads.
18 define void @load_i64_stride3_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2) nounwind {
19 ; SSE-LABEL: load_i64_stride3_vf2:
21 ; SSE-NEXT: movapd (%rdi), %xmm0
22 ; SSE-NEXT: movapd 16(%rdi), %xmm1
23 ; SSE-NEXT: movapd 32(%rdi), %xmm2
24 ; SSE-NEXT: movapd %xmm1, %xmm3
25 ; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm0[0],xmm3[1]
26 ; SSE-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1],xmm2[0]
27 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
28 ; SSE-NEXT: movapd %xmm3, (%rsi)
29 ; SSE-NEXT: movapd %xmm0, (%rdx)
30 ; SSE-NEXT: movapd %xmm2, (%rcx)
33 ; AVX1-ONLY-LABEL: load_i64_stride3_vf2:
35 ; AVX1-ONLY-NEXT: vmovdqa (%rdi), %xmm0
36 ; AVX1-ONLY-NEXT: vmovdqa 16(%rdi), %xmm1
37 ; AVX1-ONLY-NEXT: vmovdqa 32(%rdi), %xmm2
38 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm3 = xmm0[0,1,2,3],xmm1[4,5,6,7]
39 ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7]
40 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],mem[4,5,6,7]
41 ; AVX1-ONLY-NEXT: vmovdqa %xmm3, (%rsi)
42 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, (%rdx)
43 ; AVX1-ONLY-NEXT: vmovdqa %xmm1, (%rcx)
44 ; AVX1-ONLY-NEXT: retq
46 ; AVX2-ONLY-LABEL: load_i64_stride3_vf2:
48 ; AVX2-ONLY-NEXT: vmovdqa (%rdi), %xmm0
49 ; AVX2-ONLY-NEXT: vmovdqa 16(%rdi), %xmm1
50 ; AVX2-ONLY-NEXT: vmovdqa 32(%rdi), %xmm2
51 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm3 = xmm0[0,1],xmm1[2,3]
52 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7]
53 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
54 ; AVX2-ONLY-NEXT: vmovdqa %xmm3, (%rsi)
55 ; AVX2-ONLY-NEXT: vmovdqa %xmm0, (%rdx)
56 ; AVX2-ONLY-NEXT: vmovdqa %xmm1, (%rcx)
57 ; AVX2-ONLY-NEXT: retq
59 ; AVX512-SLOW-LABEL: load_i64_stride3_vf2:
60 ; AVX512-SLOW: # %bb.0:
61 ; AVX512-SLOW-NEXT: vpermpd {{.*#+}} zmm0 = mem[0,3,2,3,4,7,6,7]
62 ; AVX512-SLOW-NEXT: vmovdqa 32(%rdi), %xmm1
63 ; AVX512-SLOW-NEXT: vpalignr {{.*#+}} xmm2 = mem[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
64 ; AVX512-SLOW-NEXT: vpblendd {{.*#+}} xmm1 = mem[0,1],xmm1[2,3]
65 ; AVX512-SLOW-NEXT: vmovaps %xmm0, (%rsi)
66 ; AVX512-SLOW-NEXT: vmovdqa %xmm2, (%rdx)
67 ; AVX512-SLOW-NEXT: vmovdqa %xmm1, (%rcx)
68 ; AVX512-SLOW-NEXT: vzeroupper
69 ; AVX512-SLOW-NEXT: retq
71 ; AVX512-FAST-LABEL: load_i64_stride3_vf2:
72 ; AVX512-FAST: # %bb.0:
73 ; AVX512-FAST-NEXT: vmovaps {{.*#+}} xmm0 = [1,4]
74 ; AVX512-FAST-NEXT: vmovaps (%rdi), %zmm1
75 ; AVX512-FAST-NEXT: vpermpd %zmm1, %zmm0, %zmm0
76 ; AVX512-FAST-NEXT: vpermpd {{.*#+}} zmm1 = zmm1[0,3,2,3,4,7,6,7]
77 ; AVX512-FAST-NEXT: vmovaps 16(%rdi), %xmm2
78 ; AVX512-FAST-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0,1],mem[2,3]
79 ; AVX512-FAST-NEXT: vmovaps %xmm1, (%rsi)
80 ; AVX512-FAST-NEXT: vmovaps %xmm0, (%rdx)
81 ; AVX512-FAST-NEXT: vmovaps %xmm2, (%rcx)
82 ; AVX512-FAST-NEXT: vzeroupper
83 ; AVX512-FAST-NEXT: retq
84 %wide.vec = load <6 x i64>, ptr %in.vec, align 64
85 %strided.vec0 = shufflevector <6 x i64> %wide.vec, <6 x i64> poison, <2 x i32> <i32 0, i32 3>
86 %strided.vec1 = shufflevector <6 x i64> %wide.vec, <6 x i64> poison, <2 x i32> <i32 1, i32 4>
87 %strided.vec2 = shufflevector <6 x i64> %wide.vec, <6 x i64> poison, <2 x i32> <i32 2, i32 5>
88 store <2 x i64> %strided.vec0, ptr %out.vec0, align 64
89 store <2 x i64> %strided.vec1, ptr %out.vec1, align 64
90 store <2 x i64> %strided.vec2, ptr %out.vec2, align 64
94 define void @load_i64_stride3_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2) nounwind {
95 ; SSE-LABEL: load_i64_stride3_vf4:
97 ; SSE-NEXT: movapd 80(%rdi), %xmm0
98 ; SSE-NEXT: movapd (%rdi), %xmm1
99 ; SSE-NEXT: movapd 16(%rdi), %xmm2
100 ; SSE-NEXT: movapd 32(%rdi), %xmm3
101 ; SSE-NEXT: movapd 48(%rdi), %xmm4
102 ; SSE-NEXT: movapd 64(%rdi), %xmm5
103 ; SSE-NEXT: movapd %xmm5, %xmm6
104 ; SSE-NEXT: movsd {{.*#+}} xmm6 = xmm4[0],xmm6[1]
105 ; SSE-NEXT: movapd %xmm2, %xmm7
106 ; SSE-NEXT: movsd {{.*#+}} xmm7 = xmm1[0],xmm7[1]
107 ; SSE-NEXT: shufpd {{.*#+}} xmm4 = xmm4[1],xmm0[0]
108 ; SSE-NEXT: shufpd {{.*#+}} xmm1 = xmm1[1],xmm3[0]
109 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm5[0],xmm0[1]
110 ; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm2[0],xmm3[1]
111 ; SSE-NEXT: movapd %xmm6, 16(%rsi)
112 ; SSE-NEXT: movapd %xmm7, (%rsi)
113 ; SSE-NEXT: movapd %xmm4, 16(%rdx)
114 ; SSE-NEXT: movapd %xmm1, (%rdx)
115 ; SSE-NEXT: movapd %xmm0, 16(%rcx)
116 ; SSE-NEXT: movapd %xmm3, (%rcx)
119 ; AVX1-ONLY-LABEL: load_i64_stride3_vf4:
120 ; AVX1-ONLY: # %bb.0:
121 ; AVX1-ONLY-NEXT: vmovapd 32(%rdi), %ymm0
122 ; AVX1-ONLY-NEXT: vmovaps 16(%rdi), %xmm1
123 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm2 = mem[0,1],ymm0[2,3]
124 ; AVX1-ONLY-NEXT: vinsertf128 $1, 64(%rdi), %ymm1, %ymm1
125 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm3 = ymm2[0],ymm1[1],ymm2[2],ymm1[3]
126 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm2 = ymm2[1],ymm0[0],ymm2[3],ymm0[2]
127 ; AVX1-ONLY-NEXT: vbroadcastsd 80(%rdi), %ymm4
128 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm2 = ymm2[0,1,2],ymm4[3]
129 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],mem[2,3]
130 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3]
131 ; AVX1-ONLY-NEXT: vmovapd %ymm3, (%rsi)
132 ; AVX1-ONLY-NEXT: vmovapd %ymm2, (%rdx)
133 ; AVX1-ONLY-NEXT: vmovapd %ymm0, (%rcx)
134 ; AVX1-ONLY-NEXT: vzeroupper
135 ; AVX1-ONLY-NEXT: retq
137 ; AVX2-ONLY-LABEL: load_i64_stride3_vf4:
138 ; AVX2-ONLY: # %bb.0:
139 ; AVX2-ONLY-NEXT: vmovaps 32(%rdi), %ymm0
140 ; AVX2-ONLY-NEXT: vmovaps (%rdi), %ymm1
141 ; AVX2-ONLY-NEXT: vinsertf128 $1, 64(%rdi), %ymm0, %ymm2
142 ; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm3 = ymm1[0,3,2,3]
143 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm0[4,5,6,7]
144 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1,2,3,4,5],ymm2[6,7]
145 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
146 ; AVX2-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5]
147 ; AVX2-ONLY-NEXT: vbroadcastsd 80(%rdi), %ymm1
148 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
149 ; AVX2-ONLY-NEXT: vmovaps 16(%rdi), %xmm1
150 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
151 ; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm3 = mem[0,1,0,3]
152 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
153 ; AVX2-ONLY-NEXT: vmovaps %ymm2, (%rsi)
154 ; AVX2-ONLY-NEXT: vmovaps %ymm0, (%rdx)
155 ; AVX2-ONLY-NEXT: vmovaps %ymm1, (%rcx)
156 ; AVX2-ONLY-NEXT: vzeroupper
157 ; AVX2-ONLY-NEXT: retq
159 ; AVX512-LABEL: load_i64_stride3_vf4:
161 ; AVX512-NEXT: vmovdqa64 (%rdi), %zmm0
162 ; AVX512-NEXT: vmovdqa64 64(%rdi), %zmm1
163 ; AVX512-NEXT: vmovdqa {{.*#+}} ymm2 = [0,3,6,9]
164 ; AVX512-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
165 ; AVX512-NEXT: vmovdqa {{.*#+}} ymm3 = [1,4,7,10]
166 ; AVX512-NEXT: vpermi2q %zmm1, %zmm0, %zmm3
167 ; AVX512-NEXT: vmovdqa {{.*#+}} ymm4 = [2,5,8,11]
168 ; AVX512-NEXT: vpermi2q %zmm1, %zmm0, %zmm4
169 ; AVX512-NEXT: vmovdqa %ymm2, (%rsi)
170 ; AVX512-NEXT: vmovdqa %ymm3, (%rdx)
171 ; AVX512-NEXT: vmovdqa %ymm4, (%rcx)
172 ; AVX512-NEXT: vzeroupper
174 %wide.vec = load <12 x i64>, ptr %in.vec, align 64
175 %strided.vec0 = shufflevector <12 x i64> %wide.vec, <12 x i64> poison, <4 x i32> <i32 0, i32 3, i32 6, i32 9>
176 %strided.vec1 = shufflevector <12 x i64> %wide.vec, <12 x i64> poison, <4 x i32> <i32 1, i32 4, i32 7, i32 10>
177 %strided.vec2 = shufflevector <12 x i64> %wide.vec, <12 x i64> poison, <4 x i32> <i32 2, i32 5, i32 8, i32 11>
178 store <4 x i64> %strided.vec0, ptr %out.vec0, align 64
179 store <4 x i64> %strided.vec1, ptr %out.vec1, align 64
180 store <4 x i64> %strided.vec2, ptr %out.vec2, align 64
184 define void @load_i64_stride3_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2) nounwind {
185 ; SSE-LABEL: load_i64_stride3_vf8:
187 ; SSE-NEXT: movapd 128(%rdi), %xmm2
188 ; SSE-NEXT: movapd 176(%rdi), %xmm1
189 ; SSE-NEXT: movapd 80(%rdi), %xmm0
190 ; SSE-NEXT: movapd 96(%rdi), %xmm3
191 ; SSE-NEXT: movapd 112(%rdi), %xmm8
192 ; SSE-NEXT: movapd 144(%rdi), %xmm5
193 ; SSE-NEXT: movapd 160(%rdi), %xmm9
194 ; SSE-NEXT: movapd (%rdi), %xmm6
195 ; SSE-NEXT: movapd 16(%rdi), %xmm10
196 ; SSE-NEXT: movapd 32(%rdi), %xmm4
197 ; SSE-NEXT: movapd 48(%rdi), %xmm7
198 ; SSE-NEXT: movapd 64(%rdi), %xmm11
199 ; SSE-NEXT: movapd %xmm11, %xmm12
200 ; SSE-NEXT: movsd {{.*#+}} xmm12 = xmm7[0],xmm12[1]
201 ; SSE-NEXT: movapd %xmm9, %xmm13
202 ; SSE-NEXT: movsd {{.*#+}} xmm13 = xmm5[0],xmm13[1]
203 ; SSE-NEXT: movapd %xmm8, %xmm14
204 ; SSE-NEXT: movsd {{.*#+}} xmm14 = xmm3[0],xmm14[1]
205 ; SSE-NEXT: movapd %xmm10, %xmm15
206 ; SSE-NEXT: movsd {{.*#+}} xmm15 = xmm6[0],xmm15[1]
207 ; SSE-NEXT: shufpd {{.*#+}} xmm7 = xmm7[1],xmm0[0]
208 ; SSE-NEXT: shufpd {{.*#+}} xmm5 = xmm5[1],xmm1[0]
209 ; SSE-NEXT: shufpd {{.*#+}} xmm3 = xmm3[1],xmm2[0]
210 ; SSE-NEXT: shufpd {{.*#+}} xmm6 = xmm6[1],xmm4[0]
211 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm11[0],xmm0[1]
212 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm9[0],xmm1[1]
213 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm8[0],xmm2[1]
214 ; SSE-NEXT: movsd {{.*#+}} xmm4 = xmm10[0],xmm4[1]
215 ; SSE-NEXT: movapd %xmm14, 32(%rsi)
216 ; SSE-NEXT: movapd %xmm13, 48(%rsi)
217 ; SSE-NEXT: movapd %xmm15, (%rsi)
218 ; SSE-NEXT: movapd %xmm12, 16(%rsi)
219 ; SSE-NEXT: movapd %xmm3, 32(%rdx)
220 ; SSE-NEXT: movapd %xmm5, 48(%rdx)
221 ; SSE-NEXT: movapd %xmm6, (%rdx)
222 ; SSE-NEXT: movapd %xmm7, 16(%rdx)
223 ; SSE-NEXT: movapd %xmm2, 32(%rcx)
224 ; SSE-NEXT: movapd %xmm1, 48(%rcx)
225 ; SSE-NEXT: movapd %xmm4, (%rcx)
226 ; SSE-NEXT: movapd %xmm0, 16(%rcx)
229 ; AVX1-ONLY-LABEL: load_i64_stride3_vf8:
230 ; AVX1-ONLY: # %bb.0:
231 ; AVX1-ONLY-NEXT: vmovapd 128(%rdi), %ymm0
232 ; AVX1-ONLY-NEXT: vmovapd 32(%rdi), %ymm1
233 ; AVX1-ONLY-NEXT: vmovaps 16(%rdi), %xmm2
234 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm3 = mem[0,1],ymm1[2,3]
235 ; AVX1-ONLY-NEXT: vinsertf128 $1, 64(%rdi), %ymm2, %ymm2
236 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm4 = ymm3[0],ymm2[1],ymm3[2],ymm2[3]
237 ; AVX1-ONLY-NEXT: vmovaps 112(%rdi), %xmm5
238 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm6 = mem[0,1],ymm0[2,3]
239 ; AVX1-ONLY-NEXT: vinsertf128 $1, 160(%rdi), %ymm5, %ymm5
240 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm7 = ymm6[0],ymm5[1],ymm6[2],ymm5[3]
241 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm3 = ymm3[1],ymm1[0],ymm3[3],ymm1[2]
242 ; AVX1-ONLY-NEXT: vbroadcastsd 80(%rdi), %ymm8
243 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm3 = ymm3[0,1,2],ymm8[3]
244 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm6 = ymm6[1],ymm0[0],ymm6[3],ymm0[2]
245 ; AVX1-ONLY-NEXT: vbroadcastsd 176(%rdi), %ymm8
246 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm6 = ymm6[0,1,2],ymm8[3]
247 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0,1],mem[2,3]
248 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2],ymm1[3]
249 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],mem[2,3]
250 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm5[0],ymm0[1],ymm5[2],ymm0[3]
251 ; AVX1-ONLY-NEXT: vmovapd %ymm7, 32(%rsi)
252 ; AVX1-ONLY-NEXT: vmovapd %ymm4, (%rsi)
253 ; AVX1-ONLY-NEXT: vmovapd %ymm6, 32(%rdx)
254 ; AVX1-ONLY-NEXT: vmovapd %ymm3, (%rdx)
255 ; AVX1-ONLY-NEXT: vmovapd %ymm0, 32(%rcx)
256 ; AVX1-ONLY-NEXT: vmovapd %ymm1, (%rcx)
257 ; AVX1-ONLY-NEXT: vzeroupper
258 ; AVX1-ONLY-NEXT: retq
260 ; AVX2-ONLY-LABEL: load_i64_stride3_vf8:
261 ; AVX2-ONLY: # %bb.0:
262 ; AVX2-ONLY-NEXT: vmovaps 128(%rdi), %ymm0
263 ; AVX2-ONLY-NEXT: vmovaps 96(%rdi), %ymm1
264 ; AVX2-ONLY-NEXT: vmovaps 32(%rdi), %ymm2
265 ; AVX2-ONLY-NEXT: vmovaps (%rdi), %ymm3
266 ; AVX2-ONLY-NEXT: vinsertf128 $1, 64(%rdi), %ymm0, %ymm4
267 ; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm5 = ymm3[0,3,2,3]
268 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm2[4,5,6,7]
269 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1,2,3,4,5],ymm4[6,7]
270 ; AVX2-ONLY-NEXT: vinsertf128 $1, 160(%rdi), %ymm0, %ymm5
271 ; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm6 = ymm1[0,3,2,3]
272 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm0[4,5,6,7]
273 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1,2,3,4,5],ymm5[6,7]
274 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3],ymm2[4,5,6,7]
275 ; AVX2-ONLY-NEXT: vshufps {{.*#+}} ymm2 = ymm2[2,3,0,1,6,7,4,5]
276 ; AVX2-ONLY-NEXT: vbroadcastsd 80(%rdi), %ymm3
277 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm3[6,7]
278 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
279 ; AVX2-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5]
280 ; AVX2-ONLY-NEXT: vbroadcastsd 176(%rdi), %ymm1
281 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
282 ; AVX2-ONLY-NEXT: vmovaps 16(%rdi), %xmm1
283 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
284 ; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm3 = mem[0,1,0,3]
285 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
286 ; AVX2-ONLY-NEXT: vmovaps 112(%rdi), %xmm3
287 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm3 = xmm3[0,1],mem[2,3]
288 ; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm6 = mem[0,1,0,3]
289 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm6[4,5,6,7]
290 ; AVX2-ONLY-NEXT: vmovaps %ymm5, 32(%rsi)
291 ; AVX2-ONLY-NEXT: vmovaps %ymm4, (%rsi)
292 ; AVX2-ONLY-NEXT: vmovaps %ymm0, 32(%rdx)
293 ; AVX2-ONLY-NEXT: vmovaps %ymm2, (%rdx)
294 ; AVX2-ONLY-NEXT: vmovaps %ymm3, 32(%rcx)
295 ; AVX2-ONLY-NEXT: vmovaps %ymm1, (%rcx)
296 ; AVX2-ONLY-NEXT: vzeroupper
297 ; AVX2-ONLY-NEXT: retq
299 ; AVX512-LABEL: load_i64_stride3_vf8:
301 ; AVX512-NEXT: vmovdqa64 (%rdi), %zmm0
302 ; AVX512-NEXT: vmovdqa64 64(%rdi), %zmm1
303 ; AVX512-NEXT: vmovdqa64 128(%rdi), %zmm2
304 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = <0,3,6,9,12,15,u,u>
305 ; AVX512-NEXT: vpermi2q %zmm1, %zmm0, %zmm3
306 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm4 = [0,1,2,3,4,5,10,13]
307 ; AVX512-NEXT: vpermi2q %zmm2, %zmm3, %zmm4
308 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = <1,4,7,10,13,u,u,u>
309 ; AVX512-NEXT: vpermi2q %zmm1, %zmm0, %zmm3
310 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm5 = [0,1,2,3,4,8,11,14]
311 ; AVX512-NEXT: vpermi2q %zmm2, %zmm3, %zmm5
312 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = <10,13,0,3,6,u,u,u>
313 ; AVX512-NEXT: vpermi2q %zmm0, %zmm1, %zmm3
314 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm0 = [0,1,2,3,4,9,12,15]
315 ; AVX512-NEXT: vpermi2q %zmm2, %zmm3, %zmm0
316 ; AVX512-NEXT: vmovdqa64 %zmm4, (%rsi)
317 ; AVX512-NEXT: vmovdqa64 %zmm5, (%rdx)
318 ; AVX512-NEXT: vmovdqa64 %zmm0, (%rcx)
319 ; AVX512-NEXT: vzeroupper
321 %wide.vec = load <24 x i64>, ptr %in.vec, align 64
322 %strided.vec0 = shufflevector <24 x i64> %wide.vec, <24 x i64> poison, <8 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21>
323 %strided.vec1 = shufflevector <24 x i64> %wide.vec, <24 x i64> poison, <8 x i32> <i32 1, i32 4, i32 7, i32 10, i32 13, i32 16, i32 19, i32 22>
324 %strided.vec2 = shufflevector <24 x i64> %wide.vec, <24 x i64> poison, <8 x i32> <i32 2, i32 5, i32 8, i32 11, i32 14, i32 17, i32 20, i32 23>
325 store <8 x i64> %strided.vec0, ptr %out.vec0, align 64
326 store <8 x i64> %strided.vec1, ptr %out.vec1, align 64
327 store <8 x i64> %strided.vec2, ptr %out.vec2, align 64
331 define void @load_i64_stride3_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2) nounwind {
332 ; SSE-LABEL: load_i64_stride3_vf16:
334 ; SSE-NEXT: subq $24, %rsp
335 ; SSE-NEXT: movapd 128(%rdi), %xmm0
336 ; SSE-NEXT: movapd 176(%rdi), %xmm1
337 ; SSE-NEXT: movapd 224(%rdi), %xmm4
338 ; SSE-NEXT: movapd 272(%rdi), %xmm3
339 ; SSE-NEXT: movapd 80(%rdi), %xmm2
340 ; SSE-NEXT: movapd 96(%rdi), %xmm5
341 ; SSE-NEXT: movapd 112(%rdi), %xmm12
342 ; SSE-NEXT: movapd 144(%rdi), %xmm6
343 ; SSE-NEXT: movapd 160(%rdi), %xmm14
344 ; SSE-NEXT: movapd 192(%rdi), %xmm7
345 ; SSE-NEXT: movapd 208(%rdi), %xmm11
346 ; SSE-NEXT: movapd 240(%rdi), %xmm10
347 ; SSE-NEXT: movapd 256(%rdi), %xmm13
348 ; SSE-NEXT: movapd 48(%rdi), %xmm9
349 ; SSE-NEXT: movapd 64(%rdi), %xmm15
350 ; SSE-NEXT: movapd %xmm15, %xmm8
351 ; SSE-NEXT: movsd {{.*#+}} xmm8 = xmm9[0],xmm8[1]
352 ; SSE-NEXT: movapd %xmm8, (%rsp) # 16-byte Spill
353 ; SSE-NEXT: shufpd {{.*#+}} xmm9 = xmm9[1],xmm2[0]
354 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm15[0],xmm2[1]
355 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
356 ; SSE-NEXT: movapd %xmm14, %xmm15
357 ; SSE-NEXT: movsd {{.*#+}} xmm15 = xmm6[0],xmm15[1]
358 ; SSE-NEXT: shufpd {{.*#+}} xmm6 = xmm6[1],xmm1[0]
359 ; SSE-NEXT: movapd %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
360 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm14[0],xmm1[1]
361 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
362 ; SSE-NEXT: movapd %xmm12, %xmm14
363 ; SSE-NEXT: movsd {{.*#+}} xmm14 = xmm5[0],xmm14[1]
364 ; SSE-NEXT: shufpd {{.*#+}} xmm5 = xmm5[1],xmm0[0]
365 ; SSE-NEXT: movapd %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
366 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm12[0],xmm0[1]
367 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
368 ; SSE-NEXT: movapd %xmm13, %xmm12
369 ; SSE-NEXT: movsd {{.*#+}} xmm12 = xmm10[0],xmm12[1]
370 ; SSE-NEXT: shufpd {{.*#+}} xmm10 = xmm10[1],xmm3[0]
371 ; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm13[0],xmm3[1]
372 ; SSE-NEXT: movapd %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
373 ; SSE-NEXT: movapd %xmm11, %xmm8
374 ; SSE-NEXT: movsd {{.*#+}} xmm8 = xmm7[0],xmm8[1]
375 ; SSE-NEXT: shufpd {{.*#+}} xmm7 = xmm7[1],xmm4[0]
376 ; SSE-NEXT: movapd %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
377 ; SSE-NEXT: movsd {{.*#+}} xmm4 = xmm11[0],xmm4[1]
378 ; SSE-NEXT: movapd %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
379 ; SSE-NEXT: movapd 336(%rdi), %xmm13
380 ; SSE-NEXT: movapd 352(%rdi), %xmm0
381 ; SSE-NEXT: movapd %xmm0, %xmm7
382 ; SSE-NEXT: movsd {{.*#+}} xmm7 = xmm13[0],xmm7[1]
383 ; SSE-NEXT: movapd 368(%rdi), %xmm11
384 ; SSE-NEXT: shufpd {{.*#+}} xmm13 = xmm13[1],xmm11[0]
385 ; SSE-NEXT: movsd {{.*#+}} xmm11 = xmm0[0],xmm11[1]
386 ; SSE-NEXT: movapd 288(%rdi), %xmm0
387 ; SSE-NEXT: movapd 304(%rdi), %xmm2
388 ; SSE-NEXT: movapd %xmm2, %xmm1
389 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
390 ; SSE-NEXT: movapd 320(%rdi), %xmm6
391 ; SSE-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1],xmm6[0]
392 ; SSE-NEXT: movsd {{.*#+}} xmm6 = xmm2[0],xmm6[1]
393 ; SSE-NEXT: movapd (%rdi), %xmm2
394 ; SSE-NEXT: movapd 16(%rdi), %xmm5
395 ; SSE-NEXT: movapd %xmm5, %xmm3
396 ; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm2[0],xmm3[1]
397 ; SSE-NEXT: movapd 32(%rdi), %xmm4
398 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm4[0]
399 ; SSE-NEXT: movsd {{.*#+}} xmm4 = xmm5[0],xmm4[1]
400 ; SSE-NEXT: movapd %xmm1, 96(%rsi)
401 ; SSE-NEXT: movapd %xmm14, 32(%rsi)
402 ; SSE-NEXT: movapd %xmm7, 112(%rsi)
403 ; SSE-NEXT: movapd %xmm15, 48(%rsi)
404 ; SSE-NEXT: movapd %xmm8, 64(%rsi)
405 ; SSE-NEXT: movapd %xmm3, (%rsi)
406 ; SSE-NEXT: movapd %xmm12, 80(%rsi)
407 ; SSE-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload
408 ; SSE-NEXT: movaps %xmm1, 16(%rsi)
409 ; SSE-NEXT: movapd %xmm0, 96(%rdx)
410 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
411 ; SSE-NEXT: movaps %xmm0, 32(%rdx)
412 ; SSE-NEXT: movapd %xmm13, 112(%rdx)
413 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
414 ; SSE-NEXT: movaps %xmm0, 48(%rdx)
415 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
416 ; SSE-NEXT: movaps %xmm0, 64(%rdx)
417 ; SSE-NEXT: movapd %xmm2, (%rdx)
418 ; SSE-NEXT: movapd %xmm10, 80(%rdx)
419 ; SSE-NEXT: movapd %xmm9, 16(%rdx)
420 ; SSE-NEXT: movapd %xmm6, 96(%rcx)
421 ; SSE-NEXT: movapd %xmm11, 112(%rcx)
422 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
423 ; SSE-NEXT: movaps %xmm0, 64(%rcx)
424 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
425 ; SSE-NEXT: movaps %xmm0, 80(%rcx)
426 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
427 ; SSE-NEXT: movaps %xmm0, 32(%rcx)
428 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
429 ; SSE-NEXT: movaps %xmm0, 48(%rcx)
430 ; SSE-NEXT: movapd %xmm4, (%rcx)
431 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
432 ; SSE-NEXT: movaps %xmm0, 16(%rcx)
433 ; SSE-NEXT: addq $24, %rsp
436 ; AVX1-ONLY-LABEL: load_i64_stride3_vf16:
437 ; AVX1-ONLY: # %bb.0:
438 ; AVX1-ONLY-NEXT: vmovapd 32(%rdi), %ymm1
439 ; AVX1-ONLY-NEXT: vmovapd 224(%rdi), %ymm0
440 ; AVX1-ONLY-NEXT: vmovapd 320(%rdi), %ymm2
441 ; AVX1-ONLY-NEXT: vmovapd 128(%rdi), %ymm5
442 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm7 = mem[0,1],ymm5[2,3]
443 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm8 = mem[0,1],ymm2[2,3]
444 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm3 = mem[0,1],ymm0[2,3]
445 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm9 = mem[0,1],ymm1[2,3]
446 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm4 = ymm7[1],ymm5[0],ymm7[3],ymm5[2]
447 ; AVX1-ONLY-NEXT: vbroadcastsd 176(%rdi), %ymm6
448 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm4 = ymm4[0,1,2],ymm6[3]
449 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm6 = ymm8[1],ymm2[0],ymm8[3],ymm2[2]
450 ; AVX1-ONLY-NEXT: vbroadcastsd 368(%rdi), %ymm10
451 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm6 = ymm6[0,1,2],ymm10[3]
452 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm10 = ymm9[1],ymm1[0],ymm9[3],ymm1[2]
453 ; AVX1-ONLY-NEXT: vbroadcastsd 80(%rdi), %ymm11
454 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm10 = ymm10[0,1,2],ymm11[3]
455 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm11 = ymm3[1],ymm0[0],ymm3[3],ymm0[2]
456 ; AVX1-ONLY-NEXT: vbroadcastsd 272(%rdi), %ymm12
457 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm11 = ymm11[0,1,2],ymm12[3]
458 ; AVX1-ONLY-NEXT: vmovaps 112(%rdi), %xmm12
459 ; AVX1-ONLY-NEXT: vinsertf128 $1, 160(%rdi), %ymm12, %ymm12
460 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm7 = ymm7[0],ymm12[1],ymm7[2],ymm12[3]
461 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm5 = ymm5[0,1],mem[2,3]
462 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm5 = ymm12[0],ymm5[1],ymm12[2],ymm5[3]
463 ; AVX1-ONLY-NEXT: vmovaps 304(%rdi), %xmm12
464 ; AVX1-ONLY-NEXT: vinsertf128 $1, 352(%rdi), %ymm12, %ymm12
465 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm8 = ymm8[0],ymm12[1],ymm8[2],ymm12[3]
466 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm2 = ymm2[0,1],mem[2,3]
467 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm2 = ymm12[0],ymm2[1],ymm12[2],ymm2[3]
468 ; AVX1-ONLY-NEXT: vmovaps 16(%rdi), %xmm12
469 ; AVX1-ONLY-NEXT: vinsertf128 $1, 64(%rdi), %ymm12, %ymm12
470 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm9 = ymm9[0],ymm12[1],ymm9[2],ymm12[3]
471 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0,1],mem[2,3]
472 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm1 = ymm12[0],ymm1[1],ymm12[2],ymm1[3]
473 ; AVX1-ONLY-NEXT: vmovaps 208(%rdi), %xmm12
474 ; AVX1-ONLY-NEXT: vinsertf128 $1, 256(%rdi), %ymm12, %ymm12
475 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm3 = ymm3[0],ymm12[1],ymm3[2],ymm12[3]
476 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],mem[2,3]
477 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm12[0],ymm0[1],ymm12[2],ymm0[3]
478 ; AVX1-ONLY-NEXT: vmovapd %ymm9, (%rsi)
479 ; AVX1-ONLY-NEXT: vmovapd %ymm3, 64(%rsi)
480 ; AVX1-ONLY-NEXT: vmovapd %ymm8, 96(%rsi)
481 ; AVX1-ONLY-NEXT: vmovapd %ymm7, 32(%rsi)
482 ; AVX1-ONLY-NEXT: vmovapd %ymm11, 64(%rdx)
483 ; AVX1-ONLY-NEXT: vmovapd %ymm10, (%rdx)
484 ; AVX1-ONLY-NEXT: vmovapd %ymm6, 96(%rdx)
485 ; AVX1-ONLY-NEXT: vmovapd %ymm4, 32(%rdx)
486 ; AVX1-ONLY-NEXT: vmovapd %ymm0, 64(%rcx)
487 ; AVX1-ONLY-NEXT: vmovapd %ymm1, (%rcx)
488 ; AVX1-ONLY-NEXT: vmovapd %ymm2, 96(%rcx)
489 ; AVX1-ONLY-NEXT: vmovapd %ymm5, 32(%rcx)
490 ; AVX1-ONLY-NEXT: vzeroupper
491 ; AVX1-ONLY-NEXT: retq
493 ; AVX2-ONLY-LABEL: load_i64_stride3_vf16:
494 ; AVX2-ONLY: # %bb.0:
495 ; AVX2-ONLY-NEXT: vmovaps 32(%rdi), %ymm8
496 ; AVX2-ONLY-NEXT: vmovaps (%rdi), %ymm9
497 ; AVX2-ONLY-NEXT: vmovaps 224(%rdi), %ymm6
498 ; AVX2-ONLY-NEXT: vmovaps 192(%rdi), %ymm7
499 ; AVX2-ONLY-NEXT: vmovaps 320(%rdi), %ymm5
500 ; AVX2-ONLY-NEXT: vmovaps 288(%rdi), %ymm10
501 ; AVX2-ONLY-NEXT: vmovaps 128(%rdi), %ymm3
502 ; AVX2-ONLY-NEXT: vmovaps 96(%rdi), %ymm11
503 ; AVX2-ONLY-NEXT: vinsertf128 $1, 160(%rdi), %ymm0, %ymm0
504 ; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm1 = ymm11[0,3,2,3]
505 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
506 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
507 ; AVX2-ONLY-NEXT: vinsertf128 $1, 352(%rdi), %ymm0, %ymm1
508 ; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm2 = ymm10[0,3,2,3]
509 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm5[4,5,6,7]
510 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5],ymm1[6,7]
511 ; AVX2-ONLY-NEXT: vinsertf128 $1, 256(%rdi), %ymm0, %ymm2
512 ; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm4 = ymm7[0,3,2,3]
513 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm6[4,5,6,7]
514 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm4[0,1,2,3,4,5],ymm2[6,7]
515 ; AVX2-ONLY-NEXT: vinsertf128 $1, 64(%rdi), %ymm0, %ymm4
516 ; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm12 = ymm9[0,3,2,3]
517 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm8[4,5,6,7]
518 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm12[0,1,2,3,4,5],ymm4[6,7]
519 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm11[2,3],ymm3[4,5,6,7]
520 ; AVX2-ONLY-NEXT: vshufps {{.*#+}} ymm3 = ymm3[2,3,0,1,6,7,4,5]
521 ; AVX2-ONLY-NEXT: vbroadcastsd 176(%rdi), %ymm11
522 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm11[6,7]
523 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1],ymm10[2,3],ymm5[4,5,6,7]
524 ; AVX2-ONLY-NEXT: vshufps {{.*#+}} ymm5 = ymm5[2,3,0,1,6,7,4,5]
525 ; AVX2-ONLY-NEXT: vbroadcastsd 368(%rdi), %ymm10
526 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5],ymm10[6,7]
527 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1],ymm9[2,3],ymm8[4,5,6,7]
528 ; AVX2-ONLY-NEXT: vshufps {{.*#+}} ymm8 = ymm8[2,3,0,1,6,7,4,5]
529 ; AVX2-ONLY-NEXT: vbroadcastsd 80(%rdi), %ymm9
530 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5],ymm9[6,7]
531 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1],ymm7[2,3],ymm6[4,5,6,7]
532 ; AVX2-ONLY-NEXT: vshufps {{.*#+}} ymm6 = ymm6[2,3,0,1,6,7,4,5]
533 ; AVX2-ONLY-NEXT: vbroadcastsd 272(%rdi), %ymm7
534 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5],ymm7[6,7]
535 ; AVX2-ONLY-NEXT: vmovaps 112(%rdi), %xmm7
536 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm7 = xmm7[0,1],mem[2,3]
537 ; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm9 = mem[0,1,0,3]
538 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm9[4,5,6,7]
539 ; AVX2-ONLY-NEXT: vmovaps 304(%rdi), %xmm9
540 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm9 = xmm9[0,1],mem[2,3]
541 ; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm10 = mem[0,1,0,3]
542 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm10[4,5,6,7]
543 ; AVX2-ONLY-NEXT: vmovaps 16(%rdi), %xmm10
544 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm10 = xmm10[0,1],mem[2,3]
545 ; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm11 = mem[0,1,0,3]
546 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm11[4,5,6,7]
547 ; AVX2-ONLY-NEXT: vmovaps 208(%rdi), %xmm11
548 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm11 = xmm11[0,1],mem[2,3]
549 ; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm12 = mem[0,1,0,3]
550 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0,1,2,3],ymm12[4,5,6,7]
551 ; AVX2-ONLY-NEXT: vmovaps %ymm4, (%rsi)
552 ; AVX2-ONLY-NEXT: vmovaps %ymm2, 64(%rsi)
553 ; AVX2-ONLY-NEXT: vmovaps %ymm1, 96(%rsi)
554 ; AVX2-ONLY-NEXT: vmovaps %ymm0, 32(%rsi)
555 ; AVX2-ONLY-NEXT: vmovaps %ymm6, 64(%rdx)
556 ; AVX2-ONLY-NEXT: vmovaps %ymm8, (%rdx)
557 ; AVX2-ONLY-NEXT: vmovaps %ymm5, 96(%rdx)
558 ; AVX2-ONLY-NEXT: vmovaps %ymm3, 32(%rdx)
559 ; AVX2-ONLY-NEXT: vmovaps %ymm11, 64(%rcx)
560 ; AVX2-ONLY-NEXT: vmovaps %ymm10, (%rcx)
561 ; AVX2-ONLY-NEXT: vmovaps %ymm9, 96(%rcx)
562 ; AVX2-ONLY-NEXT: vmovaps %ymm7, 32(%rcx)
563 ; AVX2-ONLY-NEXT: vzeroupper
564 ; AVX2-ONLY-NEXT: retq
566 ; AVX512-LABEL: load_i64_stride3_vf16:
568 ; AVX512-NEXT: vmovdqa64 320(%rdi), %zmm0
569 ; AVX512-NEXT: vmovdqa64 256(%rdi), %zmm1
570 ; AVX512-NEXT: vmovdqa64 (%rdi), %zmm2
571 ; AVX512-NEXT: vmovdqa64 64(%rdi), %zmm3
572 ; AVX512-NEXT: vmovdqa64 128(%rdi), %zmm4
573 ; AVX512-NEXT: vmovdqa64 192(%rdi), %zmm5
574 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm6 = <0,3,6,9,12,15,u,u>
575 ; AVX512-NEXT: vmovdqa64 %zmm5, %zmm7
576 ; AVX512-NEXT: vpermt2q %zmm1, %zmm6, %zmm7
577 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm8 = [0,1,2,3,4,5,10,13]
578 ; AVX512-NEXT: vpermt2q %zmm0, %zmm8, %zmm7
579 ; AVX512-NEXT: vpermi2q %zmm3, %zmm2, %zmm6
580 ; AVX512-NEXT: vpermt2q %zmm4, %zmm8, %zmm6
581 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm8 = <1,4,7,10,13,u,u,u>
582 ; AVX512-NEXT: vmovdqa64 %zmm5, %zmm9
583 ; AVX512-NEXT: vpermt2q %zmm1, %zmm8, %zmm9
584 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm10 = [0,1,2,3,4,8,11,14]
585 ; AVX512-NEXT: vpermt2q %zmm0, %zmm10, %zmm9
586 ; AVX512-NEXT: vpermi2q %zmm3, %zmm2, %zmm8
587 ; AVX512-NEXT: vpermt2q %zmm4, %zmm10, %zmm8
588 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm10 = <10,13,0,3,6,u,u,u>
589 ; AVX512-NEXT: vpermt2q %zmm5, %zmm10, %zmm1
590 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm5 = [0,1,2,3,4,9,12,15]
591 ; AVX512-NEXT: vpermt2q %zmm0, %zmm5, %zmm1
592 ; AVX512-NEXT: vpermt2q %zmm2, %zmm10, %zmm3
593 ; AVX512-NEXT: vpermt2q %zmm4, %zmm5, %zmm3
594 ; AVX512-NEXT: vmovdqa64 %zmm7, 64(%rsi)
595 ; AVX512-NEXT: vmovdqa64 %zmm6, (%rsi)
596 ; AVX512-NEXT: vmovdqa64 %zmm9, 64(%rdx)
597 ; AVX512-NEXT: vmovdqa64 %zmm8, (%rdx)
598 ; AVX512-NEXT: vmovdqa64 %zmm1, 64(%rcx)
599 ; AVX512-NEXT: vmovdqa64 %zmm3, (%rcx)
600 ; AVX512-NEXT: vzeroupper
602 %wide.vec = load <48 x i64>, ptr %in.vec, align 64
603 %strided.vec0 = shufflevector <48 x i64> %wide.vec, <48 x i64> poison, <16 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21, i32 24, i32 27, i32 30, i32 33, i32 36, i32 39, i32 42, i32 45>
604 %strided.vec1 = shufflevector <48 x i64> %wide.vec, <48 x i64> poison, <16 x i32> <i32 1, i32 4, i32 7, i32 10, i32 13, i32 16, i32 19, i32 22, i32 25, i32 28, i32 31, i32 34, i32 37, i32 40, i32 43, i32 46>
605 %strided.vec2 = shufflevector <48 x i64> %wide.vec, <48 x i64> poison, <16 x i32> <i32 2, i32 5, i32 8, i32 11, i32 14, i32 17, i32 20, i32 23, i32 26, i32 29, i32 32, i32 35, i32 38, i32 41, i32 44, i32 47>
606 store <16 x i64> %strided.vec0, ptr %out.vec0, align 64
607 store <16 x i64> %strided.vec1, ptr %out.vec1, align 64
608 store <16 x i64> %strided.vec2, ptr %out.vec2, align 64
612 define void @load_i64_stride3_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2) nounwind {
613 ; SSE-LABEL: load_i64_stride3_vf32:
615 ; SSE-NEXT: subq $408, %rsp # imm = 0x198
616 ; SSE-NEXT: movapd 224(%rdi), %xmm6
617 ; SSE-NEXT: movapd 272(%rdi), %xmm9
618 ; SSE-NEXT: movapd 128(%rdi), %xmm5
619 ; SSE-NEXT: movapd 176(%rdi), %xmm8
620 ; SSE-NEXT: movapd 80(%rdi), %xmm7
621 ; SSE-NEXT: movapd 96(%rdi), %xmm10
622 ; SSE-NEXT: movapd 112(%rdi), %xmm0
623 ; SSE-NEXT: movapd 144(%rdi), %xmm11
624 ; SSE-NEXT: movapd 160(%rdi), %xmm1
625 ; SSE-NEXT: movapd 192(%rdi), %xmm12
626 ; SSE-NEXT: movapd 208(%rdi), %xmm2
627 ; SSE-NEXT: movapd 240(%rdi), %xmm13
628 ; SSE-NEXT: movapd 256(%rdi), %xmm3
629 ; SSE-NEXT: movapd 48(%rdi), %xmm14
630 ; SSE-NEXT: movapd 64(%rdi), %xmm4
631 ; SSE-NEXT: movapd %xmm4, %xmm15
632 ; SSE-NEXT: movsd {{.*#+}} xmm15 = xmm14[0],xmm15[1]
633 ; SSE-NEXT: movapd %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
634 ; SSE-NEXT: shufpd {{.*#+}} xmm14 = xmm14[1],xmm7[0]
635 ; SSE-NEXT: movapd %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
636 ; SSE-NEXT: movsd {{.*#+}} xmm7 = xmm4[0],xmm7[1]
637 ; SSE-NEXT: movapd %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
638 ; SSE-NEXT: movapd %xmm0, %xmm4
639 ; SSE-NEXT: movsd {{.*#+}} xmm4 = xmm10[0],xmm4[1]
640 ; SSE-NEXT: movapd %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
641 ; SSE-NEXT: shufpd {{.*#+}} xmm10 = xmm10[1],xmm5[0]
642 ; SSE-NEXT: movapd %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
643 ; SSE-NEXT: movsd {{.*#+}} xmm5 = xmm0[0],xmm5[1]
644 ; SSE-NEXT: movapd %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
645 ; SSE-NEXT: movapd %xmm1, %xmm0
646 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm11[0],xmm0[1]
647 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
648 ; SSE-NEXT: shufpd {{.*#+}} xmm11 = xmm11[1],xmm8[0]
649 ; SSE-NEXT: movapd %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
650 ; SSE-NEXT: movsd {{.*#+}} xmm8 = xmm1[0],xmm8[1]
651 ; SSE-NEXT: movapd %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
652 ; SSE-NEXT: movapd %xmm2, %xmm0
653 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm12[0],xmm0[1]
654 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
655 ; SSE-NEXT: shufpd {{.*#+}} xmm12 = xmm12[1],xmm6[0]
656 ; SSE-NEXT: movapd %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
657 ; SSE-NEXT: movsd {{.*#+}} xmm6 = xmm2[0],xmm6[1]
658 ; SSE-NEXT: movapd %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
659 ; SSE-NEXT: movapd %xmm3, %xmm0
660 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm13[0],xmm0[1]
661 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
662 ; SSE-NEXT: shufpd {{.*#+}} xmm13 = xmm13[1],xmm9[0]
663 ; SSE-NEXT: movapd %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
664 ; SSE-NEXT: movsd {{.*#+}} xmm9 = xmm3[0],xmm9[1]
665 ; SSE-NEXT: movapd %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
666 ; SSE-NEXT: movapd 288(%rdi), %xmm2
667 ; SSE-NEXT: movapd 304(%rdi), %xmm0
668 ; SSE-NEXT: movapd %xmm0, %xmm1
669 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
670 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
671 ; SSE-NEXT: movapd 320(%rdi), %xmm1
672 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
673 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
674 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
675 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
676 ; SSE-NEXT: movapd 336(%rdi), %xmm2
677 ; SSE-NEXT: movapd 352(%rdi), %xmm0
678 ; SSE-NEXT: movapd %xmm0, %xmm1
679 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
680 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
681 ; SSE-NEXT: movapd 368(%rdi), %xmm1
682 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
683 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
684 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
685 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
686 ; SSE-NEXT: movapd 384(%rdi), %xmm2
687 ; SSE-NEXT: movapd 400(%rdi), %xmm0
688 ; SSE-NEXT: movapd %xmm0, %xmm1
689 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
690 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
691 ; SSE-NEXT: movapd 416(%rdi), %xmm1
692 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
693 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
694 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
695 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
696 ; SSE-NEXT: movapd 432(%rdi), %xmm2
697 ; SSE-NEXT: movapd 448(%rdi), %xmm0
698 ; SSE-NEXT: movapd %xmm0, %xmm1
699 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
700 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
701 ; SSE-NEXT: movapd 464(%rdi), %xmm1
702 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
703 ; SSE-NEXT: movapd %xmm2, (%rsp) # 16-byte Spill
704 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
705 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
706 ; SSE-NEXT: movapd 480(%rdi), %xmm2
707 ; SSE-NEXT: movapd 496(%rdi), %xmm0
708 ; SSE-NEXT: movapd %xmm0, %xmm1
709 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
710 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
711 ; SSE-NEXT: movapd 512(%rdi), %xmm1
712 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
713 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
714 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
715 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
716 ; SSE-NEXT: movapd 528(%rdi), %xmm15
717 ; SSE-NEXT: movapd 544(%rdi), %xmm0
718 ; SSE-NEXT: movapd %xmm0, %xmm1
719 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm15[0],xmm1[1]
720 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
721 ; SSE-NEXT: movapd 560(%rdi), %xmm1
722 ; SSE-NEXT: shufpd {{.*#+}} xmm15 = xmm15[1],xmm1[0]
723 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
724 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
725 ; SSE-NEXT: movapd 576(%rdi), %xmm12
726 ; SSE-NEXT: movapd 592(%rdi), %xmm0
727 ; SSE-NEXT: movapd %xmm0, %xmm14
728 ; SSE-NEXT: movsd {{.*#+}} xmm14 = xmm12[0],xmm14[1]
729 ; SSE-NEXT: movapd 608(%rdi), %xmm1
730 ; SSE-NEXT: shufpd {{.*#+}} xmm12 = xmm12[1],xmm1[0]
731 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
732 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
733 ; SSE-NEXT: movapd 624(%rdi), %xmm8
734 ; SSE-NEXT: movapd 640(%rdi), %xmm0
735 ; SSE-NEXT: movapd %xmm0, %xmm11
736 ; SSE-NEXT: movsd {{.*#+}} xmm11 = xmm8[0],xmm11[1]
737 ; SSE-NEXT: movapd 656(%rdi), %xmm13
738 ; SSE-NEXT: shufpd {{.*#+}} xmm8 = xmm8[1],xmm13[0]
739 ; SSE-NEXT: movsd {{.*#+}} xmm13 = xmm0[0],xmm13[1]
740 ; SSE-NEXT: movapd 672(%rdi), %xmm6
741 ; SSE-NEXT: movapd 688(%rdi), %xmm0
742 ; SSE-NEXT: movapd %xmm0, %xmm5
743 ; SSE-NEXT: movsd {{.*#+}} xmm5 = xmm6[0],xmm5[1]
744 ; SSE-NEXT: movapd 704(%rdi), %xmm10
745 ; SSE-NEXT: shufpd {{.*#+}} xmm6 = xmm6[1],xmm10[0]
746 ; SSE-NEXT: movsd {{.*#+}} xmm10 = xmm0[0],xmm10[1]
747 ; SSE-NEXT: movapd 720(%rdi), %xmm4
748 ; SSE-NEXT: movapd 736(%rdi), %xmm2
749 ; SSE-NEXT: movapd %xmm2, %xmm3
750 ; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm4[0],xmm3[1]
751 ; SSE-NEXT: movapd 752(%rdi), %xmm7
752 ; SSE-NEXT: shufpd {{.*#+}} xmm4 = xmm4[1],xmm7[0]
753 ; SSE-NEXT: movsd {{.*#+}} xmm7 = xmm2[0],xmm7[1]
754 ; SSE-NEXT: movapd (%rdi), %xmm2
755 ; SSE-NEXT: movapd 16(%rdi), %xmm0
756 ; SSE-NEXT: movapd %xmm0, %xmm1
757 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
758 ; SSE-NEXT: movapd 32(%rdi), %xmm9
759 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm9[0]
760 ; SSE-NEXT: movsd {{.*#+}} xmm9 = xmm0[0],xmm9[1]
761 ; SSE-NEXT: movapd %xmm5, 224(%rsi)
762 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
763 ; SSE-NEXT: movaps %xmm0, 160(%rsi)
764 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
765 ; SSE-NEXT: movaps %xmm0, 96(%rsi)
766 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
767 ; SSE-NEXT: movaps %xmm0, 32(%rsi)
768 ; SSE-NEXT: movapd %xmm3, 240(%rsi)
769 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
770 ; SSE-NEXT: movaps %xmm0, 176(%rsi)
771 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
772 ; SSE-NEXT: movaps %xmm0, 112(%rsi)
773 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
774 ; SSE-NEXT: movaps %xmm0, 48(%rsi)
775 ; SSE-NEXT: movapd %xmm14, 192(%rsi)
776 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
777 ; SSE-NEXT: movaps %xmm0, 128(%rsi)
778 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
779 ; SSE-NEXT: movaps %xmm0, 64(%rsi)
780 ; SSE-NEXT: movapd %xmm1, (%rsi)
781 ; SSE-NEXT: movapd %xmm11, 208(%rsi)
782 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
783 ; SSE-NEXT: movaps %xmm0, 144(%rsi)
784 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
785 ; SSE-NEXT: movaps %xmm0, 80(%rsi)
786 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
787 ; SSE-NEXT: movaps %xmm0, 16(%rsi)
788 ; SSE-NEXT: movapd %xmm6, 224(%rdx)
789 ; SSE-NEXT: movapd %xmm4, 240(%rdx)
790 ; SSE-NEXT: movapd %xmm12, 192(%rdx)
791 ; SSE-NEXT: movapd %xmm8, 208(%rdx)
792 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
793 ; SSE-NEXT: movaps %xmm0, 160(%rdx)
794 ; SSE-NEXT: movapd %xmm15, 176(%rdx)
795 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
796 ; SSE-NEXT: movaps %xmm0, 128(%rdx)
797 ; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
798 ; SSE-NEXT: movaps %xmm0, 144(%rdx)
799 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
800 ; SSE-NEXT: movaps %xmm0, 96(%rdx)
801 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
802 ; SSE-NEXT: movaps %xmm0, 112(%rdx)
803 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
804 ; SSE-NEXT: movaps %xmm0, 64(%rdx)
805 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
806 ; SSE-NEXT: movaps %xmm0, 80(%rdx)
807 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
808 ; SSE-NEXT: movaps %xmm0, 32(%rdx)
809 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
810 ; SSE-NEXT: movaps %xmm0, 48(%rdx)
811 ; SSE-NEXT: movapd %xmm2, (%rdx)
812 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
813 ; SSE-NEXT: movaps %xmm0, 16(%rdx)
814 ; SSE-NEXT: movapd %xmm7, 240(%rcx)
815 ; SSE-NEXT: movapd %xmm10, 224(%rcx)
816 ; SSE-NEXT: movapd %xmm13, 208(%rcx)
817 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
818 ; SSE-NEXT: movaps %xmm0, 192(%rcx)
819 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
820 ; SSE-NEXT: movaps %xmm0, 176(%rcx)
821 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
822 ; SSE-NEXT: movaps %xmm0, 160(%rcx)
823 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
824 ; SSE-NEXT: movaps %xmm0, 144(%rcx)
825 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
826 ; SSE-NEXT: movaps %xmm0, 128(%rcx)
827 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
828 ; SSE-NEXT: movaps %xmm0, 112(%rcx)
829 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
830 ; SSE-NEXT: movaps %xmm0, 96(%rcx)
831 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
832 ; SSE-NEXT: movaps %xmm0, 80(%rcx)
833 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
834 ; SSE-NEXT: movaps %xmm0, 64(%rcx)
835 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
836 ; SSE-NEXT: movaps %xmm0, 48(%rcx)
837 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
838 ; SSE-NEXT: movaps %xmm0, 32(%rcx)
839 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
840 ; SSE-NEXT: movaps %xmm0, 16(%rcx)
841 ; SSE-NEXT: movapd %xmm9, (%rcx)
842 ; SSE-NEXT: addq $408, %rsp # imm = 0x198
845 ; AVX1-ONLY-LABEL: load_i64_stride3_vf32:
846 ; AVX1-ONLY: # %bb.0:
847 ; AVX1-ONLY-NEXT: subq $232, %rsp
848 ; AVX1-ONLY-NEXT: vmovapd 32(%rdi), %ymm7
849 ; AVX1-ONLY-NEXT: vmovapd 704(%rdi), %ymm13
850 ; AVX1-ONLY-NEXT: vmovapd 512(%rdi), %ymm9
851 ; AVX1-ONLY-NEXT: vmovapd 320(%rdi), %ymm8
852 ; AVX1-ONLY-NEXT: vmovapd 128(%rdi), %ymm10
853 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm12 = mem[0,1],ymm10[2,3]
854 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm11 = mem[0,1],ymm8[2,3]
855 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm3 = mem[0,1],ymm9[2,3]
856 ; AVX1-ONLY-NEXT: vmovupd %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
857 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm2 = mem[0,1],ymm13[2,3]
858 ; AVX1-ONLY-NEXT: vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
859 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm15 = mem[0,1],ymm7[2,3]
860 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm12[1],ymm10[0],ymm12[3],ymm10[2]
861 ; AVX1-ONLY-NEXT: vbroadcastsd 176(%rdi), %ymm1
862 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3]
863 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
864 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm11[1],ymm8[0],ymm11[3],ymm8[2]
865 ; AVX1-ONLY-NEXT: vbroadcastsd 368(%rdi), %ymm1
866 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3]
867 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
868 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm3[1],ymm9[0],ymm3[3],ymm9[2]
869 ; AVX1-ONLY-NEXT: vbroadcastsd 560(%rdi), %ymm1
870 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3]
871 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
872 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm2[1],ymm13[0],ymm2[3],ymm13[2]
873 ; AVX1-ONLY-NEXT: vbroadcastsd 752(%rdi), %ymm1
874 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3]
875 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
876 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm15[1],ymm7[0],ymm15[3],ymm7[2]
877 ; AVX1-ONLY-NEXT: vbroadcastsd 80(%rdi), %ymm1
878 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3]
879 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
880 ; AVX1-ONLY-NEXT: vmovapd 224(%rdi), %ymm3
881 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm4 = mem[0,1],ymm3[2,3]
882 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm1 = ymm4[1],ymm3[0],ymm4[3],ymm3[2]
883 ; AVX1-ONLY-NEXT: vbroadcastsd 272(%rdi), %ymm2
884 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1,2],ymm2[3]
885 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
886 ; AVX1-ONLY-NEXT: vmovapd 416(%rdi), %ymm1
887 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm6 = mem[0,1],ymm1[2,3]
888 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm2 = ymm6[1],ymm1[0],ymm6[3],ymm1[2]
889 ; AVX1-ONLY-NEXT: vbroadcastsd 464(%rdi), %ymm5
890 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm2[0,1,2],ymm5[3]
891 ; AVX1-ONLY-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
892 ; AVX1-ONLY-NEXT: vmovapd 608(%rdi), %ymm2
893 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm5 = mem[0,1],ymm2[2,3]
894 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm5[1],ymm2[0],ymm5[3],ymm2[2]
895 ; AVX1-ONLY-NEXT: vbroadcastsd 656(%rdi), %ymm14
896 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm14[3]
897 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
898 ; AVX1-ONLY-NEXT: vmovaps 112(%rdi), %xmm0
899 ; AVX1-ONLY-NEXT: vinsertf128 $1, 160(%rdi), %ymm0, %ymm0
900 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm12 = ymm12[0],ymm0[1],ymm12[2],ymm0[3]
901 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm10 = ymm10[0,1],mem[2,3]
902 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm14 = ymm0[0],ymm10[1],ymm0[2],ymm10[3]
903 ; AVX1-ONLY-NEXT: vmovaps 16(%rdi), %xmm0
904 ; AVX1-ONLY-NEXT: vinsertf128 $1, 64(%rdi), %ymm0, %ymm0
905 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm15 = ymm15[0],ymm0[1],ymm15[2],ymm0[3]
906 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm7 = ymm7[0,1],mem[2,3]
907 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm7[1],ymm0[2],ymm7[3]
908 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
909 ; AVX1-ONLY-NEXT: vmovaps 304(%rdi), %xmm0
910 ; AVX1-ONLY-NEXT: vinsertf128 $1, 352(%rdi), %ymm0, %ymm0
911 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm11 = ymm11[0],ymm0[1],ymm11[2],ymm0[3]
912 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm7 = ymm8[0,1],mem[2,3]
913 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm10 = ymm0[0],ymm7[1],ymm0[2],ymm7[3]
914 ; AVX1-ONLY-NEXT: vmovaps 208(%rdi), %xmm0
915 ; AVX1-ONLY-NEXT: vinsertf128 $1, 256(%rdi), %ymm0, %ymm0
916 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm4 = ymm4[0],ymm0[1],ymm4[2],ymm0[3]
917 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm3 = ymm3[0,1],mem[2,3]
918 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm3[1],ymm0[2],ymm3[3]
919 ; AVX1-ONLY-NEXT: vmovaps 496(%rdi), %xmm3
920 ; AVX1-ONLY-NEXT: vinsertf128 $1, 544(%rdi), %ymm3, %ymm3
921 ; AVX1-ONLY-NEXT: vblendpd $5, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm7 # 32-byte Folded Reload
922 ; AVX1-ONLY-NEXT: # ymm7 = mem[0],ymm3[1],mem[2],ymm3[3]
923 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm8 = ymm9[0,1],mem[2,3]
924 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm3 = ymm3[0],ymm8[1],ymm3[2],ymm8[3]
925 ; AVX1-ONLY-NEXT: vmovaps 400(%rdi), %xmm8
926 ; AVX1-ONLY-NEXT: vinsertf128 $1, 448(%rdi), %ymm8, %ymm8
927 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm6 = ymm6[0],ymm8[1],ymm6[2],ymm8[3]
928 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0,1],mem[2,3]
929 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm1 = ymm8[0],ymm1[1],ymm8[2],ymm1[3]
930 ; AVX1-ONLY-NEXT: vmovaps 688(%rdi), %xmm8
931 ; AVX1-ONLY-NEXT: vinsertf128 $1, 736(%rdi), %ymm8, %ymm8
932 ; AVX1-ONLY-NEXT: vblendpd $5, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm9 # 32-byte Folded Reload
933 ; AVX1-ONLY-NEXT: # ymm9 = mem[0],ymm8[1],mem[2],ymm8[3]
934 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm13 = ymm13[0,1],mem[2,3]
935 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm8 = ymm8[0],ymm13[1],ymm8[2],ymm13[3]
936 ; AVX1-ONLY-NEXT: vmovaps 592(%rdi), %xmm13
937 ; AVX1-ONLY-NEXT: vinsertf128 $1, 640(%rdi), %ymm13, %ymm13
938 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm5 = ymm5[0],ymm13[1],ymm5[2],ymm13[3]
939 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm2 = ymm2[0,1],mem[2,3]
940 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm2 = ymm13[0],ymm2[1],ymm13[2],ymm2[3]
941 ; AVX1-ONLY-NEXT: vmovapd %ymm5, 192(%rsi)
942 ; AVX1-ONLY-NEXT: vmovapd %ymm6, 128(%rsi)
943 ; AVX1-ONLY-NEXT: vmovapd %ymm4, 64(%rsi)
944 ; AVX1-ONLY-NEXT: vmovapd %ymm15, (%rsi)
945 ; AVX1-ONLY-NEXT: vmovapd %ymm9, 224(%rsi)
946 ; AVX1-ONLY-NEXT: vmovapd %ymm7, 160(%rsi)
947 ; AVX1-ONLY-NEXT: vmovapd %ymm11, 96(%rsi)
948 ; AVX1-ONLY-NEXT: vmovapd %ymm12, 32(%rsi)
949 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
950 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 192(%rdx)
951 ; AVX1-ONLY-NEXT: vmovups (%rsp), %ymm4 # 32-byte Reload
952 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 128(%rdx)
953 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
954 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 64(%rdx)
955 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
956 ; AVX1-ONLY-NEXT: vmovaps %ymm4, (%rdx)
957 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
958 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 224(%rdx)
959 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
960 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 160(%rdx)
961 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
962 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 96(%rdx)
963 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
964 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 32(%rdx)
965 ; AVX1-ONLY-NEXT: vmovapd %ymm2, 192(%rcx)
966 ; AVX1-ONLY-NEXT: vmovapd %ymm8, 224(%rcx)
967 ; AVX1-ONLY-NEXT: vmovapd %ymm1, 128(%rcx)
968 ; AVX1-ONLY-NEXT: vmovapd %ymm3, 160(%rcx)
969 ; AVX1-ONLY-NEXT: vmovapd %ymm0, 64(%rcx)
970 ; AVX1-ONLY-NEXT: vmovapd %ymm10, 96(%rcx)
971 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
972 ; AVX1-ONLY-NEXT: vmovaps %ymm0, (%rcx)
973 ; AVX1-ONLY-NEXT: vmovapd %ymm14, 32(%rcx)
974 ; AVX1-ONLY-NEXT: addq $232, %rsp
975 ; AVX1-ONLY-NEXT: vzeroupper
976 ; AVX1-ONLY-NEXT: retq
978 ; AVX2-ONLY-LABEL: load_i64_stride3_vf32:
979 ; AVX2-ONLY: # %bb.0:
980 ; AVX2-ONLY-NEXT: subq $232, %rsp
981 ; AVX2-ONLY-NEXT: vmovaps 32(%rdi), %ymm2
982 ; AVX2-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
983 ; AVX2-ONLY-NEXT: vmovaps (%rdi), %ymm3
984 ; AVX2-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
985 ; AVX2-ONLY-NEXT: vmovaps 704(%rdi), %ymm11
986 ; AVX2-ONLY-NEXT: vmovaps 672(%rdi), %ymm14
987 ; AVX2-ONLY-NEXT: vmovaps 512(%rdi), %ymm10
988 ; AVX2-ONLY-NEXT: vmovaps 480(%rdi), %ymm15
989 ; AVX2-ONLY-NEXT: vmovaps 320(%rdi), %ymm9
990 ; AVX2-ONLY-NEXT: vmovaps 288(%rdi), %ymm8
991 ; AVX2-ONLY-NEXT: vmovaps 128(%rdi), %ymm7
992 ; AVX2-ONLY-NEXT: vmovaps 96(%rdi), %ymm6
993 ; AVX2-ONLY-NEXT: vinsertf128 $1, 160(%rdi), %ymm0, %ymm0
994 ; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm1 = ymm6[0,3,2,3]
995 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm7[4,5,6,7]
996 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
997 ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
998 ; AVX2-ONLY-NEXT: vinsertf128 $1, 352(%rdi), %ymm0, %ymm0
999 ; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm1 = ymm8[0,3,2,3]
1000 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm9[4,5,6,7]
1001 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
1002 ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1003 ; AVX2-ONLY-NEXT: vinsertf128 $1, 544(%rdi), %ymm0, %ymm0
1004 ; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm1 = ymm15[0,3,2,3]
1005 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm10[4,5,6,7]
1006 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
1007 ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1008 ; AVX2-ONLY-NEXT: vinsertf128 $1, 736(%rdi), %ymm0, %ymm0
1009 ; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm1 = ymm14[0,3,2,3]
1010 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm11[4,5,6,7]
1011 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
1012 ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1013 ; AVX2-ONLY-NEXT: vinsertf128 $1, 64(%rdi), %ymm0, %ymm0
1014 ; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm1 = ymm3[0,3,2,3]
1015 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
1016 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
1017 ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1018 ; AVX2-ONLY-NEXT: vmovaps 224(%rdi), %ymm5
1019 ; AVX2-ONLY-NEXT: vmovaps 192(%rdi), %ymm4
1020 ; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm0 = ymm4[0,3,2,3]
1021 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm5[4,5,6,7]
1022 ; AVX2-ONLY-NEXT: vinsertf128 $1, 256(%rdi), %ymm0, %ymm1
1023 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
1024 ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1025 ; AVX2-ONLY-NEXT: vmovaps 416(%rdi), %ymm3
1026 ; AVX2-ONLY-NEXT: vmovaps 384(%rdi), %ymm2
1027 ; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm0 = ymm2[0,3,2,3]
1028 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5,6,7]
1029 ; AVX2-ONLY-NEXT: vinsertf128 $1, 448(%rdi), %ymm0, %ymm1
1030 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
1031 ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1032 ; AVX2-ONLY-NEXT: vmovaps 608(%rdi), %ymm1
1033 ; AVX2-ONLY-NEXT: vmovaps 576(%rdi), %ymm0
1034 ; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm12 = ymm0[0,3,2,3]
1035 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm1[4,5,6,7]
1036 ; AVX2-ONLY-NEXT: vinsertf128 $1, 640(%rdi), %ymm0, %ymm13
1037 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm12 = ymm12[0,1,2,3,4,5],ymm13[6,7]
1038 ; AVX2-ONLY-NEXT: vmovups %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1039 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,3],ymm7[4,5,6,7]
1040 ; AVX2-ONLY-NEXT: vshufps {{.*#+}} ymm6 = ymm6[2,3,0,1,6,7,4,5]
1041 ; AVX2-ONLY-NEXT: vbroadcastsd 176(%rdi), %ymm7
1042 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5],ymm7[6,7]
1043 ; AVX2-ONLY-NEXT: vmovups %ymm6, (%rsp) # 32-byte Spill
1044 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm7 = ymm9[0,1],ymm8[2,3],ymm9[4,5,6,7]
1045 ; AVX2-ONLY-NEXT: vshufps {{.*#+}} ymm7 = ymm7[2,3,0,1,6,7,4,5]
1046 ; AVX2-ONLY-NEXT: vbroadcastsd 368(%rdi), %ymm8
1047 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm8 = ymm7[0,1,2,3,4,5],ymm8[6,7]
1048 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm7 = ymm10[0,1],ymm15[2,3],ymm10[4,5,6,7]
1049 ; AVX2-ONLY-NEXT: vshufps {{.*#+}} ymm7 = ymm7[2,3,0,1,6,7,4,5]
1050 ; AVX2-ONLY-NEXT: vbroadcastsd 560(%rdi), %ymm9
1051 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm9 = ymm7[0,1,2,3,4,5],ymm9[6,7]
1052 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm7 = ymm11[0,1],ymm14[2,3],ymm11[4,5,6,7]
1053 ; AVX2-ONLY-NEXT: vshufps {{.*#+}} ymm7 = ymm7[2,3,0,1,6,7,4,5]
1054 ; AVX2-ONLY-NEXT: vbroadcastsd 752(%rdi), %ymm10
1055 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm11 = ymm7[0,1,2,3,4,5],ymm10[6,7]
1056 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
1057 ; AVX2-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm7 # 32-byte Folded Reload
1058 ; AVX2-ONLY-NEXT: # ymm7 = ymm6[0,1],mem[2,3],ymm6[4,5,6,7]
1059 ; AVX2-ONLY-NEXT: vshufps {{.*#+}} ymm7 = ymm7[2,3,0,1,6,7,4,5]
1060 ; AVX2-ONLY-NEXT: vbroadcastsd 80(%rdi), %ymm10
1061 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm12 = ymm7[0,1,2,3,4,5],ymm10[6,7]
1062 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1],ymm4[2,3],ymm5[4,5,6,7]
1063 ; AVX2-ONLY-NEXT: vshufps {{.*#+}} ymm4 = ymm4[2,3,0,1,6,7,4,5]
1064 ; AVX2-ONLY-NEXT: vbroadcastsd 272(%rdi), %ymm5
1065 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm13 = ymm4[0,1,2,3,4,5],ymm5[6,7]
1066 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3],ymm3[4,5,6,7]
1067 ; AVX2-ONLY-NEXT: vshufps {{.*#+}} ymm2 = ymm2[2,3,0,1,6,7,4,5]
1068 ; AVX2-ONLY-NEXT: vbroadcastsd 464(%rdi), %ymm3
1069 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm15 = ymm2[0,1,2,3,4,5],ymm3[6,7]
1070 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7]
1071 ; AVX2-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5]
1072 ; AVX2-ONLY-NEXT: vbroadcastsd 656(%rdi), %ymm1
1073 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm0[0,1,2,3,4,5],ymm1[6,7]
1074 ; AVX2-ONLY-NEXT: vmovaps 112(%rdi), %xmm0
1075 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
1076 ; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
1077 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm14 = ymm0[0,1,2,3],ymm1[4,5,6,7]
1078 ; AVX2-ONLY-NEXT: vmovaps 16(%rdi), %xmm0
1079 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
1080 ; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
1081 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm0[0,1,2,3],ymm1[4,5,6,7]
1082 ; AVX2-ONLY-NEXT: vmovaps 304(%rdi), %xmm0
1083 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
1084 ; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
1085 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm0[0,1,2,3],ymm1[4,5,6,7]
1086 ; AVX2-ONLY-NEXT: vmovaps 208(%rdi), %xmm0
1087 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
1088 ; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm4 = mem[0,1,0,3]
1089 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm4[4,5,6,7]
1090 ; AVX2-ONLY-NEXT: vmovaps 496(%rdi), %xmm4
1091 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm4 = xmm4[0,1],mem[2,3]
1092 ; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm5 = mem[0,1,0,3]
1093 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5,6,7]
1094 ; AVX2-ONLY-NEXT: vmovaps 400(%rdi), %xmm5
1095 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm5 = xmm5[0,1],mem[2,3]
1096 ; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm7 = mem[0,1,0,3]
1097 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm7[4,5,6,7]
1098 ; AVX2-ONLY-NEXT: vmovaps 688(%rdi), %xmm7
1099 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm7 = xmm7[0,1],mem[2,3]
1100 ; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm10 = mem[0,1,0,3]
1101 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm10[4,5,6,7]
1102 ; AVX2-ONLY-NEXT: vmovaps 592(%rdi), %xmm10
1103 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm10 = xmm10[0,1],mem[2,3]
1104 ; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm6 = mem[0,1,0,3]
1105 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm6 = ymm10[0,1,2,3],ymm6[4,5,6,7]
1106 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
1107 ; AVX2-ONLY-NEXT: vmovaps %ymm10, 192(%rsi)
1108 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
1109 ; AVX2-ONLY-NEXT: vmovaps %ymm10, 128(%rsi)
1110 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
1111 ; AVX2-ONLY-NEXT: vmovaps %ymm10, 64(%rsi)
1112 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
1113 ; AVX2-ONLY-NEXT: vmovaps %ymm10, (%rsi)
1114 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
1115 ; AVX2-ONLY-NEXT: vmovaps %ymm10, 224(%rsi)
1116 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
1117 ; AVX2-ONLY-NEXT: vmovaps %ymm10, 160(%rsi)
1118 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
1119 ; AVX2-ONLY-NEXT: vmovaps %ymm10, 96(%rsi)
1120 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
1121 ; AVX2-ONLY-NEXT: vmovaps %ymm10, 32(%rsi)
1122 ; AVX2-ONLY-NEXT: vmovaps %ymm2, 192(%rdx)
1123 ; AVX2-ONLY-NEXT: vmovaps %ymm15, 128(%rdx)
1124 ; AVX2-ONLY-NEXT: vmovaps %ymm13, 64(%rdx)
1125 ; AVX2-ONLY-NEXT: vmovaps %ymm12, (%rdx)
1126 ; AVX2-ONLY-NEXT: vmovaps %ymm11, 224(%rdx)
1127 ; AVX2-ONLY-NEXT: vmovaps %ymm9, 160(%rdx)
1128 ; AVX2-ONLY-NEXT: vmovaps %ymm8, 96(%rdx)
1129 ; AVX2-ONLY-NEXT: vmovups (%rsp), %ymm2 # 32-byte Reload
1130 ; AVX2-ONLY-NEXT: vmovaps %ymm2, 32(%rdx)
1131 ; AVX2-ONLY-NEXT: vmovaps %ymm6, 192(%rcx)
1132 ; AVX2-ONLY-NEXT: vmovaps %ymm7, 224(%rcx)
1133 ; AVX2-ONLY-NEXT: vmovaps %ymm5, 128(%rcx)
1134 ; AVX2-ONLY-NEXT: vmovaps %ymm4, 160(%rcx)
1135 ; AVX2-ONLY-NEXT: vmovaps %ymm0, 64(%rcx)
1136 ; AVX2-ONLY-NEXT: vmovaps %ymm1, 96(%rcx)
1137 ; AVX2-ONLY-NEXT: vmovaps %ymm3, (%rcx)
1138 ; AVX2-ONLY-NEXT: vmovaps %ymm14, 32(%rcx)
1139 ; AVX2-ONLY-NEXT: addq $232, %rsp
1140 ; AVX2-ONLY-NEXT: vzeroupper
1141 ; AVX2-ONLY-NEXT: retq
1143 ; AVX512-LABEL: load_i64_stride3_vf32:
1145 ; AVX512-NEXT: vmovdqa64 704(%rdi), %zmm4
1146 ; AVX512-NEXT: vmovdqa64 640(%rdi), %zmm0
1147 ; AVX512-NEXT: vmovdqa64 576(%rdi), %zmm5
1148 ; AVX512-NEXT: vmovdqa64 512(%rdi), %zmm6
1149 ; AVX512-NEXT: vmovdqa64 448(%rdi), %zmm2
1150 ; AVX512-NEXT: vmovdqa64 384(%rdi), %zmm7
1151 ; AVX512-NEXT: vmovdqa64 320(%rdi), %zmm8
1152 ; AVX512-NEXT: vmovdqa64 256(%rdi), %zmm1
1153 ; AVX512-NEXT: vmovdqa64 (%rdi), %zmm9
1154 ; AVX512-NEXT: vmovdqa64 64(%rdi), %zmm3
1155 ; AVX512-NEXT: vmovdqa64 128(%rdi), %zmm10
1156 ; AVX512-NEXT: vmovdqa64 192(%rdi), %zmm11
1157 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm12 = <0,3,6,9,12,15,u,u>
1158 ; AVX512-NEXT: vmovdqa64 %zmm11, %zmm13
1159 ; AVX512-NEXT: vpermt2q %zmm1, %zmm12, %zmm13
1160 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm14 = [0,1,2,3,4,5,10,13]
1161 ; AVX512-NEXT: vpermt2q %zmm8, %zmm14, %zmm13
1162 ; AVX512-NEXT: vmovdqa64 %zmm7, %zmm15
1163 ; AVX512-NEXT: vpermt2q %zmm2, %zmm12, %zmm15
1164 ; AVX512-NEXT: vpermt2q %zmm6, %zmm14, %zmm15
1165 ; AVX512-NEXT: vmovdqa64 %zmm5, %zmm16
1166 ; AVX512-NEXT: vpermt2q %zmm0, %zmm12, %zmm16
1167 ; AVX512-NEXT: vpermt2q %zmm4, %zmm14, %zmm16
1168 ; AVX512-NEXT: vpermi2q %zmm3, %zmm9, %zmm12
1169 ; AVX512-NEXT: vpermt2q %zmm10, %zmm14, %zmm12
1170 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm14 = <1,4,7,10,13,u,u,u>
1171 ; AVX512-NEXT: vmovdqa64 %zmm7, %zmm17
1172 ; AVX512-NEXT: vpermt2q %zmm2, %zmm14, %zmm17
1173 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm18 = [0,1,2,3,4,8,11,14]
1174 ; AVX512-NEXT: vpermt2q %zmm6, %zmm18, %zmm17
1175 ; AVX512-NEXT: vmovdqa64 %zmm11, %zmm19
1176 ; AVX512-NEXT: vpermt2q %zmm1, %zmm14, %zmm19
1177 ; AVX512-NEXT: vpermt2q %zmm8, %zmm18, %zmm19
1178 ; AVX512-NEXT: vmovdqa64 %zmm5, %zmm20
1179 ; AVX512-NEXT: vpermt2q %zmm0, %zmm14, %zmm20
1180 ; AVX512-NEXT: vpermt2q %zmm4, %zmm18, %zmm20
1181 ; AVX512-NEXT: vpermi2q %zmm3, %zmm9, %zmm14
1182 ; AVX512-NEXT: vpermt2q %zmm10, %zmm18, %zmm14
1183 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm18 = <10,13,0,3,6,u,u,u>
1184 ; AVX512-NEXT: vpermt2q %zmm11, %zmm18, %zmm1
1185 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm11 = [0,1,2,3,4,9,12,15]
1186 ; AVX512-NEXT: vpermt2q %zmm8, %zmm11, %zmm1
1187 ; AVX512-NEXT: vpermt2q %zmm5, %zmm18, %zmm0
1188 ; AVX512-NEXT: vpermt2q %zmm4, %zmm11, %zmm0
1189 ; AVX512-NEXT: vpermt2q %zmm7, %zmm18, %zmm2
1190 ; AVX512-NEXT: vpermt2q %zmm6, %zmm11, %zmm2
1191 ; AVX512-NEXT: vpermt2q %zmm9, %zmm18, %zmm3
1192 ; AVX512-NEXT: vpermt2q %zmm10, %zmm11, %zmm3
1193 ; AVX512-NEXT: vmovdqa64 %zmm16, 192(%rsi)
1194 ; AVX512-NEXT: vmovdqa64 %zmm15, 128(%rsi)
1195 ; AVX512-NEXT: vmovdqa64 %zmm13, 64(%rsi)
1196 ; AVX512-NEXT: vmovdqa64 %zmm12, (%rsi)
1197 ; AVX512-NEXT: vmovdqa64 %zmm20, 192(%rdx)
1198 ; AVX512-NEXT: vmovdqa64 %zmm14, (%rdx)
1199 ; AVX512-NEXT: vmovdqa64 %zmm19, 64(%rdx)
1200 ; AVX512-NEXT: vmovdqa64 %zmm17, 128(%rdx)
1201 ; AVX512-NEXT: vmovdqa64 %zmm2, 128(%rcx)
1202 ; AVX512-NEXT: vmovdqa64 %zmm0, 192(%rcx)
1203 ; AVX512-NEXT: vmovdqa64 %zmm3, (%rcx)
1204 ; AVX512-NEXT: vmovdqa64 %zmm1, 64(%rcx)
1205 ; AVX512-NEXT: vzeroupper
1207 %wide.vec = load <96 x i64>, ptr %in.vec, align 64
1208 %strided.vec0 = shufflevector <96 x i64> %wide.vec, <96 x i64> poison, <32 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21, i32 24, i32 27, i32 30, i32 33, i32 36, i32 39, i32 42, i32 45, i32 48, i32 51, i32 54, i32 57, i32 60, i32 63, i32 66, i32 69, i32 72, i32 75, i32 78, i32 81, i32 84, i32 87, i32 90, i32 93>
1209 %strided.vec1 = shufflevector <96 x i64> %wide.vec, <96 x i64> poison, <32 x i32> <i32 1, i32 4, i32 7, i32 10, i32 13, i32 16, i32 19, i32 22, i32 25, i32 28, i32 31, i32 34, i32 37, i32 40, i32 43, i32 46, i32 49, i32 52, i32 55, i32 58, i32 61, i32 64, i32 67, i32 70, i32 73, i32 76, i32 79, i32 82, i32 85, i32 88, i32 91, i32 94>
1210 %strided.vec2 = shufflevector <96 x i64> %wide.vec, <96 x i64> poison, <32 x i32> <i32 2, i32 5, i32 8, i32 11, i32 14, i32 17, i32 20, i32 23, i32 26, i32 29, i32 32, i32 35, i32 38, i32 41, i32 44, i32 47, i32 50, i32 53, i32 56, i32 59, i32 62, i32 65, i32 68, i32 71, i32 74, i32 77, i32 80, i32 83, i32 86, i32 89, i32 92, i32 95>
1211 store <32 x i64> %strided.vec0, ptr %out.vec0, align 64
1212 store <32 x i64> %strided.vec1, ptr %out.vec1, align 64
1213 store <32 x i64> %strided.vec2, ptr %out.vec2, align 64
1217 define void @load_i64_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2) nounwind {
1218 ; SSE-LABEL: load_i64_stride3_vf64:
1220 ; SSE-NEXT: subq $1176, %rsp # imm = 0x498
1221 ; SSE-NEXT: movapd 272(%rdi), %xmm9
1222 ; SSE-NEXT: movapd 224(%rdi), %xmm8
1223 ; SSE-NEXT: movapd 176(%rdi), %xmm7
1224 ; SSE-NEXT: movapd 128(%rdi), %xmm6
1225 ; SSE-NEXT: movapd 80(%rdi), %xmm5
1226 ; SSE-NEXT: movapd 240(%rdi), %xmm10
1227 ; SSE-NEXT: movapd 256(%rdi), %xmm0
1228 ; SSE-NEXT: movapd 192(%rdi), %xmm11
1229 ; SSE-NEXT: movapd 208(%rdi), %xmm1
1230 ; SSE-NEXT: movapd 144(%rdi), %xmm12
1231 ; SSE-NEXT: movapd 160(%rdi), %xmm2
1232 ; SSE-NEXT: movapd 96(%rdi), %xmm13
1233 ; SSE-NEXT: movapd 112(%rdi), %xmm3
1234 ; SSE-NEXT: movapd 48(%rdi), %xmm14
1235 ; SSE-NEXT: movapd 64(%rdi), %xmm4
1236 ; SSE-NEXT: movapd %xmm4, %xmm15
1237 ; SSE-NEXT: movsd {{.*#+}} xmm15 = xmm14[0],xmm15[1]
1238 ; SSE-NEXT: movapd %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1239 ; SSE-NEXT: shufpd {{.*#+}} xmm14 = xmm14[1],xmm5[0]
1240 ; SSE-NEXT: movapd %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1241 ; SSE-NEXT: movsd {{.*#+}} xmm5 = xmm4[0],xmm5[1]
1242 ; SSE-NEXT: movapd %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1243 ; SSE-NEXT: movapd %xmm3, %xmm4
1244 ; SSE-NEXT: movsd {{.*#+}} xmm4 = xmm13[0],xmm4[1]
1245 ; SSE-NEXT: movapd %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1246 ; SSE-NEXT: shufpd {{.*#+}} xmm13 = xmm13[1],xmm6[0]
1247 ; SSE-NEXT: movapd %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1248 ; SSE-NEXT: movsd {{.*#+}} xmm6 = xmm3[0],xmm6[1]
1249 ; SSE-NEXT: movapd %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1250 ; SSE-NEXT: movapd %xmm2, %xmm3
1251 ; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm12[0],xmm3[1]
1252 ; SSE-NEXT: movapd %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1253 ; SSE-NEXT: shufpd {{.*#+}} xmm12 = xmm12[1],xmm7[0]
1254 ; SSE-NEXT: movapd %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1255 ; SSE-NEXT: movsd {{.*#+}} xmm7 = xmm2[0],xmm7[1]
1256 ; SSE-NEXT: movapd %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1257 ; SSE-NEXT: movapd %xmm1, %xmm2
1258 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm11[0],xmm2[1]
1259 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1260 ; SSE-NEXT: shufpd {{.*#+}} xmm11 = xmm11[1],xmm8[0]
1261 ; SSE-NEXT: movapd %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1262 ; SSE-NEXT: movsd {{.*#+}} xmm8 = xmm1[0],xmm8[1]
1263 ; SSE-NEXT: movapd %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1264 ; SSE-NEXT: movapd %xmm0, %xmm1
1265 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm10[0],xmm1[1]
1266 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1267 ; SSE-NEXT: shufpd {{.*#+}} xmm10 = xmm10[1],xmm9[0]
1268 ; SSE-NEXT: movapd %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1269 ; SSE-NEXT: movsd {{.*#+}} xmm9 = xmm0[0],xmm9[1]
1270 ; SSE-NEXT: movapd %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1271 ; SSE-NEXT: movapd 288(%rdi), %xmm2
1272 ; SSE-NEXT: movapd 304(%rdi), %xmm0
1273 ; SSE-NEXT: movapd %xmm0, %xmm1
1274 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
1275 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1276 ; SSE-NEXT: movapd 320(%rdi), %xmm1
1277 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
1278 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1279 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
1280 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1281 ; SSE-NEXT: movapd 336(%rdi), %xmm2
1282 ; SSE-NEXT: movapd 352(%rdi), %xmm0
1283 ; SSE-NEXT: movapd %xmm0, %xmm1
1284 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
1285 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1286 ; SSE-NEXT: movapd 368(%rdi), %xmm1
1287 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
1288 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1289 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
1290 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1291 ; SSE-NEXT: movapd 384(%rdi), %xmm2
1292 ; SSE-NEXT: movapd 400(%rdi), %xmm0
1293 ; SSE-NEXT: movapd %xmm0, %xmm1
1294 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
1295 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1296 ; SSE-NEXT: movapd 416(%rdi), %xmm1
1297 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
1298 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1299 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
1300 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1301 ; SSE-NEXT: movapd 432(%rdi), %xmm2
1302 ; SSE-NEXT: movapd 448(%rdi), %xmm0
1303 ; SSE-NEXT: movapd %xmm0, %xmm1
1304 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
1305 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1306 ; SSE-NEXT: movapd 464(%rdi), %xmm1
1307 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
1308 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1309 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
1310 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1311 ; SSE-NEXT: movapd 480(%rdi), %xmm2
1312 ; SSE-NEXT: movapd 496(%rdi), %xmm0
1313 ; SSE-NEXT: movapd %xmm0, %xmm1
1314 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
1315 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1316 ; SSE-NEXT: movapd 512(%rdi), %xmm1
1317 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
1318 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1319 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
1320 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1321 ; SSE-NEXT: movapd 528(%rdi), %xmm2
1322 ; SSE-NEXT: movapd 544(%rdi), %xmm0
1323 ; SSE-NEXT: movapd %xmm0, %xmm1
1324 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
1325 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1326 ; SSE-NEXT: movapd 560(%rdi), %xmm1
1327 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
1328 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1329 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
1330 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1331 ; SSE-NEXT: movapd 576(%rdi), %xmm2
1332 ; SSE-NEXT: movapd 592(%rdi), %xmm0
1333 ; SSE-NEXT: movapd %xmm0, %xmm1
1334 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
1335 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1336 ; SSE-NEXT: movapd 608(%rdi), %xmm1
1337 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
1338 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1339 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
1340 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1341 ; SSE-NEXT: movapd 624(%rdi), %xmm2
1342 ; SSE-NEXT: movapd 640(%rdi), %xmm0
1343 ; SSE-NEXT: movapd %xmm0, %xmm1
1344 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
1345 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1346 ; SSE-NEXT: movapd 656(%rdi), %xmm1
1347 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
1348 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1349 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
1350 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1351 ; SSE-NEXT: movapd 672(%rdi), %xmm2
1352 ; SSE-NEXT: movapd 688(%rdi), %xmm0
1353 ; SSE-NEXT: movapd %xmm0, %xmm1
1354 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
1355 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1356 ; SSE-NEXT: movapd 704(%rdi), %xmm1
1357 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
1358 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1359 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
1360 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1361 ; SSE-NEXT: movapd 720(%rdi), %xmm2
1362 ; SSE-NEXT: movapd 736(%rdi), %xmm0
1363 ; SSE-NEXT: movapd %xmm0, %xmm1
1364 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
1365 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1366 ; SSE-NEXT: movapd 752(%rdi), %xmm1
1367 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
1368 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1369 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
1370 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1371 ; SSE-NEXT: movapd 768(%rdi), %xmm2
1372 ; SSE-NEXT: movapd 784(%rdi), %xmm0
1373 ; SSE-NEXT: movapd %xmm0, %xmm1
1374 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
1375 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1376 ; SSE-NEXT: movapd 800(%rdi), %xmm1
1377 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
1378 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1379 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
1380 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1381 ; SSE-NEXT: movapd 816(%rdi), %xmm2
1382 ; SSE-NEXT: movapd 832(%rdi), %xmm0
1383 ; SSE-NEXT: movapd %xmm0, %xmm1
1384 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
1385 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1386 ; SSE-NEXT: movapd 848(%rdi), %xmm1
1387 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
1388 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1389 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
1390 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1391 ; SSE-NEXT: movapd 864(%rdi), %xmm2
1392 ; SSE-NEXT: movapd 880(%rdi), %xmm0
1393 ; SSE-NEXT: movapd %xmm0, %xmm1
1394 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
1395 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1396 ; SSE-NEXT: movapd 896(%rdi), %xmm1
1397 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
1398 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1399 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
1400 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1401 ; SSE-NEXT: movapd 912(%rdi), %xmm2
1402 ; SSE-NEXT: movapd 928(%rdi), %xmm0
1403 ; SSE-NEXT: movapd %xmm0, %xmm1
1404 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
1405 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1406 ; SSE-NEXT: movapd 944(%rdi), %xmm1
1407 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
1408 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1409 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
1410 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1411 ; SSE-NEXT: movapd 960(%rdi), %xmm2
1412 ; SSE-NEXT: movapd 976(%rdi), %xmm0
1413 ; SSE-NEXT: movapd %xmm0, %xmm1
1414 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
1415 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1416 ; SSE-NEXT: movapd 992(%rdi), %xmm1
1417 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
1418 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1419 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
1420 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1421 ; SSE-NEXT: movapd 1008(%rdi), %xmm2
1422 ; SSE-NEXT: movapd 1024(%rdi), %xmm0
1423 ; SSE-NEXT: movapd %xmm0, %xmm1
1424 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
1425 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1426 ; SSE-NEXT: movapd 1040(%rdi), %xmm1
1427 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
1428 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1429 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
1430 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1431 ; SSE-NEXT: movapd 1056(%rdi), %xmm2
1432 ; SSE-NEXT: movapd 1072(%rdi), %xmm0
1433 ; SSE-NEXT: movapd %xmm0, %xmm1
1434 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
1435 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1436 ; SSE-NEXT: movapd 1088(%rdi), %xmm1
1437 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
1438 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1439 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
1440 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1441 ; SSE-NEXT: movapd 1104(%rdi), %xmm2
1442 ; SSE-NEXT: movapd 1120(%rdi), %xmm0
1443 ; SSE-NEXT: movapd %xmm0, %xmm1
1444 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
1445 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1446 ; SSE-NEXT: movapd 1136(%rdi), %xmm1
1447 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
1448 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1449 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
1450 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1451 ; SSE-NEXT: movapd 1152(%rdi), %xmm2
1452 ; SSE-NEXT: movapd 1168(%rdi), %xmm0
1453 ; SSE-NEXT: movapd %xmm0, %xmm1
1454 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
1455 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1456 ; SSE-NEXT: movapd 1184(%rdi), %xmm1
1457 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
1458 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1459 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
1460 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1461 ; SSE-NEXT: movapd 1200(%rdi), %xmm2
1462 ; SSE-NEXT: movapd 1216(%rdi), %xmm0
1463 ; SSE-NEXT: movapd %xmm0, %xmm1
1464 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
1465 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1466 ; SSE-NEXT: movapd 1232(%rdi), %xmm1
1467 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
1468 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1469 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
1470 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1471 ; SSE-NEXT: movapd 1248(%rdi), %xmm2
1472 ; SSE-NEXT: movapd 1264(%rdi), %xmm0
1473 ; SSE-NEXT: movapd %xmm0, %xmm14
1474 ; SSE-NEXT: movsd {{.*#+}} xmm14 = xmm2[0],xmm14[1]
1475 ; SSE-NEXT: movapd 1280(%rdi), %xmm1
1476 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
1477 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1478 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
1479 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1480 ; SSE-NEXT: movapd 1296(%rdi), %xmm15
1481 ; SSE-NEXT: movapd 1312(%rdi), %xmm0
1482 ; SSE-NEXT: movapd %xmm0, %xmm11
1483 ; SSE-NEXT: movsd {{.*#+}} xmm11 = xmm15[0],xmm11[1]
1484 ; SSE-NEXT: movapd 1328(%rdi), %xmm1
1485 ; SSE-NEXT: shufpd {{.*#+}} xmm15 = xmm15[1],xmm1[0]
1486 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
1487 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1488 ; SSE-NEXT: movapd 1344(%rdi), %xmm12
1489 ; SSE-NEXT: movapd 1360(%rdi), %xmm0
1490 ; SSE-NEXT: movapd %xmm0, %xmm7
1491 ; SSE-NEXT: movsd {{.*#+}} xmm7 = xmm12[0],xmm7[1]
1492 ; SSE-NEXT: movapd 1376(%rdi), %xmm1
1493 ; SSE-NEXT: shufpd {{.*#+}} xmm12 = xmm12[1],xmm1[0]
1494 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
1495 ; SSE-NEXT: movapd %xmm1, (%rsp) # 16-byte Spill
1496 ; SSE-NEXT: movapd 1392(%rdi), %xmm10
1497 ; SSE-NEXT: movapd 1408(%rdi), %xmm0
1498 ; SSE-NEXT: movapd %xmm0, %xmm2
1499 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm10[0],xmm2[1]
1500 ; SSE-NEXT: movapd 1424(%rdi), %xmm1
1501 ; SSE-NEXT: shufpd {{.*#+}} xmm10 = xmm10[1],xmm1[0]
1502 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
1503 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1504 ; SSE-NEXT: movapd 1440(%rdi), %xmm9
1505 ; SSE-NEXT: movapd 1456(%rdi), %xmm0
1506 ; SSE-NEXT: movapd %xmm0, %xmm1
1507 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm9[0],xmm1[1]
1508 ; SSE-NEXT: movapd 1472(%rdi), %xmm3
1509 ; SSE-NEXT: shufpd {{.*#+}} xmm9 = xmm9[1],xmm3[0]
1510 ; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm0[0],xmm3[1]
1511 ; SSE-NEXT: movapd %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1512 ; SSE-NEXT: movapd 1488(%rdi), %xmm0
1513 ; SSE-NEXT: movapd 1504(%rdi), %xmm8
1514 ; SSE-NEXT: movapd %xmm8, %xmm3
1515 ; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm0[0],xmm3[1]
1516 ; SSE-NEXT: movapd 1520(%rdi), %xmm13
1517 ; SSE-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1],xmm13[0]
1518 ; SSE-NEXT: movsd {{.*#+}} xmm13 = xmm8[0],xmm13[1]
1519 ; SSE-NEXT: movapd (%rdi), %xmm8
1520 ; SSE-NEXT: movapd 16(%rdi), %xmm5
1521 ; SSE-NEXT: movapd %xmm5, %xmm6
1522 ; SSE-NEXT: movsd {{.*#+}} xmm6 = xmm8[0],xmm6[1]
1523 ; SSE-NEXT: movapd 32(%rdi), %xmm4
1524 ; SSE-NEXT: shufpd {{.*#+}} xmm8 = xmm8[1],xmm4[0]
1525 ; SSE-NEXT: movsd {{.*#+}} xmm4 = xmm5[0],xmm4[1]
1526 ; SSE-NEXT: movapd %xmm3, 496(%rsi)
1527 ; SSE-NEXT: movapd %xmm1, 480(%rsi)
1528 ; SSE-NEXT: movapd %xmm2, 464(%rsi)
1529 ; SSE-NEXT: movapd %xmm7, 448(%rsi)
1530 ; SSE-NEXT: movapd %xmm11, 432(%rsi)
1531 ; SSE-NEXT: movapd %xmm14, 416(%rsi)
1532 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1533 ; SSE-NEXT: movaps %xmm1, 400(%rsi)
1534 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1535 ; SSE-NEXT: movaps %xmm1, 384(%rsi)
1536 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1537 ; SSE-NEXT: movaps %xmm1, 368(%rsi)
1538 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1539 ; SSE-NEXT: movaps %xmm1, 352(%rsi)
1540 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1541 ; SSE-NEXT: movaps %xmm1, 336(%rsi)
1542 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1543 ; SSE-NEXT: movaps %xmm1, 320(%rsi)
1544 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1545 ; SSE-NEXT: movaps %xmm1, 304(%rsi)
1546 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1547 ; SSE-NEXT: movaps %xmm1, 288(%rsi)
1548 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1549 ; SSE-NEXT: movaps %xmm1, 272(%rsi)
1550 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1551 ; SSE-NEXT: movaps %xmm1, 256(%rsi)
1552 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1553 ; SSE-NEXT: movaps %xmm1, 240(%rsi)
1554 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1555 ; SSE-NEXT: movaps %xmm1, 224(%rsi)
1556 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1557 ; SSE-NEXT: movaps %xmm1, 208(%rsi)
1558 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1559 ; SSE-NEXT: movaps %xmm1, 192(%rsi)
1560 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1561 ; SSE-NEXT: movaps %xmm1, 176(%rsi)
1562 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1563 ; SSE-NEXT: movaps %xmm1, 160(%rsi)
1564 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1565 ; SSE-NEXT: movaps %xmm1, 144(%rsi)
1566 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1567 ; SSE-NEXT: movaps %xmm1, 128(%rsi)
1568 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1569 ; SSE-NEXT: movaps %xmm1, 112(%rsi)
1570 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1571 ; SSE-NEXT: movaps %xmm1, 96(%rsi)
1572 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1573 ; SSE-NEXT: movaps %xmm1, 80(%rsi)
1574 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1575 ; SSE-NEXT: movaps %xmm1, 64(%rsi)
1576 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1577 ; SSE-NEXT: movaps %xmm1, 48(%rsi)
1578 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1579 ; SSE-NEXT: movaps %xmm1, 32(%rsi)
1580 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1581 ; SSE-NEXT: movaps %xmm1, 16(%rsi)
1582 ; SSE-NEXT: movapd %xmm6, (%rsi)
1583 ; SSE-NEXT: movapd %xmm0, 496(%rdx)
1584 ; SSE-NEXT: movapd %xmm9, 480(%rdx)
1585 ; SSE-NEXT: movapd %xmm10, 464(%rdx)
1586 ; SSE-NEXT: movapd %xmm12, 448(%rdx)
1587 ; SSE-NEXT: movapd %xmm15, 432(%rdx)
1588 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1589 ; SSE-NEXT: movaps %xmm0, 416(%rdx)
1590 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1591 ; SSE-NEXT: movaps %xmm0, 400(%rdx)
1592 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1593 ; SSE-NEXT: movaps %xmm0, 384(%rdx)
1594 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1595 ; SSE-NEXT: movaps %xmm0, 368(%rdx)
1596 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1597 ; SSE-NEXT: movaps %xmm0, 352(%rdx)
1598 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1599 ; SSE-NEXT: movaps %xmm0, 336(%rdx)
1600 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1601 ; SSE-NEXT: movaps %xmm0, 320(%rdx)
1602 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1603 ; SSE-NEXT: movaps %xmm0, 304(%rdx)
1604 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1605 ; SSE-NEXT: movaps %xmm0, 288(%rdx)
1606 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1607 ; SSE-NEXT: movaps %xmm0, 272(%rdx)
1608 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1609 ; SSE-NEXT: movaps %xmm0, 256(%rdx)
1610 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1611 ; SSE-NEXT: movaps %xmm0, 240(%rdx)
1612 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1613 ; SSE-NEXT: movaps %xmm0, 224(%rdx)
1614 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1615 ; SSE-NEXT: movaps %xmm0, 208(%rdx)
1616 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1617 ; SSE-NEXT: movaps %xmm0, 192(%rdx)
1618 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1619 ; SSE-NEXT: movaps %xmm0, 176(%rdx)
1620 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1621 ; SSE-NEXT: movaps %xmm0, 160(%rdx)
1622 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1623 ; SSE-NEXT: movaps %xmm0, 144(%rdx)
1624 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1625 ; SSE-NEXT: movaps %xmm0, 128(%rdx)
1626 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1627 ; SSE-NEXT: movaps %xmm0, 112(%rdx)
1628 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1629 ; SSE-NEXT: movaps %xmm0, 96(%rdx)
1630 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1631 ; SSE-NEXT: movaps %xmm0, 80(%rdx)
1632 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1633 ; SSE-NEXT: movaps %xmm0, 64(%rdx)
1634 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1635 ; SSE-NEXT: movaps %xmm0, 48(%rdx)
1636 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1637 ; SSE-NEXT: movaps %xmm0, 32(%rdx)
1638 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1639 ; SSE-NEXT: movaps %xmm0, 16(%rdx)
1640 ; SSE-NEXT: movapd %xmm8, (%rdx)
1641 ; SSE-NEXT: movapd %xmm13, 496(%rcx)
1642 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1643 ; SSE-NEXT: movaps %xmm0, 480(%rcx)
1644 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1645 ; SSE-NEXT: movaps %xmm0, 464(%rcx)
1646 ; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
1647 ; SSE-NEXT: movaps %xmm0, 448(%rcx)
1648 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1649 ; SSE-NEXT: movaps %xmm0, 432(%rcx)
1650 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1651 ; SSE-NEXT: movaps %xmm0, 416(%rcx)
1652 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1653 ; SSE-NEXT: movaps %xmm0, 400(%rcx)
1654 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1655 ; SSE-NEXT: movaps %xmm0, 384(%rcx)
1656 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1657 ; SSE-NEXT: movaps %xmm0, 368(%rcx)
1658 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1659 ; SSE-NEXT: movaps %xmm0, 352(%rcx)
1660 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1661 ; SSE-NEXT: movaps %xmm0, 336(%rcx)
1662 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1663 ; SSE-NEXT: movaps %xmm0, 320(%rcx)
1664 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1665 ; SSE-NEXT: movaps %xmm0, 304(%rcx)
1666 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1667 ; SSE-NEXT: movaps %xmm0, 288(%rcx)
1668 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1669 ; SSE-NEXT: movaps %xmm0, 272(%rcx)
1670 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1671 ; SSE-NEXT: movaps %xmm0, 256(%rcx)
1672 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1673 ; SSE-NEXT: movaps %xmm0, 240(%rcx)
1674 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1675 ; SSE-NEXT: movaps %xmm0, 224(%rcx)
1676 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1677 ; SSE-NEXT: movaps %xmm0, 208(%rcx)
1678 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1679 ; SSE-NEXT: movaps %xmm0, 192(%rcx)
1680 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1681 ; SSE-NEXT: movaps %xmm0, 176(%rcx)
1682 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1683 ; SSE-NEXT: movaps %xmm0, 160(%rcx)
1684 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1685 ; SSE-NEXT: movaps %xmm0, 144(%rcx)
1686 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1687 ; SSE-NEXT: movaps %xmm0, 128(%rcx)
1688 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1689 ; SSE-NEXT: movaps %xmm0, 112(%rcx)
1690 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1691 ; SSE-NEXT: movaps %xmm0, 96(%rcx)
1692 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1693 ; SSE-NEXT: movaps %xmm0, 80(%rcx)
1694 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1695 ; SSE-NEXT: movaps %xmm0, 64(%rcx)
1696 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1697 ; SSE-NEXT: movaps %xmm0, 48(%rcx)
1698 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1699 ; SSE-NEXT: movaps %xmm0, 32(%rcx)
1700 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1701 ; SSE-NEXT: movaps %xmm0, 16(%rcx)
1702 ; SSE-NEXT: movapd %xmm4, (%rcx)
1703 ; SSE-NEXT: addq $1176, %rsp # imm = 0x498
1706 ; AVX1-ONLY-LABEL: load_i64_stride3_vf64:
1707 ; AVX1-ONLY: # %bb.0:
1708 ; AVX1-ONLY-NEXT: subq $1096, %rsp # imm = 0x448
1709 ; AVX1-ONLY-NEXT: vmovapd 896(%rdi), %ymm0
1710 ; AVX1-ONLY-NEXT: vmovapd 704(%rdi), %ymm1
1711 ; AVX1-ONLY-NEXT: vmovapd 512(%rdi), %ymm2
1712 ; AVX1-ONLY-NEXT: vmovapd 320(%rdi), %ymm3
1713 ; AVX1-ONLY-NEXT: vmovapd 128(%rdi), %ymm4
1714 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm7 = mem[0,1],ymm4[2,3]
1715 ; AVX1-ONLY-NEXT: vmovupd %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1716 ; AVX1-ONLY-NEXT: vmovapd %ymm4, %ymm6
1717 ; AVX1-ONLY-NEXT: vmovupd %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1718 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm8 = mem[0,1],ymm3[2,3]
1719 ; AVX1-ONLY-NEXT: vmovupd %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1720 ; AVX1-ONLY-NEXT: vmovapd %ymm3, %ymm5
1721 ; AVX1-ONLY-NEXT: vmovupd %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1722 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm9 = mem[0,1],ymm2[2,3]
1723 ; AVX1-ONLY-NEXT: vmovupd %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1724 ; AVX1-ONLY-NEXT: vmovapd %ymm2, %ymm4
1725 ; AVX1-ONLY-NEXT: vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1726 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm10 = mem[0,1],ymm1[2,3]
1727 ; AVX1-ONLY-NEXT: vmovupd %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1728 ; AVX1-ONLY-NEXT: vmovapd %ymm1, %ymm3
1729 ; AVX1-ONLY-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1730 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm11 = mem[0,1],ymm0[2,3]
1731 ; AVX1-ONLY-NEXT: vmovupd %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1732 ; AVX1-ONLY-NEXT: vmovapd %ymm0, %ymm2
1733 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1734 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm7[1],ymm6[0],ymm7[3],ymm6[2]
1735 ; AVX1-ONLY-NEXT: vbroadcastsd 176(%rdi), %ymm1
1736 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3]
1737 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1738 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm8[1],ymm5[0],ymm8[3],ymm5[2]
1739 ; AVX1-ONLY-NEXT: vbroadcastsd 368(%rdi), %ymm1
1740 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3]
1741 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1742 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm9[1],ymm4[0],ymm9[3],ymm4[2]
1743 ; AVX1-ONLY-NEXT: vbroadcastsd 560(%rdi), %ymm1
1744 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3]
1745 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1746 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm10[1],ymm3[0],ymm10[3],ymm3[2]
1747 ; AVX1-ONLY-NEXT: vbroadcastsd 752(%rdi), %ymm1
1748 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3]
1749 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1750 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm11[1],ymm2[0],ymm11[3],ymm2[2]
1751 ; AVX1-ONLY-NEXT: vbroadcastsd 944(%rdi), %ymm1
1752 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3]
1753 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1754 ; AVX1-ONLY-NEXT: vmovapd 1088(%rdi), %ymm9
1755 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = mem[0,1],ymm9[2,3]
1756 ; AVX1-ONLY-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
1757 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[1],ymm9[0],ymm0[3],ymm9[2]
1758 ; AVX1-ONLY-NEXT: vbroadcastsd 1136(%rdi), %ymm1
1759 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3]
1760 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1761 ; AVX1-ONLY-NEXT: vmovapd 1280(%rdi), %ymm8
1762 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = mem[0,1],ymm8[2,3]
1763 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1764 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[1],ymm8[0],ymm0[3],ymm8[2]
1765 ; AVX1-ONLY-NEXT: vbroadcastsd 1328(%rdi), %ymm1
1766 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3]
1767 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1768 ; AVX1-ONLY-NEXT: vmovapd 1472(%rdi), %ymm7
1769 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = mem[0,1],ymm7[2,3]
1770 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1771 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[1],ymm7[0],ymm0[3],ymm7[2]
1772 ; AVX1-ONLY-NEXT: vbroadcastsd 1520(%rdi), %ymm1
1773 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3]
1774 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1775 ; AVX1-ONLY-NEXT: vmovapd 32(%rdi), %ymm0
1776 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1777 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm1 = mem[0,1],ymm0[2,3]
1778 ; AVX1-ONLY-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1779 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm1[1],ymm0[0],ymm1[3],ymm0[2]
1780 ; AVX1-ONLY-NEXT: vbroadcastsd 80(%rdi), %ymm1
1781 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3]
1782 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1783 ; AVX1-ONLY-NEXT: vmovapd 224(%rdi), %ymm13
1784 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = mem[0,1],ymm13[2,3]
1785 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1786 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[1],ymm13[0],ymm0[3],ymm13[2]
1787 ; AVX1-ONLY-NEXT: vbroadcastsd 272(%rdi), %ymm1
1788 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3]
1789 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1790 ; AVX1-ONLY-NEXT: vmovapd 416(%rdi), %ymm12
1791 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = mem[0,1],ymm12[2,3]
1792 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1793 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[1],ymm12[0],ymm0[3],ymm12[2]
1794 ; AVX1-ONLY-NEXT: vbroadcastsd 464(%rdi), %ymm1
1795 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3]
1796 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1797 ; AVX1-ONLY-NEXT: vmovapd 608(%rdi), %ymm10
1798 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = mem[0,1],ymm10[2,3]
1799 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1800 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[1],ymm10[0],ymm0[3],ymm10[2]
1801 ; AVX1-ONLY-NEXT: vbroadcastsd 656(%rdi), %ymm1
1802 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3]
1803 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1804 ; AVX1-ONLY-NEXT: vmovapd 800(%rdi), %ymm5
1805 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm14 = mem[0,1],ymm5[2,3]
1806 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm14[1],ymm5[0],ymm14[3],ymm5[2]
1807 ; AVX1-ONLY-NEXT: vbroadcastsd 848(%rdi), %ymm1
1808 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3]
1809 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1810 ; AVX1-ONLY-NEXT: vmovapd 992(%rdi), %ymm4
1811 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm11 = mem[0,1],ymm4[2,3]
1812 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm11[1],ymm4[0],ymm11[3],ymm4[2]
1813 ; AVX1-ONLY-NEXT: vbroadcastsd 1040(%rdi), %ymm1
1814 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3]
1815 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1816 ; AVX1-ONLY-NEXT: vmovapd 1184(%rdi), %ymm2
1817 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm6 = mem[0,1],ymm2[2,3]
1818 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm6[1],ymm2[0],ymm6[3],ymm2[2]
1819 ; AVX1-ONLY-NEXT: vbroadcastsd 1232(%rdi), %ymm3
1820 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm3[3]
1821 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1822 ; AVX1-ONLY-NEXT: vmovapd 1376(%rdi), %ymm1
1823 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm3 = mem[0,1],ymm1[2,3]
1824 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm3[1],ymm1[0],ymm3[3],ymm1[2]
1825 ; AVX1-ONLY-NEXT: vbroadcastsd 1424(%rdi), %ymm15
1826 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm15[3]
1827 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1828 ; AVX1-ONLY-NEXT: vmovaps 112(%rdi), %xmm0
1829 ; AVX1-ONLY-NEXT: vinsertf128 $1, 160(%rdi), %ymm0, %ymm0
1830 ; AVX1-ONLY-NEXT: vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
1831 ; AVX1-ONLY-NEXT: # ymm15 = mem[0,1],ymm0[2,3],mem[4,5],ymm0[6,7]
1832 ; AVX1-ONLY-NEXT: vmovups %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1833 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
1834 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm15 = ymm15[0,1,2,3],mem[4,5,6,7]
1835 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm15[2,3],ymm0[4,5],ymm15[6,7]
1836 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1837 ; AVX1-ONLY-NEXT: vmovaps 304(%rdi), %xmm0
1838 ; AVX1-ONLY-NEXT: vinsertf128 $1, 352(%rdi), %ymm0, %ymm0
1839 ; AVX1-ONLY-NEXT: vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
1840 ; AVX1-ONLY-NEXT: # ymm15 = mem[0,1],ymm0[2,3],mem[4,5],ymm0[6,7]
1841 ; AVX1-ONLY-NEXT: vmovups %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1842 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
1843 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm15 = ymm15[0,1,2,3],mem[4,5,6,7]
1844 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm15[2,3],ymm0[4,5],ymm15[6,7]
1845 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1846 ; AVX1-ONLY-NEXT: vmovaps 496(%rdi), %xmm0
1847 ; AVX1-ONLY-NEXT: vinsertf128 $1, 544(%rdi), %ymm0, %ymm0
1848 ; AVX1-ONLY-NEXT: vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
1849 ; AVX1-ONLY-NEXT: # ymm15 = mem[0,1],ymm0[2,3],mem[4,5],ymm0[6,7]
1850 ; AVX1-ONLY-NEXT: vmovups %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1851 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
1852 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm15 = ymm15[0,1,2,3],mem[4,5,6,7]
1853 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm15[2,3],ymm0[4,5],ymm15[6,7]
1854 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1855 ; AVX1-ONLY-NEXT: vmovaps 688(%rdi), %xmm0
1856 ; AVX1-ONLY-NEXT: vinsertf128 $1, 736(%rdi), %ymm0, %ymm0
1857 ; AVX1-ONLY-NEXT: vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
1858 ; AVX1-ONLY-NEXT: # ymm15 = mem[0,1],ymm0[2,3],mem[4,5],ymm0[6,7]
1859 ; AVX1-ONLY-NEXT: vmovups %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1860 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
1861 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm15 = ymm15[0,1,2,3],mem[4,5,6,7]
1862 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm15[2,3],ymm0[4,5],ymm15[6,7]
1863 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1864 ; AVX1-ONLY-NEXT: vmovaps 880(%rdi), %xmm0
1865 ; AVX1-ONLY-NEXT: vinsertf128 $1, 928(%rdi), %ymm0, %ymm0
1866 ; AVX1-ONLY-NEXT: vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
1867 ; AVX1-ONLY-NEXT: # ymm15 = mem[0,1],ymm0[2,3],mem[4,5],ymm0[6,7]
1868 ; AVX1-ONLY-NEXT: vmovups %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1869 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
1870 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm15 = ymm15[0,1,2,3],mem[4,5,6,7]
1871 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm15[2,3],ymm0[4,5],ymm15[6,7]
1872 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1873 ; AVX1-ONLY-NEXT: vmovaps 1072(%rdi), %xmm0
1874 ; AVX1-ONLY-NEXT: vinsertf128 $1, 1120(%rdi), %ymm0, %ymm0
1875 ; AVX1-ONLY-NEXT: vblendpd $5, (%rsp), %ymm0, %ymm15 # 32-byte Folded Reload
1876 ; AVX1-ONLY-NEXT: # ymm15 = mem[0],ymm0[1],mem[2],ymm0[3]
1877 ; AVX1-ONLY-NEXT: vmovupd %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1878 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm9 = ymm9[0,1],mem[2,3]
1879 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm9[1],ymm0[2],ymm9[3]
1880 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1881 ; AVX1-ONLY-NEXT: vmovaps 1264(%rdi), %xmm0
1882 ; AVX1-ONLY-NEXT: vinsertf128 $1, 1312(%rdi), %ymm0, %ymm0
1883 ; AVX1-ONLY-NEXT: vblendpd $5, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
1884 ; AVX1-ONLY-NEXT: # ymm15 = mem[0],ymm0[1],mem[2],ymm0[3]
1885 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm8 = ymm8[0,1],mem[2,3]
1886 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm8[1],ymm0[2],ymm8[3]
1887 ; AVX1-ONLY-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
1888 ; AVX1-ONLY-NEXT: vmovaps 1456(%rdi), %xmm0
1889 ; AVX1-ONLY-NEXT: vinsertf128 $1, 1504(%rdi), %ymm0, %ymm0
1890 ; AVX1-ONLY-NEXT: vblendpd $5, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm8 # 32-byte Folded Reload
1891 ; AVX1-ONLY-NEXT: # ymm8 = mem[0],ymm0[1],mem[2],ymm0[3]
1892 ; AVX1-ONLY-NEXT: vmovupd %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1893 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm7 = ymm7[0,1],mem[2,3]
1894 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm7[1],ymm0[2],ymm7[3]
1895 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1896 ; AVX1-ONLY-NEXT: vmovaps 1360(%rdi), %xmm0
1897 ; AVX1-ONLY-NEXT: vinsertf128 $1, 1408(%rdi), %ymm0, %ymm0
1898 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm7 = ymm3[0],ymm0[1],ymm3[2],ymm0[3]
1899 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0,1],mem[2,3]
1900 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3]
1901 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1902 ; AVX1-ONLY-NEXT: vmovaps 1168(%rdi), %xmm0
1903 ; AVX1-ONLY-NEXT: vinsertf128 $1, 1216(%rdi), %ymm0, %ymm1
1904 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm3 = ymm6[0],ymm1[1],ymm6[2],ymm1[3]
1905 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm2 = ymm2[0,1],mem[2,3]
1906 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0],ymm2[1],ymm1[2],ymm2[3]
1907 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1908 ; AVX1-ONLY-NEXT: vmovaps 976(%rdi), %xmm1
1909 ; AVX1-ONLY-NEXT: vinsertf128 $1, 1024(%rdi), %ymm1, %ymm1
1910 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm2 = ymm11[0],ymm1[1],ymm11[2],ymm1[3]
1911 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm4 = ymm4[0,1],mem[2,3]
1912 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm6 = ymm1[0],ymm4[1],ymm1[2],ymm4[3]
1913 ; AVX1-ONLY-NEXT: vmovaps 784(%rdi), %xmm1
1914 ; AVX1-ONLY-NEXT: vinsertf128 $1, 832(%rdi), %ymm1, %ymm1
1915 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm14 = ymm14[0],ymm1[1],ymm14[2],ymm1[3]
1916 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm4 = ymm5[0,1],mem[2,3]
1917 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0],ymm4[1],ymm1[2],ymm4[3]
1918 ; AVX1-ONLY-NEXT: vmovaps 592(%rdi), %xmm4
1919 ; AVX1-ONLY-NEXT: vinsertf128 $1, 640(%rdi), %ymm4, %ymm4
1920 ; AVX1-ONLY-NEXT: vblendpd $5, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm5 # 32-byte Folded Reload
1921 ; AVX1-ONLY-NEXT: # ymm5 = mem[0],ymm4[1],mem[2],ymm4[3]
1922 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm9 = ymm10[0,1],mem[2,3]
1923 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm10 = ymm4[0],ymm9[1],ymm4[2],ymm9[3]
1924 ; AVX1-ONLY-NEXT: vmovaps 400(%rdi), %xmm4
1925 ; AVX1-ONLY-NEXT: vinsertf128 $1, 448(%rdi), %ymm4, %ymm4
1926 ; AVX1-ONLY-NEXT: vblendpd $5, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm9 # 32-byte Folded Reload
1927 ; AVX1-ONLY-NEXT: # ymm9 = mem[0],ymm4[1],mem[2],ymm4[3]
1928 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm11 = ymm12[0,1],mem[2,3]
1929 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm4 = ymm4[0],ymm11[1],ymm4[2],ymm11[3]
1930 ; AVX1-ONLY-NEXT: vmovaps 208(%rdi), %xmm11
1931 ; AVX1-ONLY-NEXT: vinsertf128 $1, 256(%rdi), %ymm11, %ymm11
1932 ; AVX1-ONLY-NEXT: vblendpd $5, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm12 # 32-byte Folded Reload
1933 ; AVX1-ONLY-NEXT: # ymm12 = mem[0],ymm11[1],mem[2],ymm11[3]
1934 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm13 = ymm13[0,1],mem[2,3]
1935 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm13 = ymm11[0],ymm13[1],ymm11[2],ymm13[3]
1936 ; AVX1-ONLY-NEXT: vmovaps 16(%rdi), %xmm11
1937 ; AVX1-ONLY-NEXT: vinsertf128 $1, 64(%rdi), %ymm11, %ymm11
1938 ; AVX1-ONLY-NEXT: vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm0 # 32-byte Folded Reload
1939 ; AVX1-ONLY-NEXT: # ymm0 = mem[0,1],ymm11[2,3],mem[4,5],ymm11[6,7]
1940 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
1941 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3],mem[4,5,6,7]
1942 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0,1],ymm8[2,3],ymm11[4,5],ymm8[6,7]
1943 ; AVX1-ONLY-NEXT: vmovapd %ymm7, 448(%rsi)
1944 ; AVX1-ONLY-NEXT: vmovapd %ymm3, 384(%rsi)
1945 ; AVX1-ONLY-NEXT: vmovapd %ymm2, 320(%rsi)
1946 ; AVX1-ONLY-NEXT: vmovapd %ymm14, 256(%rsi)
1947 ; AVX1-ONLY-NEXT: vmovapd %ymm5, 192(%rsi)
1948 ; AVX1-ONLY-NEXT: vmovapd %ymm9, 128(%rsi)
1949 ; AVX1-ONLY-NEXT: vmovapd %ymm12, 64(%rsi)
1950 ; AVX1-ONLY-NEXT: vmovaps %ymm0, (%rsi)
1951 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1952 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 480(%rsi)
1953 ; AVX1-ONLY-NEXT: vmovapd %ymm15, 416(%rsi)
1954 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1955 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 352(%rsi)
1956 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1957 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 288(%rsi)
1958 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1959 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 224(%rsi)
1960 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1961 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 160(%rsi)
1962 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1963 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 96(%rsi)
1964 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1965 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 32(%rsi)
1966 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1967 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 448(%rdx)
1968 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1969 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 384(%rdx)
1970 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1971 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 320(%rdx)
1972 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1973 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 256(%rdx)
1974 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1975 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 192(%rdx)
1976 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1977 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 128(%rdx)
1978 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1979 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 64(%rdx)
1980 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1981 ; AVX1-ONLY-NEXT: vmovaps %ymm0, (%rdx)
1982 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1983 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 480(%rdx)
1984 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1985 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 416(%rdx)
1986 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1987 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 352(%rdx)
1988 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1989 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 288(%rdx)
1990 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1991 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 224(%rdx)
1992 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1993 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 160(%rdx)
1994 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1995 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 96(%rdx)
1996 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1997 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 32(%rdx)
1998 ; AVX1-ONLY-NEXT: vmovaps %ymm11, (%rcx)
1999 ; AVX1-ONLY-NEXT: vmovapd %ymm13, 64(%rcx)
2000 ; AVX1-ONLY-NEXT: vmovapd %ymm4, 128(%rcx)
2001 ; AVX1-ONLY-NEXT: vmovapd %ymm10, 192(%rcx)
2002 ; AVX1-ONLY-NEXT: vmovapd %ymm1, 256(%rcx)
2003 ; AVX1-ONLY-NEXT: vmovapd %ymm6, 320(%rcx)
2004 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2005 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 384(%rcx)
2006 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2007 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 448(%rcx)
2008 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2009 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 480(%rcx)
2010 ; AVX1-ONLY-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
2011 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 416(%rcx)
2012 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2013 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 352(%rcx)
2014 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2015 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 288(%rcx)
2016 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2017 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 224(%rcx)
2018 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2019 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 160(%rcx)
2020 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2021 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 96(%rcx)
2022 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2023 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 32(%rcx)
2024 ; AVX1-ONLY-NEXT: addq $1096, %rsp # imm = 0x448
2025 ; AVX1-ONLY-NEXT: vzeroupper
2026 ; AVX1-ONLY-NEXT: retq
2028 ; AVX2-ONLY-LABEL: load_i64_stride3_vf64:
2029 ; AVX2-ONLY: # %bb.0:
2030 ; AVX2-ONLY-NEXT: subq $968, %rsp # imm = 0x3C8
2031 ; AVX2-ONLY-NEXT: vmovaps 896(%rdi), %ymm2
2032 ; AVX2-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2033 ; AVX2-ONLY-NEXT: vmovaps 864(%rdi), %ymm3
2034 ; AVX2-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2035 ; AVX2-ONLY-NEXT: vmovaps 704(%rdi), %ymm4
2036 ; AVX2-ONLY-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2037 ; AVX2-ONLY-NEXT: vmovaps 672(%rdi), %ymm5
2038 ; AVX2-ONLY-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2039 ; AVX2-ONLY-NEXT: vmovaps 512(%rdi), %ymm6
2040 ; AVX2-ONLY-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2041 ; AVX2-ONLY-NEXT: vmovaps 480(%rdi), %ymm7
2042 ; AVX2-ONLY-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2043 ; AVX2-ONLY-NEXT: vmovaps 320(%rdi), %ymm8
2044 ; AVX2-ONLY-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2045 ; AVX2-ONLY-NEXT: vmovaps 288(%rdi), %ymm9
2046 ; AVX2-ONLY-NEXT: vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2047 ; AVX2-ONLY-NEXT: vmovaps 128(%rdi), %ymm10
2048 ; AVX2-ONLY-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2049 ; AVX2-ONLY-NEXT: vmovaps 96(%rdi), %ymm1
2050 ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2051 ; AVX2-ONLY-NEXT: vinsertf128 $1, 160(%rdi), %ymm0, %ymm0
2052 ; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,3,2,3]
2053 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm10[4,5,6,7]
2054 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
2055 ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2056 ; AVX2-ONLY-NEXT: vinsertf128 $1, 352(%rdi), %ymm0, %ymm0
2057 ; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm1 = ymm9[0,3,2,3]
2058 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm8[4,5,6,7]
2059 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
2060 ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2061 ; AVX2-ONLY-NEXT: vinsertf128 $1, 544(%rdi), %ymm0, %ymm0
2062 ; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm1 = ymm7[0,3,2,3]
2063 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm6[4,5,6,7]
2064 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
2065 ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2066 ; AVX2-ONLY-NEXT: vinsertf128 $1, 736(%rdi), %ymm0, %ymm0
2067 ; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm1 = ymm5[0,3,2,3]
2068 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm4[4,5,6,7]
2069 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
2070 ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2071 ; AVX2-ONLY-NEXT: vinsertf128 $1, 928(%rdi), %ymm0, %ymm0
2072 ; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm1 = ymm3[0,3,2,3]
2073 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
2074 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
2075 ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2076 ; AVX2-ONLY-NEXT: vmovaps 1088(%rdi), %ymm1
2077 ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2078 ; AVX2-ONLY-NEXT: vmovaps 1056(%rdi), %ymm0
2079 ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2080 ; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
2081 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
2082 ; AVX2-ONLY-NEXT: vinsertf128 $1, 1120(%rdi), %ymm0, %ymm1
2083 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
2084 ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2085 ; AVX2-ONLY-NEXT: vmovaps 1280(%rdi), %ymm1
2086 ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2087 ; AVX2-ONLY-NEXT: vmovaps 1248(%rdi), %ymm0
2088 ; AVX2-ONLY-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
2089 ; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
2090 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
2091 ; AVX2-ONLY-NEXT: vinsertf128 $1, 1312(%rdi), %ymm0, %ymm1
2092 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
2093 ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2094 ; AVX2-ONLY-NEXT: vmovaps 1472(%rdi), %ymm1
2095 ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2096 ; AVX2-ONLY-NEXT: vmovaps 1440(%rdi), %ymm0
2097 ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2098 ; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
2099 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
2100 ; AVX2-ONLY-NEXT: vinsertf128 $1, 1504(%rdi), %ymm0, %ymm1
2101 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
2102 ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2103 ; AVX2-ONLY-NEXT: vmovaps 32(%rdi), %ymm1
2104 ; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2105 ; AVX2-ONLY-NEXT: vmovaps (%rdi), %ymm0
2106 ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2107 ; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
2108 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
2109 ; AVX2-ONLY-NEXT: vinsertf128 $1, 64(%rdi), %ymm0, %ymm1
2110 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
2111 ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2112 ; AVX2-ONLY-NEXT: vmovaps 224(%rdi), %ymm13
2113 ; AVX2-ONLY-NEXT: vmovaps 192(%rdi), %ymm12
2114 ; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm0 = ymm12[0,3,2,3]
2115 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm13[4,5,6,7]
2116 ; AVX2-ONLY-NEXT: vinsertf128 $1, 256(%rdi), %ymm0, %ymm1
2117 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
2118 ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2119 ; AVX2-ONLY-NEXT: vmovaps 416(%rdi), %ymm11
2120 ; AVX2-ONLY-NEXT: vmovaps 384(%rdi), %ymm10
2121 ; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm0 = ymm10[0,3,2,3]
2122 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm11[4,5,6,7]
2123 ; AVX2-ONLY-NEXT: vinsertf128 $1, 448(%rdi), %ymm0, %ymm1
2124 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
2125 ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2126 ; AVX2-ONLY-NEXT: vmovaps 608(%rdi), %ymm9
2127 ; AVX2-ONLY-NEXT: vmovaps 576(%rdi), %ymm8
2128 ; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm0 = ymm8[0,3,2,3]
2129 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm9[4,5,6,7]
2130 ; AVX2-ONLY-NEXT: vinsertf128 $1, 640(%rdi), %ymm0, %ymm1
2131 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
2132 ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2133 ; AVX2-ONLY-NEXT: vmovaps 800(%rdi), %ymm7
2134 ; AVX2-ONLY-NEXT: vmovaps 768(%rdi), %ymm6
2135 ; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm0 = ymm6[0,3,2,3]
2136 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm7[4,5,6,7]
2137 ; AVX2-ONLY-NEXT: vinsertf128 $1, 832(%rdi), %ymm0, %ymm1
2138 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
2139 ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2140 ; AVX2-ONLY-NEXT: vmovaps 992(%rdi), %ymm5
2141 ; AVX2-ONLY-NEXT: vmovaps 960(%rdi), %ymm4
2142 ; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm0 = ymm4[0,3,2,3]
2143 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm5[4,5,6,7]
2144 ; AVX2-ONLY-NEXT: vinsertf128 $1, 1024(%rdi), %ymm0, %ymm1
2145 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
2146 ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2147 ; AVX2-ONLY-NEXT: vmovaps 1184(%rdi), %ymm3
2148 ; AVX2-ONLY-NEXT: vmovaps 1152(%rdi), %ymm2
2149 ; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm0 = ymm2[0,3,2,3]
2150 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5,6,7]
2151 ; AVX2-ONLY-NEXT: vinsertf128 $1, 1216(%rdi), %ymm0, %ymm1
2152 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
2153 ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2154 ; AVX2-ONLY-NEXT: vmovaps 1376(%rdi), %ymm1
2155 ; AVX2-ONLY-NEXT: vmovaps 1344(%rdi), %ymm0
2156 ; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm15 = ymm0[0,3,2,3]
2157 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm15 = ymm15[0,1,2,3],ymm1[4,5,6,7]
2158 ; AVX2-ONLY-NEXT: vinsertf128 $1, 1408(%rdi), %ymm0, %ymm14
2159 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0,1,2,3,4,5],ymm14[6,7]
2160 ; AVX2-ONLY-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2161 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
2162 ; AVX2-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
2163 ; AVX2-ONLY-NEXT: # ymm14 = ymm14[0,1],mem[2,3],ymm14[4,5,6,7]
2164 ; AVX2-ONLY-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,3,0,1,6,7,4,5]
2165 ; AVX2-ONLY-NEXT: vbroadcastsd 176(%rdi), %ymm15
2166 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5],ymm15[6,7]
2167 ; AVX2-ONLY-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2168 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
2169 ; AVX2-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
2170 ; AVX2-ONLY-NEXT: # ymm14 = ymm14[0,1],mem[2,3],ymm14[4,5,6,7]
2171 ; AVX2-ONLY-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,3,0,1,6,7,4,5]
2172 ; AVX2-ONLY-NEXT: vbroadcastsd 368(%rdi), %ymm15
2173 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5],ymm15[6,7]
2174 ; AVX2-ONLY-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2175 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
2176 ; AVX2-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
2177 ; AVX2-ONLY-NEXT: # ymm14 = ymm14[0,1],mem[2,3],ymm14[4,5,6,7]
2178 ; AVX2-ONLY-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,3,0,1,6,7,4,5]
2179 ; AVX2-ONLY-NEXT: vbroadcastsd 560(%rdi), %ymm15
2180 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5],ymm15[6,7]
2181 ; AVX2-ONLY-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2182 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
2183 ; AVX2-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
2184 ; AVX2-ONLY-NEXT: # ymm14 = ymm14[0,1],mem[2,3],ymm14[4,5,6,7]
2185 ; AVX2-ONLY-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,3,0,1,6,7,4,5]
2186 ; AVX2-ONLY-NEXT: vbroadcastsd 752(%rdi), %ymm15
2187 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5],ymm15[6,7]
2188 ; AVX2-ONLY-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2189 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
2190 ; AVX2-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
2191 ; AVX2-ONLY-NEXT: # ymm14 = ymm14[0,1],mem[2,3],ymm14[4,5,6,7]
2192 ; AVX2-ONLY-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,3,0,1,6,7,4,5]
2193 ; AVX2-ONLY-NEXT: vbroadcastsd 944(%rdi), %ymm15
2194 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5],ymm15[6,7]
2195 ; AVX2-ONLY-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2196 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
2197 ; AVX2-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
2198 ; AVX2-ONLY-NEXT: # ymm14 = ymm14[0,1],mem[2,3],ymm14[4,5,6,7]
2199 ; AVX2-ONLY-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,3,0,1,6,7,4,5]
2200 ; AVX2-ONLY-NEXT: vbroadcastsd 1136(%rdi), %ymm15
2201 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5],ymm15[6,7]
2202 ; AVX2-ONLY-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2203 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
2204 ; AVX2-ONLY-NEXT: vblendps $12, (%rsp), %ymm14, %ymm14 # 32-byte Folded Reload
2205 ; AVX2-ONLY-NEXT: # ymm14 = ymm14[0,1],mem[2,3],ymm14[4,5,6,7]
2206 ; AVX2-ONLY-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,3,0,1,6,7,4,5]
2207 ; AVX2-ONLY-NEXT: vbroadcastsd 1328(%rdi), %ymm15
2208 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5],ymm15[6,7]
2209 ; AVX2-ONLY-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2210 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
2211 ; AVX2-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
2212 ; AVX2-ONLY-NEXT: # ymm14 = ymm14[0,1],mem[2,3],ymm14[4,5,6,7]
2213 ; AVX2-ONLY-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,3,0,1,6,7,4,5]
2214 ; AVX2-ONLY-NEXT: vbroadcastsd 1520(%rdi), %ymm15
2215 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5],ymm15[6,7]
2216 ; AVX2-ONLY-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2217 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
2218 ; AVX2-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
2219 ; AVX2-ONLY-NEXT: # ymm14 = ymm14[0,1],mem[2,3],ymm14[4,5,6,7]
2220 ; AVX2-ONLY-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,3,0,1,6,7,4,5]
2221 ; AVX2-ONLY-NEXT: vbroadcastsd 80(%rdi), %ymm15
2222 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5],ymm15[6,7]
2223 ; AVX2-ONLY-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2224 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm12 = ymm13[0,1],ymm12[2,3],ymm13[4,5,6,7]
2225 ; AVX2-ONLY-NEXT: vshufps {{.*#+}} ymm12 = ymm12[2,3,0,1,6,7,4,5]
2226 ; AVX2-ONLY-NEXT: vbroadcastsd 272(%rdi), %ymm13
2227 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm12 = ymm12[0,1,2,3,4,5],ymm13[6,7]
2228 ; AVX2-ONLY-NEXT: vmovups %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2229 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm10 = ymm11[0,1],ymm10[2,3],ymm11[4,5,6,7]
2230 ; AVX2-ONLY-NEXT: vshufps {{.*#+}} ymm10 = ymm10[2,3,0,1,6,7,4,5]
2231 ; AVX2-ONLY-NEXT: vbroadcastsd 464(%rdi), %ymm11
2232 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3,4,5],ymm11[6,7]
2233 ; AVX2-ONLY-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2234 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm8 = ymm9[0,1],ymm8[2,3],ymm9[4,5,6,7]
2235 ; AVX2-ONLY-NEXT: vshufps {{.*#+}} ymm8 = ymm8[2,3,0,1,6,7,4,5]
2236 ; AVX2-ONLY-NEXT: vbroadcastsd 656(%rdi), %ymm9
2237 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5],ymm9[6,7]
2238 ; AVX2-ONLY-NEXT: vmovups %ymm8, (%rsp) # 32-byte Spill
2239 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,3],ymm7[4,5,6,7]
2240 ; AVX2-ONLY-NEXT: vshufps {{.*#+}} ymm6 = ymm6[2,3,0,1,6,7,4,5]
2241 ; AVX2-ONLY-NEXT: vbroadcastsd 848(%rdi), %ymm7
2242 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5],ymm7[6,7]
2243 ; AVX2-ONLY-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2244 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1],ymm4[2,3],ymm5[4,5,6,7]
2245 ; AVX2-ONLY-NEXT: vshufps {{.*#+}} ymm4 = ymm4[2,3,0,1,6,7,4,5]
2246 ; AVX2-ONLY-NEXT: vbroadcastsd 1040(%rdi), %ymm5
2247 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5],ymm5[6,7]
2248 ; AVX2-ONLY-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2249 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3],ymm3[4,5,6,7]
2250 ; AVX2-ONLY-NEXT: vshufps {{.*#+}} ymm2 = ymm2[2,3,0,1,6,7,4,5]
2251 ; AVX2-ONLY-NEXT: vbroadcastsd 1232(%rdi), %ymm3
2252 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm13 = ymm2[0,1,2,3,4,5],ymm3[6,7]
2253 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7]
2254 ; AVX2-ONLY-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5]
2255 ; AVX2-ONLY-NEXT: vbroadcastsd 1424(%rdi), %ymm1
2256 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm11 = ymm0[0,1,2,3,4,5],ymm1[6,7]
2257 ; AVX2-ONLY-NEXT: vmovaps 112(%rdi), %xmm0
2258 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
2259 ; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
2260 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
2261 ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2262 ; AVX2-ONLY-NEXT: vmovaps 304(%rdi), %xmm0
2263 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
2264 ; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
2265 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
2266 ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2267 ; AVX2-ONLY-NEXT: vmovaps 496(%rdi), %xmm0
2268 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
2269 ; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
2270 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
2271 ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2272 ; AVX2-ONLY-NEXT: vmovaps 688(%rdi), %xmm0
2273 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
2274 ; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
2275 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm14 = ymm0[0,1,2,3],ymm1[4,5,6,7]
2276 ; AVX2-ONLY-NEXT: vmovaps 880(%rdi), %xmm0
2277 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
2278 ; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
2279 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm12 = ymm0[0,1,2,3],ymm1[4,5,6,7]
2280 ; AVX2-ONLY-NEXT: vmovaps 1072(%rdi), %xmm0
2281 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
2282 ; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
2283 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm10 = ymm0[0,1,2,3],ymm1[4,5,6,7]
2284 ; AVX2-ONLY-NEXT: vmovaps 1264(%rdi), %xmm0
2285 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
2286 ; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
2287 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm9 = ymm0[0,1,2,3],ymm1[4,5,6,7]
2288 ; AVX2-ONLY-NEXT: vmovaps 1456(%rdi), %xmm0
2289 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
2290 ; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
2291 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm8 = ymm0[0,1,2,3],ymm1[4,5,6,7]
2292 ; AVX2-ONLY-NEXT: vmovaps 1360(%rdi), %xmm0
2293 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
2294 ; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
2295 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm7 = ymm0[0,1,2,3],ymm1[4,5,6,7]
2296 ; AVX2-ONLY-NEXT: vmovaps 1168(%rdi), %xmm0
2297 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
2298 ; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
2299 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm6 = ymm0[0,1,2,3],ymm1[4,5,6,7]
2300 ; AVX2-ONLY-NEXT: vmovaps 976(%rdi), %xmm0
2301 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
2302 ; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
2303 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm5 = ymm0[0,1,2,3],ymm1[4,5,6,7]
2304 ; AVX2-ONLY-NEXT: vmovaps 784(%rdi), %xmm0
2305 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
2306 ; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
2307 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm0[0,1,2,3],ymm1[4,5,6,7]
2308 ; AVX2-ONLY-NEXT: vmovaps 592(%rdi), %xmm0
2309 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
2310 ; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
2311 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm3 = ymm0[0,1,2,3],ymm1[4,5,6,7]
2312 ; AVX2-ONLY-NEXT: vmovaps 400(%rdi), %xmm0
2313 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
2314 ; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
2315 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm0[0,1,2,3],ymm1[4,5,6,7]
2316 ; AVX2-ONLY-NEXT: vmovaps 208(%rdi), %xmm0
2317 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
2318 ; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,1,0,3]
2319 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm0[0,1,2,3],ymm1[4,5,6,7]
2320 ; AVX2-ONLY-NEXT: vmovaps 16(%rdi), %xmm0
2321 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
2322 ; AVX2-ONLY-NEXT: vpermpd {{.*#+}} ymm15 = mem[0,1,0,3]
2323 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm15[4,5,6,7]
2324 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
2325 ; AVX2-ONLY-NEXT: vmovaps %ymm15, 448(%rsi)
2326 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
2327 ; AVX2-ONLY-NEXT: vmovaps %ymm15, 384(%rsi)
2328 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
2329 ; AVX2-ONLY-NEXT: vmovaps %ymm15, 320(%rsi)
2330 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
2331 ; AVX2-ONLY-NEXT: vmovaps %ymm15, 256(%rsi)
2332 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
2333 ; AVX2-ONLY-NEXT: vmovaps %ymm15, 192(%rsi)
2334 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
2335 ; AVX2-ONLY-NEXT: vmovaps %ymm15, 128(%rsi)
2336 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
2337 ; AVX2-ONLY-NEXT: vmovaps %ymm15, 64(%rsi)
2338 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
2339 ; AVX2-ONLY-NEXT: vmovaps %ymm15, (%rsi)
2340 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
2341 ; AVX2-ONLY-NEXT: vmovaps %ymm15, 480(%rsi)
2342 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
2343 ; AVX2-ONLY-NEXT: vmovaps %ymm15, 416(%rsi)
2344 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
2345 ; AVX2-ONLY-NEXT: vmovaps %ymm15, 352(%rsi)
2346 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
2347 ; AVX2-ONLY-NEXT: vmovaps %ymm15, 288(%rsi)
2348 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
2349 ; AVX2-ONLY-NEXT: vmovaps %ymm15, 224(%rsi)
2350 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
2351 ; AVX2-ONLY-NEXT: vmovaps %ymm15, 160(%rsi)
2352 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
2353 ; AVX2-ONLY-NEXT: vmovaps %ymm15, 96(%rsi)
2354 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
2355 ; AVX2-ONLY-NEXT: vmovaps %ymm15, 32(%rsi)
2356 ; AVX2-ONLY-NEXT: vmovaps %ymm11, 448(%rdx)
2357 ; AVX2-ONLY-NEXT: vmovaps %ymm13, 384(%rdx)
2358 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
2359 ; AVX2-ONLY-NEXT: vmovaps %ymm11, 320(%rdx)
2360 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
2361 ; AVX2-ONLY-NEXT: vmovaps %ymm11, 256(%rdx)
2362 ; AVX2-ONLY-NEXT: vmovups (%rsp), %ymm11 # 32-byte Reload
2363 ; AVX2-ONLY-NEXT: vmovaps %ymm11, 192(%rdx)
2364 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
2365 ; AVX2-ONLY-NEXT: vmovaps %ymm11, 128(%rdx)
2366 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
2367 ; AVX2-ONLY-NEXT: vmovaps %ymm11, 64(%rdx)
2368 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
2369 ; AVX2-ONLY-NEXT: vmovaps %ymm11, (%rdx)
2370 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
2371 ; AVX2-ONLY-NEXT: vmovaps %ymm11, 480(%rdx)
2372 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
2373 ; AVX2-ONLY-NEXT: vmovaps %ymm11, 416(%rdx)
2374 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
2375 ; AVX2-ONLY-NEXT: vmovaps %ymm11, 352(%rdx)
2376 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
2377 ; AVX2-ONLY-NEXT: vmovaps %ymm11, 288(%rdx)
2378 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
2379 ; AVX2-ONLY-NEXT: vmovaps %ymm11, 224(%rdx)
2380 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
2381 ; AVX2-ONLY-NEXT: vmovaps %ymm11, 160(%rdx)
2382 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
2383 ; AVX2-ONLY-NEXT: vmovaps %ymm11, 96(%rdx)
2384 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
2385 ; AVX2-ONLY-NEXT: vmovaps %ymm11, 32(%rdx)
2386 ; AVX2-ONLY-NEXT: vmovaps %ymm0, (%rcx)
2387 ; AVX2-ONLY-NEXT: vmovaps %ymm1, 64(%rcx)
2388 ; AVX2-ONLY-NEXT: vmovaps %ymm2, 128(%rcx)
2389 ; AVX2-ONLY-NEXT: vmovaps %ymm3, 192(%rcx)
2390 ; AVX2-ONLY-NEXT: vmovaps %ymm4, 256(%rcx)
2391 ; AVX2-ONLY-NEXT: vmovaps %ymm5, 320(%rcx)
2392 ; AVX2-ONLY-NEXT: vmovaps %ymm6, 384(%rcx)
2393 ; AVX2-ONLY-NEXT: vmovaps %ymm7, 448(%rcx)
2394 ; AVX2-ONLY-NEXT: vmovaps %ymm8, 480(%rcx)
2395 ; AVX2-ONLY-NEXT: vmovaps %ymm9, 416(%rcx)
2396 ; AVX2-ONLY-NEXT: vmovaps %ymm10, 352(%rcx)
2397 ; AVX2-ONLY-NEXT: vmovaps %ymm12, 288(%rcx)
2398 ; AVX2-ONLY-NEXT: vmovaps %ymm14, 224(%rcx)
2399 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2400 ; AVX2-ONLY-NEXT: vmovaps %ymm0, 160(%rcx)
2401 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2402 ; AVX2-ONLY-NEXT: vmovaps %ymm0, 96(%rcx)
2403 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2404 ; AVX2-ONLY-NEXT: vmovaps %ymm0, 32(%rcx)
2405 ; AVX2-ONLY-NEXT: addq $968, %rsp # imm = 0x3C8
2406 ; AVX2-ONLY-NEXT: vzeroupper
2407 ; AVX2-ONLY-NEXT: retq
2409 ; AVX512-LABEL: load_i64_stride3_vf64:
2411 ; AVX512-NEXT: vmovdqa64 1472(%rdi), %zmm0
2412 ; AVX512-NEXT: vmovdqa64 1408(%rdi), %zmm14
2413 ; AVX512-NEXT: vmovdqa64 1344(%rdi), %zmm22
2414 ; AVX512-NEXT: vmovdqa64 1280(%rdi), %zmm1
2415 ; AVX512-NEXT: vmovdqa64 1216(%rdi), %zmm13
2416 ; AVX512-NEXT: vmovdqa64 1152(%rdi), %zmm19
2417 ; AVX512-NEXT: vmovdqa64 1088(%rdi), %zmm2
2418 ; AVX512-NEXT: vmovdqa64 1024(%rdi), %zmm12
2419 ; AVX512-NEXT: vmovdqa64 960(%rdi), %zmm27
2420 ; AVX512-NEXT: vmovdqa64 896(%rdi), %zmm3
2421 ; AVX512-NEXT: vmovdqa64 832(%rdi), %zmm10
2422 ; AVX512-NEXT: vmovdqa64 768(%rdi), %zmm26
2423 ; AVX512-NEXT: vmovdqa64 704(%rdi), %zmm4
2424 ; AVX512-NEXT: vmovdqa64 640(%rdi), %zmm9
2425 ; AVX512-NEXT: vmovdqa64 576(%rdi), %zmm29
2426 ; AVX512-NEXT: vmovdqa64 512(%rdi), %zmm5
2427 ; AVX512-NEXT: vmovdqa64 448(%rdi), %zmm8
2428 ; AVX512-NEXT: vmovdqa64 384(%rdi), %zmm28
2429 ; AVX512-NEXT: vmovdqa64 256(%rdi), %zmm7
2430 ; AVX512-NEXT: vmovdqa64 (%rdi), %zmm25
2431 ; AVX512-NEXT: vmovdqa64 192(%rdi), %zmm30
2432 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm11 = <0,3,6,9,12,15,u,u>
2433 ; AVX512-NEXT: vmovdqa64 %zmm30, %zmm6
2434 ; AVX512-NEXT: vpermt2q %zmm7, %zmm11, %zmm6
2435 ; AVX512-NEXT: vmovdqa64 %zmm28, %zmm15
2436 ; AVX512-NEXT: vpermt2q %zmm8, %zmm11, %zmm15
2437 ; AVX512-NEXT: vmovdqa64 %zmm29, %zmm16
2438 ; AVX512-NEXT: vpermt2q %zmm9, %zmm11, %zmm16
2439 ; AVX512-NEXT: vmovdqa64 %zmm26, %zmm17
2440 ; AVX512-NEXT: vpermt2q %zmm10, %zmm11, %zmm17
2441 ; AVX512-NEXT: vmovdqa64 %zmm27, %zmm18
2442 ; AVX512-NEXT: vpermt2q %zmm12, %zmm11, %zmm18
2443 ; AVX512-NEXT: vmovdqa64 %zmm19, %zmm20
2444 ; AVX512-NEXT: vpermt2q %zmm13, %zmm11, %zmm20
2445 ; AVX512-NEXT: vmovdqa64 %zmm22, %zmm23
2446 ; AVX512-NEXT: vpermt2q %zmm14, %zmm11, %zmm23
2447 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm21 = <1,4,7,10,13,u,u,u>
2448 ; AVX512-NEXT: vmovdqa64 %zmm30, %zmm24
2449 ; AVX512-NEXT: vpermt2q %zmm7, %zmm21, %zmm24
2450 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm31 = <10,13,0,3,6,u,u,u>
2451 ; AVX512-NEXT: vpermt2q %zmm30, %zmm31, %zmm7
2452 ; AVX512-NEXT: vmovdqa64 %zmm29, %zmm30
2453 ; AVX512-NEXT: vpermt2q %zmm9, %zmm21, %zmm30
2454 ; AVX512-NEXT: vpermt2q %zmm29, %zmm31, %zmm9
2455 ; AVX512-NEXT: vmovdqa64 %zmm28, %zmm29
2456 ; AVX512-NEXT: vpermt2q %zmm8, %zmm21, %zmm29
2457 ; AVX512-NEXT: vpermt2q %zmm28, %zmm31, %zmm8
2458 ; AVX512-NEXT: vmovdqa64 %zmm27, %zmm28
2459 ; AVX512-NEXT: vpermt2q %zmm12, %zmm21, %zmm28
2460 ; AVX512-NEXT: vpermt2q %zmm27, %zmm31, %zmm12
2461 ; AVX512-NEXT: vmovdqa64 %zmm26, %zmm27
2462 ; AVX512-NEXT: vpermt2q %zmm10, %zmm21, %zmm27
2463 ; AVX512-NEXT: vpermt2q %zmm26, %zmm31, %zmm10
2464 ; AVX512-NEXT: vmovdqa64 %zmm22, %zmm26
2465 ; AVX512-NEXT: vpermt2q %zmm14, %zmm21, %zmm26
2466 ; AVX512-NEXT: vpermt2q %zmm22, %zmm31, %zmm14
2467 ; AVX512-NEXT: vmovdqa64 %zmm19, %zmm22
2468 ; AVX512-NEXT: vpermt2q %zmm13, %zmm21, %zmm22
2469 ; AVX512-NEXT: vpermt2q %zmm19, %zmm31, %zmm13
2470 ; AVX512-NEXT: vmovdqa64 64(%rdi), %zmm19
2471 ; AVX512-NEXT: vpermi2q %zmm19, %zmm25, %zmm11
2472 ; AVX512-NEXT: vpermi2q %zmm19, %zmm25, %zmm21
2473 ; AVX512-NEXT: vpermt2q %zmm25, %zmm31, %zmm19
2474 ; AVX512-NEXT: vmovdqa64 320(%rdi), %zmm25
2475 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm31 = [0,1,2,3,4,5,10,13]
2476 ; AVX512-NEXT: vpermt2q %zmm25, %zmm31, %zmm6
2477 ; AVX512-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
2478 ; AVX512-NEXT: vpermt2q %zmm5, %zmm31, %zmm15
2479 ; AVX512-NEXT: vpermt2q %zmm4, %zmm31, %zmm16
2480 ; AVX512-NEXT: vpermt2q %zmm3, %zmm31, %zmm17
2481 ; AVX512-NEXT: vpermt2q %zmm2, %zmm31, %zmm18
2482 ; AVX512-NEXT: vpermt2q %zmm1, %zmm31, %zmm20
2483 ; AVX512-NEXT: vpermt2q %zmm0, %zmm31, %zmm23
2484 ; AVX512-NEXT: vmovdqa64 128(%rdi), %zmm6
2485 ; AVX512-NEXT: vpermt2q %zmm6, %zmm31, %zmm11
2486 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm31 = [0,1,2,3,4,8,11,14]
2487 ; AVX512-NEXT: vpermt2q %zmm1, %zmm31, %zmm22
2488 ; AVX512-NEXT: vpermt2q %zmm25, %zmm31, %zmm24
2489 ; AVX512-NEXT: vpermt2q %zmm4, %zmm31, %zmm30
2490 ; AVX512-NEXT: vpermt2q %zmm5, %zmm31, %zmm29
2491 ; AVX512-NEXT: vpermt2q %zmm2, %zmm31, %zmm28
2492 ; AVX512-NEXT: vpermt2q %zmm3, %zmm31, %zmm27
2493 ; AVX512-NEXT: vpermt2q %zmm0, %zmm31, %zmm26
2494 ; AVX512-NEXT: vpermt2q %zmm6, %zmm31, %zmm21
2495 ; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm31 = [0,1,2,3,4,9,12,15]
2496 ; AVX512-NEXT: vpermt2q %zmm25, %zmm31, %zmm7
2497 ; AVX512-NEXT: vpermt2q %zmm4, %zmm31, %zmm9
2498 ; AVX512-NEXT: vpermt2q %zmm5, %zmm31, %zmm8
2499 ; AVX512-NEXT: vpermt2q %zmm2, %zmm31, %zmm12
2500 ; AVX512-NEXT: vpermt2q %zmm3, %zmm31, %zmm10
2501 ; AVX512-NEXT: vpermt2q %zmm0, %zmm31, %zmm14
2502 ; AVX512-NEXT: vpermt2q %zmm1, %zmm31, %zmm13
2503 ; AVX512-NEXT: vpermt2q %zmm6, %zmm31, %zmm19
2504 ; AVX512-NEXT: vmovdqa64 %zmm23, 448(%rsi)
2505 ; AVX512-NEXT: vmovdqa64 %zmm20, 384(%rsi)
2506 ; AVX512-NEXT: vmovdqa64 %zmm18, 320(%rsi)
2507 ; AVX512-NEXT: vmovdqa64 %zmm17, 256(%rsi)
2508 ; AVX512-NEXT: vmovdqa64 %zmm16, 192(%rsi)
2509 ; AVX512-NEXT: vmovdqa64 %zmm15, 128(%rsi)
2510 ; AVX512-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
2511 ; AVX512-NEXT: vmovaps %zmm0, 64(%rsi)
2512 ; AVX512-NEXT: vmovdqa64 %zmm11, (%rsi)
2513 ; AVX512-NEXT: vmovdqa64 %zmm26, 448(%rdx)
2514 ; AVX512-NEXT: vmovdqa64 %zmm27, 256(%rdx)
2515 ; AVX512-NEXT: vmovdqa64 %zmm28, 320(%rdx)
2516 ; AVX512-NEXT: vmovdqa64 %zmm29, 128(%rdx)
2517 ; AVX512-NEXT: vmovdqa64 %zmm30, 192(%rdx)
2518 ; AVX512-NEXT: vmovdqa64 %zmm21, (%rdx)
2519 ; AVX512-NEXT: vmovdqa64 %zmm24, 64(%rdx)
2520 ; AVX512-NEXT: vmovdqa64 %zmm22, 384(%rdx)
2521 ; AVX512-NEXT: vmovdqa64 %zmm13, 384(%rcx)
2522 ; AVX512-NEXT: vmovdqa64 %zmm14, 448(%rcx)
2523 ; AVX512-NEXT: vmovdqa64 %zmm10, 256(%rcx)
2524 ; AVX512-NEXT: vmovdqa64 %zmm12, 320(%rcx)
2525 ; AVX512-NEXT: vmovdqa64 %zmm8, 128(%rcx)
2526 ; AVX512-NEXT: vmovdqa64 %zmm9, 192(%rcx)
2527 ; AVX512-NEXT: vmovdqa64 %zmm19, (%rcx)
2528 ; AVX512-NEXT: vmovdqa64 %zmm7, 64(%rcx)
2529 ; AVX512-NEXT: vzeroupper
2531 %wide.vec = load <192 x i64>, ptr %in.vec, align 64
2532 %strided.vec0 = shufflevector <192 x i64> %wide.vec, <192 x i64> poison, <64 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21, i32 24, i32 27, i32 30, i32 33, i32 36, i32 39, i32 42, i32 45, i32 48, i32 51, i32 54, i32 57, i32 60, i32 63, i32 66, i32 69, i32 72, i32 75, i32 78, i32 81, i32 84, i32 87, i32 90, i32 93, i32 96, i32 99, i32 102, i32 105, i32 108, i32 111, i32 114, i32 117, i32 120, i32 123, i32 126, i32 129, i32 132, i32 135, i32 138, i32 141, i32 144, i32 147, i32 150, i32 153, i32 156, i32 159, i32 162, i32 165, i32 168, i32 171, i32 174, i32 177, i32 180, i32 183, i32 186, i32 189>
2533 %strided.vec1 = shufflevector <192 x i64> %wide.vec, <192 x i64> poison, <64 x i32> <i32 1, i32 4, i32 7, i32 10, i32 13, i32 16, i32 19, i32 22, i32 25, i32 28, i32 31, i32 34, i32 37, i32 40, i32 43, i32 46, i32 49, i32 52, i32 55, i32 58, i32 61, i32 64, i32 67, i32 70, i32 73, i32 76, i32 79, i32 82, i32 85, i32 88, i32 91, i32 94, i32 97, i32 100, i32 103, i32 106, i32 109, i32 112, i32 115, i32 118, i32 121, i32 124, i32 127, i32 130, i32 133, i32 136, i32 139, i32 142, i32 145, i32 148, i32 151, i32 154, i32 157, i32 160, i32 163, i32 166, i32 169, i32 172, i32 175, i32 178, i32 181, i32 184, i32 187, i32 190>
2534 %strided.vec2 = shufflevector <192 x i64> %wide.vec, <192 x i64> poison, <64 x i32> <i32 2, i32 5, i32 8, i32 11, i32 14, i32 17, i32 20, i32 23, i32 26, i32 29, i32 32, i32 35, i32 38, i32 41, i32 44, i32 47, i32 50, i32 53, i32 56, i32 59, i32 62, i32 65, i32 68, i32 71, i32 74, i32 77, i32 80, i32 83, i32 86, i32 89, i32 92, i32 95, i32 98, i32 101, i32 104, i32 107, i32 110, i32 113, i32 116, i32 119, i32 122, i32 125, i32 128, i32 131, i32 134, i32 137, i32 140, i32 143, i32 146, i32 149, i32 152, i32 155, i32 158, i32 161, i32 164, i32 167, i32 170, i32 173, i32 176, i32 179, i32 182, i32 185, i32 188, i32 191>
2535 store <64 x i64> %strided.vec0, ptr %out.vec0, align 64
2536 store <64 x i64> %strided.vec1, ptr %out.vec1, align 64
2537 store <64 x i64> %strided.vec2, ptr %out.vec2, align 64
2540 ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
2545 ; AVX2-FAST-PERLANE: {{.*}}
2548 ; AVX512BW-FAST: {{.*}}
2549 ; AVX512BW-ONLY-FAST: {{.*}}
2550 ; AVX512BW-ONLY-SLOW: {{.*}}
2551 ; AVX512BW-SLOW: {{.*}}
2552 ; AVX512DQ-FAST: {{.*}}
2553 ; AVX512DQ-SLOW: {{.*}}
2554 ; AVX512DQBW-FAST: {{.*}}
2555 ; AVX512DQBW-SLOW: {{.*}}
2557 ; AVX512F-FAST: {{.*}}
2558 ; AVX512F-ONLY-FAST: {{.*}}
2559 ; AVX512F-ONLY-SLOW: {{.*}}
2560 ; AVX512F-SLOW: {{.*}}
2563 ; FALLBACK10: {{.*}}
2564 ; FALLBACK11: {{.*}}
2565 ; FALLBACK12: {{.*}}