1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,FALLBACK0
3 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1,AVX1-ONLY,FALLBACK1
4 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
5 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
6 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
7 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
8 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
9 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
10 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
11 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
12 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
13 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
14 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
16 ; These patterns are produced by LoopVectorizer for interleaved loads.
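; As a rough illustration only (not part of the autogenerated checks), the kind
; of scalar source that the LoopVectorizer turns into the stride-5 wide load
; plus shufflevector de-interleaving exercised below is sketched in C in the
; next comment block; the struct layout and names are hypothetical, chosen only
; to show one i64 field per strided output stream.
;
;   struct S { long long f0, f1, f2, f3, f4; };          /* 5 x i64 per element */
;   void deinterleave(struct S *in, long long *o0, long long *o1,
;                     long long *o2, long long *o3, long long *o4, int n) {
;     for (int i = 0; i < n; ++i) {  /* consecutive loads at stride 5 */
;       o0[i] = in[i].f0;
;       o1[i] = in[i].f1;
;       o2[i] = in[i].f2;
;       o3[i] = in[i].f3;
;       o4[i] = in[i].f4;
;     }
;   }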
18 define void @load_i64_stride5_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4) nounwind {
19 ; SSE-LABEL: load_i64_stride5_vf2:
20 ; SSE: # %bb.0:
21 ; SSE-NEXT: movapd 64(%rdi), %xmm0
22 ; SSE-NEXT: movapd (%rdi), %xmm1
23 ; SSE-NEXT: movapd 16(%rdi), %xmm2
24 ; SSE-NEXT: movapd 32(%rdi), %xmm3
25 ; SSE-NEXT: movapd 48(%rdi), %xmm4
26 ; SSE-NEXT: movapd %xmm3, %xmm5
27 ; SSE-NEXT: movsd {{.*#+}} xmm5 = xmm1[0],xmm5[1]
28 ; SSE-NEXT: shufpd {{.*#+}} xmm1 = xmm1[1],xmm4[0]
29 ; SSE-NEXT: movsd {{.*#+}} xmm4 = xmm2[0],xmm4[1]
30 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm0[0]
31 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1]
32 ; SSE-NEXT: movapd %xmm5, (%rsi)
33 ; SSE-NEXT: movapd %xmm1, (%rdx)
34 ; SSE-NEXT: movapd %xmm4, (%rcx)
35 ; SSE-NEXT: movapd %xmm2, (%r8)
36 ; SSE-NEXT: movapd %xmm0, (%r9)
37 ; SSE-NEXT: retq
38 ;
39 ; AVX1-ONLY-LABEL: load_i64_stride5_vf2:
40 ; AVX1-ONLY: # %bb.0:
41 ; AVX1-ONLY-NEXT: vmovdqa (%rdi), %xmm0
42 ; AVX1-ONLY-NEXT: vmovdqa 16(%rdi), %xmm1
43 ; AVX1-ONLY-NEXT: vmovdqa 32(%rdi), %xmm2
44 ; AVX1-ONLY-NEXT: vmovdqa 48(%rdi), %xmm3
45 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm4 = xmm0[0,1,2,3],xmm2[4,5,6,7]
46 ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7]
47 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm3 = xmm1[0,1,2,3],xmm3[4,5,6,7]
48 ; AVX1-ONLY-NEXT: vmovdqa 64(%rdi), %xmm5
49 ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm1 = xmm1[8,9,10,11,12,13,14,15],xmm5[0,1,2,3,4,5,6,7]
50 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm5[4,5,6,7]
51 ; AVX1-ONLY-NEXT: vmovdqa %xmm4, (%rsi)
52 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, (%rdx)
53 ; AVX1-ONLY-NEXT: vmovdqa %xmm3, (%rcx)
54 ; AVX1-ONLY-NEXT: vmovdqa %xmm1, (%r8)
55 ; AVX1-ONLY-NEXT: vmovdqa %xmm2, (%r9)
56 ; AVX1-ONLY-NEXT: retq
57 ;
58 ; AVX2-ONLY-LABEL: load_i64_stride5_vf2:
59 ; AVX2-ONLY: # %bb.0:
60 ; AVX2-ONLY-NEXT: vmovaps (%rdi), %ymm0
61 ; AVX2-ONLY-NEXT: vmovdqa (%rdi), %xmm1
62 ; AVX2-ONLY-NEXT: vmovdqa 32(%rdi), %xmm2
63 ; AVX2-ONLY-NEXT: vmovdqa 48(%rdi), %xmm3
64 ; AVX2-ONLY-NEXT: vmovdqa 64(%rdi), %xmm4
65 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm5 = xmm1[0,1],xmm2[2,3]
66 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm1 = xmm1[8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7]
67 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7]
68 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm3 = mem[8,9,10,11,12,13,14,15],xmm4[0,1,2,3,4,5,6,7]
69 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3]
70 ; AVX2-ONLY-NEXT: vmovdqa %xmm5, (%rsi)
71 ; AVX2-ONLY-NEXT: vmovdqa %xmm1, (%rdx)
72 ; AVX2-ONLY-NEXT: vextractf128 $1, %ymm0, (%rcx)
73 ; AVX2-ONLY-NEXT: vmovdqa %xmm3, (%r8)
74 ; AVX2-ONLY-NEXT: vmovdqa %xmm2, (%r9)
75 ; AVX2-ONLY-NEXT: vzeroupper
76 ; AVX2-ONLY-NEXT: retq
77 ;
78 ; AVX512-LABEL: load_i64_stride5_vf2:
79 ; AVX512: # %bb.0:
80 ; AVX512-NEXT: vmovdqa (%rdi), %xmm0
81 ; AVX512-NEXT: vmovdqa 32(%rdi), %xmm1
82 ; AVX512-NEXT: vmovdqa 48(%rdi), %xmm2
83 ; AVX512-NEXT: vmovdqa 64(%rdi), %xmm3
84 ; AVX512-NEXT: vpblendd {{.*#+}} xmm4 = xmm0[0,1],xmm1[2,3]
85 ; AVX512-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7]
86 ; AVX512-NEXT: vmovaps (%rdi), %ymm2
87 ; AVX512-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],mem[2,3],ymm2[4,5],mem[6,7]
88 ; AVX512-NEXT: vpalignr {{.*#+}} xmm5 = mem[8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7]
89 ; AVX512-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3]
90 ; AVX512-NEXT: vmovdqa %xmm4, (%rsi)
91 ; AVX512-NEXT: vmovdqa %xmm0, (%rdx)
92 ; AVX512-NEXT: vextractf128 $1, %ymm2, (%rcx)
93 ; AVX512-NEXT: vmovdqa %xmm5, (%r8)
94 ; AVX512-NEXT: vmovdqa %xmm1, (%r9)
95 ; AVX512-NEXT: vzeroupper
96 ; AVX512-NEXT: retq
97 %wide.vec = load <10 x i64>, ptr %in.vec, align 64
98 %strided.vec0 = shufflevector <10 x i64> %wide.vec, <10 x i64> poison, <2 x i32> <i32 0, i32 5>
99 %strided.vec1 = shufflevector <10 x i64> %wide.vec, <10 x i64> poison, <2 x i32> <i32 1, i32 6>
100 %strided.vec2 = shufflevector <10 x i64> %wide.vec, <10 x i64> poison, <2 x i32> <i32 2, i32 7>
101 %strided.vec3 = shufflevector <10 x i64> %wide.vec, <10 x i64> poison, <2 x i32> <i32 3, i32 8>
102 %strided.vec4 = shufflevector <10 x i64> %wide.vec, <10 x i64> poison, <2 x i32> <i32 4, i32 9>
103 store <2 x i64> %strided.vec0, ptr %out.vec0, align 64
104 store <2 x i64> %strided.vec1, ptr %out.vec1, align 64
105 store <2 x i64> %strided.vec2, ptr %out.vec2, align 64
106 store <2 x i64> %strided.vec3, ptr %out.vec3, align 64
107 store <2 x i64> %strided.vec4, ptr %out.vec4, align 64
108 ret void
109 }
111 define void @load_i64_stride5_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4) nounwind {
112 ; SSE-LABEL: load_i64_stride5_vf4:
113 ; SSE: # %bb.0:
114 ; SSE-NEXT: movapd 144(%rdi), %xmm1
115 ; SSE-NEXT: movapd 64(%rdi), %xmm0
116 ; SSE-NEXT: movapd 96(%rdi), %xmm2
117 ; SSE-NEXT: movapd 128(%rdi), %xmm3
118 ; SSE-NEXT: movapd (%rdi), %xmm4
119 ; SSE-NEXT: movapd 16(%rdi), %xmm5
120 ; SSE-NEXT: movapd 32(%rdi), %xmm6
121 ; SSE-NEXT: movapd 48(%rdi), %xmm7
122 ; SSE-NEXT: movapd 80(%rdi), %xmm8
123 ; SSE-NEXT: movapd 112(%rdi), %xmm9
124 ; SSE-NEXT: movapd %xmm9, %xmm10
125 ; SSE-NEXT: movsd {{.*#+}} xmm10 = xmm8[0],xmm10[1]
126 ; SSE-NEXT: movapd %xmm6, %xmm11
127 ; SSE-NEXT: movsd {{.*#+}} xmm11 = xmm4[0],xmm11[1]
128 ; SSE-NEXT: shufpd {{.*#+}} xmm8 = xmm8[1],xmm3[0]
129 ; SSE-NEXT: shufpd {{.*#+}} xmm4 = xmm4[1],xmm7[0]
130 ; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm2[0],xmm3[1]
131 ; SSE-NEXT: movsd {{.*#+}} xmm7 = xmm5[0],xmm7[1]
132 ; SSE-NEXT: shufpd {{.*#+}} xmm5 = xmm5[1],xmm0[0]
133 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
134 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm6[0],xmm0[1]
135 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm9[0],xmm1[1]
136 ; SSE-NEXT: movapd %xmm10, 16(%rsi)
137 ; SSE-NEXT: movapd %xmm11, (%rsi)
138 ; SSE-NEXT: movapd %xmm8, 16(%rdx)
139 ; SSE-NEXT: movapd %xmm4, (%rdx)
140 ; SSE-NEXT: movapd %xmm3, 16(%rcx)
141 ; SSE-NEXT: movapd %xmm7, (%rcx)
142 ; SSE-NEXT: movapd %xmm2, 16(%r8)
143 ; SSE-NEXT: movapd %xmm5, (%r8)
144 ; SSE-NEXT: movapd %xmm1, 16(%r9)
145 ; SSE-NEXT: movapd %xmm0, (%r9)
146 ; SSE-NEXT: retq
147 ;
148 ; AVX1-ONLY-LABEL: load_i64_stride5_vf4:
149 ; AVX1-ONLY: # %bb.0:
150 ; AVX1-ONLY-NEXT: vmovapd 128(%rdi), %ymm0
151 ; AVX1-ONLY-NEXT: vmovapd 96(%rdi), %ymm1
152 ; AVX1-ONLY-NEXT: vmovapd 64(%rdi), %ymm2
153 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm3 = ymm2[0,1,2],ymm1[3]
154 ; AVX1-ONLY-NEXT: vmovapd (%rdi), %xmm4
155 ; AVX1-ONLY-NEXT: vmovdqa 16(%rdi), %xmm5
156 ; AVX1-ONLY-NEXT: vmovapd 32(%rdi), %xmm6
157 ; AVX1-ONLY-NEXT: vmovdqa 48(%rdi), %xmm7
158 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} xmm8 = xmm4[0],xmm6[1]
159 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm3 = ymm8[0,1],ymm3[2,3]
160 ; AVX1-ONLY-NEXT: vmovaps 128(%rdi), %xmm8
161 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm8, %ymm0, %ymm9
162 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm2 = ymm2[0],ymm9[0],ymm2[3],ymm9[2]
163 ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm4 = xmm4[8,9,10,11,12,13,14,15],xmm7[0,1,2,3,4,5,6,7]
164 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm2 = ymm4[0,1],ymm2[2,3]
165 ; AVX1-ONLY-NEXT: vmovaps 96(%rdi), %xmm4
166 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm8 = xmm4[0,1],xmm8[2,3]
167 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm8, %ymm0, %ymm8
168 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm7 = xmm5[0,1,2,3],xmm7[4,5,6,7]
169 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm8[4,5,6,7]
170 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm4
171 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm4 = ymm4[0],ymm0[0],ymm4[3],ymm0[2]
172 ; AVX1-ONLY-NEXT: vmovdqa 64(%rdi), %xmm8
173 ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm5 = xmm5[8,9,10,11,12,13,14,15],xmm8[0,1,2,3,4,5,6,7]
174 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm4 = ymm5[0,1],ymm4[2,3]
175 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3]
176 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} xmm1 = xmm6[0],xmm8[1]
177 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
178 ; AVX1-ONLY-NEXT: vmovapd %ymm3, (%rsi)
179 ; AVX1-ONLY-NEXT: vmovapd %ymm2, (%rdx)
180 ; AVX1-ONLY-NEXT: vmovaps %ymm7, (%rcx)
181 ; AVX1-ONLY-NEXT: vmovapd %ymm4, (%r8)
182 ; AVX1-ONLY-NEXT: vmovapd %ymm0, (%r9)
183 ; AVX1-ONLY-NEXT: vzeroupper
184 ; AVX1-ONLY-NEXT: retq
185 ;
186 ; AVX2-ONLY-LABEL: load_i64_stride5_vf4:
187 ; AVX2-ONLY: # %bb.0:
188 ; AVX2-ONLY-NEXT: vmovaps (%rdi), %ymm0
189 ; AVX2-ONLY-NEXT: vmovdqa 128(%rdi), %ymm1
190 ; AVX2-ONLY-NEXT: vmovdqa 96(%rdi), %ymm2
191 ; AVX2-ONLY-NEXT: vmovdqa 64(%rdi), %ymm3
192 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm4 = ymm3[0,1,2,3,4,5],ymm2[6,7]
193 ; AVX2-ONLY-NEXT: vmovdqa (%rdi), %xmm5
194 ; AVX2-ONLY-NEXT: vmovdqa 32(%rdi), %xmm6
195 ; AVX2-ONLY-NEXT: vmovdqa 48(%rdi), %xmm7
196 ; AVX2-ONLY-NEXT: vmovdqa 64(%rdi), %xmm8
197 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm9 = xmm5[0,1],xmm6[2,3]
198 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm4 = ymm9[0,1,2,3],ymm4[4,5,6,7]
199 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm5 = xmm5[8,9,10,11,12,13,14,15],xmm7[0,1,2,3,4,5,6,7]
200 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm3 = ymm3[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],ymm3[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23]
201 ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,1,2,1]
202 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm3 = ymm5[0,1,2,3],ymm3[4,5,6,7]
203 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7]
204 ; AVX2-ONLY-NEXT: vmovaps 96(%rdi), %xmm5
205 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm5 = xmm5[0,1],mem[2,3]
206 ; AVX2-ONLY-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm5
207 ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm5[2,3]
208 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm5 = mem[8,9,10,11,12,13,14,15],xmm8[0,1,2,3,4,5,6,7]
209 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm7 = ymm2[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],ymm2[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23]
210 ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,1,0,3]
211 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm7[4,5,6,7]
212 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5],ymm1[6,7]
213 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm2 = xmm6[0,1],xmm8[2,3]
214 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
215 ; AVX2-ONLY-NEXT: vmovdqa %ymm4, (%rsi)
216 ; AVX2-ONLY-NEXT: vmovdqa %ymm3, (%rdx)
217 ; AVX2-ONLY-NEXT: vmovaps %ymm0, (%rcx)
218 ; AVX2-ONLY-NEXT: vmovdqa %ymm5, (%r8)
219 ; AVX2-ONLY-NEXT: vmovdqa %ymm1, (%r9)
220 ; AVX2-ONLY-NEXT: vzeroupper
221 ; AVX2-ONLY-NEXT: retq
222 ;
223 ; AVX512-SLOW-LABEL: load_i64_stride5_vf4:
224 ; AVX512-SLOW: # %bb.0:
225 ; AVX512-SLOW-NEXT: vmovdqa64 (%rdi), %zmm0
226 ; AVX512-SLOW-NEXT: vmovdqa64 64(%rdi), %zmm1
227 ; AVX512-SLOW-NEXT: vmovdqa {{.*#+}} ymm2 = [0,5,10,15]
228 ; AVX512-SLOW-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
229 ; AVX512-SLOW-NEXT: vmovdqa {{.*#+}} ymm3 = <1,6,11,u>
230 ; AVX512-SLOW-NEXT: vpermi2q %zmm1, %zmm0, %zmm3
231 ; AVX512-SLOW-NEXT: vmovdqa 128(%rdi), %xmm4
232 ; AVX512-SLOW-NEXT: vpbroadcastq %xmm4, %ymm5
233 ; AVX512-SLOW-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm5[6,7]
234 ; AVX512-SLOW-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
235 ; AVX512-SLOW-NEXT: vmovdqa {{.*#+}} ymm5 = <2,7,12,u>
236 ; AVX512-SLOW-NEXT: vpermi2q %zmm1, %zmm0, %zmm5
237 ; AVX512-SLOW-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0,1,2,3,4,5],ymm4[6,7]
238 ; AVX512-SLOW-NEXT: vmovdqa {{.*#+}} ymm5 = <11,0,5,u>
239 ; AVX512-SLOW-NEXT: vpermi2q %zmm0, %zmm1, %zmm5
240 ; AVX512-SLOW-NEXT: vpbroadcastq 144(%rdi), %ymm6
241 ; AVX512-SLOW-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5],ymm6[6,7]
242 ; AVX512-SLOW-NEXT: vmovdqa {{.*#+}} ymm6 = <12,1,6,u>
243 ; AVX512-SLOW-NEXT: vpermi2q %zmm0, %zmm1, %zmm6
244 ; AVX512-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm6[0,1,2,3,4,5],mem[6,7]
245 ; AVX512-SLOW-NEXT: vmovdqa %ymm2, (%rsi)
246 ; AVX512-SLOW-NEXT: vmovdqa %ymm3, (%rdx)
247 ; AVX512-SLOW-NEXT: vmovdqa %ymm4, (%rcx)
248 ; AVX512-SLOW-NEXT: vmovdqa %ymm5, (%r8)
249 ; AVX512-SLOW-NEXT: vmovdqa %ymm0, (%r9)
250 ; AVX512-SLOW-NEXT: vzeroupper
251 ; AVX512-SLOW-NEXT: retq
252 ;
253 ; AVX512-FAST-LABEL: load_i64_stride5_vf4:
254 ; AVX512-FAST: # %bb.0:
255 ; AVX512-FAST-NEXT: vmovdqa64 (%rdi), %zmm0
256 ; AVX512-FAST-NEXT: vmovdqa64 64(%rdi), %zmm1
257 ; AVX512-FAST-NEXT: vmovdqa {{.*#+}} ymm2 = [0,5,10,15]
258 ; AVX512-FAST-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
259 ; AVX512-FAST-NEXT: vmovdqa {{.*#+}} ymm3 = <1,6,11,u>
260 ; AVX512-FAST-NEXT: vpermi2q %zmm1, %zmm0, %zmm3
261 ; AVX512-FAST-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,2,4]
262 ; AVX512-FAST-NEXT: vmovdqa 128(%rdi), %ymm5
263 ; AVX512-FAST-NEXT: vpermi2q %ymm5, %ymm3, %ymm4
264 ; AVX512-FAST-NEXT: vinserti128 $1, 128(%rdi), %ymm0, %ymm3
265 ; AVX512-FAST-NEXT: vmovdqa {{.*#+}} ymm6 = <2,7,12,u>
266 ; AVX512-FAST-NEXT: vpermi2q %zmm1, %zmm0, %zmm6
267 ; AVX512-FAST-NEXT: vpblendd {{.*#+}} ymm3 = ymm6[0,1,2,3,4,5],ymm3[6,7]
268 ; AVX512-FAST-NEXT: vmovdqa {{.*#+}} ymm6 = <11,0,5,u>
269 ; AVX512-FAST-NEXT: vpermi2q %zmm0, %zmm1, %zmm6
270 ; AVX512-FAST-NEXT: vpbroadcastq 144(%rdi), %ymm7
271 ; AVX512-FAST-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5],ymm7[6,7]
272 ; AVX512-FAST-NEXT: vmovdqa {{.*#+}} ymm7 = <12,1,6,u>
273 ; AVX512-FAST-NEXT: vpermi2q %zmm0, %zmm1, %zmm7
274 ; AVX512-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm7[0,1,2,3,4,5],ymm5[6,7]
275 ; AVX512-FAST-NEXT: vmovdqa %ymm2, (%rsi)
276 ; AVX512-FAST-NEXT: vmovdqa %ymm4, (%rdx)
277 ; AVX512-FAST-NEXT: vmovdqa %ymm3, (%rcx)
278 ; AVX512-FAST-NEXT: vmovdqa %ymm6, (%r8)
279 ; AVX512-FAST-NEXT: vmovdqa %ymm0, (%r9)
280 ; AVX512-FAST-NEXT: vzeroupper
281 ; AVX512-FAST-NEXT: retq
282 %wide.vec = load <20 x i64>, ptr %in.vec, align 64
283 %strided.vec0 = shufflevector <20 x i64> %wide.vec, <20 x i64> poison, <4 x i32> <i32 0, i32 5, i32 10, i32 15>
284 %strided.vec1 = shufflevector <20 x i64> %wide.vec, <20 x i64> poison, <4 x i32> <i32 1, i32 6, i32 11, i32 16>
285 %strided.vec2 = shufflevector <20 x i64> %wide.vec, <20 x i64> poison, <4 x i32> <i32 2, i32 7, i32 12, i32 17>
286 %strided.vec3 = shufflevector <20 x i64> %wide.vec, <20 x i64> poison, <4 x i32> <i32 3, i32 8, i32 13, i32 18>
287 %strided.vec4 = shufflevector <20 x i64> %wide.vec, <20 x i64> poison, <4 x i32> <i32 4, i32 9, i32 14, i32 19>
288 store <4 x i64> %strided.vec0, ptr %out.vec0, align 64
289 store <4 x i64> %strided.vec1, ptr %out.vec1, align 64
290 store <4 x i64> %strided.vec2, ptr %out.vec2, align 64
291 store <4 x i64> %strided.vec3, ptr %out.vec3, align 64
292 store <4 x i64> %strided.vec4, ptr %out.vec4, align 64
293 ret void
294 }
296 define void @load_i64_stride5_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4) nounwind {
297 ; SSE-LABEL: load_i64_stride5_vf8:
298 ; SSE: # %bb.0:
299 ; SSE-NEXT: movapd 304(%rdi), %xmm2
300 ; SSE-NEXT: movapd 64(%rdi), %xmm1
301 ; SSE-NEXT: movapd 224(%rdi), %xmm0
302 ; SSE-NEXT: movapd 256(%rdi), %xmm4
303 ; SSE-NEXT: movapd 176(%rdi), %xmm3
304 ; SSE-NEXT: movapd 288(%rdi), %xmm7
305 ; SSE-NEXT: movapd 208(%rdi), %xmm6
306 ; SSE-NEXT: movapd (%rdi), %xmm9
307 ; SSE-NEXT: movapd 16(%rdi), %xmm5
308 ; SSE-NEXT: movapd 32(%rdi), %xmm14
309 ; SSE-NEXT: movapd 48(%rdi), %xmm8
310 ; SSE-NEXT: movapd 240(%rdi), %xmm11
311 ; SSE-NEXT: movapd 272(%rdi), %xmm13
312 ; SSE-NEXT: movapd 160(%rdi), %xmm10
313 ; SSE-NEXT: movapd 192(%rdi), %xmm15
314 ; SSE-NEXT: movapd %xmm15, %xmm12
315 ; SSE-NEXT: movsd {{.*#+}} xmm12 = xmm10[0],xmm12[1]
316 ; SSE-NEXT: shufpd {{.*#+}} xmm10 = xmm10[1],xmm6[0]
317 ; SSE-NEXT: movsd {{.*#+}} xmm6 = xmm3[0],xmm6[1]
318 ; SSE-NEXT: shufpd {{.*#+}} xmm3 = xmm3[1],xmm0[0]
319 ; SSE-NEXT: movapd %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
320 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm15[0],xmm0[1]
321 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
322 ; SSE-NEXT: movapd %xmm14, %xmm15
323 ; SSE-NEXT: movsd {{.*#+}} xmm15 = xmm9[0],xmm15[1]
324 ; SSE-NEXT: shufpd {{.*#+}} xmm9 = xmm9[1],xmm8[0]
325 ; SSE-NEXT: movsd {{.*#+}} xmm8 = xmm5[0],xmm8[1]
326 ; SSE-NEXT: shufpd {{.*#+}} xmm5 = xmm5[1],xmm1[0]
327 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm14[0],xmm1[1]
328 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
329 ; SSE-NEXT: movapd %xmm13, %xmm14
330 ; SSE-NEXT: movsd {{.*#+}} xmm14 = xmm11[0],xmm14[1]
331 ; SSE-NEXT: shufpd {{.*#+}} xmm11 = xmm11[1],xmm7[0]
332 ; SSE-NEXT: movsd {{.*#+}} xmm7 = xmm4[0],xmm7[1]
333 ; SSE-NEXT: shufpd {{.*#+}} xmm4 = xmm4[1],xmm2[0]
334 ; SSE-NEXT: movapd %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
335 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm13[0],xmm2[1]
336 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
337 ; SSE-NEXT: movapd 80(%rdi), %xmm13
338 ; SSE-NEXT: movapd 112(%rdi), %xmm4
339 ; SSE-NEXT: movapd %xmm4, %xmm2
340 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm13[0],xmm2[1]
341 ; SSE-NEXT: movapd 128(%rdi), %xmm0
342 ; SSE-NEXT: shufpd {{.*#+}} xmm13 = xmm13[1],xmm0[0]
343 ; SSE-NEXT: movapd 96(%rdi), %xmm1
344 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
345 ; SSE-NEXT: movapd 144(%rdi), %xmm3
346 ; SSE-NEXT: shufpd {{.*#+}} xmm1 = xmm1[1],xmm3[0]
347 ; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm4[0],xmm3[1]
348 ; SSE-NEXT: movapd %xmm2, 16(%rsi)
349 ; SSE-NEXT: movapd %xmm14, 48(%rsi)
350 ; SSE-NEXT: movapd %xmm15, (%rsi)
351 ; SSE-NEXT: movapd %xmm12, 32(%rsi)
352 ; SSE-NEXT: movapd %xmm13, 16(%rdx)
353 ; SSE-NEXT: movapd %xmm11, 48(%rdx)
354 ; SSE-NEXT: movapd %xmm9, (%rdx)
355 ; SSE-NEXT: movapd %xmm10, 32(%rdx)
356 ; SSE-NEXT: movapd %xmm0, 16(%rcx)
357 ; SSE-NEXT: movapd %xmm7, 48(%rcx)
358 ; SSE-NEXT: movapd %xmm8, (%rcx)
359 ; SSE-NEXT: movapd %xmm6, 32(%rcx)
360 ; SSE-NEXT: movapd %xmm1, 16(%r8)
361 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
362 ; SSE-NEXT: movaps %xmm0, 48(%r8)
363 ; SSE-NEXT: movapd %xmm5, (%r8)
364 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
365 ; SSE-NEXT: movaps %xmm0, 32(%r8)
366 ; SSE-NEXT: movapd %xmm3, 16(%r9)
367 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
368 ; SSE-NEXT: movaps %xmm0, 48(%r9)
369 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
370 ; SSE-NEXT: movaps %xmm0, (%r9)
371 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
372 ; SSE-NEXT: movaps %xmm0, 32(%r9)
373 ; SSE-NEXT: retq
374 ;
375 ; AVX1-ONLY-LABEL: load_i64_stride5_vf8:
376 ; AVX1-ONLY: # %bb.0:
377 ; AVX1-ONLY-NEXT: vmovapd 128(%rdi), %ymm1
378 ; AVX1-ONLY-NEXT: vmovapd 256(%rdi), %ymm0
379 ; AVX1-ONLY-NEXT: vmovapd 224(%rdi), %ymm9
380 ; AVX1-ONLY-NEXT: vmovapd 96(%rdi), %ymm2
381 ; AVX1-ONLY-NEXT: vmovapd 64(%rdi), %ymm7
382 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm3 = ymm7[0,1,2],ymm2[3]
383 ; AVX1-ONLY-NEXT: vmovapd (%rdi), %xmm10
384 ; AVX1-ONLY-NEXT: vmovdqa 16(%rdi), %xmm8
385 ; AVX1-ONLY-NEXT: vmovapd 32(%rdi), %xmm4
386 ; AVX1-ONLY-NEXT: vmovdqa 48(%rdi), %xmm11
387 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} xmm5 = xmm10[0],xmm4[1]
388 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm3 = ymm5[0,1],ymm3[2,3]
389 ; AVX1-ONLY-NEXT: vmovupd %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
390 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm6 = ymm9[0,1,2],ymm0[3]
391 ; AVX1-ONLY-NEXT: vmovapd %ymm0, %ymm3
392 ; AVX1-ONLY-NEXT: vmovapd 192(%rdi), %xmm5
393 ; AVX1-ONLY-NEXT: vmovapd 160(%rdi), %xmm12
394 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} xmm13 = xmm12[0],xmm5[1]
395 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm6 = ymm13[0,1],ymm6[2,3]
396 ; AVX1-ONLY-NEXT: vmovaps 128(%rdi), %xmm13
397 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm13, %ymm0, %ymm14
398 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm7 = ymm7[0],ymm14[0],ymm7[3],ymm14[2]
399 ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm10 = xmm10[8,9,10,11,12,13,14,15],xmm11[0,1,2,3,4,5,6,7]
400 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm7 = ymm10[0,1],ymm7[2,3]
401 ; AVX1-ONLY-NEXT: vmovaps 288(%rdi), %xmm14
402 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm14, %ymm0, %ymm10
403 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm9 = ymm9[0],ymm10[0],ymm9[3],ymm10[2]
404 ; AVX1-ONLY-NEXT: vmovdqa 208(%rdi), %xmm15
405 ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm10 = xmm12[8,9,10,11,12,13,14,15],xmm15[0,1,2,3,4,5,6,7]
406 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm9 = ymm10[0,1],ymm9[2,3]
407 ; AVX1-ONLY-NEXT: vmovaps 96(%rdi), %xmm12
408 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm10 = xmm12[0,1],xmm13[2,3]
409 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm10, %ymm0, %ymm10
410 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm11 = xmm8[0,1,2,3],xmm11[4,5,6,7]
411 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm10 = ymm11[0,1,2,3],ymm10[4,5,6,7]
412 ; AVX1-ONLY-NEXT: vmovaps 256(%rdi), %xmm11
413 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm13 = xmm11[0,1],xmm14[2,3]
414 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm13, %ymm0, %ymm13
415 ; AVX1-ONLY-NEXT: vmovdqa 176(%rdi), %xmm14
416 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm15 = xmm14[0,1,2,3],xmm15[4,5,6,7]
417 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm13 = ymm15[0,1,2,3],ymm13[4,5,6,7]
418 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm12, %ymm0, %ymm12
419 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm12 = ymm12[0],ymm1[0],ymm12[3],ymm1[2]
420 ; AVX1-ONLY-NEXT: vmovdqa 64(%rdi), %xmm15
421 ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm8 = xmm8[8,9,10,11,12,13,14,15],xmm15[0,1,2,3,4,5,6,7]
422 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm8 = ymm8[0,1],ymm12[2,3]
423 ; AVX1-ONLY-NEXT: vmovapd 288(%rdi), %ymm12
424 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm11, %ymm0, %ymm11
425 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm11 = ymm11[0],ymm12[0],ymm11[3],ymm12[2]
426 ; AVX1-ONLY-NEXT: vmovdqa 224(%rdi), %xmm0
427 ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm14 = xmm14[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
428 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm11 = ymm14[0,1],ymm11[2,3]
429 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3]
430 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} xmm2 = xmm4[0],xmm15[1]
431 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3]
432 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm2 = ymm3[0,1,2],ymm12[3]
433 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} xmm0 = xmm5[0],xmm0[1]
434 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm2[2,3]
435 ; AVX1-ONLY-NEXT: vmovapd %ymm6, 32(%rsi)
436 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
437 ; AVX1-ONLY-NEXT: vmovaps %ymm2, (%rsi)
438 ; AVX1-ONLY-NEXT: vmovapd %ymm9, 32(%rdx)
439 ; AVX1-ONLY-NEXT: vmovapd %ymm7, (%rdx)
440 ; AVX1-ONLY-NEXT: vmovaps %ymm13, 32(%rcx)
441 ; AVX1-ONLY-NEXT: vmovaps %ymm10, (%rcx)
442 ; AVX1-ONLY-NEXT: vmovapd %ymm11, 32(%r8)
443 ; AVX1-ONLY-NEXT: vmovapd %ymm8, (%r8)
444 ; AVX1-ONLY-NEXT: vmovapd %ymm0, 32(%r9)
445 ; AVX1-ONLY-NEXT: vmovapd %ymm1, (%r9)
446 ; AVX1-ONLY-NEXT: vzeroupper
447 ; AVX1-ONLY-NEXT: retq
448 ;
449 ; AVX2-ONLY-LABEL: load_i64_stride5_vf8:
450 ; AVX2-ONLY: # %bb.0:
451 ; AVX2-ONLY-NEXT: vmovdqa 160(%rdi), %ymm9
452 ; AVX2-ONLY-NEXT: vmovdqa (%rdi), %ymm11
453 ; AVX2-ONLY-NEXT: vmovdqa 288(%rdi), %ymm0
454 ; AVX2-ONLY-NEXT: vmovdqa 128(%rdi), %ymm2
455 ; AVX2-ONLY-NEXT: vmovdqa 256(%rdi), %ymm1
456 ; AVX2-ONLY-NEXT: vmovdqa 224(%rdi), %ymm12
457 ; AVX2-ONLY-NEXT: vmovdqa 96(%rdi), %ymm3
458 ; AVX2-ONLY-NEXT: vmovdqa 64(%rdi), %ymm8
459 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm4 = ymm8[0,1,2,3,4,5],ymm3[6,7]
460 ; AVX2-ONLY-NEXT: vmovdqa (%rdi), %xmm13
461 ; AVX2-ONLY-NEXT: vmovdqa 32(%rdi), %xmm5
462 ; AVX2-ONLY-NEXT: vmovdqa 48(%rdi), %xmm14
463 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm6 = xmm13[0,1],xmm5[2,3]
464 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm4 = ymm6[0,1,2,3],ymm4[4,5,6,7]
465 ; AVX2-ONLY-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
466 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm7 = ymm12[0,1,2,3,4,5],ymm1[6,7]
467 ; AVX2-ONLY-NEXT: vmovdqa 192(%rdi), %xmm6
468 ; AVX2-ONLY-NEXT: vmovdqa 160(%rdi), %xmm15
469 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm10 = xmm15[0,1],xmm6[2,3]
470 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm7 = ymm10[0,1,2,3],ymm7[4,5,6,7]
471 ; AVX2-ONLY-NEXT: vmovdqa 64(%rdi), %xmm10
472 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm13 = xmm13[8,9,10,11,12,13,14,15],xmm14[0,1,2,3,4,5,6,7]
473 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm8 = ymm8[8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7],ymm8[24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23]
474 ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,1,2,1]
475 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm8 = ymm13[0,1,2,3],ymm8[4,5,6,7]
476 ; AVX2-ONLY-NEXT: vmovdqa 208(%rdi), %xmm13
477 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm13 = xmm15[8,9,10,11,12,13,14,15],xmm13[0,1,2,3,4,5,6,7]
478 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm12 = ymm12[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],ymm12[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23]
479 ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm12 = ymm12[0,1,2,1]
480 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm12 = ymm13[0,1,2,3],ymm12[4,5,6,7]
481 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm11 = ymm11[0,1],mem[2,3],ymm11[4,5],mem[6,7]
482 ; AVX2-ONLY-NEXT: vmovdqa 96(%rdi), %xmm13
483 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm13 = xmm13[0,1],mem[2,3]
484 ; AVX2-ONLY-NEXT: vinserti128 $1, %xmm13, %ymm0, %ymm13
485 ; AVX2-ONLY-NEXT: vperm2i128 {{.*#+}} ymm11 = ymm11[2,3],ymm13[2,3]
486 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm9 = ymm9[0,1],mem[2,3],ymm9[4,5],mem[6,7]
487 ; AVX2-ONLY-NEXT: vmovdqa 256(%rdi), %xmm13
488 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm13 = xmm13[0,1],mem[2,3]
489 ; AVX2-ONLY-NEXT: vinserti128 $1, %xmm13, %ymm0, %ymm13
490 ; AVX2-ONLY-NEXT: vperm2i128 {{.*#+}} ymm9 = ymm9[2,3],ymm13[2,3]
491 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm13 = mem[8,9,10,11,12,13,14,15],xmm10[0,1,2,3,4,5,6,7]
492 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm14 = ymm3[8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7],ymm3[24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23]
493 ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm14 = ymm14[0,1,0,3]
494 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm13 = ymm13[0,1,2,3],ymm14[4,5,6,7]
495 ; AVX2-ONLY-NEXT: vmovdqa 224(%rdi), %xmm14
496 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm15 = ymm1[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],ymm1[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23]
497 ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm15 = ymm15[0,1,0,3]
498 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm4 = mem[8,9,10,11,12,13,14,15],xmm14[0,1,2,3,4,5,6,7]
499 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm15[4,5,6,7]
500 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4,5],ymm2[6,7]
501 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm3 = xmm5[0,1],xmm10[2,3]
502 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
503 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
504 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm1 = xmm6[0,1],xmm14[2,3]
505 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
506 ; AVX2-ONLY-NEXT: vmovdqa %ymm7, 32(%rsi)
507 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
508 ; AVX2-ONLY-NEXT: vmovaps %ymm1, (%rsi)
509 ; AVX2-ONLY-NEXT: vmovdqa %ymm12, 32(%rdx)
510 ; AVX2-ONLY-NEXT: vmovdqa %ymm8, (%rdx)
511 ; AVX2-ONLY-NEXT: vmovdqa %ymm9, 32(%rcx)
512 ; AVX2-ONLY-NEXT: vmovdqa %ymm11, (%rcx)
513 ; AVX2-ONLY-NEXT: vmovdqa %ymm4, 32(%r8)
514 ; AVX2-ONLY-NEXT: vmovdqa %ymm13, (%r8)
515 ; AVX2-ONLY-NEXT: vmovdqa %ymm0, 32(%r9)
516 ; AVX2-ONLY-NEXT: vmovdqa %ymm2, (%r9)
517 ; AVX2-ONLY-NEXT: vzeroupper
518 ; AVX2-ONLY-NEXT: retq
519 ;
520 ; AVX512F-LABEL: load_i64_stride5_vf8:
521 ; AVX512F: # %bb.0:
522 ; AVX512F-NEXT: vmovdqa64 256(%rdi), %zmm0
523 ; AVX512F-NEXT: vmovdqa64 (%rdi), %zmm1
524 ; AVX512F-NEXT: vmovdqa64 64(%rdi), %zmm2
525 ; AVX512F-NEXT: vmovdqa64 128(%rdi), %zmm3
526 ; AVX512F-NEXT: vmovdqa64 192(%rdi), %zmm4
527 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm5 = [12,1,6,0,12,1,6,0]
528 ; AVX512F-NEXT: # zmm5 = mem[0,1,2,3,0,1,2,3]
529 ; AVX512F-NEXT: vpermi2q %zmm3, %zmm4, %zmm5
530 ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm6 = [0,5,10,15]
531 ; AVX512F-NEXT: vpermi2q %zmm2, %zmm1, %zmm6
532 ; AVX512F-NEXT: vshufi64x2 {{.*#+}} zmm5 = zmm6[0,1,2,3],zmm5[4,5,6,7]
533 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm6 = [0,1,2,3,4,5,6,11]
534 ; AVX512F-NEXT: vpermi2q %zmm0, %zmm5, %zmm6
535 ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = <1,6,11,u>
536 ; AVX512F-NEXT: vpermi2q %zmm2, %zmm1, %zmm5
537 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm7 = [5,10,15,0,5,10,15,0]
538 ; AVX512F-NEXT: # zmm7 = mem[0,1,2,3,0,1,2,3]
539 ; AVX512F-NEXT: vpermi2q %zmm4, %zmm3, %zmm7
540 ; AVX512F-NEXT: movb $7, %al
541 ; AVX512F-NEXT: kmovw %eax, %k1
542 ; AVX512F-NEXT: vmovdqa64 %zmm5, %zmm7 {%k1}
543 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm5 = [0,1,2,3,4,5,6,12]
544 ; AVX512F-NEXT: vpermi2q %zmm0, %zmm7, %zmm5
545 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm7 = [6,11,0,1,6,11,0,1]
546 ; AVX512F-NEXT: # zmm7 = mem[0,1,2,3,0,1,2,3]
547 ; AVX512F-NEXT: vpermi2q %zmm4, %zmm3, %zmm7
548 ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm8 = <2,7,12,u>
549 ; AVX512F-NEXT: vpermi2q %zmm2, %zmm1, %zmm8
550 ; AVX512F-NEXT: movb $56, %al
551 ; AVX512F-NEXT: kmovw %eax, %k1
552 ; AVX512F-NEXT: vmovdqa64 %zmm7, %zmm8 {%k1}
553 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm7 = [0,1,2,3,4,5,8,13]
554 ; AVX512F-NEXT: vpermi2q %zmm0, %zmm8, %zmm7
555 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = [7,12,0,2,7,12,0,2]
556 ; AVX512F-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3]
557 ; AVX512F-NEXT: vpermi2q %zmm4, %zmm3, %zmm8
558 ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm9 = <11,0,5,u>
559 ; AVX512F-NEXT: vpermi2q %zmm1, %zmm2, %zmm9
560 ; AVX512F-NEXT: vmovdqa64 %zmm8, %zmm9 {%k1}
561 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm8 = [0,1,2,3,4,5,9,14]
562 ; AVX512F-NEXT: vpermi2q %zmm0, %zmm9, %zmm8
563 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm9 = [0,5,0,11,0,5,0,11]
564 ; AVX512F-NEXT: # zmm9 = mem[0,1,2,3,0,1,2,3]
565 ; AVX512F-NEXT: vpermi2q %zmm3, %zmm4, %zmm9
566 ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = <12,1,6,u>
567 ; AVX512F-NEXT: vpermi2q %zmm1, %zmm2, %zmm3
568 ; AVX512F-NEXT: vmovdqa64 %zmm9, %zmm3 {%k1}
569 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm1 = [0,1,2,3,4,5,10,15]
570 ; AVX512F-NEXT: vpermi2q %zmm0, %zmm3, %zmm1
571 ; AVX512F-NEXT: vmovdqa64 %zmm6, (%rsi)
572 ; AVX512F-NEXT: vmovdqa64 %zmm5, (%rdx)
573 ; AVX512F-NEXT: vmovdqa64 %zmm7, (%rcx)
574 ; AVX512F-NEXT: vmovdqa64 %zmm8, (%r8)
575 ; AVX512F-NEXT: vmovdqa64 %zmm1, (%r9)
576 ; AVX512F-NEXT: vzeroupper
577 ; AVX512F-NEXT: retq
578 ;
579 ; AVX512BW-LABEL: load_i64_stride5_vf8:
580 ; AVX512BW: # %bb.0:
581 ; AVX512BW-NEXT: vmovdqa64 256(%rdi), %zmm0
582 ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm1
583 ; AVX512BW-NEXT: vmovdqa64 64(%rdi), %zmm2
584 ; AVX512BW-NEXT: vmovdqa64 128(%rdi), %zmm3
585 ; AVX512BW-NEXT: vmovdqa64 192(%rdi), %zmm4
586 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm5 = [12,1,6,0,12,1,6,0]
587 ; AVX512BW-NEXT: # zmm5 = mem[0,1,2,3,0,1,2,3]
588 ; AVX512BW-NEXT: vpermi2q %zmm3, %zmm4, %zmm5
589 ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm6 = [0,5,10,15]
590 ; AVX512BW-NEXT: vpermi2q %zmm2, %zmm1, %zmm6
591 ; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm5 = zmm6[0,1,2,3],zmm5[4,5,6,7]
592 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm6 = [0,1,2,3,4,5,6,11]
593 ; AVX512BW-NEXT: vpermi2q %zmm0, %zmm5, %zmm6
594 ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm5 = <1,6,11,u>
595 ; AVX512BW-NEXT: vpermi2q %zmm2, %zmm1, %zmm5
596 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm7 = [5,10,15,0,5,10,15,0]
597 ; AVX512BW-NEXT: # zmm7 = mem[0,1,2,3,0,1,2,3]
598 ; AVX512BW-NEXT: vpermi2q %zmm4, %zmm3, %zmm7
599 ; AVX512BW-NEXT: movb $7, %al
600 ; AVX512BW-NEXT: kmovd %eax, %k1
601 ; AVX512BW-NEXT: vmovdqa64 %zmm5, %zmm7 {%k1}
602 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm5 = [0,1,2,3,4,5,6,12]
603 ; AVX512BW-NEXT: vpermi2q %zmm0, %zmm7, %zmm5
604 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm7 = [6,11,0,1,6,11,0,1]
605 ; AVX512BW-NEXT: # zmm7 = mem[0,1,2,3,0,1,2,3]
606 ; AVX512BW-NEXT: vpermi2q %zmm4, %zmm3, %zmm7
607 ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm8 = <2,7,12,u>
608 ; AVX512BW-NEXT: vpermi2q %zmm2, %zmm1, %zmm8
609 ; AVX512BW-NEXT: movb $56, %al
610 ; AVX512BW-NEXT: kmovd %eax, %k1
611 ; AVX512BW-NEXT: vmovdqa64 %zmm7, %zmm8 {%k1}
612 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm7 = [0,1,2,3,4,5,8,13]
613 ; AVX512BW-NEXT: vpermi2q %zmm0, %zmm8, %zmm7
614 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = [7,12,0,2,7,12,0,2]
615 ; AVX512BW-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3]
616 ; AVX512BW-NEXT: vpermi2q %zmm4, %zmm3, %zmm8
617 ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm9 = <11,0,5,u>
618 ; AVX512BW-NEXT: vpermi2q %zmm1, %zmm2, %zmm9
619 ; AVX512BW-NEXT: vmovdqa64 %zmm8, %zmm9 {%k1}
620 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm8 = [0,1,2,3,4,5,9,14]
621 ; AVX512BW-NEXT: vpermi2q %zmm0, %zmm9, %zmm8
622 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm9 = [0,5,0,11,0,5,0,11]
623 ; AVX512BW-NEXT: # zmm9 = mem[0,1,2,3,0,1,2,3]
624 ; AVX512BW-NEXT: vpermi2q %zmm3, %zmm4, %zmm9
625 ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm3 = <12,1,6,u>
626 ; AVX512BW-NEXT: vpermi2q %zmm1, %zmm2, %zmm3
627 ; AVX512BW-NEXT: vmovdqa64 %zmm9, %zmm3 {%k1}
628 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [0,1,2,3,4,5,10,15]
629 ; AVX512BW-NEXT: vpermi2q %zmm0, %zmm3, %zmm1
630 ; AVX512BW-NEXT: vmovdqa64 %zmm6, (%rsi)
631 ; AVX512BW-NEXT: vmovdqa64 %zmm5, (%rdx)
632 ; AVX512BW-NEXT: vmovdqa64 %zmm7, (%rcx)
633 ; AVX512BW-NEXT: vmovdqa64 %zmm8, (%r8)
634 ; AVX512BW-NEXT: vmovdqa64 %zmm1, (%r9)
635 ; AVX512BW-NEXT: vzeroupper
636 ; AVX512BW-NEXT: retq
637 %wide.vec = load <40 x i64>, ptr %in.vec, align 64
638 %strided.vec0 = shufflevector <40 x i64> %wide.vec, <40 x i64> poison, <8 x i32> <i32 0, i32 5, i32 10, i32 15, i32 20, i32 25, i32 30, i32 35>
639 %strided.vec1 = shufflevector <40 x i64> %wide.vec, <40 x i64> poison, <8 x i32> <i32 1, i32 6, i32 11, i32 16, i32 21, i32 26, i32 31, i32 36>
640 %strided.vec2 = shufflevector <40 x i64> %wide.vec, <40 x i64> poison, <8 x i32> <i32 2, i32 7, i32 12, i32 17, i32 22, i32 27, i32 32, i32 37>
641 %strided.vec3 = shufflevector <40 x i64> %wide.vec, <40 x i64> poison, <8 x i32> <i32 3, i32 8, i32 13, i32 18, i32 23, i32 28, i32 33, i32 38>
642 %strided.vec4 = shufflevector <40 x i64> %wide.vec, <40 x i64> poison, <8 x i32> <i32 4, i32 9, i32 14, i32 19, i32 24, i32 29, i32 34, i32 39>
643 store <8 x i64> %strided.vec0, ptr %out.vec0, align 64
644 store <8 x i64> %strided.vec1, ptr %out.vec1, align 64
645 store <8 x i64> %strided.vec2, ptr %out.vec2, align 64
646 store <8 x i64> %strided.vec3, ptr %out.vec3, align 64
647 store <8 x i64> %strided.vec4, ptr %out.vec4, align 64
648 ret void
649 }
651 define void @load_i64_stride5_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4) nounwind {
652 ; SSE-LABEL: load_i64_stride5_vf16:
653 ; SSE: # %bb.0:
654 ; SSE-NEXT: subq $280, %rsp # imm = 0x118
655 ; SSE-NEXT: movapd 224(%rdi), %xmm3
656 ; SSE-NEXT: movapd 144(%rdi), %xmm2
657 ; SSE-NEXT: movapd 64(%rdi), %xmm1
658 ; SSE-NEXT: movapd 176(%rdi), %xmm4
659 ; SSE-NEXT: movapd 96(%rdi), %xmm5
660 ; SSE-NEXT: movapd 208(%rdi), %xmm7
661 ; SSE-NEXT: movapd 128(%rdi), %xmm8
662 ; SSE-NEXT: movapd (%rdi), %xmm10
663 ; SSE-NEXT: movapd 16(%rdi), %xmm6
664 ; SSE-NEXT: movapd 32(%rdi), %xmm14
665 ; SSE-NEXT: movapd 48(%rdi), %xmm9
666 ; SSE-NEXT: movapd 160(%rdi), %xmm11
667 ; SSE-NEXT: movapd 192(%rdi), %xmm13
668 ; SSE-NEXT: movapd 80(%rdi), %xmm12
669 ; SSE-NEXT: movapd 112(%rdi), %xmm0
670 ; SSE-NEXT: movapd %xmm14, %xmm15
671 ; SSE-NEXT: movsd {{.*#+}} xmm15 = xmm10[0],xmm15[1]
672 ; SSE-NEXT: movapd %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
673 ; SSE-NEXT: shufpd {{.*#+}} xmm10 = xmm10[1],xmm9[0]
674 ; SSE-NEXT: movapd %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
675 ; SSE-NEXT: movsd {{.*#+}} xmm9 = xmm6[0],xmm9[1]
676 ; SSE-NEXT: movapd %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
677 ; SSE-NEXT: shufpd {{.*#+}} xmm6 = xmm6[1],xmm1[0]
678 ; SSE-NEXT: movapd %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
679 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm14[0],xmm1[1]
680 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
681 ; SSE-NEXT: movapd %xmm0, %xmm1
682 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm12[0],xmm1[1]
683 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
684 ; SSE-NEXT: shufpd {{.*#+}} xmm12 = xmm12[1],xmm8[0]
685 ; SSE-NEXT: movapd %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
686 ; SSE-NEXT: movsd {{.*#+}} xmm8 = xmm5[0],xmm8[1]
687 ; SSE-NEXT: movapd %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
688 ; SSE-NEXT: shufpd {{.*#+}} xmm5 = xmm5[1],xmm2[0]
689 ; SSE-NEXT: movapd %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
690 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
691 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
692 ; SSE-NEXT: movapd %xmm13, %xmm0
693 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm11[0],xmm0[1]
694 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
695 ; SSE-NEXT: shufpd {{.*#+}} xmm11 = xmm11[1],xmm7[0]
696 ; SSE-NEXT: movapd %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
697 ; SSE-NEXT: movsd {{.*#+}} xmm7 = xmm4[0],xmm7[1]
698 ; SSE-NEXT: movapd %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
699 ; SSE-NEXT: shufpd {{.*#+}} xmm4 = xmm4[1],xmm3[0]
700 ; SSE-NEXT: movapd %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
701 ; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm13[0],xmm3[1]
702 ; SSE-NEXT: movapd %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
703 ; SSE-NEXT: movapd 240(%rdi), %xmm2
704 ; SSE-NEXT: movapd 272(%rdi), %xmm0
705 ; SSE-NEXT: movapd %xmm0, %xmm1
706 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
707 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
708 ; SSE-NEXT: movapd 288(%rdi), %xmm1
709 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
710 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
711 ; SSE-NEXT: movapd 256(%rdi), %xmm2
712 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
713 ; SSE-NEXT: movapd %xmm1, (%rsp) # 16-byte Spill
714 ; SSE-NEXT: movapd 304(%rdi), %xmm1
715 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
716 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
717 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
718 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
719 ; SSE-NEXT: movapd 320(%rdi), %xmm15
720 ; SSE-NEXT: movapd 352(%rdi), %xmm0
721 ; SSE-NEXT: movapd %xmm0, %xmm1
722 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm15[0],xmm1[1]
723 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
724 ; SSE-NEXT: movapd 368(%rdi), %xmm1
725 ; SSE-NEXT: shufpd {{.*#+}} xmm15 = xmm15[1],xmm1[0]
726 ; SSE-NEXT: movapd 336(%rdi), %xmm2
727 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
728 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
729 ; SSE-NEXT: movapd 384(%rdi), %xmm1
730 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
731 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
732 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
733 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
734 ; SSE-NEXT: movapd 400(%rdi), %xmm11
735 ; SSE-NEXT: movapd 432(%rdi), %xmm0
736 ; SSE-NEXT: movapd %xmm0, %xmm13
737 ; SSE-NEXT: movsd {{.*#+}} xmm13 = xmm11[0],xmm13[1]
738 ; SSE-NEXT: movapd 448(%rdi), %xmm12
739 ; SSE-NEXT: shufpd {{.*#+}} xmm11 = xmm11[1],xmm12[0]
740 ; SSE-NEXT: movapd 416(%rdi), %xmm14
741 ; SSE-NEXT: movsd {{.*#+}} xmm12 = xmm14[0],xmm12[1]
742 ; SSE-NEXT: movapd 464(%rdi), %xmm1
743 ; SSE-NEXT: shufpd {{.*#+}} xmm14 = xmm14[1],xmm1[0]
744 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
745 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
746 ; SSE-NEXT: movapd 480(%rdi), %xmm4
747 ; SSE-NEXT: movapd 512(%rdi), %xmm0
748 ; SSE-NEXT: movapd %xmm0, %xmm6
749 ; SSE-NEXT: movsd {{.*#+}} xmm6 = xmm4[0],xmm6[1]
750 ; SSE-NEXT: movapd 528(%rdi), %xmm7
751 ; SSE-NEXT: shufpd {{.*#+}} xmm4 = xmm4[1],xmm7[0]
752 ; SSE-NEXT: movapd 496(%rdi), %xmm9
753 ; SSE-NEXT: movsd {{.*#+}} xmm7 = xmm9[0],xmm7[1]
754 ; SSE-NEXT: movapd 544(%rdi), %xmm10
755 ; SSE-NEXT: shufpd {{.*#+}} xmm9 = xmm9[1],xmm10[0]
756 ; SSE-NEXT: movsd {{.*#+}} xmm10 = xmm0[0],xmm10[1]
757 ; SSE-NEXT: movapd 560(%rdi), %xmm0
758 ; SSE-NEXT: movapd 592(%rdi), %xmm5
759 ; SSE-NEXT: movapd %xmm5, %xmm2
760 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
761 ; SSE-NEXT: movapd 608(%rdi), %xmm1
762 ; SSE-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1],xmm1[0]
763 ; SSE-NEXT: movapd 576(%rdi), %xmm3
764 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm3[0],xmm1[1]
765 ; SSE-NEXT: movapd 624(%rdi), %xmm8
766 ; SSE-NEXT: shufpd {{.*#+}} xmm3 = xmm3[1],xmm8[0]
767 ; SSE-NEXT: movsd {{.*#+}} xmm8 = xmm5[0],xmm8[1]
768 ; SSE-NEXT: movapd %xmm6, 96(%rsi)
769 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
770 ; SSE-NEXT: movaps %xmm5, 32(%rsi)
771 ; SSE-NEXT: movapd %xmm2, 112(%rsi)
772 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
773 ; SSE-NEXT: movaps %xmm2, 48(%rsi)
774 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
775 ; SSE-NEXT: movaps %xmm2, 64(%rsi)
776 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
777 ; SSE-NEXT: movaps %xmm2, (%rsi)
778 ; SSE-NEXT: movapd %xmm13, 80(%rsi)
779 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
780 ; SSE-NEXT: movaps %xmm2, 16(%rsi)
781 ; SSE-NEXT: movapd %xmm4, 96(%rdx)
782 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
783 ; SSE-NEXT: movaps %xmm2, 32(%rdx)
784 ; SSE-NEXT: movapd %xmm0, 112(%rdx)
785 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
786 ; SSE-NEXT: movaps %xmm0, 48(%rdx)
787 ; SSE-NEXT: movapd %xmm15, 64(%rdx)
788 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
789 ; SSE-NEXT: movaps %xmm0, (%rdx)
790 ; SSE-NEXT: movapd %xmm11, 80(%rdx)
791 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
792 ; SSE-NEXT: movaps %xmm0, 16(%rdx)
793 ; SSE-NEXT: movapd %xmm7, 96(%rcx)
794 ; SSE-NEXT: movapd %xmm1, 112(%rcx)
795 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
796 ; SSE-NEXT: movaps %xmm0, 64(%rcx)
797 ; SSE-NEXT: movapd %xmm12, 80(%rcx)
798 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
799 ; SSE-NEXT: movaps %xmm0, 32(%rcx)
800 ; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
801 ; SSE-NEXT: movaps %xmm0, 48(%rcx)
802 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
803 ; SSE-NEXT: movaps %xmm0, (%rcx)
804 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
805 ; SSE-NEXT: movaps %xmm0, 16(%rcx)
806 ; SSE-NEXT: movapd %xmm3, 112(%r8)
807 ; SSE-NEXT: movapd %xmm9, 96(%r8)
808 ; SSE-NEXT: movapd %xmm14, 80(%r8)
809 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
810 ; SSE-NEXT: movaps %xmm0, 64(%r8)
811 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
812 ; SSE-NEXT: movaps %xmm0, 48(%r8)
813 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
814 ; SSE-NEXT: movaps %xmm0, 32(%r8)
815 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
816 ; SSE-NEXT: movaps %xmm0, 16(%r8)
817 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
818 ; SSE-NEXT: movaps %xmm0, (%r8)
819 ; SSE-NEXT: movapd %xmm8, 112(%r9)
820 ; SSE-NEXT: movapd %xmm10, 96(%r9)
821 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
822 ; SSE-NEXT: movaps %xmm0, 80(%r9)
823 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
824 ; SSE-NEXT: movaps %xmm0, 64(%r9)
825 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
826 ; SSE-NEXT: movaps %xmm0, 48(%r9)
827 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
828 ; SSE-NEXT: movaps %xmm0, 32(%r9)
829 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
830 ; SSE-NEXT: movaps %xmm0, 16(%r9)
831 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
832 ; SSE-NEXT: movaps %xmm0, (%r9)
833 ; SSE-NEXT: addq $280, %rsp # imm = 0x118
834 ; SSE-NEXT: retq
835 ;
836 ; AVX1-ONLY-LABEL: load_i64_stride5_vf16:
837 ; AVX1-ONLY: # %bb.0:
838 ; AVX1-ONLY-NEXT: subq $360, %rsp # imm = 0x168
839 ; AVX1-ONLY-NEXT: vmovapd 96(%rdi), %ymm0
840 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
841 ; AVX1-ONLY-NEXT: vmovapd 64(%rdi), %ymm5
842 ; AVX1-ONLY-NEXT: vmovapd 576(%rdi), %ymm7
843 ; AVX1-ONLY-NEXT: vmovupd %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
844 ; AVX1-ONLY-NEXT: vmovapd 544(%rdi), %ymm1
845 ; AVX1-ONLY-NEXT: vmovapd 256(%rdi), %ymm2
846 ; AVX1-ONLY-NEXT: vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
847 ; AVX1-ONLY-NEXT: vmovapd 224(%rdi), %ymm3
848 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm2 = ymm3[0,1,2],ymm2[3]
849 ; AVX1-ONLY-NEXT: vmovapd 192(%rdi), %xmm6
850 ; AVX1-ONLY-NEXT: vmovapd %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
851 ; AVX1-ONLY-NEXT: vmovapd 160(%rdi), %xmm4
852 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} xmm6 = xmm4[0],xmm6[1]
853 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm2 = ymm6[0,1],ymm2[2,3]
854 ; AVX1-ONLY-NEXT: vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
855 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm2 = ymm1[0,1,2],ymm7[3]
856 ; AVX1-ONLY-NEXT: vmovapd 512(%rdi), %xmm6
857 ; AVX1-ONLY-NEXT: vmovapd %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
858 ; AVX1-ONLY-NEXT: vmovapd 480(%rdi), %xmm11
859 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} xmm8 = xmm11[0],xmm6[1]
860 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm2 = ymm8[0,1],ymm2[2,3]
861 ; AVX1-ONLY-NEXT: vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
862 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm9 = ymm5[0,1,2],ymm0[3]
863 ; AVX1-ONLY-NEXT: vmovapd (%rdi), %xmm2
864 ; AVX1-ONLY-NEXT: vmovapd 32(%rdi), %xmm0
865 ; AVX1-ONLY-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
866 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} xmm10 = xmm2[0],xmm0[1]
867 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm10[0,1],ymm9[2,3]
868 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
869 ; AVX1-ONLY-NEXT: vmovapd 416(%rdi), %ymm0
870 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
871 ; AVX1-ONLY-NEXT: vmovapd 384(%rdi), %ymm14
872 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm15 = ymm14[0,1,2],ymm0[3]
873 ; AVX1-ONLY-NEXT: vmovapd 352(%rdi), %xmm6
874 ; AVX1-ONLY-NEXT: vmovapd %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
875 ; AVX1-ONLY-NEXT: vmovapd 320(%rdi), %xmm0
876 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} xmm13 = xmm0[0],xmm6[1]
877 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm6 = ymm13[0,1],ymm15[2,3]
878 ; AVX1-ONLY-NEXT: vmovupd %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
879 ; AVX1-ONLY-NEXT: vmovaps 288(%rdi), %xmm13
880 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm13, %ymm0, %ymm15
881 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm3 = ymm3[0],ymm15[0],ymm3[3],ymm15[2]
882 ; AVX1-ONLY-NEXT: vmovdqa 208(%rdi), %xmm15
883 ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm4 = xmm4[8,9,10,11,12,13,14,15],xmm15[0,1,2,3,4,5,6,7]
884 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm3 = ymm4[0,1],ymm3[2,3]
885 ; AVX1-ONLY-NEXT: vmovupd %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
886 ; AVX1-ONLY-NEXT: vmovaps 608(%rdi), %xmm3
887 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm4
888 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[0],ymm4[0],ymm1[3],ymm4[2]
889 ; AVX1-ONLY-NEXT: vmovdqa 528(%rdi), %xmm4
890 ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm11 = xmm11[8,9,10,11,12,13,14,15],xmm4[0,1,2,3,4,5,6,7]
891 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm1 = ymm11[0,1],ymm1[2,3]
892 ; AVX1-ONLY-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
893 ; AVX1-ONLY-NEXT: vmovaps 128(%rdi), %xmm6
894 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm11
895 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm5 = ymm5[0],ymm11[0],ymm5[3],ymm11[2]
896 ; AVX1-ONLY-NEXT: vmovdqa 48(%rdi), %xmm11
897 ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm2 = xmm2[8,9,10,11,12,13,14,15],xmm11[0,1,2,3,4,5,6,7]
898 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm1 = ymm2[0,1],ymm5[2,3]
899 ; AVX1-ONLY-NEXT: vmovupd %ymm1, (%rsp) # 32-byte Spill
900 ; AVX1-ONLY-NEXT: vmovaps 448(%rdi), %xmm2
901 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm5
902 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm5 = ymm14[0],ymm5[0],ymm14[3],ymm5[2]
903 ; AVX1-ONLY-NEXT: vmovdqa 368(%rdi), %xmm14
904 ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],xmm14[0,1,2,3,4,5,6,7]
905 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm5[2,3]
906 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
907 ; AVX1-ONLY-NEXT: vmovaps 256(%rdi), %xmm0
908 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm13 = xmm0[0,1],xmm13[2,3]
909 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm13, %ymm0, %ymm13
910 ; AVX1-ONLY-NEXT: vmovdqa 176(%rdi), %xmm9
911 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm15 = xmm9[0,1,2,3],xmm15[4,5,6,7]
912 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm15[0,1,2,3],ymm13[4,5,6,7]
913 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
914 ; AVX1-ONLY-NEXT: vmovaps 576(%rdi), %xmm5
915 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm3 = xmm5[0,1],xmm3[2,3]
916 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
917 ; AVX1-ONLY-NEXT: vmovdqa 496(%rdi), %xmm7
918 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm4 = xmm7[0,1,2,3],xmm4[4,5,6,7]
919 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm15 = ymm4[0,1,2,3],ymm3[4,5,6,7]
920 ; AVX1-ONLY-NEXT: vmovaps 96(%rdi), %xmm4
921 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm3 = xmm4[0,1],xmm6[2,3]
922 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
923 ; AVX1-ONLY-NEXT: vmovdqa 16(%rdi), %xmm10
924 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm11 = xmm10[0,1,2,3],xmm11[4,5,6,7]
925 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm13 = ymm11[0,1,2,3],ymm3[4,5,6,7]
926 ; AVX1-ONLY-NEXT: vmovaps 416(%rdi), %xmm6
927 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm2 = xmm6[0,1],xmm2[2,3]
928 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
929 ; AVX1-ONLY-NEXT: vmovdqa 336(%rdi), %xmm8
930 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm14 = xmm8[0,1,2,3],xmm14[4,5,6,7]
931 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm11 = ymm14[0,1,2,3],ymm2[4,5,6,7]
932 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
933 ; AVX1-ONLY-NEXT: vmovapd 288(%rdi), %ymm14
934 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[0],ymm14[0],ymm0[3],ymm14[2]
935 ; AVX1-ONLY-NEXT: vmovdqa 224(%rdi), %xmm3
936 ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm9 = xmm9[8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7]
937 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm12 = ymm9[0,1],ymm0[2,3]
938 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm0
939 ; AVX1-ONLY-NEXT: vmovapd 608(%rdi), %ymm5
940 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[0],ymm5[0],ymm0[3],ymm5[2]
941 ; AVX1-ONLY-NEXT: vmovdqa 544(%rdi), %xmm2
942 ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm7 = xmm7[8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7]
943 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm9 = ymm7[0,1],ymm0[2,3]
944 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
945 ; AVX1-ONLY-NEXT: vmovapd 128(%rdi), %ymm4
946 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[0],ymm4[0],ymm0[3],ymm4[2]
947 ; AVX1-ONLY-NEXT: vmovdqa 64(%rdi), %xmm1
948 ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm10 = xmm10[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
949 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm7 = ymm10[0,1],ymm0[2,3]
950 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm6
951 ; AVX1-ONLY-NEXT: vmovapd 448(%rdi), %ymm10
952 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm6 = ymm6[0],ymm10[0],ymm6[3],ymm10[2]
953 ; AVX1-ONLY-NEXT: vmovdqa 384(%rdi), %xmm0
954 ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm8 = xmm8[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
955 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm6 = ymm8[0,1],ymm6[2,3]
956 ; AVX1-ONLY-NEXT: vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm8 # 32-byte Folded Reload
957 ; AVX1-ONLY-NEXT: # ymm8 = mem[0,1,2],ymm14[3]
958 ; AVX1-ONLY-NEXT: vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
959 ; AVX1-ONLY-NEXT: # xmm3 = mem[0,1,2,3],xmm3[4,5,6,7]
960 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm3 = ymm3[0,1],ymm8[2,3]
961 ; AVX1-ONLY-NEXT: vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload
962 ; AVX1-ONLY-NEXT: # ymm5 = mem[0,1,2],ymm5[3]
963 ; AVX1-ONLY-NEXT: vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
964 ; AVX1-ONLY-NEXT: # xmm2 = mem[0,1,2,3],xmm2[4,5,6,7]
965 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm2 = ymm2[0,1],ymm5[2,3]
966 ; AVX1-ONLY-NEXT: vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload
967 ; AVX1-ONLY-NEXT: # ymm4 = mem[0,1,2],ymm4[3]
968 ; AVX1-ONLY-NEXT: vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
969 ; AVX1-ONLY-NEXT: # xmm1 = mem[0,1,2,3],xmm1[4,5,6,7]
970 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0,1],ymm4[2,3]
971 ; AVX1-ONLY-NEXT: vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm4 # 32-byte Folded Reload
972 ; AVX1-ONLY-NEXT: # ymm4 = mem[0,1,2],ymm10[3]
973 ; AVX1-ONLY-NEXT: vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
974 ; AVX1-ONLY-NEXT: # xmm0 = mem[0,1,2,3],xmm0[4,5,6,7]
975 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm4[2,3]
976 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
977 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 64(%rsi)
978 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
979 ; AVX1-ONLY-NEXT: vmovaps %ymm4, (%rsi)
980 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
981 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 96(%rsi)
982 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
983 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 32(%rsi)
984 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
985 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 64(%rdx)
986 ; AVX1-ONLY-NEXT: vmovups (%rsp), %ymm4 # 32-byte Reload
987 ; AVX1-ONLY-NEXT: vmovaps %ymm4, (%rdx)
988 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
989 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 96(%rdx)
990 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
991 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 32(%rdx)
992 ; AVX1-ONLY-NEXT: vmovaps %ymm11, 64(%rcx)
993 ; AVX1-ONLY-NEXT: vmovaps %ymm13, (%rcx)
994 ; AVX1-ONLY-NEXT: vmovaps %ymm15, 96(%rcx)
995 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
996 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 32(%rcx)
997 ; AVX1-ONLY-NEXT: vmovapd %ymm6, 64(%r8)
998 ; AVX1-ONLY-NEXT: vmovapd %ymm7, (%r8)
999 ; AVX1-ONLY-NEXT: vmovapd %ymm9, 96(%r8)
1000 ; AVX1-ONLY-NEXT: vmovapd %ymm12, 32(%r8)
1001 ; AVX1-ONLY-NEXT: vmovapd %ymm0, 64(%r9)
1002 ; AVX1-ONLY-NEXT: vmovapd %ymm1, (%r9)
1003 ; AVX1-ONLY-NEXT: vmovapd %ymm2, 96(%r9)
1004 ; AVX1-ONLY-NEXT: vmovapd %ymm3, 32(%r9)
1005 ; AVX1-ONLY-NEXT: addq $360, %rsp # imm = 0x168
1006 ; AVX1-ONLY-NEXT: vzeroupper
1007 ; AVX1-ONLY-NEXT: retq
1008 ;
1009 ; AVX2-ONLY-LABEL: load_i64_stride5_vf16:
1010 ; AVX2-ONLY: # %bb.0:
1011 ; AVX2-ONLY-NEXT: subq $360, %rsp # imm = 0x168
1012 ; AVX2-ONLY-NEXT: vmovdqa 96(%rdi), %ymm14
1013 ; AVX2-ONLY-NEXT: vmovdqa 64(%rdi), %ymm4
1014 ; AVX2-ONLY-NEXT: vmovdqa 576(%rdi), %ymm12
1015 ; AVX2-ONLY-NEXT: vmovdqa 544(%rdi), %ymm1
1016 ; AVX2-ONLY-NEXT: vmovdqa 256(%rdi), %ymm11
1017 ; AVX2-ONLY-NEXT: vmovdqa 224(%rdi), %ymm2
1018 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm3 = ymm2[0,1,2,3,4,5],ymm11[6,7]
1019 ; AVX2-ONLY-NEXT: vmovdqa 192(%rdi), %xmm0
1020 ; AVX2-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1021 ; AVX2-ONLY-NEXT: vmovdqa 160(%rdi), %xmm5
1022 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm8 = xmm5[0,1],xmm0[2,3]
1023 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3],ymm3[4,5,6,7]
1024 ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1025 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm3 = ymm1[0,1,2,3,4,5],ymm12[6,7]
1026 ; AVX2-ONLY-NEXT: vmovdqa 512(%rdi), %xmm0
1027 ; AVX2-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1028 ; AVX2-ONLY-NEXT: vmovdqa 480(%rdi), %xmm8
1029 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm9 = xmm8[0,1],xmm0[2,3]
1030 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm9[0,1,2,3],ymm3[4,5,6,7]
1031 ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1032 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3,4,5],ymm14[6,7]
1033 ; AVX2-ONLY-NEXT: vmovdqa (%rdi), %xmm10
1034 ; AVX2-ONLY-NEXT: vmovdqa 32(%rdi), %xmm0
1035 ; AVX2-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1036 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm9 = xmm10[0,1],xmm0[2,3]
1037 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm9[0,1,2,3],ymm3[4,5,6,7]
1038 ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1039 ; AVX2-ONLY-NEXT: vmovdqa 416(%rdi), %ymm9
1040 ; AVX2-ONLY-NEXT: vmovdqa 384(%rdi), %ymm3
1041 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm13 = ymm3[0,1,2,3,4,5],ymm9[6,7]
1042 ; AVX2-ONLY-NEXT: vmovdqa 352(%rdi), %xmm6
1043 ; AVX2-ONLY-NEXT: vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1044 ; AVX2-ONLY-NEXT: vmovdqa 320(%rdi), %xmm0
1045 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm15 = xmm0[0,1],xmm6[2,3]
1046 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm6 = ymm15[0,1,2,3],ymm13[4,5,6,7]
1047 ; AVX2-ONLY-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1048 ; AVX2-ONLY-NEXT: vmovdqa 208(%rdi), %xmm13
1049 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm5 = xmm5[8,9,10,11,12,13,14,15],xmm13[0,1,2,3,4,5,6,7]
1050 ; AVX2-ONLY-NEXT: vmovdqa 288(%rdi), %ymm15
1051 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm2 = ymm2[8,9,10,11,12,13,14,15],ymm15[0,1,2,3,4,5,6,7],ymm2[24,25,26,27,28,29,30,31],ymm15[16,17,18,19,20,21,22,23]
1052 ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,2,1]
1053 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm5[0,1,2,3],ymm2[4,5,6,7]
1054 ; AVX2-ONLY-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1055 ; AVX2-ONLY-NEXT: vmovdqa 528(%rdi), %xmm2
1056 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm5 = xmm8[8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7]
1057 ; AVX2-ONLY-NEXT: vmovdqa 608(%rdi), %ymm8
1058 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm1 = ymm1[8,9,10,11,12,13,14,15],ymm8[0,1,2,3,4,5,6,7],ymm1[24,25,26,27,28,29,30,31],ymm8[16,17,18,19,20,21,22,23]
1059 ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,2,1]
1060 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm5[0,1,2,3],ymm1[4,5,6,7]
1061 ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1062 ; AVX2-ONLY-NEXT: vmovdqa 48(%rdi), %xmm1
1063 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm5 = xmm10[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
1064 ; AVX2-ONLY-NEXT: vmovdqa 128(%rdi), %ymm7
1065 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm4 = ymm4[8,9,10,11,12,13,14,15],ymm7[0,1,2,3,4,5,6,7],ymm4[24,25,26,27,28,29,30,31],ymm7[16,17,18,19,20,21,22,23]
1066 ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,1,2,1]
1067 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm5[0,1,2,3],ymm4[4,5,6,7]
1068 ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1069 ; AVX2-ONLY-NEXT: vmovdqa 368(%rdi), %xmm4
1070 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm4 = xmm0[8,9,10,11,12,13,14,15],xmm4[0,1,2,3,4,5,6,7]
1071 ; AVX2-ONLY-NEXT: vmovdqa 448(%rdi), %ymm6
1072 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm3 = ymm3[8,9,10,11,12,13,14,15],ymm6[0,1,2,3,4,5,6,7],ymm3[24,25,26,27,28,29,30,31],ymm6[16,17,18,19,20,21,22,23]
1073 ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,1,2,1]
1074 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3],ymm3[4,5,6,7]
1075 ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1076 ; AVX2-ONLY-NEXT: vmovdqa 160(%rdi), %ymm3
1077 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1],mem[2,3],ymm3[4,5],mem[6,7]
1078 ; AVX2-ONLY-NEXT: vmovdqa 256(%rdi), %xmm4
1079 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm4 = xmm4[0,1],mem[2,3]
1080 ; AVX2-ONLY-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
1081 ; AVX2-ONLY-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm4[2,3]
1082 ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1083 ; AVX2-ONLY-NEXT: vmovdqa 480(%rdi), %ymm3
1084 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1],mem[2,3],ymm3[4,5],mem[6,7]
1085 ; AVX2-ONLY-NEXT: vmovdqa 576(%rdi), %xmm4
1086 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm4 = xmm4[0,1],mem[2,3]
1087 ; AVX2-ONLY-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
1088 ; AVX2-ONLY-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm4[2,3]
1089 ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1090 ; AVX2-ONLY-NEXT: vmovdqa (%rdi), %ymm3
1091 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1],mem[2,3],ymm3[4,5],mem[6,7]
1092 ; AVX2-ONLY-NEXT: vmovdqa 96(%rdi), %xmm4
1093 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm4 = xmm4[0,1],mem[2,3]
1094 ; AVX2-ONLY-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
1095 ; AVX2-ONLY-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm4[2,3]
1096 ; AVX2-ONLY-NEXT: vmovdqu %ymm0, (%rsp) # 32-byte Spill
1097 ; AVX2-ONLY-NEXT: vmovdqa 320(%rdi), %ymm3
1098 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1],mem[2,3],ymm3[4,5],mem[6,7]
1099 ; AVX2-ONLY-NEXT: vmovdqa 416(%rdi), %xmm4
1100 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm4 = xmm4[0,1],mem[2,3]
1101 ; AVX2-ONLY-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
1102 ; AVX2-ONLY-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm3[2,3],ymm4[2,3]
1103 ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1104 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm3 = ymm11[8,9,10,11,12,13,14,15],ymm15[0,1,2,3,4,5,6,7],ymm11[24,25,26,27,28,29,30,31],ymm15[16,17,18,19,20,21,22,23]
1105 ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,1,0,3]
1106 ; AVX2-ONLY-NEXT: vmovdqa 224(%rdi), %xmm4
1107 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm5 = mem[8,9,10,11,12,13,14,15],xmm4[0,1,2,3,4,5,6,7]
1108 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm5[0,1,2,3],ymm3[4,5,6,7]
1109 ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1110 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm3 = ymm12[8,9,10,11,12,13,14,15],ymm8[0,1,2,3,4,5,6,7],ymm12[24,25,26,27,28,29,30,31],ymm8[16,17,18,19,20,21,22,23]
1111 ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,1,0,3]
1112 ; AVX2-ONLY-NEXT: vmovdqa 544(%rdi), %xmm2
1113 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm5 = mem[8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7]
1114 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm3[4,5,6,7]
1115 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm3 = ymm14[8,9,10,11,12,13,14,15],ymm7[0,1,2,3,4,5,6,7],ymm14[24,25,26,27,28,29,30,31],ymm7[16,17,18,19,20,21,22,23]
1116 ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,1,0,3]
1117 ; AVX2-ONLY-NEXT: vmovdqa 64(%rdi), %xmm1
1118 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm13 = mem[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
1119 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm3 = ymm13[0,1,2,3],ymm3[4,5,6,7]
1120 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm13 = ymm9[8,9,10,11,12,13,14,15],ymm6[0,1,2,3,4,5,6,7],ymm9[24,25,26,27,28,29,30,31],ymm6[16,17,18,19,20,21,22,23]
1121 ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm13 = ymm13[0,1,0,3]
1122 ; AVX2-ONLY-NEXT: vmovdqa 384(%rdi), %xmm0
1123 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm10 = mem[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
1124 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm13[4,5,6,7]
1125 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3,4,5],ymm15[6,7]
1126 ; AVX2-ONLY-NEXT: vpblendd $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload
1127 ; AVX2-ONLY-NEXT: # xmm4 = mem[0,1],xmm4[2,3]
1128 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm11[4,5,6,7]
1129 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm8 = ymm12[0,1,2,3,4,5],ymm8[6,7]
1130 ; AVX2-ONLY-NEXT: vpblendd $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
1131 ; AVX2-ONLY-NEXT: # xmm2 = mem[0,1],xmm2[2,3]
1132 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm8[4,5,6,7]
1133 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm7 = ymm14[0,1,2,3,4,5],ymm7[6,7]
1134 ; AVX2-ONLY-NEXT: vpblendd $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
1135 ; AVX2-ONLY-NEXT: # xmm1 = mem[0,1],xmm1[2,3]
1136 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm7[4,5,6,7]
1137 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm6 = ymm9[0,1,2,3,4,5],ymm6[6,7]
1138 ; AVX2-ONLY-NEXT: vpblendd $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
1139 ; AVX2-ONLY-NEXT: # xmm0 = mem[0,1],xmm0[2,3]
1140 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm6[4,5,6,7]
1141 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
1142 ; AVX2-ONLY-NEXT: vmovaps %ymm6, 64(%rsi)
1143 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
1144 ; AVX2-ONLY-NEXT: vmovaps %ymm6, (%rsi)
1145 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
1146 ; AVX2-ONLY-NEXT: vmovaps %ymm6, 96(%rsi)
1147 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
1148 ; AVX2-ONLY-NEXT: vmovaps %ymm6, 32(%rsi)
1149 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
1150 ; AVX2-ONLY-NEXT: vmovaps %ymm6, 64(%rdx)
1151 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
1152 ; AVX2-ONLY-NEXT: vmovaps %ymm6, (%rdx)
1153 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
1154 ; AVX2-ONLY-NEXT: vmovaps %ymm6, 96(%rdx)
1155 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
1156 ; AVX2-ONLY-NEXT: vmovaps %ymm6, 32(%rdx)
1157 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
1158 ; AVX2-ONLY-NEXT: vmovaps %ymm6, 64(%rcx)
1159 ; AVX2-ONLY-NEXT: vmovups (%rsp), %ymm6 # 32-byte Reload
1160 ; AVX2-ONLY-NEXT: vmovaps %ymm6, (%rcx)
1161 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
1162 ; AVX2-ONLY-NEXT: vmovaps %ymm6, 96(%rcx)
1163 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
1164 ; AVX2-ONLY-NEXT: vmovaps %ymm6, 32(%rcx)
1165 ; AVX2-ONLY-NEXT: vmovdqa %ymm10, 64(%r8)
1166 ; AVX2-ONLY-NEXT: vmovdqa %ymm3, (%r8)
1167 ; AVX2-ONLY-NEXT: vmovdqa %ymm5, 96(%r8)
1168 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
1169 ; AVX2-ONLY-NEXT: vmovaps %ymm3, 32(%r8)
1170 ; AVX2-ONLY-NEXT: vmovdqa %ymm0, 64(%r9)
1171 ; AVX2-ONLY-NEXT: vmovdqa %ymm1, (%r9)
1172 ; AVX2-ONLY-NEXT: vmovdqa %ymm2, 96(%r9)
1173 ; AVX2-ONLY-NEXT: vmovdqa %ymm4, 32(%r9)
1174 ; AVX2-ONLY-NEXT: addq $360, %rsp # imm = 0x168
1175 ; AVX2-ONLY-NEXT: vzeroupper
1176 ; AVX2-ONLY-NEXT: retq
1178 ; AVX512F-LABEL: load_i64_stride5_vf16:
1179 ; AVX512F: # %bb.0:
1180 ; AVX512F-NEXT: vmovdqa64 576(%rdi), %zmm2
1181 ; AVX512F-NEXT: vmovdqa64 384(%rdi), %zmm0
1182 ; AVX512F-NEXT: vmovdqa64 320(%rdi), %zmm3
1183 ; AVX512F-NEXT: vmovdqa64 448(%rdi), %zmm5
1184 ; AVX512F-NEXT: vmovdqa64 512(%rdi), %zmm4
1185 ; AVX512F-NEXT: vmovdqa64 256(%rdi), %zmm6
1186 ; AVX512F-NEXT: vmovdqa64 (%rdi), %zmm8
1187 ; AVX512F-NEXT: vmovdqa64 64(%rdi), %zmm1
1188 ; AVX512F-NEXT: vmovdqa64 128(%rdi), %zmm11
1189 ; AVX512F-NEXT: vmovdqa64 192(%rdi), %zmm9
1190 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm10 = [12,1,6,0,12,1,6,0]
1191 ; AVX512F-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3]
1192 ; AVX512F-NEXT: vmovdqa64 %zmm9, %zmm7
1193 ; AVX512F-NEXT: vpermt2q %zmm11, %zmm10, %zmm7
1194 ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm12 = [0,5,10,15]
1195 ; AVX512F-NEXT: vmovdqa64 %zmm8, %zmm13
1196 ; AVX512F-NEXT: vpermt2q %zmm1, %zmm12, %zmm13
1197 ; AVX512F-NEXT: vshufi64x2 {{.*#+}} zmm7 = zmm13[0,1,2,3],zmm7[4,5,6,7]
1198 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm13 = [0,1,2,3,4,5,6,11]
1199 ; AVX512F-NEXT: vpermt2q %zmm6, %zmm13, %zmm7
1200 ; AVX512F-NEXT: vpermi2q %zmm5, %zmm4, %zmm10
1201 ; AVX512F-NEXT: vpermi2q %zmm0, %zmm3, %zmm12
1202 ; AVX512F-NEXT: vshufi64x2 {{.*#+}} zmm10 = zmm12[0,1,2,3],zmm10[4,5,6,7]
1203 ; AVX512F-NEXT: vpermt2q %zmm2, %zmm13, %zmm10
1204 ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm14 = <1,6,11,u>
1205 ; AVX512F-NEXT: vmovdqa64 %zmm8, %zmm15
1206 ; AVX512F-NEXT: vpermt2q %zmm1, %zmm14, %zmm15
1207 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm13 = [5,10,15,0,5,10,15,0]
1208 ; AVX512F-NEXT: # zmm13 = mem[0,1,2,3,0,1,2,3]
1209 ; AVX512F-NEXT: vmovdqa64 %zmm11, %zmm12
1210 ; AVX512F-NEXT: vpermt2q %zmm9, %zmm13, %zmm12
1211 ; AVX512F-NEXT: movb $7, %al
1212 ; AVX512F-NEXT: kmovw %eax, %k1
1213 ; AVX512F-NEXT: vmovdqa64 %zmm15, %zmm12 {%k1}
1214 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm15 = [0,1,2,3,4,5,6,12]
1215 ; AVX512F-NEXT: vpermt2q %zmm6, %zmm15, %zmm12
1216 ; AVX512F-NEXT: vpermi2q %zmm4, %zmm5, %zmm13
1217 ; AVX512F-NEXT: vpermi2q %zmm0, %zmm3, %zmm14
1218 ; AVX512F-NEXT: vmovdqa64 %zmm14, %zmm13 {%k1}
1219 ; AVX512F-NEXT: vpermt2q %zmm2, %zmm15, %zmm13
1220 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm14 = [6,11,0,1,6,11,0,1]
1221 ; AVX512F-NEXT: # zmm14 = mem[0,1,2,3,0,1,2,3]
1222 ; AVX512F-NEXT: vmovdqa64 %zmm11, %zmm15
1223 ; AVX512F-NEXT: vpermt2q %zmm9, %zmm14, %zmm15
1224 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} ymm16 = <2,7,12,u>
1225 ; AVX512F-NEXT: vmovdqa64 %zmm8, %zmm17
1226 ; AVX512F-NEXT: vpermt2q %zmm1, %zmm16, %zmm17
1227 ; AVX512F-NEXT: movb $56, %al
1228 ; AVX512F-NEXT: kmovw %eax, %k1
1229 ; AVX512F-NEXT: vmovdqa64 %zmm15, %zmm17 {%k1}
1230 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm15 = [0,1,2,3,4,5,8,13]
1231 ; AVX512F-NEXT: vpermt2q %zmm6, %zmm15, %zmm17
1232 ; AVX512F-NEXT: vpermi2q %zmm4, %zmm5, %zmm14
1233 ; AVX512F-NEXT: vpermi2q %zmm0, %zmm3, %zmm16
1234 ; AVX512F-NEXT: vmovdqa64 %zmm14, %zmm16 {%k1}
1235 ; AVX512F-NEXT: vpermt2q %zmm2, %zmm15, %zmm16
1236 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm14 = [7,12,0,2,7,12,0,2]
1237 ; AVX512F-NEXT: # zmm14 = mem[0,1,2,3,0,1,2,3]
1238 ; AVX512F-NEXT: vmovdqa64 %zmm11, %zmm15
1239 ; AVX512F-NEXT: vpermt2q %zmm9, %zmm14, %zmm15
1240 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} ymm18 = <11,0,5,u>
1241 ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm19
1242 ; AVX512F-NEXT: vpermt2q %zmm8, %zmm18, %zmm19
1243 ; AVX512F-NEXT: vmovdqa64 %zmm15, %zmm19 {%k1}
1244 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm15 = [0,1,2,3,4,5,9,14]
1245 ; AVX512F-NEXT: vpermt2q %zmm6, %zmm15, %zmm19
1246 ; AVX512F-NEXT: vpermi2q %zmm4, %zmm5, %zmm14
1247 ; AVX512F-NEXT: vpermi2q %zmm3, %zmm0, %zmm18
1248 ; AVX512F-NEXT: vmovdqa64 %zmm14, %zmm18 {%k1}
1249 ; AVX512F-NEXT: vpermt2q %zmm2, %zmm15, %zmm18
1250 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm14 = [0,5,0,11,0,5,0,11]
1251 ; AVX512F-NEXT: # zmm14 = mem[0,1,2,3,0,1,2,3]
1252 ; AVX512F-NEXT: vpermt2q %zmm11, %zmm14, %zmm9
1253 ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm11 = <12,1,6,u>
1254 ; AVX512F-NEXT: vpermt2q %zmm8, %zmm11, %zmm1
1255 ; AVX512F-NEXT: vmovdqa64 %zmm9, %zmm1 {%k1}
1256 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm8 = [0,1,2,3,4,5,10,15]
1257 ; AVX512F-NEXT: vpermt2q %zmm6, %zmm8, %zmm1
1258 ; AVX512F-NEXT: vpermt2q %zmm5, %zmm14, %zmm4
1259 ; AVX512F-NEXT: vpermt2q %zmm3, %zmm11, %zmm0
1260 ; AVX512F-NEXT: vmovdqa64 %zmm4, %zmm0 {%k1}
1261 ; AVX512F-NEXT: vpermt2q %zmm2, %zmm8, %zmm0
1262 ; AVX512F-NEXT: vmovdqa64 %zmm10, 64(%rsi)
1263 ; AVX512F-NEXT: vmovdqa64 %zmm7, (%rsi)
1264 ; AVX512F-NEXT: vmovdqa64 %zmm13, 64(%rdx)
1265 ; AVX512F-NEXT: vmovdqa64 %zmm12, (%rdx)
1266 ; AVX512F-NEXT: vmovdqa64 %zmm16, 64(%rcx)
1267 ; AVX512F-NEXT: vmovdqa64 %zmm17, (%rcx)
1268 ; AVX512F-NEXT: vmovdqa64 %zmm18, 64(%r8)
1269 ; AVX512F-NEXT: vmovdqa64 %zmm19, (%r8)
1270 ; AVX512F-NEXT: vmovdqa64 %zmm0, 64(%r9)
1271 ; AVX512F-NEXT: vmovdqa64 %zmm1, (%r9)
1272 ; AVX512F-NEXT: vzeroupper
1273 ; AVX512F-NEXT: retq
1275 ; AVX512BW-LABEL: load_i64_stride5_vf16:
1276 ; AVX512BW: # %bb.0:
1277 ; AVX512BW-NEXT: vmovdqa64 576(%rdi), %zmm2
1278 ; AVX512BW-NEXT: vmovdqa64 384(%rdi), %zmm0
1279 ; AVX512BW-NEXT: vmovdqa64 320(%rdi), %zmm3
1280 ; AVX512BW-NEXT: vmovdqa64 448(%rdi), %zmm5
1281 ; AVX512BW-NEXT: vmovdqa64 512(%rdi), %zmm4
1282 ; AVX512BW-NEXT: vmovdqa64 256(%rdi), %zmm6
1283 ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm8
1284 ; AVX512BW-NEXT: vmovdqa64 64(%rdi), %zmm1
1285 ; AVX512BW-NEXT: vmovdqa64 128(%rdi), %zmm11
1286 ; AVX512BW-NEXT: vmovdqa64 192(%rdi), %zmm9
1287 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm10 = [12,1,6,0,12,1,6,0]
1288 ; AVX512BW-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3]
1289 ; AVX512BW-NEXT: vmovdqa64 %zmm9, %zmm7
1290 ; AVX512BW-NEXT: vpermt2q %zmm11, %zmm10, %zmm7
1291 ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm12 = [0,5,10,15]
1292 ; AVX512BW-NEXT: vmovdqa64 %zmm8, %zmm13
1293 ; AVX512BW-NEXT: vpermt2q %zmm1, %zmm12, %zmm13
1294 ; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm7 = zmm13[0,1,2,3],zmm7[4,5,6,7]
1295 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm13 = [0,1,2,3,4,5,6,11]
1296 ; AVX512BW-NEXT: vpermt2q %zmm6, %zmm13, %zmm7
1297 ; AVX512BW-NEXT: vpermi2q %zmm5, %zmm4, %zmm10
1298 ; AVX512BW-NEXT: vpermi2q %zmm0, %zmm3, %zmm12
1299 ; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm10 = zmm12[0,1,2,3],zmm10[4,5,6,7]
1300 ; AVX512BW-NEXT: vpermt2q %zmm2, %zmm13, %zmm10
1301 ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm14 = <1,6,11,u>
1302 ; AVX512BW-NEXT: vmovdqa64 %zmm8, %zmm15
1303 ; AVX512BW-NEXT: vpermt2q %zmm1, %zmm14, %zmm15
1304 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm13 = [5,10,15,0,5,10,15,0]
1305 ; AVX512BW-NEXT: # zmm13 = mem[0,1,2,3,0,1,2,3]
1306 ; AVX512BW-NEXT: vmovdqa64 %zmm11, %zmm12
1307 ; AVX512BW-NEXT: vpermt2q %zmm9, %zmm13, %zmm12
1308 ; AVX512BW-NEXT: movb $7, %al
1309 ; AVX512BW-NEXT: kmovd %eax, %k1
1310 ; AVX512BW-NEXT: vmovdqa64 %zmm15, %zmm12 {%k1}
1311 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm15 = [0,1,2,3,4,5,6,12]
1312 ; AVX512BW-NEXT: vpermt2q %zmm6, %zmm15, %zmm12
1313 ; AVX512BW-NEXT: vpermi2q %zmm4, %zmm5, %zmm13
1314 ; AVX512BW-NEXT: vpermi2q %zmm0, %zmm3, %zmm14
1315 ; AVX512BW-NEXT: vmovdqa64 %zmm14, %zmm13 {%k1}
1316 ; AVX512BW-NEXT: vpermt2q %zmm2, %zmm15, %zmm13
1317 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm14 = [6,11,0,1,6,11,0,1]
1318 ; AVX512BW-NEXT: # zmm14 = mem[0,1,2,3,0,1,2,3]
1319 ; AVX512BW-NEXT: vmovdqa64 %zmm11, %zmm15
1320 ; AVX512BW-NEXT: vpermt2q %zmm9, %zmm14, %zmm15
1321 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} ymm16 = <2,7,12,u>
1322 ; AVX512BW-NEXT: vmovdqa64 %zmm8, %zmm17
1323 ; AVX512BW-NEXT: vpermt2q %zmm1, %zmm16, %zmm17
1324 ; AVX512BW-NEXT: movb $56, %al
1325 ; AVX512BW-NEXT: kmovd %eax, %k1
1326 ; AVX512BW-NEXT: vmovdqa64 %zmm15, %zmm17 {%k1}
1327 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm15 = [0,1,2,3,4,5,8,13]
1328 ; AVX512BW-NEXT: vpermt2q %zmm6, %zmm15, %zmm17
1329 ; AVX512BW-NEXT: vpermi2q %zmm4, %zmm5, %zmm14
1330 ; AVX512BW-NEXT: vpermi2q %zmm0, %zmm3, %zmm16
1331 ; AVX512BW-NEXT: vmovdqa64 %zmm14, %zmm16 {%k1}
1332 ; AVX512BW-NEXT: vpermt2q %zmm2, %zmm15, %zmm16
1333 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm14 = [7,12,0,2,7,12,0,2]
1334 ; AVX512BW-NEXT: # zmm14 = mem[0,1,2,3,0,1,2,3]
1335 ; AVX512BW-NEXT: vmovdqa64 %zmm11, %zmm15
1336 ; AVX512BW-NEXT: vpermt2q %zmm9, %zmm14, %zmm15
1337 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} ymm18 = <11,0,5,u>
1338 ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm19
1339 ; AVX512BW-NEXT: vpermt2q %zmm8, %zmm18, %zmm19
1340 ; AVX512BW-NEXT: vmovdqa64 %zmm15, %zmm19 {%k1}
1341 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm15 = [0,1,2,3,4,5,9,14]
1342 ; AVX512BW-NEXT: vpermt2q %zmm6, %zmm15, %zmm19
1343 ; AVX512BW-NEXT: vpermi2q %zmm4, %zmm5, %zmm14
1344 ; AVX512BW-NEXT: vpermi2q %zmm3, %zmm0, %zmm18
1345 ; AVX512BW-NEXT: vmovdqa64 %zmm14, %zmm18 {%k1}
1346 ; AVX512BW-NEXT: vpermt2q %zmm2, %zmm15, %zmm18
1347 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm14 = [0,5,0,11,0,5,0,11]
1348 ; AVX512BW-NEXT: # zmm14 = mem[0,1,2,3,0,1,2,3]
1349 ; AVX512BW-NEXT: vpermt2q %zmm11, %zmm14, %zmm9
1350 ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm11 = <12,1,6,u>
1351 ; AVX512BW-NEXT: vpermt2q %zmm8, %zmm11, %zmm1
1352 ; AVX512BW-NEXT: vmovdqa64 %zmm9, %zmm1 {%k1}
1353 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm8 = [0,1,2,3,4,5,10,15]
1354 ; AVX512BW-NEXT: vpermt2q %zmm6, %zmm8, %zmm1
1355 ; AVX512BW-NEXT: vpermt2q %zmm5, %zmm14, %zmm4
1356 ; AVX512BW-NEXT: vpermt2q %zmm3, %zmm11, %zmm0
1357 ; AVX512BW-NEXT: vmovdqa64 %zmm4, %zmm0 {%k1}
1358 ; AVX512BW-NEXT: vpermt2q %zmm2, %zmm8, %zmm0
1359 ; AVX512BW-NEXT: vmovdqa64 %zmm10, 64(%rsi)
1360 ; AVX512BW-NEXT: vmovdqa64 %zmm7, (%rsi)
1361 ; AVX512BW-NEXT: vmovdqa64 %zmm13, 64(%rdx)
1362 ; AVX512BW-NEXT: vmovdqa64 %zmm12, (%rdx)
1363 ; AVX512BW-NEXT: vmovdqa64 %zmm16, 64(%rcx)
1364 ; AVX512BW-NEXT: vmovdqa64 %zmm17, (%rcx)
1365 ; AVX512BW-NEXT: vmovdqa64 %zmm18, 64(%r8)
1366 ; AVX512BW-NEXT: vmovdqa64 %zmm19, (%r8)
1367 ; AVX512BW-NEXT: vmovdqa64 %zmm0, 64(%r9)
1368 ; AVX512BW-NEXT: vmovdqa64 %zmm1, (%r9)
1369 ; AVX512BW-NEXT: vzeroupper
1370 ; AVX512BW-NEXT: retq
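; The IR body below extracts the five stride-5 subsequences from a single
; <80 x i64> load: each %strided.vecN shufflevector selects elements
; N, N+5, N+10, ..., N+75 of %wide.vec, and the result is stored to the
; matching output pointer (%out.vecN).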
1371 %wide.vec = load <80 x i64>, ptr %in.vec, align 64
1372 %strided.vec0 = shufflevector <80 x i64> %wide.vec, <80 x i64> poison, <16 x i32> <i32 0, i32 5, i32 10, i32 15, i32 20, i32 25, i32 30, i32 35, i32 40, i32 45, i32 50, i32 55, i32 60, i32 65, i32 70, i32 75>
1373 %strided.vec1 = shufflevector <80 x i64> %wide.vec, <80 x i64> poison, <16 x i32> <i32 1, i32 6, i32 11, i32 16, i32 21, i32 26, i32 31, i32 36, i32 41, i32 46, i32 51, i32 56, i32 61, i32 66, i32 71, i32 76>
1374 %strided.vec2 = shufflevector <80 x i64> %wide.vec, <80 x i64> poison, <16 x i32> <i32 2, i32 7, i32 12, i32 17, i32 22, i32 27, i32 32, i32 37, i32 42, i32 47, i32 52, i32 57, i32 62, i32 67, i32 72, i32 77>
1375 %strided.vec3 = shufflevector <80 x i64> %wide.vec, <80 x i64> poison, <16 x i32> <i32 3, i32 8, i32 13, i32 18, i32 23, i32 28, i32 33, i32 38, i32 43, i32 48, i32 53, i32 58, i32 63, i32 68, i32 73, i32 78>
1376 %strided.vec4 = shufflevector <80 x i64> %wide.vec, <80 x i64> poison, <16 x i32> <i32 4, i32 9, i32 14, i32 19, i32 24, i32 29, i32 34, i32 39, i32 44, i32 49, i32 54, i32 59, i32 64, i32 69, i32 74, i32 79>
1377 store <16 x i64> %strided.vec0, ptr %out.vec0, align 64
1378 store <16 x i64> %strided.vec1, ptr %out.vec1, align 64
1379 store <16 x i64> %strided.vec2, ptr %out.vec2, align 64
1380 store <16 x i64> %strided.vec3, ptr %out.vec3, align 64
1381 store <16 x i64> %strided.vec4, ptr %out.vec4, align 64
1382 ret void
1383 }
1385 define void @load_i64_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4) nounwind {
1386 ; SSE-LABEL: load_i64_stride5_vf32:
1387 ; SSE: # %bb.0:
1388 ; SSE-NEXT: subq $920, %rsp # imm = 0x398
1389 ; SSE-NEXT: movapd 224(%rdi), %xmm5
1390 ; SSE-NEXT: movapd 144(%rdi), %xmm4
1391 ; SSE-NEXT: movapd 64(%rdi), %xmm3
1392 ; SSE-NEXT: movapd 176(%rdi), %xmm7
1393 ; SSE-NEXT: movapd 96(%rdi), %xmm6
1394 ; SSE-NEXT: movapd 208(%rdi), %xmm9
1395 ; SSE-NEXT: movapd 128(%rdi), %xmm10
1396 ; SSE-NEXT: movapd (%rdi), %xmm12
1397 ; SSE-NEXT: movapd 16(%rdi), %xmm8
1398 ; SSE-NEXT: movapd 32(%rdi), %xmm1
1399 ; SSE-NEXT: movapd 48(%rdi), %xmm11
1400 ; SSE-NEXT: movapd 160(%rdi), %xmm13
1401 ; SSE-NEXT: movapd 192(%rdi), %xmm0
1402 ; SSE-NEXT: movapd 80(%rdi), %xmm14
1403 ; SSE-NEXT: movapd 112(%rdi), %xmm2
1404 ; SSE-NEXT: movapd %xmm1, %xmm15
1405 ; SSE-NEXT: movsd {{.*#+}} xmm15 = xmm12[0],xmm15[1]
1406 ; SSE-NEXT: movapd %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1407 ; SSE-NEXT: shufpd {{.*#+}} xmm12 = xmm12[1],xmm11[0]
1408 ; SSE-NEXT: movapd %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1409 ; SSE-NEXT: movsd {{.*#+}} xmm11 = xmm8[0],xmm11[1]
1410 ; SSE-NEXT: movapd %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1411 ; SSE-NEXT: shufpd {{.*#+}} xmm8 = xmm8[1],xmm3[0]
1412 ; SSE-NEXT: movapd %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1413 ; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm1[0],xmm3[1]
1414 ; SSE-NEXT: movapd %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1415 ; SSE-NEXT: movapd %xmm2, %xmm1
1416 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm14[0],xmm1[1]
1417 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1418 ; SSE-NEXT: shufpd {{.*#+}} xmm14 = xmm14[1],xmm10[0]
1419 ; SSE-NEXT: movapd %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1420 ; SSE-NEXT: movsd {{.*#+}} xmm10 = xmm6[0],xmm10[1]
1421 ; SSE-NEXT: movapd %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1422 ; SSE-NEXT: shufpd {{.*#+}} xmm6 = xmm6[1],xmm4[0]
1423 ; SSE-NEXT: movapd %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1424 ; SSE-NEXT: movsd {{.*#+}} xmm4 = xmm2[0],xmm4[1]
1425 ; SSE-NEXT: movapd %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1426 ; SSE-NEXT: movapd %xmm0, %xmm1
1427 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm13[0],xmm1[1]
1428 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1429 ; SSE-NEXT: shufpd {{.*#+}} xmm13 = xmm13[1],xmm9[0]
1430 ; SSE-NEXT: movapd %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1431 ; SSE-NEXT: movsd {{.*#+}} xmm9 = xmm7[0],xmm9[1]
1432 ; SSE-NEXT: movapd %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1433 ; SSE-NEXT: shufpd {{.*#+}} xmm7 = xmm7[1],xmm5[0]
1434 ; SSE-NEXT: movapd %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1435 ; SSE-NEXT: movsd {{.*#+}} xmm5 = xmm0[0],xmm5[1]
1436 ; SSE-NEXT: movapd %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1437 ; SSE-NEXT: movapd 240(%rdi), %xmm2
1438 ; SSE-NEXT: movapd 272(%rdi), %xmm0
1439 ; SSE-NEXT: movapd %xmm0, %xmm1
1440 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
1441 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1442 ; SSE-NEXT: movapd 288(%rdi), %xmm1
1443 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
1444 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1445 ; SSE-NEXT: movapd 256(%rdi), %xmm2
1446 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
1447 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1448 ; SSE-NEXT: movapd 304(%rdi), %xmm1
1449 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
1450 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1451 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
1452 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1453 ; SSE-NEXT: movapd 320(%rdi), %xmm2
1454 ; SSE-NEXT: movapd 352(%rdi), %xmm0
1455 ; SSE-NEXT: movapd %xmm0, %xmm1
1456 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
1457 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1458 ; SSE-NEXT: movapd 368(%rdi), %xmm1
1459 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
1460 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1461 ; SSE-NEXT: movapd 336(%rdi), %xmm2
1462 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
1463 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1464 ; SSE-NEXT: movapd 384(%rdi), %xmm1
1465 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
1466 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1467 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
1468 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1469 ; SSE-NEXT: movapd 400(%rdi), %xmm2
1470 ; SSE-NEXT: movapd 432(%rdi), %xmm0
1471 ; SSE-NEXT: movapd %xmm0, %xmm1
1472 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
1473 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1474 ; SSE-NEXT: movapd 448(%rdi), %xmm1
1475 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
1476 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1477 ; SSE-NEXT: movapd 416(%rdi), %xmm2
1478 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
1479 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1480 ; SSE-NEXT: movapd 464(%rdi), %xmm1
1481 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
1482 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1483 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
1484 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1485 ; SSE-NEXT: movapd 480(%rdi), %xmm2
1486 ; SSE-NEXT: movapd 512(%rdi), %xmm0
1487 ; SSE-NEXT: movapd %xmm0, %xmm1
1488 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
1489 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1490 ; SSE-NEXT: movapd 528(%rdi), %xmm1
1491 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
1492 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1493 ; SSE-NEXT: movapd 496(%rdi), %xmm2
1494 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
1495 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1496 ; SSE-NEXT: movapd 544(%rdi), %xmm1
1497 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
1498 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1499 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
1500 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1501 ; SSE-NEXT: movapd 560(%rdi), %xmm2
1502 ; SSE-NEXT: movapd 592(%rdi), %xmm0
1503 ; SSE-NEXT: movapd %xmm0, %xmm1
1504 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
1505 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1506 ; SSE-NEXT: movapd 608(%rdi), %xmm1
1507 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
1508 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1509 ; SSE-NEXT: movapd 576(%rdi), %xmm2
1510 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
1511 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1512 ; SSE-NEXT: movapd 624(%rdi), %xmm1
1513 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
1514 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1515 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
1516 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1517 ; SSE-NEXT: movapd 640(%rdi), %xmm2
1518 ; SSE-NEXT: movapd 672(%rdi), %xmm0
1519 ; SSE-NEXT: movapd %xmm0, %xmm1
1520 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
1521 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1522 ; SSE-NEXT: movapd 688(%rdi), %xmm1
1523 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
1524 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1525 ; SSE-NEXT: movapd 656(%rdi), %xmm2
1526 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
1527 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1528 ; SSE-NEXT: movapd 704(%rdi), %xmm1
1529 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
1530 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1531 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
1532 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1533 ; SSE-NEXT: movapd 720(%rdi), %xmm2
1534 ; SSE-NEXT: movapd 752(%rdi), %xmm0
1535 ; SSE-NEXT: movapd %xmm0, %xmm1
1536 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
1537 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1538 ; SSE-NEXT: movapd 768(%rdi), %xmm1
1539 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
1540 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1541 ; SSE-NEXT: movapd 736(%rdi), %xmm2
1542 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
1543 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1544 ; SSE-NEXT: movapd 784(%rdi), %xmm1
1545 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
1546 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1547 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
1548 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1549 ; SSE-NEXT: movapd 800(%rdi), %xmm2
1550 ; SSE-NEXT: movapd 832(%rdi), %xmm0
1551 ; SSE-NEXT: movapd %xmm0, %xmm1
1552 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
1553 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1554 ; SSE-NEXT: movapd 848(%rdi), %xmm1
1555 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
1556 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1557 ; SSE-NEXT: movapd 816(%rdi), %xmm2
1558 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
1559 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1560 ; SSE-NEXT: movapd 864(%rdi), %xmm1
1561 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
1562 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1563 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
1564 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1565 ; SSE-NEXT: movapd 880(%rdi), %xmm2
1566 ; SSE-NEXT: movapd 912(%rdi), %xmm0
1567 ; SSE-NEXT: movapd %xmm0, %xmm1
1568 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
1569 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1570 ; SSE-NEXT: movapd 928(%rdi), %xmm1
1571 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
1572 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1573 ; SSE-NEXT: movapd 896(%rdi), %xmm2
1574 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
1575 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1576 ; SSE-NEXT: movapd 944(%rdi), %xmm1
1577 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
1578 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1579 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
1580 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1581 ; SSE-NEXT: movapd 960(%rdi), %xmm10
1582 ; SSE-NEXT: movapd 992(%rdi), %xmm0
1583 ; SSE-NEXT: movapd %xmm0, %xmm14
1584 ; SSE-NEXT: movsd {{.*#+}} xmm14 = xmm10[0],xmm14[1]
1585 ; SSE-NEXT: movapd 1008(%rdi), %xmm15
1586 ; SSE-NEXT: shufpd {{.*#+}} xmm10 = xmm10[1],xmm15[0]
1587 ; SSE-NEXT: movapd 976(%rdi), %xmm2
1588 ; SSE-NEXT: movsd {{.*#+}} xmm15 = xmm2[0],xmm15[1]
1589 ; SSE-NEXT: movapd 1024(%rdi), %xmm1
1590 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
1591 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1592 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
1593 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1594 ; SSE-NEXT: movapd 1040(%rdi), %xmm8
1595 ; SSE-NEXT: movapd 1072(%rdi), %xmm0
1596 ; SSE-NEXT: movapd %xmm0, %xmm9
1597 ; SSE-NEXT: movsd {{.*#+}} xmm9 = xmm8[0],xmm9[1]
1598 ; SSE-NEXT: movapd 1088(%rdi), %xmm11
1599 ; SSE-NEXT: shufpd {{.*#+}} xmm8 = xmm8[1],xmm11[0]
1600 ; SSE-NEXT: movapd 1056(%rdi), %xmm2
1601 ; SSE-NEXT: movsd {{.*#+}} xmm11 = xmm2[0],xmm11[1]
1602 ; SSE-NEXT: movapd 1104(%rdi), %xmm1
1603 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
1604 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1605 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
1606 ; SSE-NEXT: movapd %xmm1, (%rsp) # 16-byte Spill
1607 ; SSE-NEXT: movapd 1120(%rdi), %xmm5
1608 ; SSE-NEXT: movapd 1152(%rdi), %xmm1
1609 ; SSE-NEXT: movapd %xmm1, %xmm3
1610 ; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm5[0],xmm3[1]
1611 ; SSE-NEXT: movapd 1168(%rdi), %xmm6
1612 ; SSE-NEXT: shufpd {{.*#+}} xmm5 = xmm5[1],xmm6[0]
1613 ; SSE-NEXT: movapd 1136(%rdi), %xmm12
1614 ; SSE-NEXT: movsd {{.*#+}} xmm6 = xmm12[0],xmm6[1]
1615 ; SSE-NEXT: movapd 1184(%rdi), %xmm0
1616 ; SSE-NEXT: shufpd {{.*#+}} xmm12 = xmm12[1],xmm0[0]
1617 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
1618 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1619 ; SSE-NEXT: movapd 1200(%rdi), %xmm0
1620 ; SSE-NEXT: movapd 1232(%rdi), %xmm2
1621 ; SSE-NEXT: movapd %xmm2, %xmm1
1622 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
1623 ; SSE-NEXT: movapd 1248(%rdi), %xmm4
1624 ; SSE-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1],xmm4[0]
1625 ; SSE-NEXT: movapd 1216(%rdi), %xmm7
1626 ; SSE-NEXT: movsd {{.*#+}} xmm4 = xmm7[0],xmm4[1]
1627 ; SSE-NEXT: movapd 1264(%rdi), %xmm13
1628 ; SSE-NEXT: shufpd {{.*#+}} xmm7 = xmm7[1],xmm13[0]
1629 ; SSE-NEXT: movsd {{.*#+}} xmm13 = xmm2[0],xmm13[1]
1630 ; SSE-NEXT: movapd %xmm3, 224(%rsi)
1631 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
1632 ; SSE-NEXT: movaps %xmm2, 160(%rsi)
1633 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
1634 ; SSE-NEXT: movaps %xmm2, 96(%rsi)
1635 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
1636 ; SSE-NEXT: movaps %xmm2, 32(%rsi)
1637 ; SSE-NEXT: movapd %xmm1, 240(%rsi)
1638 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1639 ; SSE-NEXT: movaps %xmm1, 176(%rsi)
1640 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1641 ; SSE-NEXT: movaps %xmm1, 112(%rsi)
1642 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1643 ; SSE-NEXT: movaps %xmm1, 48(%rsi)
1644 ; SSE-NEXT: movapd %xmm14, 192(%rsi)
1645 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1646 ; SSE-NEXT: movaps %xmm1, 128(%rsi)
1647 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1648 ; SSE-NEXT: movaps %xmm1, 64(%rsi)
1649 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1650 ; SSE-NEXT: movaps %xmm1, (%rsi)
1651 ; SSE-NEXT: movapd %xmm9, 208(%rsi)
1652 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1653 ; SSE-NEXT: movaps %xmm1, 144(%rsi)
1654 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1655 ; SSE-NEXT: movaps %xmm1, 80(%rsi)
1656 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
1657 ; SSE-NEXT: movaps %xmm1, 16(%rsi)
1658 ; SSE-NEXT: movapd %xmm5, 224(%rdx)
1659 ; SSE-NEXT: movapd %xmm0, 240(%rdx)
1660 ; SSE-NEXT: movapd %xmm10, 192(%rdx)
1661 ; SSE-NEXT: movapd %xmm8, 208(%rdx)
1662 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1663 ; SSE-NEXT: movaps %xmm0, 160(%rdx)
1664 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1665 ; SSE-NEXT: movaps %xmm0, 176(%rdx)
1666 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1667 ; SSE-NEXT: movaps %xmm0, 128(%rdx)
1668 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1669 ; SSE-NEXT: movaps %xmm0, 144(%rdx)
1670 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1671 ; SSE-NEXT: movaps %xmm0, 96(%rdx)
1672 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1673 ; SSE-NEXT: movaps %xmm0, 112(%rdx)
1674 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1675 ; SSE-NEXT: movaps %xmm0, 64(%rdx)
1676 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1677 ; SSE-NEXT: movaps %xmm0, 80(%rdx)
1678 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1679 ; SSE-NEXT: movaps %xmm0, 32(%rdx)
1680 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1681 ; SSE-NEXT: movaps %xmm0, 48(%rdx)
1682 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1683 ; SSE-NEXT: movaps %xmm0, (%rdx)
1684 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1685 ; SSE-NEXT: movaps %xmm0, 16(%rdx)
1686 ; SSE-NEXT: movapd %xmm4, 240(%rcx)
1687 ; SSE-NEXT: movapd %xmm6, 224(%rcx)
1688 ; SSE-NEXT: movapd %xmm11, 208(%rcx)
1689 ; SSE-NEXT: movapd %xmm15, 192(%rcx)
1690 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1691 ; SSE-NEXT: movaps %xmm0, 176(%rcx)
1692 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1693 ; SSE-NEXT: movaps %xmm0, 160(%rcx)
1694 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1695 ; SSE-NEXT: movaps %xmm0, 144(%rcx)
1696 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1697 ; SSE-NEXT: movaps %xmm0, 128(%rcx)
1698 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1699 ; SSE-NEXT: movaps %xmm0, 112(%rcx)
1700 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1701 ; SSE-NEXT: movaps %xmm0, 96(%rcx)
1702 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1703 ; SSE-NEXT: movaps %xmm0, 80(%rcx)
1704 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1705 ; SSE-NEXT: movaps %xmm0, 64(%rcx)
1706 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1707 ; SSE-NEXT: movaps %xmm0, 48(%rcx)
1708 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1709 ; SSE-NEXT: movaps %xmm0, 32(%rcx)
1710 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1711 ; SSE-NEXT: movaps %xmm0, 16(%rcx)
1712 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1713 ; SSE-NEXT: movaps %xmm0, (%rcx)
1714 ; SSE-NEXT: movapd %xmm7, 240(%r8)
1715 ; SSE-NEXT: movapd %xmm12, 224(%r8)
1716 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1717 ; SSE-NEXT: movaps %xmm0, 208(%r8)
1718 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1719 ; SSE-NEXT: movaps %xmm0, 192(%r8)
1720 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1721 ; SSE-NEXT: movaps %xmm0, 176(%r8)
1722 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1723 ; SSE-NEXT: movaps %xmm0, 160(%r8)
1724 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1725 ; SSE-NEXT: movaps %xmm0, 144(%r8)
1726 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1727 ; SSE-NEXT: movaps %xmm0, 128(%r8)
1728 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1729 ; SSE-NEXT: movaps %xmm0, 112(%r8)
1730 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1731 ; SSE-NEXT: movaps %xmm0, 96(%r8)
1732 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1733 ; SSE-NEXT: movaps %xmm0, 80(%r8)
1734 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1735 ; SSE-NEXT: movaps %xmm0, 64(%r8)
1736 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1737 ; SSE-NEXT: movaps %xmm0, 48(%r8)
1738 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1739 ; SSE-NEXT: movaps %xmm0, 32(%r8)
1740 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1741 ; SSE-NEXT: movaps %xmm0, 16(%r8)
1742 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1743 ; SSE-NEXT: movaps %xmm0, (%r8)
1744 ; SSE-NEXT: movapd %xmm13, 240(%r9)
1745 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1746 ; SSE-NEXT: movaps %xmm0, 224(%r9)
1747 ; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
1748 ; SSE-NEXT: movaps %xmm0, 208(%r9)
1749 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1750 ; SSE-NEXT: movaps %xmm0, 192(%r9)
1751 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1752 ; SSE-NEXT: movaps %xmm0, 176(%r9)
1753 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1754 ; SSE-NEXT: movaps %xmm0, 160(%r9)
1755 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1756 ; SSE-NEXT: movaps %xmm0, 144(%r9)
1757 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1758 ; SSE-NEXT: movaps %xmm0, 128(%r9)
1759 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1760 ; SSE-NEXT: movaps %xmm0, 112(%r9)
1761 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1762 ; SSE-NEXT: movaps %xmm0, 96(%r9)
1763 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1764 ; SSE-NEXT: movaps %xmm0, 80(%r9)
1765 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1766 ; SSE-NEXT: movaps %xmm0, 64(%r9)
1767 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1768 ; SSE-NEXT: movaps %xmm0, 48(%r9)
1769 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1770 ; SSE-NEXT: movaps %xmm0, 32(%r9)
1771 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1772 ; SSE-NEXT: movaps %xmm0, 16(%r9)
1773 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
1774 ; SSE-NEXT: movaps %xmm0, (%r9)
1775 ; SSE-NEXT: addq $920, %rsp # imm = 0x398
1776 ; SSE-NEXT: retq
1778 ; AVX1-ONLY-LABEL: load_i64_stride5_vf32:
1779 ; AVX1-ONLY: # %bb.0:
1780 ; AVX1-ONLY-NEXT: subq $1368, %rsp # imm = 0x558
1781 ; AVX1-ONLY-NEXT: vmovapd 896(%rdi), %ymm3
1782 ; AVX1-ONLY-NEXT: vmovupd %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1783 ; AVX1-ONLY-NEXT: vmovapd 864(%rdi), %ymm7
1784 ; AVX1-ONLY-NEXT: vmovapd 576(%rdi), %ymm4
1785 ; AVX1-ONLY-NEXT: vmovupd %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1786 ; AVX1-ONLY-NEXT: vmovapd 544(%rdi), %ymm2
1787 ; AVX1-ONLY-NEXT: vmovapd 256(%rdi), %ymm0
1788 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1789 ; AVX1-ONLY-NEXT: vmovapd 224(%rdi), %ymm5
1790 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm5[0,1,2],ymm0[3]
1791 ; AVX1-ONLY-NEXT: vmovapd 192(%rdi), %xmm1
1792 ; AVX1-ONLY-NEXT: vmovapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1793 ; AVX1-ONLY-NEXT: vmovapd 160(%rdi), %xmm9
1794 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} xmm1 = xmm9[0],xmm1[1]
1795 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
1796 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1797 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm2[0,1,2],ymm4[3]
1798 ; AVX1-ONLY-NEXT: vmovapd 512(%rdi), %xmm1
1799 ; AVX1-ONLY-NEXT: vmovapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1800 ; AVX1-ONLY-NEXT: vmovapd 480(%rdi), %xmm6
1801 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} xmm1 = xmm6[0],xmm1[1]
1802 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
1803 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1804 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm7[0,1,2],ymm3[3]
1805 ; AVX1-ONLY-NEXT: vmovapd 832(%rdi), %xmm1
1806 ; AVX1-ONLY-NEXT: vmovapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1807 ; AVX1-ONLY-NEXT: vmovapd 800(%rdi), %xmm10
1808 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} xmm1 = xmm10[0],xmm1[1]
1809 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
1810 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1811 ; AVX1-ONLY-NEXT: vmovapd 1216(%rdi), %ymm0
1812 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1813 ; AVX1-ONLY-NEXT: vmovapd 1184(%rdi), %ymm8
1814 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm8[0,1,2],ymm0[3]
1815 ; AVX1-ONLY-NEXT: vmovapd 1152(%rdi), %xmm1
1816 ; AVX1-ONLY-NEXT: vmovapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1817 ; AVX1-ONLY-NEXT: vmovapd 1120(%rdi), %xmm11
1818 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} xmm1 = xmm11[0],xmm1[1]
1819 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
1820 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1821 ; AVX1-ONLY-NEXT: vmovapd 96(%rdi), %ymm0
1822 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1823 ; AVX1-ONLY-NEXT: vmovapd 64(%rdi), %ymm4
1824 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm4[0,1,2],ymm0[3]
1825 ; AVX1-ONLY-NEXT: vmovapd (%rdi), %xmm13
1826 ; AVX1-ONLY-NEXT: vmovapd 32(%rdi), %xmm1
1827 ; AVX1-ONLY-NEXT: vmovapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1828 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} xmm1 = xmm13[0],xmm1[1]
1829 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
1830 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1831 ; AVX1-ONLY-NEXT: vmovaps 416(%rdi), %ymm0
1832 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1833 ; AVX1-ONLY-NEXT: vmovaps 384(%rdi), %ymm1
1834 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1835 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
1836 ; AVX1-ONLY-NEXT: vmovaps 352(%rdi), %xmm1
1837 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1838 ; AVX1-ONLY-NEXT: vmovaps 320(%rdi), %xmm12
1839 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm12[0,1],xmm1[2,3]
1840 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
1841 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1842 ; AVX1-ONLY-NEXT: vmovaps 736(%rdi), %ymm0
1843 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1844 ; AVX1-ONLY-NEXT: vmovaps 704(%rdi), %ymm1
1845 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1846 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
1847 ; AVX1-ONLY-NEXT: vmovaps 672(%rdi), %xmm1
1848 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1849 ; AVX1-ONLY-NEXT: vmovaps 640(%rdi), %xmm14
1850 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm14[0,1],xmm1[2,3]
1851 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
1852 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1853 ; AVX1-ONLY-NEXT: vmovaps 1056(%rdi), %ymm0
1854 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1855 ; AVX1-ONLY-NEXT: vmovaps 1024(%rdi), %ymm1
1856 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1857 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
1858 ; AVX1-ONLY-NEXT: vmovaps 992(%rdi), %xmm1
1859 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1860 ; AVX1-ONLY-NEXT: vmovaps 960(%rdi), %xmm15
1861 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm15[0,1],xmm1[2,3]
1862 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
1863 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1864 ; AVX1-ONLY-NEXT: vmovaps 288(%rdi), %xmm1
1865 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
1866 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm5[0],ymm0[0],ymm5[3],ymm0[2]
1867 ; AVX1-ONLY-NEXT: vmovdqa 208(%rdi), %xmm5
1868 ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm3 = xmm9[8,9,10,11,12,13,14,15],xmm5[0,1,2,3,4,5,6,7]
1869 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm3[0,1],ymm0[2,3]
1870 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1871 ; AVX1-ONLY-NEXT: vmovaps 608(%rdi), %xmm9
1872 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm9, %ymm0, %ymm0
1873 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm2[0],ymm0[0],ymm2[3],ymm0[2]
1874 ; AVX1-ONLY-NEXT: vmovdqa 528(%rdi), %xmm2
1875 ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm3 = xmm6[8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7]
1876 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm3[0,1],ymm0[2,3]
1877 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1878 ; AVX1-ONLY-NEXT: vmovaps 928(%rdi), %xmm0
1879 ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1880 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
1881 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm7[0],ymm0[0],ymm7[3],ymm0[2]
1882 ; AVX1-ONLY-NEXT: vmovdqa 848(%rdi), %xmm3
1883 ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm6 = xmm10[8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7]
1884 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm6[0,1],ymm0[2,3]
1885 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1886 ; AVX1-ONLY-NEXT: vmovaps 1248(%rdi), %xmm0
1887 ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1888 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm6
1889 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm6 = ymm8[0],ymm6[0],ymm8[3],ymm6[2]
1890 ; AVX1-ONLY-NEXT: vmovdqa 1168(%rdi), %xmm7
1891 ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm8 = xmm11[8,9,10,11,12,13,14,15],xmm7[0,1,2,3,4,5,6,7]
1892 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm6 = ymm8[0,1],ymm6[2,3]
1893 ; AVX1-ONLY-NEXT: vmovupd %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1894 ; AVX1-ONLY-NEXT: vmovaps 128(%rdi), %xmm8
1895 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm8, %ymm0, %ymm6
1896 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm4 = ymm4[0],ymm6[0],ymm4[3],ymm6[2]
1897 ; AVX1-ONLY-NEXT: vmovdqa 48(%rdi), %xmm0
1898 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1899 ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm6 = xmm13[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
1900 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm4 = ymm6[0,1],ymm4[2,3]
1901 ; AVX1-ONLY-NEXT: vmovupd %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1902 ; AVX1-ONLY-NEXT: vmovaps 448(%rdi), %xmm10
1903 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm10, %ymm0, %ymm4
1904 ; AVX1-ONLY-NEXT: vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
1905 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm4 = ymm6[0],ymm4[0],ymm6[3],ymm4[2]
1906 ; AVX1-ONLY-NEXT: vmovdqa 368(%rdi), %xmm11
1907 ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm6 = xmm12[8,9,10,11,12,13,14,15],xmm11[0,1,2,3,4,5,6,7]
1908 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm4 = ymm6[0,1],ymm4[2,3]
1909 ; AVX1-ONLY-NEXT: vmovupd %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1910 ; AVX1-ONLY-NEXT: vmovaps 768(%rdi), %xmm12
1911 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm12, %ymm0, %ymm4
1912 ; AVX1-ONLY-NEXT: vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
1913 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm4 = ymm6[0],ymm4[0],ymm6[3],ymm4[2]
1914 ; AVX1-ONLY-NEXT: vmovdqa 688(%rdi), %xmm13
1915 ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm6 = xmm14[8,9,10,11,12,13,14,15],xmm13[0,1,2,3,4,5,6,7]
1916 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm4 = ymm6[0,1],ymm4[2,3]
1917 ; AVX1-ONLY-NEXT: vmovupd %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1918 ; AVX1-ONLY-NEXT: vmovaps 1088(%rdi), %xmm14
1919 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm14, %ymm0, %ymm4
1920 ; AVX1-ONLY-NEXT: vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
1921 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm4 = ymm0[0],ymm4[0],ymm0[3],ymm4[2]
1922 ; AVX1-ONLY-NEXT: vmovdqa 1008(%rdi), %xmm0
1923 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1924 ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm6 = xmm15[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
1925 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm4 = ymm6[0,1],ymm4[2,3]
1926 ; AVX1-ONLY-NEXT: vmovupd %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1927 ; AVX1-ONLY-NEXT: vmovaps 256(%rdi), %xmm4
1928 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm4[0,1],xmm1[2,3]
1929 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
1930 ; AVX1-ONLY-NEXT: vmovdqa 176(%rdi), %xmm15
1931 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm5 = xmm15[0,1,2,3],xmm5[4,5,6,7]
1932 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm5[0,1,2,3],ymm1[4,5,6,7]
1933 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1934 ; AVX1-ONLY-NEXT: vmovaps 576(%rdi), %xmm5
1935 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm5[0,1],xmm9[2,3]
1936 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
1937 ; AVX1-ONLY-NEXT: vmovdqa 496(%rdi), %xmm0
1938 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1939 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm2 = xmm0[0,1,2,3],xmm2[4,5,6,7]
1940 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
1941 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1942 ; AVX1-ONLY-NEXT: vmovaps 896(%rdi), %xmm6
1943 ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm1 # 16-byte Folded Reload
1944 ; AVX1-ONLY-NEXT: # xmm1 = xmm6[0,1],mem[2,3]
1945 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
1946 ; AVX1-ONLY-NEXT: vmovdqa 816(%rdi), %xmm0
1947 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1948 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm2 = xmm0[0,1,2,3],xmm3[4,5,6,7]
1949 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
1950 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1951 ; AVX1-ONLY-NEXT: vmovaps 1216(%rdi), %xmm3
1952 ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm0 # 16-byte Folded Reload
1953 ; AVX1-ONLY-NEXT: # xmm0 = xmm3[0,1],mem[2,3]
1954 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
1955 ; AVX1-ONLY-NEXT: vmovdqa 1136(%rdi), %xmm9
1956 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm1 = xmm9[0,1,2,3],xmm7[4,5,6,7]
1957 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
1958 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1959 ; AVX1-ONLY-NEXT: vmovaps 96(%rdi), %xmm0
1960 ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1961 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm8[2,3]
1962 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
1963 ; AVX1-ONLY-NEXT: vmovaps 16(%rdi), %xmm1
1964 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1965 ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
1966 ; AVX1-ONLY-NEXT: # xmm1 = xmm1[0,1],mem[2,3]
1967 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
1968 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1969 ; AVX1-ONLY-NEXT: vmovaps 416(%rdi), %xmm0
1970 ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1971 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm10[2,3]
1972 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
1973 ; AVX1-ONLY-NEXT: vmovdqa 336(%rdi), %xmm1
1974 ; AVX1-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
1975 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm11[4,5,6,7]
1976 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
1977 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1978 ; AVX1-ONLY-NEXT: vmovaps 736(%rdi), %xmm2
1979 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm2[0,1],xmm12[2,3]
1980 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
1981 ; AVX1-ONLY-NEXT: vmovdqa 656(%rdi), %xmm12
1982 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm1 = xmm12[0,1,2,3],xmm13[4,5,6,7]
1983 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
1984 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1985 ; AVX1-ONLY-NEXT: vmovaps 1056(%rdi), %xmm0
1986 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm0[0,1],xmm14[2,3]
1987 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm7
1988 ; AVX1-ONLY-NEXT: vmovaps 976(%rdi), %xmm1
1989 ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm8 # 16-byte Folded Reload
1990 ; AVX1-ONLY-NEXT: # xmm8 = xmm1[0,1],mem[2,3]
1991 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5,6,7]
1992 ; AVX1-ONLY-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
1993 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm4
1994 ; AVX1-ONLY-NEXT: vmovapd 288(%rdi), %ymm13
1995 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm4 = ymm4[0],ymm13[0],ymm4[3],ymm13[2]
1996 ; AVX1-ONLY-NEXT: vmovdqa 224(%rdi), %xmm10
1997 ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm8 = xmm15[8,9,10,11,12,13,14,15],xmm10[0,1,2,3,4,5,6,7]
1998 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm4 = ymm8[0,1],ymm4[2,3]
1999 ; AVX1-ONLY-NEXT: vmovupd %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2000 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm4
2001 ; AVX1-ONLY-NEXT: vmovapd 608(%rdi), %ymm14
2002 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm5 = ymm4[0],ymm14[0],ymm4[3],ymm14[2]
2003 ; AVX1-ONLY-NEXT: vmovdqa 544(%rdi), %xmm11
2004 ; AVX1-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11, %xmm8 # 16-byte Folded Reload
2005 ; AVX1-ONLY-NEXT: # xmm8 = mem[8,9,10,11,12,13,14,15],xmm11[0,1,2,3,4,5,6,7]
2006 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm4 = ymm8[0,1],ymm5[2,3]
2007 ; AVX1-ONLY-NEXT: vmovupd %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2008 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm5
2009 ; AVX1-ONLY-NEXT: vmovapd 928(%rdi), %ymm4
2010 ; AVX1-ONLY-NEXT: vmovupd %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2011 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm5 = ymm5[0],ymm4[0],ymm5[3],ymm4[2]
2012 ; AVX1-ONLY-NEXT: vmovdqa 864(%rdi), %xmm8
2013 ; AVX1-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm6 # 16-byte Folded Reload
2014 ; AVX1-ONLY-NEXT: # xmm6 = mem[8,9,10,11,12,13,14,15],xmm8[0,1,2,3,4,5,6,7]
2015 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm4 = ymm6[0,1],ymm5[2,3]
2016 ; AVX1-ONLY-NEXT: vmovupd %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2017 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
2018 ; AVX1-ONLY-NEXT: vmovapd 1248(%rdi), %ymm4
2019 ; AVX1-ONLY-NEXT: vmovupd %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2020 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm3 = ymm3[0],ymm4[0],ymm3[3],ymm4[2]
2021 ; AVX1-ONLY-NEXT: vmovdqa 1184(%rdi), %xmm15
2022 ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm6 = xmm9[8,9,10,11,12,13,14,15],xmm15[0,1,2,3,4,5,6,7]
2023 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm3 = ymm6[0,1],ymm3[2,3]
2024 ; AVX1-ONLY-NEXT: vmovupd %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2025 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
2026 ; AVX1-ONLY-NEXT: vmovapd 1088(%rdi), %ymm7
2027 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[0],ymm7[0],ymm0[3],ymm7[2]
2028 ; AVX1-ONLY-NEXT: vmovdqa 1024(%rdi), %xmm9
2029 ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm1 = xmm1[8,9,10,11,12,13,14,15],xmm9[0,1,2,3,4,5,6,7]
2030 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
2031 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2032 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm1
2033 ; AVX1-ONLY-NEXT: vmovapd 768(%rdi), %ymm5
2034 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[0],ymm5[0],ymm1[3],ymm5[2]
2035 ; AVX1-ONLY-NEXT: vmovdqa 704(%rdi), %xmm4
2036 ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm6 = xmm12[8,9,10,11,12,13,14,15],xmm4[0,1,2,3,4,5,6,7]
2037 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm6[0,1],ymm1[2,3]
2038 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2039 ; AVX1-ONLY-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 16-byte Folded Reload
2040 ; AVX1-ONLY-NEXT: vmovapd 448(%rdi), %ymm3
2041 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[0],ymm3[0],ymm1[3],ymm3[2]
2042 ; AVX1-ONLY-NEXT: vmovdqa 384(%rdi), %xmm2
2043 ; AVX1-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm6 # 16-byte Folded Reload
2044 ; AVX1-ONLY-NEXT: # xmm6 = mem[8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7]
2045 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm6[0,1],ymm1[2,3]
2046 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2047 ; AVX1-ONLY-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 16-byte Folded Reload
2048 ; AVX1-ONLY-NEXT: vmovapd 128(%rdi), %ymm12
2049 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[0],ymm12[0],ymm1[3],ymm12[2]
2050 ; AVX1-ONLY-NEXT: vmovdqa 64(%rdi), %xmm0
2051 ; AVX1-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm6 # 16-byte Folded Reload
2052 ; AVX1-ONLY-NEXT: # xmm6 = mem[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
2053 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm6 = ymm6[0,1],ymm1[2,3]
2054 ; AVX1-ONLY-NEXT: vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm1 # 32-byte Folded Reload
2055 ; AVX1-ONLY-NEXT: # ymm1 = mem[0,1,2],ymm12[3]
2056 ; AVX1-ONLY-NEXT: vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
2057 ; AVX1-ONLY-NEXT: # xmm0 = mem[0,1,2,3],xmm0[4,5,6,7]
2058 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
2059 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2060 ; AVX1-ONLY-NEXT: vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm0 # 32-byte Folded Reload
2061 ; AVX1-ONLY-NEXT: # ymm0 = mem[0,1,2],ymm13[3]
2062 ; AVX1-ONLY-NEXT: vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10, %xmm1 # 16-byte Folded Reload
2063 ; AVX1-ONLY-NEXT: # xmm1 = mem[0,1,2,3],xmm10[4,5,6,7]
2064 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm12 = ymm1[0,1],ymm0[2,3]
2065 ; AVX1-ONLY-NEXT: vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm0 # 32-byte Folded Reload
2066 ; AVX1-ONLY-NEXT: # ymm0 = mem[0,1,2],ymm3[3]
2067 ; AVX1-ONLY-NEXT: vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm1 # 16-byte Folded Reload
2068 ; AVX1-ONLY-NEXT: # xmm1 = mem[0,1,2,3],xmm2[4,5,6,7]
2069 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm10 = ymm1[0,1],ymm0[2,3]
2070 ; AVX1-ONLY-NEXT: vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm0 # 32-byte Folded Reload
2071 ; AVX1-ONLY-NEXT: # ymm0 = mem[0,1,2],ymm14[3]
2072 ; AVX1-ONLY-NEXT: vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11, %xmm1 # 16-byte Folded Reload
2073 ; AVX1-ONLY-NEXT: # xmm1 = mem[0,1,2,3],xmm11[4,5,6,7]
2074 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0,1],ymm0[2,3]
2075 ; AVX1-ONLY-NEXT: vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm0 # 32-byte Folded Reload
2076 ; AVX1-ONLY-NEXT: # ymm0 = mem[0,1,2],ymm5[3]
2077 ; AVX1-ONLY-NEXT: vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm2 # 16-byte Folded Reload
2078 ; AVX1-ONLY-NEXT: # xmm2 = mem[0,1,2,3],xmm4[4,5,6,7]
2079 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm3 = ymm2[0,1],ymm0[2,3]
2080 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2081 ; AVX1-ONLY-NEXT: vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
2082 ; AVX1-ONLY-NEXT: # ymm0 = mem[0,1,2,3,4,5],ymm0[6,7]
2083 ; AVX1-ONLY-NEXT: vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm2 # 16-byte Folded Reload
2084 ; AVX1-ONLY-NEXT: # xmm2 = mem[0,1,2,3],xmm8[4,5,6,7]
2085 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm11 = ymm2[0,1,2,3],ymm0[4,5,6,7]
2086 ; AVX1-ONLY-NEXT: vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm0 # 32-byte Folded Reload
2087 ; AVX1-ONLY-NEXT: # ymm0 = mem[0,1,2],ymm7[3]
2088 ; AVX1-ONLY-NEXT: vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9, %xmm2 # 16-byte Folded Reload
2089 ; AVX1-ONLY-NEXT: # xmm2 = mem[0,1,2,3],xmm9[4,5,6,7]
2090 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm2 = ymm2[0,1],ymm0[2,3]
2091 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2092 ; AVX1-ONLY-NEXT: vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
2093 ; AVX1-ONLY-NEXT: # ymm0 = mem[0,1,2,3,4,5],ymm0[6,7]
2094 ; AVX1-ONLY-NEXT: vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15, %xmm4 # 16-byte Folded Reload
2095 ; AVX1-ONLY-NEXT: # xmm4 = mem[0,1,2,3],xmm15[4,5,6,7]
2096 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm4[0,1,2,3],ymm0[4,5,6,7]
2097 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2098 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 192(%rsi)
2099 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2100 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 128(%rsi)
2101 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2102 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 64(%rsi)
2103 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2104 ; AVX1-ONLY-NEXT: vmovaps %ymm4, (%rsi)
2105 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2106 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 224(%rsi)
2107 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2108 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 160(%rsi)
2109 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2110 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 96(%rsi)
2111 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2112 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 32(%rsi)
2113 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2114 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 192(%rdx)
2115 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2116 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 128(%rdx)
2117 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2118 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 64(%rdx)
2119 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2120 ; AVX1-ONLY-NEXT: vmovaps %ymm4, (%rdx)
2121 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2122 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 224(%rdx)
2123 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2124 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 160(%rdx)
2125 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2126 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 96(%rdx)
2127 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2128 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 32(%rdx)
2129 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2130 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 192(%rcx)
2131 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2132 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 128(%rcx)
2133 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2134 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 64(%rcx)
2135 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2136 ; AVX1-ONLY-NEXT: vmovaps %ymm4, (%rcx)
2137 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2138 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 224(%rcx)
2139 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2140 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 160(%rcx)
2141 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2142 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 96(%rcx)
2143 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2144 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 32(%rcx)
2145 ; AVX1-ONLY-NEXT: vmovapd %ymm6, (%r8)
2146 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2147 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 64(%r8)
2148 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2149 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 128(%r8)
2150 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2151 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 192(%r8)
2152 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2153 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 224(%r8)
2154 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2155 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 160(%r8)
2156 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2157 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 96(%r8)
2158 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
2159 ; AVX1-ONLY-NEXT: vmovaps %ymm4, 32(%r8)
2160 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 224(%r9)
2161 ; AVX1-ONLY-NEXT: vmovapd %ymm2, 192(%r9)
2162 ; AVX1-ONLY-NEXT: vmovaps %ymm11, 160(%r9)
2163 ; AVX1-ONLY-NEXT: vmovapd %ymm3, 128(%r9)
2164 ; AVX1-ONLY-NEXT: vmovapd %ymm1, 96(%r9)
2165 ; AVX1-ONLY-NEXT: vmovapd %ymm10, 64(%r9)
2166 ; AVX1-ONLY-NEXT: vmovapd %ymm12, 32(%r9)
2167 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2168 ; AVX1-ONLY-NEXT: vmovaps %ymm0, (%r9)
2169 ; AVX1-ONLY-NEXT: addq $1368, %rsp # imm = 0x558
2170 ; AVX1-ONLY-NEXT: vzeroupper
2171 ; AVX1-ONLY-NEXT: retq
2172 ;
2173 ; AVX2-ONLY-LABEL: load_i64_stride5_vf32:
2174 ; AVX2-ONLY: # %bb.0:
2175 ; AVX2-ONLY-NEXT: subq $1464, %rsp # imm = 0x5B8
2176 ; AVX2-ONLY-NEXT: vmovdqa 896(%rdi), %ymm1
2177 ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2178 ; AVX2-ONLY-NEXT: vmovdqa 864(%rdi), %ymm4
2179 ; AVX2-ONLY-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2180 ; AVX2-ONLY-NEXT: vmovdqa 576(%rdi), %ymm3
2181 ; AVX2-ONLY-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2182 ; AVX2-ONLY-NEXT: vmovdqa 544(%rdi), %ymm6
2183 ; AVX2-ONLY-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2184 ; AVX2-ONLY-NEXT: vmovdqa 256(%rdi), %ymm0
2185 ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2186 ; AVX2-ONLY-NEXT: vmovdqa 224(%rdi), %ymm5
2187 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm5[0,1,2,3,4,5],ymm0[6,7]
2188 ; AVX2-ONLY-NEXT: vmovdqa 192(%rdi), %xmm2
2189 ; AVX2-ONLY-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2190 ; AVX2-ONLY-NEXT: vmovdqa 160(%rdi), %xmm14
2191 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm2 = xmm14[0,1],xmm2[2,3]
2192 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
2193 ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2194 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm6[0,1,2,3,4,5],ymm3[6,7]
2195 ; AVX2-ONLY-NEXT: vmovdqa 512(%rdi), %xmm2
2196 ; AVX2-ONLY-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2197 ; AVX2-ONLY-NEXT: vmovdqa 480(%rdi), %xmm10
2198 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm2 = xmm10[0,1],xmm2[2,3]
2199 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
2200 ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2201 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3,4,5],ymm1[6,7]
2202 ; AVX2-ONLY-NEXT: vmovdqa 832(%rdi), %xmm1
2203 ; AVX2-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2204 ; AVX2-ONLY-NEXT: vmovdqa 800(%rdi), %xmm4
2205 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm2 = xmm4[0,1],xmm1[2,3]
2206 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
2207 ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2208 ; AVX2-ONLY-NEXT: vmovdqa 1216(%rdi), %ymm0
2209 ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2210 ; AVX2-ONLY-NEXT: vmovdqa 1184(%rdi), %ymm2
2211 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3,4,5],ymm0[6,7]
2212 ; AVX2-ONLY-NEXT: vmovdqa 1152(%rdi), %xmm1
2213 ; AVX2-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2214 ; AVX2-ONLY-NEXT: vmovdqa 1120(%rdi), %xmm12
2215 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm3 = xmm12[0,1],xmm1[2,3]
2216 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
2217 ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2218 ; AVX2-ONLY-NEXT: vmovdqa 96(%rdi), %ymm0
2219 ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2220 ; AVX2-ONLY-NEXT: vmovdqa 64(%rdi), %ymm1
2221 ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2222 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
2223 ; AVX2-ONLY-NEXT: vmovdqa (%rdi), %xmm13
2224 ; AVX2-ONLY-NEXT: vmovdqa 32(%rdi), %xmm1
2225 ; AVX2-ONLY-NEXT: vmovdqa %xmm1, (%rsp) # 16-byte Spill
2226 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm6 = xmm13[0,1],xmm1[2,3]
2227 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm6[0,1,2,3],ymm0[4,5,6,7]
2228 ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2229 ; AVX2-ONLY-NEXT: vmovdqa 416(%rdi), %ymm0
2230 ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2231 ; AVX2-ONLY-NEXT: vmovdqa 384(%rdi), %ymm6
2232 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm6[0,1,2,3,4,5],ymm0[6,7]
2233 ; AVX2-ONLY-NEXT: vmovdqa 352(%rdi), %xmm1
2234 ; AVX2-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2235 ; AVX2-ONLY-NEXT: vmovdqa 320(%rdi), %xmm15
2236 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm7 = xmm15[0,1],xmm1[2,3]
2237 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm7[0,1,2,3],ymm0[4,5,6,7]
2238 ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2239 ; AVX2-ONLY-NEXT: vmovdqa 736(%rdi), %ymm0
2240 ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2241 ; AVX2-ONLY-NEXT: vmovdqa 704(%rdi), %ymm7
2242 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm8 = ymm7[0,1,2,3,4,5],ymm0[6,7]
2243 ; AVX2-ONLY-NEXT: vmovdqa 672(%rdi), %xmm0
2244 ; AVX2-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2245 ; AVX2-ONLY-NEXT: vmovdqa 640(%rdi), %xmm3
2246 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm9 = xmm3[0,1],xmm0[2,3]
2247 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm9[0,1,2,3],ymm8[4,5,6,7]
2248 ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2249 ; AVX2-ONLY-NEXT: vmovdqa 1056(%rdi), %ymm0
2250 ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2251 ; AVX2-ONLY-NEXT: vmovdqa 1024(%rdi), %ymm8
2252 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm1 = ymm8[0,1,2,3,4,5],ymm0[6,7]
2253 ; AVX2-ONLY-NEXT: vmovdqa 992(%rdi), %xmm0
2254 ; AVX2-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2255 ; AVX2-ONLY-NEXT: vmovdqa 960(%rdi), %xmm9
2256 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm11 = xmm9[0,1],xmm0[2,3]
2257 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm11[0,1,2,3],ymm1[4,5,6,7]
2258 ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2259 ; AVX2-ONLY-NEXT: vmovdqa 208(%rdi), %xmm0
2260 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm0 = xmm14[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
2261 ; AVX2-ONLY-NEXT: vmovdqa 288(%rdi), %ymm1
2262 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm5 = ymm5[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],ymm5[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23]
2263 ; AVX2-ONLY-NEXT: vmovdqa %ymm1, %ymm14
2264 ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2265 ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm5 = ymm5[0,1,2,1]
2266 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm5[4,5,6,7]
2267 ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2268 ; AVX2-ONLY-NEXT: vmovdqa 528(%rdi), %xmm0
2269 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm0 = xmm10[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
2270 ; AVX2-ONLY-NEXT: vmovdqa 608(%rdi), %ymm1
2271 ; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm5 # 32-byte Folded Reload
2272 ; AVX2-ONLY-NEXT: # ymm5 = mem[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23]
2273 ; AVX2-ONLY-NEXT: vmovdqa %ymm1, %ymm11
2274 ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2275 ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm5 = ymm5[0,1,2,1]
2276 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm5[4,5,6,7]
2277 ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2278 ; AVX2-ONLY-NEXT: vmovdqa 848(%rdi), %xmm0
2279 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm0 = xmm4[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
2280 ; AVX2-ONLY-NEXT: vmovdqa 928(%rdi), %ymm1
2281 ; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm4 # 32-byte Folded Reload
2282 ; AVX2-ONLY-NEXT: # ymm4 = mem[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23]
2283 ; AVX2-ONLY-NEXT: vmovdqa %ymm1, %ymm10
2284 ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2285 ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,1,2,1]
2286 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm4[4,5,6,7]
2287 ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2288 ; AVX2-ONLY-NEXT: vmovdqa 1168(%rdi), %xmm0
2289 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm0 = xmm12[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
2290 ; AVX2-ONLY-NEXT: vmovdqa 1248(%rdi), %ymm1
2291 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm2 = ymm2[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],ymm2[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23]
2292 ; AVX2-ONLY-NEXT: vmovdqa %ymm1, %ymm5
2293 ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2294 ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,2,1]
2295 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
2296 ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2297 ; AVX2-ONLY-NEXT: vmovdqa 48(%rdi), %xmm0
2298 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm0 = xmm13[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
2299 ; AVX2-ONLY-NEXT: vmovdqa 128(%rdi), %ymm12
2300 ; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm2 # 32-byte Folded Reload
2301 ; AVX2-ONLY-NEXT: # ymm2 = mem[8,9,10,11,12,13,14,15],ymm12[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm12[16,17,18,19,20,21,22,23]
2302 ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,2,1]
2303 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
2304 ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2305 ; AVX2-ONLY-NEXT: vmovdqa 368(%rdi), %xmm0
2306 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm0 = xmm15[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
2307 ; AVX2-ONLY-NEXT: vmovdqa 448(%rdi), %ymm1
2308 ; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2309 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm2 = ymm6[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],ymm6[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23]
2310 ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,2,1]
2311 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
2312 ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2313 ; AVX2-ONLY-NEXT: vmovdqa 688(%rdi), %xmm0
2314 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm0 = xmm3[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
2315 ; AVX2-ONLY-NEXT: vmovdqa 768(%rdi), %ymm2
2316 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm1 = ymm7[8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7],ymm7[24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23]
2317 ; AVX2-ONLY-NEXT: vmovdqa %ymm2, %ymm15
2318 ; AVX2-ONLY-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2319 ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,2,1]
2320 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
2321 ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2322 ; AVX2-ONLY-NEXT: vmovdqa 1008(%rdi), %xmm0
2323 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm0 = xmm9[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
2324 ; AVX2-ONLY-NEXT: vmovdqa 1088(%rdi), %ymm2
2325 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm1 = ymm8[8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7],ymm8[24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23]
2326 ; AVX2-ONLY-NEXT: vmovdqa %ymm2, %ymm8
2327 ; AVX2-ONLY-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2328 ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,2,1]
2329 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
2330 ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2331 ; AVX2-ONLY-NEXT: vmovaps 160(%rdi), %ymm0
2332 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7]
2333 ; AVX2-ONLY-NEXT: vmovaps 256(%rdi), %xmm1
2334 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
2335 ; AVX2-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
2336 ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
2337 ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2338 ; AVX2-ONLY-NEXT: vmovaps 480(%rdi), %ymm0
2339 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7]
2340 ; AVX2-ONLY-NEXT: vmovaps 576(%rdi), %xmm1
2341 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
2342 ; AVX2-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
2343 ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
2344 ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2345 ; AVX2-ONLY-NEXT: vmovaps 800(%rdi), %ymm0
2346 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7]
2347 ; AVX2-ONLY-NEXT: vmovaps 896(%rdi), %xmm1
2348 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
2349 ; AVX2-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
2350 ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
2351 ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2352 ; AVX2-ONLY-NEXT: vmovaps 1120(%rdi), %ymm0
2353 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7]
2354 ; AVX2-ONLY-NEXT: vmovaps 1216(%rdi), %xmm1
2355 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
2356 ; AVX2-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
2357 ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
2358 ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2359 ; AVX2-ONLY-NEXT: vmovaps (%rdi), %ymm0
2360 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7]
2361 ; AVX2-ONLY-NEXT: vmovaps 96(%rdi), %xmm1
2362 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
2363 ; AVX2-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
2364 ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
2365 ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2366 ; AVX2-ONLY-NEXT: vmovaps 320(%rdi), %ymm0
2367 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7]
2368 ; AVX2-ONLY-NEXT: vmovaps 416(%rdi), %xmm1
2369 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
2370 ; AVX2-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
2371 ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
2372 ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2373 ; AVX2-ONLY-NEXT: vmovaps 640(%rdi), %ymm0
2374 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7]
2375 ; AVX2-ONLY-NEXT: vmovaps 736(%rdi), %xmm1
2376 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
2377 ; AVX2-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
2378 ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
2379 ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2380 ; AVX2-ONLY-NEXT: vmovaps 960(%rdi), %ymm0
2381 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7]
2382 ; AVX2-ONLY-NEXT: vmovaps 1056(%rdi), %xmm1
2383 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
2384 ; AVX2-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
2385 ; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
2386 ; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2387 ; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm0 # 32-byte Folded Reload
2388 ; AVX2-ONLY-NEXT: # ymm0 = mem[8,9,10,11,12,13,14,15],ymm14[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm14[16,17,18,19,20,21,22,23]
2389 ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,3]
2390 ; AVX2-ONLY-NEXT: vmovdqa 224(%rdi), %xmm4
2391 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm1 = mem[8,9,10,11,12,13,14,15],xmm4[0,1,2,3,4,5,6,7]
2392 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
2393 ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2394 ; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm0 # 32-byte Folded Reload
2395 ; AVX2-ONLY-NEXT: # ymm0 = mem[8,9,10,11,12,13,14,15],ymm11[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm11[16,17,18,19,20,21,22,23]
2396 ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,3]
2397 ; AVX2-ONLY-NEXT: vmovdqa 544(%rdi), %xmm6
2398 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm2 = mem[8,9,10,11,12,13,14,15],xmm6[0,1,2,3,4,5,6,7]
2399 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
2400 ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2401 ; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm0 # 32-byte Folded Reload
2402 ; AVX2-ONLY-NEXT: # ymm0 = mem[8,9,10,11,12,13,14,15],ymm10[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm10[16,17,18,19,20,21,22,23]
2403 ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,3]
2404 ; AVX2-ONLY-NEXT: vmovdqa 864(%rdi), %xmm1
2405 ; AVX2-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2406 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm3 = mem[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
2407 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
2408 ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2409 ; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm0 # 32-byte Folded Reload
2410 ; AVX2-ONLY-NEXT: # ymm0 = mem[8,9,10,11,12,13,14,15],ymm5[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm5[16,17,18,19,20,21,22,23]
2411 ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,3]
2412 ; AVX2-ONLY-NEXT: vmovdqa 1184(%rdi), %xmm14
2413 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm7 = mem[8,9,10,11,12,13,14,15],xmm14[0,1,2,3,4,5,6,7]
2414 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm7[0,1,2,3],ymm0[4,5,6,7]
2415 ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2416 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
2417 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm0 = ymm13[8,9,10,11,12,13,14,15],ymm8[0,1,2,3,4,5,6,7],ymm13[24,25,26,27,28,29,30,31],ymm8[16,17,18,19,20,21,22,23]
2418 ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,3]
2419 ; AVX2-ONLY-NEXT: vmovdqa 1024(%rdi), %xmm11
2420 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm7 = mem[8,9,10,11,12,13,14,15],xmm11[0,1,2,3,4,5,6,7]
2421 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm7[0,1,2,3],ymm0[4,5,6,7]
2422 ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2423 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
2424 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm0 = ymm10[8,9,10,11,12,13,14,15],ymm15[0,1,2,3,4,5,6,7],ymm10[24,25,26,27,28,29,30,31],ymm15[16,17,18,19,20,21,22,23]
2425 ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm7 = ymm0[0,1,0,3]
2426 ; AVX2-ONLY-NEXT: vmovdqa 704(%rdi), %xmm9
2427 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm8 = mem[8,9,10,11,12,13,14,15],xmm9[0,1,2,3,4,5,6,7]
2428 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3],ymm7[4,5,6,7]
2429 ; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
2430 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
2431 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
2432 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm7 = ymm3[8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7],ymm3[24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23]
2433 ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,1,0,3]
2434 ; AVX2-ONLY-NEXT: vmovdqa 384(%rdi), %xmm5
2435 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm8 = mem[8,9,10,11,12,13,14,15],xmm5[0,1,2,3,4,5,6,7]
2436 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm7[4,5,6,7]
2437 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
2438 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm7 = ymm1[8,9,10,11,12,13,14,15],ymm12[0,1,2,3,4,5,6,7],ymm1[24,25,26,27,28,29,30,31],ymm12[16,17,18,19,20,21,22,23]
2439 ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,1,0,3]
2440 ; AVX2-ONLY-NEXT: vmovdqa 64(%rdi), %xmm0
2441 ; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm15 = mem[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
2442 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm7 = ymm15[0,1,2,3],ymm7[4,5,6,7]
2443 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm12 = ymm1[0,1,2,3,4,5],ymm12[6,7]
2444 ; AVX2-ONLY-NEXT: vpblendd $3, (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
2445 ; AVX2-ONLY-NEXT: # xmm0 = mem[0,1],xmm0[2,3]
2446 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm12 = ymm0[0,1,2,3],ymm12[4,5,6,7]
2447 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2448 ; AVX2-ONLY-NEXT: vpblendd $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
2449 ; AVX2-ONLY-NEXT: # ymm0 = mem[0,1,2,3,4,5],ymm0[6,7]
2450 ; AVX2-ONLY-NEXT: vpblendd $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload
2451 ; AVX2-ONLY-NEXT: # xmm4 = mem[0,1],xmm4[2,3]
2452 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm0[4,5,6,7]
2453 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm2[6,7]
2454 ; AVX2-ONLY-NEXT: vpblendd $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm5 # 16-byte Folded Reload
2455 ; AVX2-ONLY-NEXT: # xmm5 = mem[0,1],xmm5[2,3]
2456 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm0[4,5,6,7]
2457 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2458 ; AVX2-ONLY-NEXT: vpblendd $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
2459 ; AVX2-ONLY-NEXT: # ymm0 = mem[0,1,2,3,4,5],ymm0[6,7]
2460 ; AVX2-ONLY-NEXT: vpblendd $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm6 # 16-byte Folded Reload
2461 ; AVX2-ONLY-NEXT: # xmm6 = mem[0,1],xmm6[2,3]
2462 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm15 = ymm6[0,1,2,3],ymm0[4,5,6,7]
2463 ; AVX2-ONLY-NEXT: vpblendd $192, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm0 # 32-byte Folded Reload
2464 ; AVX2-ONLY-NEXT: # ymm0 = ymm10[0,1,2,3,4,5],mem[6,7]
2465 ; AVX2-ONLY-NEXT: vpblendd $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9, %xmm1 # 16-byte Folded Reload
2466 ; AVX2-ONLY-NEXT: # xmm1 = mem[0,1],xmm9[2,3]
2467 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm6 = ymm1[0,1,2,3],ymm0[4,5,6,7]
2468 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2469 ; AVX2-ONLY-NEXT: vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
2470 ; AVX2-ONLY-NEXT: # ymm0 = mem[0,1,2,3,4,5],ymm0[6,7]
2471 ; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
2472 ; AVX2-ONLY-NEXT: vblendps $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
2473 ; AVX2-ONLY-NEXT: # xmm1 = mem[0,1],xmm1[2,3]
2474 ; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm0[4,5,6,7]
2475 ; AVX2-ONLY-NEXT: vpblendd $192, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm0 # 32-byte Folded Reload
2476 ; AVX2-ONLY-NEXT: # ymm0 = ymm13[0,1,2,3,4,5],mem[6,7]
2477 ; AVX2-ONLY-NEXT: vpblendd $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11, %xmm1 # 16-byte Folded Reload
2478 ; AVX2-ONLY-NEXT: # xmm1 = mem[0,1],xmm11[2,3]
2479 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm9 = ymm1[0,1,2,3],ymm0[4,5,6,7]
2480 ; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
2481 ; AVX2-ONLY-NEXT: vpblendd $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
2482 ; AVX2-ONLY-NEXT: # ymm0 = mem[0,1,2,3,4,5],ymm0[6,7]
2483 ; AVX2-ONLY-NEXT: vpblendd $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm14, %xmm1 # 16-byte Folded Reload
2484 ; AVX2-ONLY-NEXT: # xmm1 = mem[0,1],xmm14[2,3]
2485 ; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
2486 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
2487 ; AVX2-ONLY-NEXT: vmovaps %ymm1, 192(%rsi)
2488 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
2489 ; AVX2-ONLY-NEXT: vmovaps %ymm1, 128(%rsi)
2490 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
2491 ; AVX2-ONLY-NEXT: vmovaps %ymm1, 64(%rsi)
2492 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
2493 ; AVX2-ONLY-NEXT: vmovaps %ymm1, (%rsi)
2494 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
2495 ; AVX2-ONLY-NEXT: vmovaps %ymm1, 224(%rsi)
2496 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
2497 ; AVX2-ONLY-NEXT: vmovaps %ymm1, 160(%rsi)
2498 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
2499 ; AVX2-ONLY-NEXT: vmovaps %ymm1, 96(%rsi)
2500 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
2501 ; AVX2-ONLY-NEXT: vmovaps %ymm1, 32(%rsi)
2502 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
2503 ; AVX2-ONLY-NEXT: vmovaps %ymm1, 192(%rdx)
2504 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
2505 ; AVX2-ONLY-NEXT: vmovaps %ymm1, 128(%rdx)
2506 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
2507 ; AVX2-ONLY-NEXT: vmovaps %ymm1, 64(%rdx)
2508 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
2509 ; AVX2-ONLY-NEXT: vmovaps %ymm1, (%rdx)
2510 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
2511 ; AVX2-ONLY-NEXT: vmovaps %ymm1, 224(%rdx)
2512 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
2513 ; AVX2-ONLY-NEXT: vmovaps %ymm1, 160(%rdx)
2514 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
2515 ; AVX2-ONLY-NEXT: vmovaps %ymm1, 96(%rdx)
2516 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
2517 ; AVX2-ONLY-NEXT: vmovaps %ymm1, 32(%rdx)
2518 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
2519 ; AVX2-ONLY-NEXT: vmovaps %ymm1, 192(%rcx)
2520 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
2521 ; AVX2-ONLY-NEXT: vmovaps %ymm1, 128(%rcx)
2522 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
2523 ; AVX2-ONLY-NEXT: vmovaps %ymm1, 64(%rcx)
2524 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
2525 ; AVX2-ONLY-NEXT: vmovaps %ymm1, (%rcx)
2526 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
2527 ; AVX2-ONLY-NEXT: vmovaps %ymm1, 224(%rcx)
2528 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
2529 ; AVX2-ONLY-NEXT: vmovaps %ymm1, 160(%rcx)
2530 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
2531 ; AVX2-ONLY-NEXT: vmovaps %ymm1, 96(%rcx)
2532 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
2533 ; AVX2-ONLY-NEXT: vmovaps %ymm1, 32(%rcx)
2534 ; AVX2-ONLY-NEXT: vmovdqa %ymm7, (%r8)
2535 ; AVX2-ONLY-NEXT: vmovdqa %ymm8, 64(%r8)
2536 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
2537 ; AVX2-ONLY-NEXT: vmovaps %ymm1, 128(%r8)
2538 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
2539 ; AVX2-ONLY-NEXT: vmovaps %ymm1, 192(%r8)
2540 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
2541 ; AVX2-ONLY-NEXT: vmovaps %ymm1, 224(%r8)
2542 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
2543 ; AVX2-ONLY-NEXT: vmovaps %ymm1, 160(%r8)
2544 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
2545 ; AVX2-ONLY-NEXT: vmovaps %ymm1, 96(%r8)
2546 ; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
2547 ; AVX2-ONLY-NEXT: vmovaps %ymm1, 32(%r8)
2548 ; AVX2-ONLY-NEXT: vmovdqa %ymm0, 224(%r9)
2549 ; AVX2-ONLY-NEXT: vmovdqa %ymm9, 192(%r9)
2550 ; AVX2-ONLY-NEXT: vmovaps %ymm2, 160(%r9)
2551 ; AVX2-ONLY-NEXT: vmovdqa %ymm6, 128(%r9)
2552 ; AVX2-ONLY-NEXT: vmovdqa %ymm15, 96(%r9)
2553 ; AVX2-ONLY-NEXT: vmovdqa %ymm5, 64(%r9)
2554 ; AVX2-ONLY-NEXT: vmovdqa %ymm4, 32(%r9)
2555 ; AVX2-ONLY-NEXT: vmovdqa %ymm12, (%r9)
2556 ; AVX2-ONLY-NEXT: addq $1464, %rsp # imm = 0x5B8
2557 ; AVX2-ONLY-NEXT: vzeroupper
2558 ; AVX2-ONLY-NEXT: retq
2559 ;
2560 ; AVX512F-LABEL: load_i64_stride5_vf32:
2561 ; AVX512F: # %bb.0:
2562 ; AVX512F-NEXT: subq $584, %rsp # imm = 0x248
2563 ; AVX512F-NEXT: vmovdqa64 1088(%rdi), %zmm21
2564 ; AVX512F-NEXT: vmovdqa64 1152(%rdi), %zmm1
2565 ; AVX512F-NEXT: vmovdqa64 768(%rdi), %zmm20
2566 ; AVX512F-NEXT: vmovdqa64 832(%rdi), %zmm0
2567 ; AVX512F-NEXT: vmovdqa64 384(%rdi), %zmm14
2568 ; AVX512F-NEXT: vmovdqa64 448(%rdi), %zmm25
2569 ; AVX512F-NEXT: vmovdqa64 512(%rdi), %zmm2
2570 ; AVX512F-NEXT: vmovdqa64 (%rdi), %zmm22
2571 ; AVX512F-NEXT: vmovdqa64 64(%rdi), %zmm13
2572 ; AVX512F-NEXT: vmovdqa64 128(%rdi), %zmm27
2573 ; AVX512F-NEXT: vmovdqa64 192(%rdi), %zmm3
2574 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm26 = [12,1,6,0,12,1,6,0]
2575 ; AVX512F-NEXT: # zmm26 = mem[0,1,2,3,0,1,2,3]
2576 ; AVX512F-NEXT: vmovdqa64 %zmm3, %zmm10
2577 ; AVX512F-NEXT: vpermt2q %zmm27, %zmm26, %zmm10
2578 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} ymm24 = [0,5,10,15]
2579 ; AVX512F-NEXT: vmovdqa64 %zmm22, %zmm11
2580 ; AVX512F-NEXT: vpermt2q %zmm13, %zmm24, %zmm11
2581 ; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm4
2582 ; AVX512F-NEXT: vpermt2q %zmm25, %zmm26, %zmm4
2583 ; AVX512F-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
2584 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm5
2585 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm4
2586 ; AVX512F-NEXT: vpermt2q %zmm20, %zmm26, %zmm5
2587 ; AVX512F-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
2588 ; AVX512F-NEXT: vpermi2q %zmm21, %zmm1, %zmm26
2589 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} ymm28 = <1,6,11,u>
2590 ; AVX512F-NEXT: vmovdqa64 %zmm22, %zmm0
2591 ; AVX512F-NEXT: vpermt2q %zmm13, %zmm28, %zmm0
2592 ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
2593 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm12 = [5,10,15,0,5,10,15,0]
2594 ; AVX512F-NEXT: # zmm12 = mem[0,1,2,3,0,1,2,3]
2595 ; AVX512F-NEXT: vmovdqa64 %zmm27, %zmm30
2596 ; AVX512F-NEXT: vpermt2q %zmm3, %zmm12, %zmm30
2597 ; AVX512F-NEXT: vmovdqa64 %zmm20, %zmm19
2598 ; AVX512F-NEXT: vpermt2q %zmm4, %zmm12, %zmm19
2599 ; AVX512F-NEXT: vmovdqa64 %zmm25, %zmm18
2600 ; AVX512F-NEXT: vpermt2q %zmm2, %zmm12, %zmm18
2601 ; AVX512F-NEXT: vpermi2q %zmm1, %zmm21, %zmm12
2602 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm23 = [6,11,0,1,6,11,0,1]
2603 ; AVX512F-NEXT: # zmm23 = mem[0,1,2,3,0,1,2,3]
2604 ; AVX512F-NEXT: vmovdqa64 %zmm27, %zmm0
2605 ; AVX512F-NEXT: vpermt2q %zmm3, %zmm23, %zmm0
2606 ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
2607 ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm15 = <2,7,12,u>
2608 ; AVX512F-NEXT: vmovdqa64 %zmm22, %zmm16
2609 ; AVX512F-NEXT: vpermt2q %zmm13, %zmm15, %zmm16
2610 ; AVX512F-NEXT: vmovdqa64 %zmm20, %zmm0
2611 ; AVX512F-NEXT: vpermt2q %zmm4, %zmm23, %zmm0
2612 ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
2613 ; AVX512F-NEXT: vmovdqa64 %zmm25, %zmm0
2614 ; AVX512F-NEXT: vpermt2q %zmm2, %zmm23, %zmm0
2615 ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
2616 ; AVX512F-NEXT: vpermi2q %zmm1, %zmm21, %zmm23
2617 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm29 = [7,12,0,2,7,12,0,2]
2618 ; AVX512F-NEXT: # zmm29 = mem[0,1,2,3,0,1,2,3]
2619 ; AVX512F-NEXT: vmovdqa64 %zmm27, %zmm0
2620 ; AVX512F-NEXT: vpermt2q %zmm3, %zmm29, %zmm0
2621 ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
2622 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [0,5,0,11,0,5,0,11]
2623 ; AVX512F-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3]
2624 ; AVX512F-NEXT: vpermt2q %zmm27, %zmm0, %zmm3
2625 ; AVX512F-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
2626 ; AVX512F-NEXT: vmovdqa64 %zmm25, %zmm27
2627 ; AVX512F-NEXT: vpermt2q %zmm2, %zmm29, %zmm27
2628 ; AVX512F-NEXT: vpermt2q %zmm25, %zmm0, %zmm2
2629 ; AVX512F-NEXT: vmovdqu64 %zmm2, (%rsp) # 64-byte Spill
2630 ; AVX512F-NEXT: vmovdqa64 %zmm20, %zmm17
2631 ; AVX512F-NEXT: vpermt2q %zmm4, %zmm29, %zmm17
2632 ; AVX512F-NEXT: vpermi2q %zmm1, %zmm21, %zmm29
2633 ; AVX512F-NEXT: vpermt2q %zmm21, %zmm0, %zmm1
2634 ; AVX512F-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
2635 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} ymm21 = <11,0,5,u>
2636 ; AVX512F-NEXT: vpermt2q %zmm20, %zmm0, %zmm4
2637 ; AVX512F-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
2638 ; AVX512F-NEXT: vmovdqa64 %zmm13, %zmm20
2639 ; AVX512F-NEXT: vpermt2q %zmm22, %zmm21, %zmm20
2640 ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm9 = <12,1,6,u>
2641 ; AVX512F-NEXT: vpermt2q %zmm22, %zmm9, %zmm13
2642 ; AVX512F-NEXT: vmovdqa64 320(%rdi), %zmm0
2643 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm7
2644 ; AVX512F-NEXT: vpermt2q %zmm14, %zmm24, %zmm7
2645 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm8
2646 ; AVX512F-NEXT: vpermt2q %zmm14, %zmm28, %zmm8
2647 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm31
2648 ; AVX512F-NEXT: vpermt2q %zmm14, %zmm15, %zmm31
2649 ; AVX512F-NEXT: vmovdqa64 %zmm14, %zmm22
2650 ; AVX512F-NEXT: vpermt2q %zmm0, %zmm21, %zmm22
2651 ; AVX512F-NEXT: vpermt2q %zmm0, %zmm9, %zmm14
2652 ; AVX512F-NEXT: vmovdqa64 704(%rdi), %zmm25
2653 ; AVX512F-NEXT: vmovdqa64 640(%rdi), %zmm1
2654 ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm3
2655 ; AVX512F-NEXT: vpermt2q %zmm25, %zmm24, %zmm3
2656 ; AVX512F-NEXT: vmovdqa64 1024(%rdi), %zmm2
2657 ; AVX512F-NEXT: vmovdqa64 960(%rdi), %zmm0
2658 ; AVX512F-NEXT: vpermi2q %zmm2, %zmm0, %zmm24
2659 ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm6
2660 ; AVX512F-NEXT: vpermt2q %zmm25, %zmm28, %zmm6
2661 ; AVX512F-NEXT: vpermi2q %zmm2, %zmm0, %zmm28
2662 ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm4
2663 ; AVX512F-NEXT: vpermt2q %zmm25, %zmm15, %zmm4
2664 ; AVX512F-NEXT: vpermi2q %zmm2, %zmm0, %zmm15
2665 ; AVX512F-NEXT: vmovdqa64 %zmm25, %zmm5
2666 ; AVX512F-NEXT: vpermt2q %zmm1, %zmm21, %zmm5
2667 ; AVX512F-NEXT: vpermi2q %zmm0, %zmm2, %zmm21
2668 ; AVX512F-NEXT: vpermt2q %zmm0, %zmm9, %zmm2
2669 ; AVX512F-NEXT: vpermt2q %zmm1, %zmm9, %zmm25
2670 ; AVX512F-NEXT: vshufi64x2 {{.*#+}} zmm11 = zmm11[0,1,2,3],zmm10[4,5,6,7]
2671 ; AVX512F-NEXT: vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7, %zmm10 # 64-byte Folded Reload
2672 ; AVX512F-NEXT: # zmm10 = zmm7[0,1,2,3],mem[4,5,6,7]
2673 ; AVX512F-NEXT: vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm3 # 64-byte Folded Reload
2674 ; AVX512F-NEXT: # zmm3 = zmm3[0,1,2,3],mem[4,5,6,7]
2675 ; AVX512F-NEXT: vshufi64x2 {{.*#+}} zmm7 = zmm24[0,1,2,3],zmm26[4,5,6,7]
2676 ; AVX512F-NEXT: vmovdqa64 256(%rdi), %zmm9
2677 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm24 = [0,1,2,3,4,5,6,11]
2678 ; AVX512F-NEXT: vpermt2q %zmm9, %zmm24, %zmm11
2679 ; AVX512F-NEXT: vmovdqa64 576(%rdi), %zmm26
2680 ; AVX512F-NEXT: vpermt2q %zmm26, %zmm24, %zmm10
2681 ; AVX512F-NEXT: vmovdqa64 896(%rdi), %zmm1
2682 ; AVX512F-NEXT: vpermt2q %zmm1, %zmm24, %zmm3
2683 ; AVX512F-NEXT: vmovdqa64 1216(%rdi), %zmm0
2684 ; AVX512F-NEXT: vpermt2q %zmm0, %zmm24, %zmm7
2685 ; AVX512F-NEXT: movb $7, %al
2686 ; AVX512F-NEXT: kmovw %eax, %k1
2687 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
2688 ; AVX512F-NEXT: vmovdqa64 %zmm24, %zmm30 {%k1}
2689 ; AVX512F-NEXT: vmovdqa64 %zmm6, %zmm19 {%k1}
2690 ; AVX512F-NEXT: vmovdqa64 %zmm8, %zmm18 {%k1}
2691 ; AVX512F-NEXT: vmovdqa64 %zmm28, %zmm12 {%k1}
2692 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm6 = [0,1,2,3,4,5,6,12]
2693 ; AVX512F-NEXT: vpermt2q %zmm9, %zmm6, %zmm30
2694 ; AVX512F-NEXT: vpermt2q %zmm1, %zmm6, %zmm19
2695 ; AVX512F-NEXT: vpermt2q %zmm26, %zmm6, %zmm18
2696 ; AVX512F-NEXT: vpermt2q %zmm0, %zmm6, %zmm12
2697 ; AVX512F-NEXT: movb $56, %al
2698 ; AVX512F-NEXT: kmovw %eax, %k1
2699 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
2700 ; AVX512F-NEXT: vmovdqa64 %zmm6, %zmm16 {%k1}
2701 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
2702 ; AVX512F-NEXT: vmovdqa64 %zmm6, %zmm4 {%k1}
2703 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
2704 ; AVX512F-NEXT: vmovdqa64 %zmm6, %zmm31 {%k1}
2705 ; AVX512F-NEXT: vmovdqa64 %zmm23, %zmm15 {%k1}
2706 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm6 = [0,1,2,3,4,5,8,13]
2707 ; AVX512F-NEXT: vpermt2q %zmm9, %zmm6, %zmm16
2708 ; AVX512F-NEXT: vpermt2q %zmm1, %zmm6, %zmm4
2709 ; AVX512F-NEXT: vpermt2q %zmm26, %zmm6, %zmm31
2710 ; AVX512F-NEXT: vpermt2q %zmm0, %zmm6, %zmm15
2711 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
2712 ; AVX512F-NEXT: vmovdqa64 %zmm6, %zmm20 {%k1}
2713 ; AVX512F-NEXT: vmovdqa64 %zmm17, %zmm5 {%k1}
2714 ; AVX512F-NEXT: vmovdqa64 %zmm27, %zmm22 {%k1}
2715 ; AVX512F-NEXT: vmovdqa64 %zmm29, %zmm21 {%k1}
2716 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm6 = [0,1,2,3,4,5,9,14]
2717 ; AVX512F-NEXT: vpermt2q %zmm9, %zmm6, %zmm20
2718 ; AVX512F-NEXT: vpermt2q %zmm1, %zmm6, %zmm5
2719 ; AVX512F-NEXT: vpermt2q %zmm26, %zmm6, %zmm22
2720 ; AVX512F-NEXT: vpermt2q %zmm0, %zmm6, %zmm21
2721 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
2722 ; AVX512F-NEXT: vmovdqa64 %zmm6, %zmm13 {%k1}
2723 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm6 = [0,1,2,3,4,5,10,15]
2724 ; AVX512F-NEXT: vpermt2q %zmm9, %zmm6, %zmm13
2725 ; AVX512F-NEXT: vmovdqu64 (%rsp), %zmm8 # 64-byte Reload
2726 ; AVX512F-NEXT: vmovdqa64 %zmm8, %zmm14 {%k1}
2727 ; AVX512F-NEXT: vpermt2q %zmm26, %zmm6, %zmm14
2728 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
2729 ; AVX512F-NEXT: vmovdqa64 %zmm8, %zmm2 {%k1}
2730 ; AVX512F-NEXT: vpermt2q %zmm0, %zmm6, %zmm2
2731 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
2732 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm25 {%k1}
2733 ; AVX512F-NEXT: vpermt2q %zmm1, %zmm6, %zmm25
2734 ; AVX512F-NEXT: vmovdqa64 %zmm7, 192(%rsi)
2735 ; AVX512F-NEXT: vmovdqa64 %zmm3, 128(%rsi)
2736 ; AVX512F-NEXT: vmovdqa64 %zmm10, 64(%rsi)
2737 ; AVX512F-NEXT: vmovdqa64 %zmm11, (%rsi)
2738 ; AVX512F-NEXT: vmovdqa64 %zmm12, 192(%rdx)
2739 ; AVX512F-NEXT: vmovdqa64 %zmm30, (%rdx)
2740 ; AVX512F-NEXT: vmovdqa64 %zmm18, 64(%rdx)
2741 ; AVX512F-NEXT: vmovdqa64 %zmm19, 128(%rdx)
2742 ; AVX512F-NEXT: vmovdqa64 %zmm15, 192(%rcx)
2743 ; AVX512F-NEXT: vmovdqa64 %zmm16, (%rcx)
2744 ; AVX512F-NEXT: vmovdqa64 %zmm31, 64(%rcx)
2745 ; AVX512F-NEXT: vmovdqa64 %zmm4, 128(%rcx)
2746 ; AVX512F-NEXT: vmovdqa64 %zmm21, 192(%r8)
2747 ; AVX512F-NEXT: vmovdqa64 %zmm20, (%r8)
2748 ; AVX512F-NEXT: vmovdqa64 %zmm22, 64(%r8)
2749 ; AVX512F-NEXT: vmovdqa64 %zmm5, 128(%r8)
2750 ; AVX512F-NEXT: vmovdqa64 %zmm25, 128(%r9)
2751 ; AVX512F-NEXT: vmovdqa64 %zmm2, 192(%r9)
2752 ; AVX512F-NEXT: vmovdqa64 %zmm13, (%r9)
2753 ; AVX512F-NEXT: vmovdqa64 %zmm14, 64(%r9)
2754 ; AVX512F-NEXT: addq $584, %rsp # imm = 0x248
2755 ; AVX512F-NEXT: vzeroupper
2756 ; AVX512F-NEXT: retq
2757 ;
2758 ; AVX512BW-LABEL: load_i64_stride5_vf32:
2759 ; AVX512BW: # %bb.0:
2760 ; AVX512BW-NEXT: subq $584, %rsp # imm = 0x248
2761 ; AVX512BW-NEXT: vmovdqa64 1088(%rdi), %zmm21
2762 ; AVX512BW-NEXT: vmovdqa64 1152(%rdi), %zmm1
2763 ; AVX512BW-NEXT: vmovdqa64 768(%rdi), %zmm20
2764 ; AVX512BW-NEXT: vmovdqa64 832(%rdi), %zmm0
2765 ; AVX512BW-NEXT: vmovdqa64 384(%rdi), %zmm14
2766 ; AVX512BW-NEXT: vmovdqa64 448(%rdi), %zmm25
2767 ; AVX512BW-NEXT: vmovdqa64 512(%rdi), %zmm2
2768 ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm22
2769 ; AVX512BW-NEXT: vmovdqa64 64(%rdi), %zmm13
2770 ; AVX512BW-NEXT: vmovdqa64 128(%rdi), %zmm27
2771 ; AVX512BW-NEXT: vmovdqa64 192(%rdi), %zmm3
2772 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm26 = [12,1,6,0,12,1,6,0]
2773 ; AVX512BW-NEXT: # zmm26 = mem[0,1,2,3,0,1,2,3]
2774 ; AVX512BW-NEXT: vmovdqa64 %zmm3, %zmm10
2775 ; AVX512BW-NEXT: vpermt2q %zmm27, %zmm26, %zmm10
2776 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} ymm24 = [0,5,10,15]
2777 ; AVX512BW-NEXT: vmovdqa64 %zmm22, %zmm11
2778 ; AVX512BW-NEXT: vpermt2q %zmm13, %zmm24, %zmm11
2779 ; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm4
2780 ; AVX512BW-NEXT: vpermt2q %zmm25, %zmm26, %zmm4
2781 ; AVX512BW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
2782 ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm5
2783 ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm4
2784 ; AVX512BW-NEXT: vpermt2q %zmm20, %zmm26, %zmm5
2785 ; AVX512BW-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
2786 ; AVX512BW-NEXT: vpermi2q %zmm21, %zmm1, %zmm26
2787 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} ymm28 = <1,6,11,u>
2788 ; AVX512BW-NEXT: vmovdqa64 %zmm22, %zmm0
2789 ; AVX512BW-NEXT: vpermt2q %zmm13, %zmm28, %zmm0
2790 ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
2791 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm12 = [5,10,15,0,5,10,15,0]
2792 ; AVX512BW-NEXT: # zmm12 = mem[0,1,2,3,0,1,2,3]
2793 ; AVX512BW-NEXT: vmovdqa64 %zmm27, %zmm30
2794 ; AVX512BW-NEXT: vpermt2q %zmm3, %zmm12, %zmm30
2795 ; AVX512BW-NEXT: vmovdqa64 %zmm20, %zmm19
2796 ; AVX512BW-NEXT: vpermt2q %zmm4, %zmm12, %zmm19
2797 ; AVX512BW-NEXT: vmovdqa64 %zmm25, %zmm18
2798 ; AVX512BW-NEXT: vpermt2q %zmm2, %zmm12, %zmm18
2799 ; AVX512BW-NEXT: vpermi2q %zmm1, %zmm21, %zmm12
2800 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm23 = [6,11,0,1,6,11,0,1]
2801 ; AVX512BW-NEXT: # zmm23 = mem[0,1,2,3,0,1,2,3]
2802 ; AVX512BW-NEXT: vmovdqa64 %zmm27, %zmm0
2803 ; AVX512BW-NEXT: vpermt2q %zmm3, %zmm23, %zmm0
2804 ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
2805 ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm15 = <2,7,12,u>
2806 ; AVX512BW-NEXT: vmovdqa64 %zmm22, %zmm16
2807 ; AVX512BW-NEXT: vpermt2q %zmm13, %zmm15, %zmm16
2808 ; AVX512BW-NEXT: vmovdqa64 %zmm20, %zmm0
2809 ; AVX512BW-NEXT: vpermt2q %zmm4, %zmm23, %zmm0
2810 ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
2811 ; AVX512BW-NEXT: vmovdqa64 %zmm25, %zmm0
2812 ; AVX512BW-NEXT: vpermt2q %zmm2, %zmm23, %zmm0
2813 ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
2814 ; AVX512BW-NEXT: vpermi2q %zmm1, %zmm21, %zmm23
2815 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm29 = [7,12,0,2,7,12,0,2]
2816 ; AVX512BW-NEXT: # zmm29 = mem[0,1,2,3,0,1,2,3]
2817 ; AVX512BW-NEXT: vmovdqa64 %zmm27, %zmm0
2818 ; AVX512BW-NEXT: vpermt2q %zmm3, %zmm29, %zmm0
2819 ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
2820 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [0,5,0,11,0,5,0,11]
2821 ; AVX512BW-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3]
2822 ; AVX512BW-NEXT: vpermt2q %zmm27, %zmm0, %zmm3
2823 ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
2824 ; AVX512BW-NEXT: vmovdqa64 %zmm25, %zmm27
2825 ; AVX512BW-NEXT: vpermt2q %zmm2, %zmm29, %zmm27
2826 ; AVX512BW-NEXT: vpermt2q %zmm25, %zmm0, %zmm2
2827 ; AVX512BW-NEXT: vmovdqu64 %zmm2, (%rsp) # 64-byte Spill
2828 ; AVX512BW-NEXT: vmovdqa64 %zmm20, %zmm17
2829 ; AVX512BW-NEXT: vpermt2q %zmm4, %zmm29, %zmm17
2830 ; AVX512BW-NEXT: vpermi2q %zmm1, %zmm21, %zmm29
2831 ; AVX512BW-NEXT: vpermt2q %zmm21, %zmm0, %zmm1
2832 ; AVX512BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
2833 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} ymm21 = <11,0,5,u>
2834 ; AVX512BW-NEXT: vpermt2q %zmm20, %zmm0, %zmm4
2835 ; AVX512BW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
2836 ; AVX512BW-NEXT: vmovdqa64 %zmm13, %zmm20
2837 ; AVX512BW-NEXT: vpermt2q %zmm22, %zmm21, %zmm20
2838 ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm9 = <12,1,6,u>
2839 ; AVX512BW-NEXT: vpermt2q %zmm22, %zmm9, %zmm13
2840 ; AVX512BW-NEXT: vmovdqa64 320(%rdi), %zmm0
2841 ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm7
2842 ; AVX512BW-NEXT: vpermt2q %zmm14, %zmm24, %zmm7
2843 ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm8
2844 ; AVX512BW-NEXT: vpermt2q %zmm14, %zmm28, %zmm8
2845 ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm31
2846 ; AVX512BW-NEXT: vpermt2q %zmm14, %zmm15, %zmm31
2847 ; AVX512BW-NEXT: vmovdqa64 %zmm14, %zmm22
2848 ; AVX512BW-NEXT: vpermt2q %zmm0, %zmm21, %zmm22
2849 ; AVX512BW-NEXT: vpermt2q %zmm0, %zmm9, %zmm14
2850 ; AVX512BW-NEXT: vmovdqa64 704(%rdi), %zmm25
2851 ; AVX512BW-NEXT: vmovdqa64 640(%rdi), %zmm1
2852 ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm3
2853 ; AVX512BW-NEXT: vpermt2q %zmm25, %zmm24, %zmm3
2854 ; AVX512BW-NEXT: vmovdqa64 1024(%rdi), %zmm2
2855 ; AVX512BW-NEXT: vmovdqa64 960(%rdi), %zmm0
2856 ; AVX512BW-NEXT: vpermi2q %zmm2, %zmm0, %zmm24
2857 ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm6
2858 ; AVX512BW-NEXT: vpermt2q %zmm25, %zmm28, %zmm6
2859 ; AVX512BW-NEXT: vpermi2q %zmm2, %zmm0, %zmm28
2860 ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm4
2861 ; AVX512BW-NEXT: vpermt2q %zmm25, %zmm15, %zmm4
2862 ; AVX512BW-NEXT: vpermi2q %zmm2, %zmm0, %zmm15
2863 ; AVX512BW-NEXT: vmovdqa64 %zmm25, %zmm5
2864 ; AVX512BW-NEXT: vpermt2q %zmm1, %zmm21, %zmm5
2865 ; AVX512BW-NEXT: vpermi2q %zmm0, %zmm2, %zmm21
2866 ; AVX512BW-NEXT: vpermt2q %zmm0, %zmm9, %zmm2
2867 ; AVX512BW-NEXT: vpermt2q %zmm1, %zmm9, %zmm25
2868 ; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm11 = zmm11[0,1,2,3],zmm10[4,5,6,7]
2869 ; AVX512BW-NEXT: vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7, %zmm10 # 64-byte Folded Reload
2870 ; AVX512BW-NEXT: # zmm10 = zmm7[0,1,2,3],mem[4,5,6,7]
2871 ; AVX512BW-NEXT: vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm3 # 64-byte Folded Reload
2872 ; AVX512BW-NEXT: # zmm3 = zmm3[0,1,2,3],mem[4,5,6,7]
2873 ; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm7 = zmm24[0,1,2,3],zmm26[4,5,6,7]
2874 ; AVX512BW-NEXT: vmovdqa64 256(%rdi), %zmm9
2875 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm24 = [0,1,2,3,4,5,6,11]
2876 ; AVX512BW-NEXT: vpermt2q %zmm9, %zmm24, %zmm11
2877 ; AVX512BW-NEXT: vmovdqa64 576(%rdi), %zmm26
2878 ; AVX512BW-NEXT: vpermt2q %zmm26, %zmm24, %zmm10
2879 ; AVX512BW-NEXT: vmovdqa64 896(%rdi), %zmm1
2880 ; AVX512BW-NEXT: vpermt2q %zmm1, %zmm24, %zmm3
2881 ; AVX512BW-NEXT: vmovdqa64 1216(%rdi), %zmm0
2882 ; AVX512BW-NEXT: vpermt2q %zmm0, %zmm24, %zmm7
2883 ; AVX512BW-NEXT: movb $7, %al
2884 ; AVX512BW-NEXT: kmovd %eax, %k1
2885 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
2886 ; AVX512BW-NEXT: vmovdqa64 %zmm24, %zmm30 {%k1}
2887 ; AVX512BW-NEXT: vmovdqa64 %zmm6, %zmm19 {%k1}
2888 ; AVX512BW-NEXT: vmovdqa64 %zmm8, %zmm18 {%k1}
2889 ; AVX512BW-NEXT: vmovdqa64 %zmm28, %zmm12 {%k1}
2890 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm6 = [0,1,2,3,4,5,6,12]
2891 ; AVX512BW-NEXT: vpermt2q %zmm9, %zmm6, %zmm30
2892 ; AVX512BW-NEXT: vpermt2q %zmm1, %zmm6, %zmm19
2893 ; AVX512BW-NEXT: vpermt2q %zmm26, %zmm6, %zmm18
2894 ; AVX512BW-NEXT: vpermt2q %zmm0, %zmm6, %zmm12
2895 ; AVX512BW-NEXT: movb $56, %al
2896 ; AVX512BW-NEXT: kmovd %eax, %k1
2897 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
2898 ; AVX512BW-NEXT: vmovdqa64 %zmm6, %zmm16 {%k1}
2899 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
2900 ; AVX512BW-NEXT: vmovdqa64 %zmm6, %zmm4 {%k1}
2901 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
2902 ; AVX512BW-NEXT: vmovdqa64 %zmm6, %zmm31 {%k1}
2903 ; AVX512BW-NEXT: vmovdqa64 %zmm23, %zmm15 {%k1}
2904 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm6 = [0,1,2,3,4,5,8,13]
2905 ; AVX512BW-NEXT: vpermt2q %zmm9, %zmm6, %zmm16
2906 ; AVX512BW-NEXT: vpermt2q %zmm1, %zmm6, %zmm4
2907 ; AVX512BW-NEXT: vpermt2q %zmm26, %zmm6, %zmm31
2908 ; AVX512BW-NEXT: vpermt2q %zmm0, %zmm6, %zmm15
2909 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
2910 ; AVX512BW-NEXT: vmovdqa64 %zmm6, %zmm20 {%k1}
2911 ; AVX512BW-NEXT: vmovdqa64 %zmm17, %zmm5 {%k1}
2912 ; AVX512BW-NEXT: vmovdqa64 %zmm27, %zmm22 {%k1}
2913 ; AVX512BW-NEXT: vmovdqa64 %zmm29, %zmm21 {%k1}
2914 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm6 = [0,1,2,3,4,5,9,14]
2915 ; AVX512BW-NEXT: vpermt2q %zmm9, %zmm6, %zmm20
2916 ; AVX512BW-NEXT: vpermt2q %zmm1, %zmm6, %zmm5
2917 ; AVX512BW-NEXT: vpermt2q %zmm26, %zmm6, %zmm22
2918 ; AVX512BW-NEXT: vpermt2q %zmm0, %zmm6, %zmm21
2919 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
2920 ; AVX512BW-NEXT: vmovdqa64 %zmm6, %zmm13 {%k1}
2921 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm6 = [0,1,2,3,4,5,10,15]
2922 ; AVX512BW-NEXT: vpermt2q %zmm9, %zmm6, %zmm13
2923 ; AVX512BW-NEXT: vmovdqu64 (%rsp), %zmm8 # 64-byte Reload
2924 ; AVX512BW-NEXT: vmovdqa64 %zmm8, %zmm14 {%k1}
2925 ; AVX512BW-NEXT: vpermt2q %zmm26, %zmm6, %zmm14
2926 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
2927 ; AVX512BW-NEXT: vmovdqa64 %zmm8, %zmm2 {%k1}
2928 ; AVX512BW-NEXT: vpermt2q %zmm0, %zmm6, %zmm2
2929 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
2930 ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm25 {%k1}
2931 ; AVX512BW-NEXT: vpermt2q %zmm1, %zmm6, %zmm25
2932 ; AVX512BW-NEXT: vmovdqa64 %zmm7, 192(%rsi)
2933 ; AVX512BW-NEXT: vmovdqa64 %zmm3, 128(%rsi)
2934 ; AVX512BW-NEXT: vmovdqa64 %zmm10, 64(%rsi)
2935 ; AVX512BW-NEXT: vmovdqa64 %zmm11, (%rsi)
2936 ; AVX512BW-NEXT: vmovdqa64 %zmm12, 192(%rdx)
2937 ; AVX512BW-NEXT: vmovdqa64 %zmm30, (%rdx)
2938 ; AVX512BW-NEXT: vmovdqa64 %zmm18, 64(%rdx)
2939 ; AVX512BW-NEXT: vmovdqa64 %zmm19, 128(%rdx)
2940 ; AVX512BW-NEXT: vmovdqa64 %zmm15, 192(%rcx)
2941 ; AVX512BW-NEXT: vmovdqa64 %zmm16, (%rcx)
2942 ; AVX512BW-NEXT: vmovdqa64 %zmm31, 64(%rcx)
2943 ; AVX512BW-NEXT: vmovdqa64 %zmm4, 128(%rcx)
2944 ; AVX512BW-NEXT: vmovdqa64 %zmm21, 192(%r8)
2945 ; AVX512BW-NEXT: vmovdqa64 %zmm20, (%r8)
2946 ; AVX512BW-NEXT: vmovdqa64 %zmm22, 64(%r8)
2947 ; AVX512BW-NEXT: vmovdqa64 %zmm5, 128(%r8)
2948 ; AVX512BW-NEXT: vmovdqa64 %zmm25, 128(%r9)
2949 ; AVX512BW-NEXT: vmovdqa64 %zmm2, 192(%r9)
2950 ; AVX512BW-NEXT: vmovdqa64 %zmm13, (%r9)
2951 ; AVX512BW-NEXT: vmovdqa64 %zmm14, 64(%r9)
2952 ; AVX512BW-NEXT: addq $584, %rsp # imm = 0x248
2953 ; AVX512BW-NEXT: vzeroupper
2954 ; AVX512BW-NEXT: retq
2955 %wide.vec = load <160 x i64>, ptr %in.vec, align 64
2956 %strided.vec0 = shufflevector <160 x i64> %wide.vec, <160 x i64> poison, <32 x i32> <i32 0, i32 5, i32 10, i32 15, i32 20, i32 25, i32 30, i32 35, i32 40, i32 45, i32 50, i32 55, i32 60, i32 65, i32 70, i32 75, i32 80, i32 85, i32 90, i32 95, i32 100, i32 105, i32 110, i32 115, i32 120, i32 125, i32 130, i32 135, i32 140, i32 145, i32 150, i32 155>
2957 %strided.vec1 = shufflevector <160 x i64> %wide.vec, <160 x i64> poison, <32 x i32> <i32 1, i32 6, i32 11, i32 16, i32 21, i32 26, i32 31, i32 36, i32 41, i32 46, i32 51, i32 56, i32 61, i32 66, i32 71, i32 76, i32 81, i32 86, i32 91, i32 96, i32 101, i32 106, i32 111, i32 116, i32 121, i32 126, i32 131, i32 136, i32 141, i32 146, i32 151, i32 156>
2958 %strided.vec2 = shufflevector <160 x i64> %wide.vec, <160 x i64> poison, <32 x i32> <i32 2, i32 7, i32 12, i32 17, i32 22, i32 27, i32 32, i32 37, i32 42, i32 47, i32 52, i32 57, i32 62, i32 67, i32 72, i32 77, i32 82, i32 87, i32 92, i32 97, i32 102, i32 107, i32 112, i32 117, i32 122, i32 127, i32 132, i32 137, i32 142, i32 147, i32 152, i32 157>
2959 %strided.vec3 = shufflevector <160 x i64> %wide.vec, <160 x i64> poison, <32 x i32> <i32 3, i32 8, i32 13, i32 18, i32 23, i32 28, i32 33, i32 38, i32 43, i32 48, i32 53, i32 58, i32 63, i32 68, i32 73, i32 78, i32 83, i32 88, i32 93, i32 98, i32 103, i32 108, i32 113, i32 118, i32 123, i32 128, i32 133, i32 138, i32 143, i32 148, i32 153, i32 158>
2960 %strided.vec4 = shufflevector <160 x i64> %wide.vec, <160 x i64> poison, <32 x i32> <i32 4, i32 9, i32 14, i32 19, i32 24, i32 29, i32 34, i32 39, i32 44, i32 49, i32 54, i32 59, i32 64, i32 69, i32 74, i32 79, i32 84, i32 89, i32 94, i32 99, i32 104, i32 109, i32 114, i32 119, i32 124, i32 129, i32 134, i32 139, i32 144, i32 149, i32 154, i32 159>
2961 store <32 x i64> %strided.vec0, ptr %out.vec0, align 64
2962 store <32 x i64> %strided.vec1, ptr %out.vec1, align 64
2963 store <32 x i64> %strided.vec2, ptr %out.vec2, align 64
2964 store <32 x i64> %strided.vec3, ptr %out.vec3, align 64
2965 store <32 x i64> %strided.vec4, ptr %out.vec4, align 64
2966 ret void
2967 }
2969 define void @load_i64_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3, ptr %out.vec4) nounwind {
2970 ; SSE-LABEL: load_i64_stride5_vf64:
2971 ; SSE: # %bb.0:
2972 ; SSE-NEXT: subq $2216, %rsp # imm = 0x8A8
2973 ; SSE-NEXT: movapd 224(%rdi), %xmm5
2974 ; SSE-NEXT: movapd 144(%rdi), %xmm4
2975 ; SSE-NEXT: movapd 64(%rdi), %xmm3
2976 ; SSE-NEXT: movapd 176(%rdi), %xmm7
2977 ; SSE-NEXT: movapd 96(%rdi), %xmm6
2978 ; SSE-NEXT: movapd 208(%rdi), %xmm10
2979 ; SSE-NEXT: movapd 128(%rdi), %xmm9
2980 ; SSE-NEXT: movapd (%rdi), %xmm12
2981 ; SSE-NEXT: movapd 16(%rdi), %xmm8
2982 ; SSE-NEXT: movapd 32(%rdi), %xmm0
2983 ; SSE-NEXT: movapd 48(%rdi), %xmm11
2984 ; SSE-NEXT: movapd 160(%rdi), %xmm13
2985 ; SSE-NEXT: movapd 192(%rdi), %xmm1
2986 ; SSE-NEXT: movapd 80(%rdi), %xmm14
2987 ; SSE-NEXT: movapd 112(%rdi), %xmm2
2988 ; SSE-NEXT: movapd %xmm0, %xmm15
2989 ; SSE-NEXT: movsd {{.*#+}} xmm15 = xmm12[0],xmm15[1]
2990 ; SSE-NEXT: movapd %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2991 ; SSE-NEXT: shufpd {{.*#+}} xmm12 = xmm12[1],xmm11[0]
2992 ; SSE-NEXT: movapd %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2993 ; SSE-NEXT: movsd {{.*#+}} xmm11 = xmm8[0],xmm11[1]
2994 ; SSE-NEXT: movapd %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2995 ; SSE-NEXT: shufpd {{.*#+}} xmm8 = xmm8[1],xmm3[0]
2996 ; SSE-NEXT: movapd %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2997 ; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm0[0],xmm3[1]
2998 ; SSE-NEXT: movapd %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
2999 ; SSE-NEXT: movapd %xmm2, %xmm0
3000 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm14[0],xmm0[1]
3001 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3002 ; SSE-NEXT: shufpd {{.*#+}} xmm14 = xmm14[1],xmm9[0]
3003 ; SSE-NEXT: movapd %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3004 ; SSE-NEXT: movsd {{.*#+}} xmm9 = xmm6[0],xmm9[1]
3005 ; SSE-NEXT: movapd %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3006 ; SSE-NEXT: shufpd {{.*#+}} xmm6 = xmm6[1],xmm4[0]
3007 ; SSE-NEXT: movapd %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3008 ; SSE-NEXT: movsd {{.*#+}} xmm4 = xmm2[0],xmm4[1]
3009 ; SSE-NEXT: movapd %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3010 ; SSE-NEXT: movapd %xmm1, %xmm0
3011 ; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm13[0],xmm0[1]
3012 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3013 ; SSE-NEXT: shufpd {{.*#+}} xmm13 = xmm13[1],xmm10[0]
3014 ; SSE-NEXT: movapd %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3015 ; SSE-NEXT: movsd {{.*#+}} xmm10 = xmm7[0],xmm10[1]
3016 ; SSE-NEXT: movapd %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3017 ; SSE-NEXT: shufpd {{.*#+}} xmm7 = xmm7[1],xmm5[0]
3018 ; SSE-NEXT: movapd %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3019 ; SSE-NEXT: movsd {{.*#+}} xmm5 = xmm1[0],xmm5[1]
3020 ; SSE-NEXT: movapd %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3021 ; SSE-NEXT: movapd 240(%rdi), %xmm2
3022 ; SSE-NEXT: movapd 272(%rdi), %xmm0
3023 ; SSE-NEXT: movapd %xmm0, %xmm1
3024 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
3025 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3026 ; SSE-NEXT: movapd 288(%rdi), %xmm1
3027 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
3028 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3029 ; SSE-NEXT: movapd 256(%rdi), %xmm2
3030 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
3031 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3032 ; SSE-NEXT: movapd 304(%rdi), %xmm1
3033 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
3034 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3035 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
3036 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3037 ; SSE-NEXT: movapd 320(%rdi), %xmm2
3038 ; SSE-NEXT: movapd 352(%rdi), %xmm0
3039 ; SSE-NEXT: movapd %xmm0, %xmm1
3040 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
3041 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3042 ; SSE-NEXT: movapd 368(%rdi), %xmm1
3043 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
3044 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3045 ; SSE-NEXT: movapd 336(%rdi), %xmm2
3046 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
3047 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3048 ; SSE-NEXT: movapd 384(%rdi), %xmm1
3049 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
3050 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3051 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
3052 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3053 ; SSE-NEXT: movapd 400(%rdi), %xmm2
3054 ; SSE-NEXT: movapd 432(%rdi), %xmm0
3055 ; SSE-NEXT: movapd %xmm0, %xmm1
3056 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
3057 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3058 ; SSE-NEXT: movapd 448(%rdi), %xmm1
3059 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
3060 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3061 ; SSE-NEXT: movapd 416(%rdi), %xmm2
3062 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
3063 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3064 ; SSE-NEXT: movapd 464(%rdi), %xmm1
3065 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
3066 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3067 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
3068 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3069 ; SSE-NEXT: movapd 480(%rdi), %xmm2
3070 ; SSE-NEXT: movapd 512(%rdi), %xmm0
3071 ; SSE-NEXT: movapd %xmm0, %xmm1
3072 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
3073 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3074 ; SSE-NEXT: movapd 528(%rdi), %xmm1
3075 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
3076 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3077 ; SSE-NEXT: movapd 496(%rdi), %xmm2
3078 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
3079 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3080 ; SSE-NEXT: movapd 544(%rdi), %xmm1
3081 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
3082 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3083 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
3084 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3085 ; SSE-NEXT: movapd 560(%rdi), %xmm2
3086 ; SSE-NEXT: movapd 592(%rdi), %xmm0
3087 ; SSE-NEXT: movapd %xmm0, %xmm1
3088 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
3089 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3090 ; SSE-NEXT: movapd 608(%rdi), %xmm1
3091 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
3092 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3093 ; SSE-NEXT: movapd 576(%rdi), %xmm2
3094 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
3095 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3096 ; SSE-NEXT: movapd 624(%rdi), %xmm1
3097 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
3098 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3099 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
3100 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3101 ; SSE-NEXT: movapd 640(%rdi), %xmm2
3102 ; SSE-NEXT: movapd 672(%rdi), %xmm0
3103 ; SSE-NEXT: movapd %xmm0, %xmm1
3104 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
3105 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3106 ; SSE-NEXT: movapd 688(%rdi), %xmm1
3107 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
3108 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3109 ; SSE-NEXT: movapd 656(%rdi), %xmm2
3110 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
3111 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3112 ; SSE-NEXT: movapd 704(%rdi), %xmm1
3113 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
3114 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3115 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
3116 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3117 ; SSE-NEXT: movapd 720(%rdi), %xmm2
3118 ; SSE-NEXT: movapd 752(%rdi), %xmm0
3119 ; SSE-NEXT: movapd %xmm0, %xmm1
3120 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
3121 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3122 ; SSE-NEXT: movapd 768(%rdi), %xmm1
3123 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
3124 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3125 ; SSE-NEXT: movapd 736(%rdi), %xmm2
3126 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
3127 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3128 ; SSE-NEXT: movapd 784(%rdi), %xmm1
3129 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
3130 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3131 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
3132 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3133 ; SSE-NEXT: movapd 800(%rdi), %xmm2
3134 ; SSE-NEXT: movapd 832(%rdi), %xmm0
3135 ; SSE-NEXT: movapd %xmm0, %xmm1
3136 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
3137 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3138 ; SSE-NEXT: movapd 848(%rdi), %xmm1
3139 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
3140 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3141 ; SSE-NEXT: movapd 816(%rdi), %xmm2
3142 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
3143 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3144 ; SSE-NEXT: movapd 864(%rdi), %xmm1
3145 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
3146 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3147 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
3148 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3149 ; SSE-NEXT: movapd 880(%rdi), %xmm2
3150 ; SSE-NEXT: movapd 912(%rdi), %xmm0
3151 ; SSE-NEXT: movapd %xmm0, %xmm1
3152 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
3153 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3154 ; SSE-NEXT: movapd 928(%rdi), %xmm1
3155 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
3156 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3157 ; SSE-NEXT: movapd 896(%rdi), %xmm2
3158 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
3159 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3160 ; SSE-NEXT: movapd 944(%rdi), %xmm1
3161 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
3162 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3163 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
3164 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3165 ; SSE-NEXT: movapd 960(%rdi), %xmm2
3166 ; SSE-NEXT: movapd 992(%rdi), %xmm0
3167 ; SSE-NEXT: movapd %xmm0, %xmm1
3168 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
3169 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3170 ; SSE-NEXT: movapd 1008(%rdi), %xmm1
3171 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
3172 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3173 ; SSE-NEXT: movapd 976(%rdi), %xmm2
3174 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
3175 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3176 ; SSE-NEXT: movapd 1024(%rdi), %xmm1
3177 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
3178 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3179 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
3180 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3181 ; SSE-NEXT: movapd 1040(%rdi), %xmm2
3182 ; SSE-NEXT: movapd 1072(%rdi), %xmm0
3183 ; SSE-NEXT: movapd %xmm0, %xmm1
3184 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
3185 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3186 ; SSE-NEXT: movapd 1088(%rdi), %xmm1
3187 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
3188 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3189 ; SSE-NEXT: movapd 1056(%rdi), %xmm2
3190 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
3191 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3192 ; SSE-NEXT: movapd 1104(%rdi), %xmm1
3193 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
3194 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3195 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
3196 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3197 ; SSE-NEXT: movapd 1120(%rdi), %xmm2
3198 ; SSE-NEXT: movapd 1152(%rdi), %xmm0
3199 ; SSE-NEXT: movapd %xmm0, %xmm1
3200 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
3201 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3202 ; SSE-NEXT: movapd 1168(%rdi), %xmm1
3203 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
3204 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3205 ; SSE-NEXT: movapd 1136(%rdi), %xmm2
3206 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
3207 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3208 ; SSE-NEXT: movapd 1184(%rdi), %xmm1
3209 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
3210 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3211 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
3212 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3213 ; SSE-NEXT: movapd 1200(%rdi), %xmm2
3214 ; SSE-NEXT: movapd 1232(%rdi), %xmm0
3215 ; SSE-NEXT: movapd %xmm0, %xmm1
3216 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
3217 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3218 ; SSE-NEXT: movapd 1248(%rdi), %xmm1
3219 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
3220 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3221 ; SSE-NEXT: movapd 1216(%rdi), %xmm2
3222 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
3223 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3224 ; SSE-NEXT: movapd 1264(%rdi), %xmm1
3225 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
3226 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3227 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
3228 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3229 ; SSE-NEXT: movapd 1280(%rdi), %xmm2
3230 ; SSE-NEXT: movapd 1312(%rdi), %xmm0
3231 ; SSE-NEXT: movapd %xmm0, %xmm1
3232 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
3233 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3234 ; SSE-NEXT: movapd 1328(%rdi), %xmm1
3235 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
3236 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3237 ; SSE-NEXT: movapd 1296(%rdi), %xmm2
3238 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
3239 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3240 ; SSE-NEXT: movapd 1344(%rdi), %xmm1
3241 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
3242 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3243 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
3244 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3245 ; SSE-NEXT: movapd 1360(%rdi), %xmm2
3246 ; SSE-NEXT: movapd 1392(%rdi), %xmm0
3247 ; SSE-NEXT: movapd %xmm0, %xmm1
3248 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
3249 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3250 ; SSE-NEXT: movapd 1408(%rdi), %xmm1
3251 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
3252 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3253 ; SSE-NEXT: movapd 1376(%rdi), %xmm2
3254 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
3255 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3256 ; SSE-NEXT: movapd 1424(%rdi), %xmm1
3257 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
3258 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3259 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
3260 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3261 ; SSE-NEXT: movapd 1440(%rdi), %xmm2
3262 ; SSE-NEXT: movapd 1472(%rdi), %xmm0
3263 ; SSE-NEXT: movapd %xmm0, %xmm1
3264 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
3265 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3266 ; SSE-NEXT: movapd 1488(%rdi), %xmm1
3267 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
3268 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3269 ; SSE-NEXT: movapd 1456(%rdi), %xmm2
3270 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
3271 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3272 ; SSE-NEXT: movapd 1504(%rdi), %xmm1
3273 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
3274 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3275 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
3276 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3277 ; SSE-NEXT: movapd 1520(%rdi), %xmm2
3278 ; SSE-NEXT: movapd 1552(%rdi), %xmm0
3279 ; SSE-NEXT: movapd %xmm0, %xmm1
3280 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
3281 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3282 ; SSE-NEXT: movapd 1568(%rdi), %xmm1
3283 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
3284 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3285 ; SSE-NEXT: movapd 1536(%rdi), %xmm2
3286 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
3287 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3288 ; SSE-NEXT: movapd 1584(%rdi), %xmm1
3289 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
3290 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3291 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
3292 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3293 ; SSE-NEXT: movapd 1600(%rdi), %xmm2
3294 ; SSE-NEXT: movapd 1632(%rdi), %xmm0
3295 ; SSE-NEXT: movapd %xmm0, %xmm1
3296 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
3297 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3298 ; SSE-NEXT: movapd 1648(%rdi), %xmm1
3299 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
3300 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3301 ; SSE-NEXT: movapd 1616(%rdi), %xmm2
3302 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
3303 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3304 ; SSE-NEXT: movapd 1664(%rdi), %xmm1
3305 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
3306 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3307 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
3308 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3309 ; SSE-NEXT: movapd 1680(%rdi), %xmm2
3310 ; SSE-NEXT: movapd 1712(%rdi), %xmm0
3311 ; SSE-NEXT: movapd %xmm0, %xmm1
3312 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
3313 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3314 ; SSE-NEXT: movapd 1728(%rdi), %xmm1
3315 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
3316 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3317 ; SSE-NEXT: movapd 1696(%rdi), %xmm2
3318 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
3319 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3320 ; SSE-NEXT: movapd 1744(%rdi), %xmm1
3321 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
3322 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3323 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
3324 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3325 ; SSE-NEXT: movapd 1760(%rdi), %xmm2
3326 ; SSE-NEXT: movapd 1792(%rdi), %xmm0
3327 ; SSE-NEXT: movapd %xmm0, %xmm1
3328 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
3329 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3330 ; SSE-NEXT: movapd 1808(%rdi), %xmm1
3331 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
3332 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3333 ; SSE-NEXT: movapd 1776(%rdi), %xmm2
3334 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
3335 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3336 ; SSE-NEXT: movapd 1824(%rdi), %xmm1
3337 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
3338 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3339 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
3340 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3341 ; SSE-NEXT: movapd 1840(%rdi), %xmm2
3342 ; SSE-NEXT: movapd 1872(%rdi), %xmm0
3343 ; SSE-NEXT: movapd %xmm0, %xmm1
3344 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
3345 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3346 ; SSE-NEXT: movapd 1888(%rdi), %xmm1
3347 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
3348 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3349 ; SSE-NEXT: movapd 1856(%rdi), %xmm2
3350 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
3351 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3352 ; SSE-NEXT: movapd 1904(%rdi), %xmm1
3353 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
3354 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3355 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
3356 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3357 ; SSE-NEXT: movapd 1920(%rdi), %xmm2
3358 ; SSE-NEXT: movapd 1952(%rdi), %xmm0
3359 ; SSE-NEXT: movapd %xmm0, %xmm1
3360 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
3361 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3362 ; SSE-NEXT: movapd 1968(%rdi), %xmm1
3363 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
3364 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3365 ; SSE-NEXT: movapd 1936(%rdi), %xmm2
3366 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
3367 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3368 ; SSE-NEXT: movapd 1984(%rdi), %xmm1
3369 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
3370 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3371 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
3372 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3373 ; SSE-NEXT: movapd 2000(%rdi), %xmm2
3374 ; SSE-NEXT: movapd 2032(%rdi), %xmm0
3375 ; SSE-NEXT: movapd %xmm0, %xmm1
3376 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
3377 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3378 ; SSE-NEXT: movapd 2048(%rdi), %xmm1
3379 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
3380 ; SSE-NEXT: movapd %xmm2, (%rsp) # 16-byte Spill
3381 ; SSE-NEXT: movapd 2016(%rdi), %xmm2
3382 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
3383 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3384 ; SSE-NEXT: movapd 2064(%rdi), %xmm1
3385 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
3386 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3387 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
3388 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3389 ; SSE-NEXT: movapd 2080(%rdi), %xmm2
3390 ; SSE-NEXT: movapd 2112(%rdi), %xmm0
3391 ; SSE-NEXT: movapd %xmm0, %xmm1
3392 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
3393 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3394 ; SSE-NEXT: movapd 2128(%rdi), %xmm1
3395 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
3396 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3397 ; SSE-NEXT: movapd 2096(%rdi), %xmm2
3398 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
3399 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3400 ; SSE-NEXT: movapd 2144(%rdi), %xmm1
3401 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
3402 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3403 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
3404 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3405 ; SSE-NEXT: movapd 2160(%rdi), %xmm14
3406 ; SSE-NEXT: movapd 2192(%rdi), %xmm0
3407 ; SSE-NEXT: movapd %xmm0, %xmm11
3408 ; SSE-NEXT: movsd {{.*#+}} xmm11 = xmm14[0],xmm11[1]
3409 ; SSE-NEXT: movapd 2208(%rdi), %xmm1
3410 ; SSE-NEXT: shufpd {{.*#+}} xmm14 = xmm14[1],xmm1[0]
3411 ; SSE-NEXT: movapd 2176(%rdi), %xmm2
3412 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
3413 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3414 ; SSE-NEXT: movapd 2224(%rdi), %xmm1
3415 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
3416 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3417 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
3418 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3419 ; SSE-NEXT: movapd 2240(%rdi), %xmm12
3420 ; SSE-NEXT: movapd 2272(%rdi), %xmm0
3421 ; SSE-NEXT: movapd %xmm0, %xmm8
3422 ; SSE-NEXT: movsd {{.*#+}} xmm8 = xmm12[0],xmm8[1]
3423 ; SSE-NEXT: movapd 2288(%rdi), %xmm1
3424 ; SSE-NEXT: shufpd {{.*#+}} xmm12 = xmm12[1],xmm1[0]
3425 ; SSE-NEXT: movapd 2256(%rdi), %xmm2
3426 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
3427 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3428 ; SSE-NEXT: movapd 2304(%rdi), %xmm1
3429 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
3430 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3431 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
3432 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3433 ; SSE-NEXT: movapd 2320(%rdi), %xmm9
3434 ; SSE-NEXT: movapd 2352(%rdi), %xmm0
3435 ; SSE-NEXT: movapd %xmm0, %xmm6
3436 ; SSE-NEXT: movsd {{.*#+}} xmm6 = xmm9[0],xmm6[1]
3437 ; SSE-NEXT: movapd 2368(%rdi), %xmm15
3438 ; SSE-NEXT: shufpd {{.*#+}} xmm9 = xmm9[1],xmm15[0]
3439 ; SSE-NEXT: movapd 2336(%rdi), %xmm2
3440 ; SSE-NEXT: movsd {{.*#+}} xmm15 = xmm2[0],xmm15[1]
3441 ; SSE-NEXT: movapd 2384(%rdi), %xmm1
3442 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
3443 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3444 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
3445 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3446 ; SSE-NEXT: movapd 2400(%rdi), %xmm7
3447 ; SSE-NEXT: movapd 2432(%rdi), %xmm0
3448 ; SSE-NEXT: movapd %xmm0, %xmm4
3449 ; SSE-NEXT: movsd {{.*#+}} xmm4 = xmm7[0],xmm4[1]
3450 ; SSE-NEXT: movapd 2448(%rdi), %xmm13
3451 ; SSE-NEXT: shufpd {{.*#+}} xmm7 = xmm7[1],xmm13[0]
3452 ; SSE-NEXT: movapd 2416(%rdi), %xmm2
3453 ; SSE-NEXT: movsd {{.*#+}} xmm13 = xmm2[0],xmm13[1]
3454 ; SSE-NEXT: movapd 2464(%rdi), %xmm1
3455 ; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1],xmm1[0]
3456 ; SSE-NEXT: movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3457 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
3458 ; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3459 ; SSE-NEXT: movapd 2480(%rdi), %xmm5
3460 ; SSE-NEXT: movapd 2512(%rdi), %xmm3
3461 ; SSE-NEXT: movapd %xmm3, %xmm2
3462 ; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm5[0],xmm2[1]
3463 ; SSE-NEXT: movapd 2528(%rdi), %xmm10
3464 ; SSE-NEXT: shufpd {{.*#+}} xmm5 = xmm5[1],xmm10[0]
3465 ; SSE-NEXT: movapd 2496(%rdi), %xmm0
3466 ; SSE-NEXT: movsd {{.*#+}} xmm10 = xmm0[0],xmm10[1]
3467 ; SSE-NEXT: movapd 2544(%rdi), %xmm1
3468 ; SSE-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1],xmm1[0]
3469 ; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3470 ; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm3[0],xmm1[1]
3471 ; SSE-NEXT: movapd %xmm2, 496(%rsi)
3472 ; SSE-NEXT: movapd %xmm4, 480(%rsi)
3473 ; SSE-NEXT: movapd %xmm6, 464(%rsi)
3474 ; SSE-NEXT: movapd %xmm8, 448(%rsi)
3475 ; SSE-NEXT: movapd %xmm11, 432(%rsi)
3476 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3477 ; SSE-NEXT: movaps %xmm0, 416(%rsi)
3478 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3479 ; SSE-NEXT: movaps %xmm0, 400(%rsi)
3480 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3481 ; SSE-NEXT: movaps %xmm0, 384(%rsi)
3482 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3483 ; SSE-NEXT: movaps %xmm0, 368(%rsi)
3484 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3485 ; SSE-NEXT: movaps %xmm0, 352(%rsi)
3486 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3487 ; SSE-NEXT: movaps %xmm0, 336(%rsi)
3488 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3489 ; SSE-NEXT: movaps %xmm0, 320(%rsi)
3490 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3491 ; SSE-NEXT: movaps %xmm0, 304(%rsi)
3492 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3493 ; SSE-NEXT: movaps %xmm0, 288(%rsi)
3494 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3495 ; SSE-NEXT: movaps %xmm0, 272(%rsi)
3496 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3497 ; SSE-NEXT: movaps %xmm0, 256(%rsi)
3498 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3499 ; SSE-NEXT: movaps %xmm0, 240(%rsi)
3500 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3501 ; SSE-NEXT: movaps %xmm0, 224(%rsi)
3502 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3503 ; SSE-NEXT: movaps %xmm0, 208(%rsi)
3504 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3505 ; SSE-NEXT: movaps %xmm0, 192(%rsi)
3506 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3507 ; SSE-NEXT: movaps %xmm0, 176(%rsi)
3508 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3509 ; SSE-NEXT: movaps %xmm0, 160(%rsi)
3510 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3511 ; SSE-NEXT: movaps %xmm0, 144(%rsi)
3512 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3513 ; SSE-NEXT: movaps %xmm0, 128(%rsi)
3514 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3515 ; SSE-NEXT: movaps %xmm0, 112(%rsi)
3516 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3517 ; SSE-NEXT: movaps %xmm0, 96(%rsi)
3518 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3519 ; SSE-NEXT: movaps %xmm0, 80(%rsi)
3520 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3521 ; SSE-NEXT: movaps %xmm0, 64(%rsi)
3522 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3523 ; SSE-NEXT: movaps %xmm0, 48(%rsi)
3524 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3525 ; SSE-NEXT: movaps %xmm0, 32(%rsi)
3526 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3527 ; SSE-NEXT: movaps %xmm0, 16(%rsi)
3528 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3529 ; SSE-NEXT: movaps %xmm0, (%rsi)
3530 ; SSE-NEXT: movapd %xmm5, 496(%rdx)
3531 ; SSE-NEXT: movapd %xmm7, 480(%rdx)
3532 ; SSE-NEXT: movapd %xmm9, 464(%rdx)
3533 ; SSE-NEXT: movapd %xmm12, 448(%rdx)
3534 ; SSE-NEXT: movapd %xmm14, 432(%rdx)
3535 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3536 ; SSE-NEXT: movaps %xmm0, 416(%rdx)
3537 ; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
3538 ; SSE-NEXT: movaps %xmm0, 400(%rdx)
3539 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3540 ; SSE-NEXT: movaps %xmm0, 384(%rdx)
3541 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3542 ; SSE-NEXT: movaps %xmm0, 368(%rdx)
3543 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3544 ; SSE-NEXT: movaps %xmm0, 352(%rdx)
3545 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3546 ; SSE-NEXT: movaps %xmm0, 336(%rdx)
3547 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3548 ; SSE-NEXT: movaps %xmm0, 320(%rdx)
3549 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3550 ; SSE-NEXT: movaps %xmm0, 304(%rdx)
3551 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3552 ; SSE-NEXT: movaps %xmm0, 288(%rdx)
3553 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3554 ; SSE-NEXT: movaps %xmm0, 272(%rdx)
3555 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3556 ; SSE-NEXT: movaps %xmm0, 256(%rdx)
3557 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3558 ; SSE-NEXT: movaps %xmm0, 240(%rdx)
3559 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3560 ; SSE-NEXT: movaps %xmm0, 224(%rdx)
3561 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3562 ; SSE-NEXT: movaps %xmm0, 208(%rdx)
3563 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3564 ; SSE-NEXT: movaps %xmm0, 192(%rdx)
3565 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3566 ; SSE-NEXT: movaps %xmm0, 176(%rdx)
3567 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3568 ; SSE-NEXT: movaps %xmm0, 160(%rdx)
3569 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3570 ; SSE-NEXT: movaps %xmm0, 144(%rdx)
3571 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3572 ; SSE-NEXT: movaps %xmm0, 128(%rdx)
3573 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3574 ; SSE-NEXT: movaps %xmm0, 112(%rdx)
3575 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3576 ; SSE-NEXT: movaps %xmm0, 96(%rdx)
3577 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3578 ; SSE-NEXT: movaps %xmm0, 80(%rdx)
3579 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3580 ; SSE-NEXT: movaps %xmm0, 64(%rdx)
3581 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3582 ; SSE-NEXT: movaps %xmm0, 48(%rdx)
3583 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3584 ; SSE-NEXT: movaps %xmm0, 32(%rdx)
3585 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3586 ; SSE-NEXT: movaps %xmm0, 16(%rdx)
3587 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3588 ; SSE-NEXT: movaps %xmm0, (%rdx)
3589 ; SSE-NEXT: movapd %xmm10, 496(%rcx)
3590 ; SSE-NEXT: movapd %xmm13, 480(%rcx)
3591 ; SSE-NEXT: movapd %xmm15, 464(%rcx)
3592 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3593 ; SSE-NEXT: movaps %xmm0, 448(%rcx)
3594 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3595 ; SSE-NEXT: movaps %xmm0, 432(%rcx)
3596 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3597 ; SSE-NEXT: movaps %xmm0, 416(%rcx)
3598 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3599 ; SSE-NEXT: movaps %xmm0, 400(%rcx)
3600 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3601 ; SSE-NEXT: movaps %xmm0, 384(%rcx)
3602 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3603 ; SSE-NEXT: movaps %xmm0, 368(%rcx)
3604 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3605 ; SSE-NEXT: movaps %xmm0, 352(%rcx)
3606 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3607 ; SSE-NEXT: movaps %xmm0, 336(%rcx)
3608 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3609 ; SSE-NEXT: movaps %xmm0, 320(%rcx)
3610 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3611 ; SSE-NEXT: movaps %xmm0, 304(%rcx)
3612 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3613 ; SSE-NEXT: movaps %xmm0, 288(%rcx)
3614 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3615 ; SSE-NEXT: movaps %xmm0, 272(%rcx)
3616 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3617 ; SSE-NEXT: movaps %xmm0, 256(%rcx)
3618 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3619 ; SSE-NEXT: movaps %xmm0, 240(%rcx)
3620 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3621 ; SSE-NEXT: movaps %xmm0, 224(%rcx)
3622 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3623 ; SSE-NEXT: movaps %xmm0, 208(%rcx)
3624 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3625 ; SSE-NEXT: movaps %xmm0, 192(%rcx)
3626 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3627 ; SSE-NEXT: movaps %xmm0, 176(%rcx)
3628 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3629 ; SSE-NEXT: movaps %xmm0, 160(%rcx)
3630 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3631 ; SSE-NEXT: movaps %xmm0, 144(%rcx)
3632 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3633 ; SSE-NEXT: movaps %xmm0, 128(%rcx)
3634 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3635 ; SSE-NEXT: movaps %xmm0, 112(%rcx)
3636 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3637 ; SSE-NEXT: movaps %xmm0, 96(%rcx)
3638 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3639 ; SSE-NEXT: movaps %xmm0, 80(%rcx)
3640 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3641 ; SSE-NEXT: movaps %xmm0, 64(%rcx)
3642 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3643 ; SSE-NEXT: movaps %xmm0, 48(%rcx)
3644 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3645 ; SSE-NEXT: movaps %xmm0, 32(%rcx)
3646 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3647 ; SSE-NEXT: movaps %xmm0, 16(%rcx)
3648 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3649 ; SSE-NEXT: movaps %xmm0, (%rcx)
3650 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3651 ; SSE-NEXT: movaps %xmm0, 496(%r8)
3652 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3653 ; SSE-NEXT: movaps %xmm0, 480(%r8)
3654 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3655 ; SSE-NEXT: movaps %xmm0, 464(%r8)
3656 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3657 ; SSE-NEXT: movaps %xmm0, 448(%r8)
3658 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3659 ; SSE-NEXT: movaps %xmm0, 432(%r8)
3660 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3661 ; SSE-NEXT: movaps %xmm0, 416(%r8)
3662 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3663 ; SSE-NEXT: movaps %xmm0, 400(%r8)
3664 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3665 ; SSE-NEXT: movaps %xmm0, 384(%r8)
3666 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3667 ; SSE-NEXT: movaps %xmm0, 368(%r8)
3668 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3669 ; SSE-NEXT: movaps %xmm0, 352(%r8)
3670 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3671 ; SSE-NEXT: movaps %xmm0, 336(%r8)
3672 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3673 ; SSE-NEXT: movaps %xmm0, 320(%r8)
3674 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3675 ; SSE-NEXT: movaps %xmm0, 304(%r8)
3676 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3677 ; SSE-NEXT: movaps %xmm0, 288(%r8)
3678 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3679 ; SSE-NEXT: movaps %xmm0, 272(%r8)
3680 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3681 ; SSE-NEXT: movaps %xmm0, 256(%r8)
3682 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3683 ; SSE-NEXT: movaps %xmm0, 240(%r8)
3684 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3685 ; SSE-NEXT: movaps %xmm0, 224(%r8)
3686 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3687 ; SSE-NEXT: movaps %xmm0, 208(%r8)
3688 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3689 ; SSE-NEXT: movaps %xmm0, 192(%r8)
3690 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3691 ; SSE-NEXT: movaps %xmm0, 176(%r8)
3692 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3693 ; SSE-NEXT: movaps %xmm0, 160(%r8)
3694 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3695 ; SSE-NEXT: movaps %xmm0, 144(%r8)
3696 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3697 ; SSE-NEXT: movaps %xmm0, 128(%r8)
3698 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3699 ; SSE-NEXT: movaps %xmm0, 112(%r8)
3700 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3701 ; SSE-NEXT: movaps %xmm0, 96(%r8)
3702 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3703 ; SSE-NEXT: movaps %xmm0, 80(%r8)
3704 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3705 ; SSE-NEXT: movaps %xmm0, 64(%r8)
3706 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3707 ; SSE-NEXT: movaps %xmm0, 48(%r8)
3708 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3709 ; SSE-NEXT: movaps %xmm0, 32(%r8)
3710 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3711 ; SSE-NEXT: movaps %xmm0, 16(%r8)
3712 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3713 ; SSE-NEXT: movaps %xmm0, (%r8)
3714 ; SSE-NEXT: movapd %xmm1, 496(%r9)
3715 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3716 ; SSE-NEXT: movaps %xmm0, 480(%r9)
3717 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3718 ; SSE-NEXT: movaps %xmm0, 464(%r9)
3719 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3720 ; SSE-NEXT: movaps %xmm0, 448(%r9)
3721 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3722 ; SSE-NEXT: movaps %xmm0, 432(%r9)
3723 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3724 ; SSE-NEXT: movaps %xmm0, 416(%r9)
3725 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3726 ; SSE-NEXT: movaps %xmm0, 400(%r9)
3727 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3728 ; SSE-NEXT: movaps %xmm0, 384(%r9)
3729 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3730 ; SSE-NEXT: movaps %xmm0, 368(%r9)
3731 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3732 ; SSE-NEXT: movaps %xmm0, 352(%r9)
3733 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3734 ; SSE-NEXT: movaps %xmm0, 336(%r9)
3735 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3736 ; SSE-NEXT: movaps %xmm0, 320(%r9)
3737 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3738 ; SSE-NEXT: movaps %xmm0, 304(%r9)
3739 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3740 ; SSE-NEXT: movaps %xmm0, 288(%r9)
3741 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3742 ; SSE-NEXT: movaps %xmm0, 272(%r9)
3743 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3744 ; SSE-NEXT: movaps %xmm0, 256(%r9)
3745 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3746 ; SSE-NEXT: movaps %xmm0, 240(%r9)
3747 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3748 ; SSE-NEXT: movaps %xmm0, 224(%r9)
3749 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3750 ; SSE-NEXT: movaps %xmm0, 208(%r9)
3751 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3752 ; SSE-NEXT: movaps %xmm0, 192(%r9)
3753 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3754 ; SSE-NEXT: movaps %xmm0, 176(%r9)
3755 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3756 ; SSE-NEXT: movaps %xmm0, 160(%r9)
3757 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3758 ; SSE-NEXT: movaps %xmm0, 144(%r9)
3759 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3760 ; SSE-NEXT: movaps %xmm0, 128(%r9)
3761 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3762 ; SSE-NEXT: movaps %xmm0, 112(%r9)
3763 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3764 ; SSE-NEXT: movaps %xmm0, 96(%r9)
3765 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3766 ; SSE-NEXT: movaps %xmm0, 80(%r9)
3767 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3768 ; SSE-NEXT: movaps %xmm0, 64(%r9)
3769 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3770 ; SSE-NEXT: movaps %xmm0, 48(%r9)
3771 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3772 ; SSE-NEXT: movaps %xmm0, 32(%r9)
3773 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3774 ; SSE-NEXT: movaps %xmm0, 16(%r9)
3775 ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
3776 ; SSE-NEXT: movaps %xmm0, (%r9)
3777 ; SSE-NEXT: addq $2216, %rsp # imm = 0x8A8
3778 ; SSE-NEXT: retq
3779 ;
3780 ; AVX1-ONLY-LABEL: load_i64_stride5_vf64:
3781 ; AVX1-ONLY: # %bb.0:
3782 ; AVX1-ONLY-NEXT: subq $3256, %rsp # imm = 0xCB8
3783 ; AVX1-ONLY-NEXT: vmovapd 896(%rdi), %ymm2
3784 ; AVX1-ONLY-NEXT: vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3785 ; AVX1-ONLY-NEXT: vmovapd 864(%rdi), %ymm4
3786 ; AVX1-ONLY-NEXT: vmovapd 576(%rdi), %ymm3
3787 ; AVX1-ONLY-NEXT: vmovupd %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3788 ; AVX1-ONLY-NEXT: vmovapd 544(%rdi), %ymm5
3789 ; AVX1-ONLY-NEXT: vmovapd 256(%rdi), %ymm0
3790 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3791 ; AVX1-ONLY-NEXT: vmovapd 224(%rdi), %ymm7
3792 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm7[0,1,2],ymm0[3]
3793 ; AVX1-ONLY-NEXT: vmovapd 192(%rdi), %xmm1
3794 ; AVX1-ONLY-NEXT: vmovapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3795 ; AVX1-ONLY-NEXT: vmovapd 160(%rdi), %xmm11
3796 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} xmm1 = xmm11[0],xmm1[1]
3797 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
3798 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3799 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm5[0,1,2],ymm3[3]
3800 ; AVX1-ONLY-NEXT: vmovapd 512(%rdi), %xmm1
3801 ; AVX1-ONLY-NEXT: vmovapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3802 ; AVX1-ONLY-NEXT: vmovapd 480(%rdi), %xmm8
3803 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} xmm1 = xmm8[0],xmm1[1]
3804 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
3805 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3806 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm4[0,1,2],ymm2[3]
3807 ; AVX1-ONLY-NEXT: vmovapd 832(%rdi), %xmm1
3808 ; AVX1-ONLY-NEXT: vmovapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3809 ; AVX1-ONLY-NEXT: vmovapd 800(%rdi), %xmm3
3810 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} xmm1 = xmm3[0],xmm1[1]
3811 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
3812 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3813 ; AVX1-ONLY-NEXT: vmovaps 1216(%rdi), %ymm0
3814 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3815 ; AVX1-ONLY-NEXT: vmovaps 1184(%rdi), %ymm1
3816 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3817 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
3818 ; AVX1-ONLY-NEXT: vmovaps 1152(%rdi), %xmm1
3819 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3820 ; AVX1-ONLY-NEXT: vmovaps 1120(%rdi), %xmm9
3821 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm9[0,1],xmm1[2,3]
3822 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
3823 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3824 ; AVX1-ONLY-NEXT: vmovapd 1536(%rdi), %ymm0
3825 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3826 ; AVX1-ONLY-NEXT: vmovapd 1504(%rdi), %ymm6
3827 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm6[0,1,2],ymm0[3]
3828 ; AVX1-ONLY-NEXT: vmovapd 1472(%rdi), %xmm1
3829 ; AVX1-ONLY-NEXT: vmovapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3830 ; AVX1-ONLY-NEXT: vmovapd 1440(%rdi), %xmm12
3831 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} xmm1 = xmm12[0],xmm1[1]
3832 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
3833 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3834 ; AVX1-ONLY-NEXT: vmovapd 1856(%rdi), %ymm0
3835 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3836 ; AVX1-ONLY-NEXT: vmovapd 1824(%rdi), %ymm10
3837 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm10[0,1,2],ymm0[3]
3838 ; AVX1-ONLY-NEXT: vmovapd 1792(%rdi), %xmm1
3839 ; AVX1-ONLY-NEXT: vmovapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3840 ; AVX1-ONLY-NEXT: vmovapd 1760(%rdi), %xmm13
3841 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} xmm1 = xmm13[0],xmm1[1]
3842 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
3843 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3844 ; AVX1-ONLY-NEXT: vmovapd 2176(%rdi), %ymm0
3845 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3846 ; AVX1-ONLY-NEXT: vmovapd 2144(%rdi), %ymm14
3847 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm14[0,1,2],ymm0[3]
3848 ; AVX1-ONLY-NEXT: vmovapd 2112(%rdi), %xmm1
3849 ; AVX1-ONLY-NEXT: vmovapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3850 ; AVX1-ONLY-NEXT: vmovapd 2080(%rdi), %xmm15
3851 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} xmm1 = xmm15[0],xmm1[1]
3852 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
3853 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3854 ; AVX1-ONLY-NEXT: vmovaps 2496(%rdi), %ymm0
3855 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3856 ; AVX1-ONLY-NEXT: vmovaps 2464(%rdi), %ymm1
3857 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3858 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm1[0,1,2,3,4,5],ymm0[6,7]
3859 ; AVX1-ONLY-NEXT: vmovaps 2432(%rdi), %xmm1
3860 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3861 ; AVX1-ONLY-NEXT: vmovaps 2400(%rdi), %xmm0
3862 ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3863 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3]
3864 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm2[4,5,6,7]
3865 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3866 ; AVX1-ONLY-NEXT: vmovaps 96(%rdi), %ymm0
3867 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3868 ; AVX1-ONLY-NEXT: vmovaps 64(%rdi), %ymm1
3869 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3870 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
3871 ; AVX1-ONLY-NEXT: vmovaps (%rdi), %xmm2
3872 ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3873 ; AVX1-ONLY-NEXT: vmovaps 32(%rdi), %xmm1
3874 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3875 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
3876 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
3877 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3878 ; AVX1-ONLY-NEXT: vmovaps 416(%rdi), %ymm0
3879 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3880 ; AVX1-ONLY-NEXT: vmovaps 384(%rdi), %ymm1
3881 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3882 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm1[0,1,2,3,4,5],ymm0[6,7]
3883 ; AVX1-ONLY-NEXT: vmovaps 352(%rdi), %xmm1
3884 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3885 ; AVX1-ONLY-NEXT: vmovaps 320(%rdi), %xmm0
3886 ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3887 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3]
3888 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm2[4,5,6,7]
3889 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3890 ; AVX1-ONLY-NEXT: vmovaps 736(%rdi), %ymm0
3891 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3892 ; AVX1-ONLY-NEXT: vmovaps 704(%rdi), %ymm1
3893 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3894 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
3895 ; AVX1-ONLY-NEXT: vmovaps 672(%rdi), %xmm1
3896 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3897 ; AVX1-ONLY-NEXT: vmovaps 640(%rdi), %xmm2
3898 ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3899 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
3900 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
3901 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3902 ; AVX1-ONLY-NEXT: vmovaps 1056(%rdi), %ymm0
3903 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3904 ; AVX1-ONLY-NEXT: vmovaps 1024(%rdi), %ymm1
3905 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3906 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm1[0,1,2,3,4,5],ymm0[6,7]
3907 ; AVX1-ONLY-NEXT: vmovaps 992(%rdi), %xmm1
3908 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3909 ; AVX1-ONLY-NEXT: vmovaps 960(%rdi), %xmm0
3910 ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3911 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3]
3912 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm2[4,5,6,7]
3913 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3914 ; AVX1-ONLY-NEXT: vmovaps 1376(%rdi), %ymm0
3915 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3916 ; AVX1-ONLY-NEXT: vmovaps 1344(%rdi), %ymm1
3917 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3918 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
3919 ; AVX1-ONLY-NEXT: vmovaps 1312(%rdi), %xmm1
3920 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3921 ; AVX1-ONLY-NEXT: vmovaps 1280(%rdi), %xmm2
3922 ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3923 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
3924 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
3925 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3926 ; AVX1-ONLY-NEXT: vmovaps 1696(%rdi), %ymm0
3927 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3928 ; AVX1-ONLY-NEXT: vmovaps 1664(%rdi), %ymm1
3929 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3930 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm1[0,1,2,3,4,5],ymm0[6,7]
3931 ; AVX1-ONLY-NEXT: vmovaps 1632(%rdi), %xmm1
3932 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3933 ; AVX1-ONLY-NEXT: vmovaps 1600(%rdi), %xmm0
3934 ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3935 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3]
3936 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm2[4,5,6,7]
3937 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3938 ; AVX1-ONLY-NEXT: vmovaps 2016(%rdi), %ymm0
3939 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3940 ; AVX1-ONLY-NEXT: vmovaps 1984(%rdi), %ymm1
3941 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3942 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
3943 ; AVX1-ONLY-NEXT: vmovaps 1952(%rdi), %xmm1
3944 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3945 ; AVX1-ONLY-NEXT: vmovaps 1920(%rdi), %xmm2
3946 ; AVX1-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3947 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
3948 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
3949 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3950 ; AVX1-ONLY-NEXT: vmovaps 2336(%rdi), %ymm0
3951 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3952 ; AVX1-ONLY-NEXT: vmovaps 2304(%rdi), %ymm1
3953 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3954 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm2 = ymm1[0,1,2,3,4,5],ymm0[6,7]
3955 ; AVX1-ONLY-NEXT: vmovaps 2272(%rdi), %xmm1
3956 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3957 ; AVX1-ONLY-NEXT: vmovaps 2240(%rdi), %xmm0
3958 ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3959 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3]
3960 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm2[4,5,6,7]
3961 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3962 ; AVX1-ONLY-NEXT: vmovaps 288(%rdi), %xmm0
3963 ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3964 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
3965 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm7[0],ymm0[0],ymm7[3],ymm0[2]
3966 ; AVX1-ONLY-NEXT: vmovdqa 208(%rdi), %xmm1
3967 ; AVX1-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3968 ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm1 = xmm11[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
3969 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
3970 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3971 ; AVX1-ONLY-NEXT: vmovaps 608(%rdi), %xmm0
3972 ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3973 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
3974 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm5[0],ymm0[0],ymm5[3],ymm0[2]
3975 ; AVX1-ONLY-NEXT: vmovdqa 528(%rdi), %xmm1
3976 ; AVX1-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3977 ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm1 = xmm8[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
3978 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
3979 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3980 ; AVX1-ONLY-NEXT: vmovaps 928(%rdi), %xmm0
3981 ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3982 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
3983 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm4[0],ymm0[0],ymm4[3],ymm0[2]
3984 ; AVX1-ONLY-NEXT: vmovdqa 848(%rdi), %xmm1
3985 ; AVX1-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3986 ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm1 = xmm3[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
3987 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
3988 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3989 ; AVX1-ONLY-NEXT: vmovaps 1248(%rdi), %xmm0
3990 ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3991 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
3992 ; AVX1-ONLY-NEXT: vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
3993 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[3],ymm0[2]
3994 ; AVX1-ONLY-NEXT: vmovdqa 1168(%rdi), %xmm1
3995 ; AVX1-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
3996 ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm1 = xmm9[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
3997 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
3998 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
3999 ; AVX1-ONLY-NEXT: vmovaps 1568(%rdi), %xmm0
4000 ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4001 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
4002 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm6[0],ymm0[0],ymm6[3],ymm0[2]
4003 ; AVX1-ONLY-NEXT: vmovdqa 1488(%rdi), %xmm1
4004 ; AVX1-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4005 ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm1 = xmm12[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
4006 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
4007 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4008 ; AVX1-ONLY-NEXT: vmovaps 1888(%rdi), %xmm11
4009 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm11, %ymm0, %ymm0
4010 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm10[0],ymm0[0],ymm10[3],ymm0[2]
4011 ; AVX1-ONLY-NEXT: vmovdqa 1808(%rdi), %xmm12
4012 ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm1 = xmm13[8,9,10,11,12,13,14,15],xmm12[0,1,2,3,4,5,6,7]
4013 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
4014 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4015 ; AVX1-ONLY-NEXT: vmovaps 2208(%rdi), %xmm13
4016 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm13, %ymm0, %ymm0
4017 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm14[0],ymm0[0],ymm14[3],ymm0[2]
4018 ; AVX1-ONLY-NEXT: vmovdqa 2128(%rdi), %xmm14
4019 ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm1 = xmm15[8,9,10,11,12,13,14,15],xmm14[0,1,2,3,4,5,6,7]
4020 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
4021 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4022 ; AVX1-ONLY-NEXT: vmovaps 2528(%rdi), %xmm15
4023 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm15, %ymm0, %ymm0
4024 ; AVX1-ONLY-NEXT: vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4025 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[3],ymm0[2]
4026 ; AVX1-ONLY-NEXT: vmovdqa 2448(%rdi), %xmm1
4027 ; AVX1-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4028 ; AVX1-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
4029 ; AVX1-ONLY-NEXT: # xmm1 = mem[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
4030 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
4031 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4032 ; AVX1-ONLY-NEXT: vmovaps 128(%rdi), %xmm0
4033 ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4034 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
4035 ; AVX1-ONLY-NEXT: vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4036 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[3],ymm0[2]
4037 ; AVX1-ONLY-NEXT: vmovdqa 48(%rdi), %xmm1
4038 ; AVX1-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4039 ; AVX1-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
4040 ; AVX1-ONLY-NEXT: # xmm1 = mem[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
4041 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
4042 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4043 ; AVX1-ONLY-NEXT: vmovaps 448(%rdi), %xmm0
4044 ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4045 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
4046 ; AVX1-ONLY-NEXT: vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4047 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[3],ymm0[2]
4048 ; AVX1-ONLY-NEXT: vmovdqa 368(%rdi), %xmm1
4049 ; AVX1-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4050 ; AVX1-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
4051 ; AVX1-ONLY-NEXT: # xmm1 = mem[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
4052 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
4053 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4054 ; AVX1-ONLY-NEXT: vmovaps 768(%rdi), %xmm0
4055 ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4056 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
4057 ; AVX1-ONLY-NEXT: vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4058 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[3],ymm0[2]
4059 ; AVX1-ONLY-NEXT: vmovdqa 688(%rdi), %xmm1
4060 ; AVX1-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4061 ; AVX1-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
4062 ; AVX1-ONLY-NEXT: # xmm1 = mem[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
4063 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
4064 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4065 ; AVX1-ONLY-NEXT: vmovaps 1088(%rdi), %xmm0
4066 ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4067 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
4068 ; AVX1-ONLY-NEXT: vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4069 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[3],ymm0[2]
4070 ; AVX1-ONLY-NEXT: vmovdqa 1008(%rdi), %xmm1
4071 ; AVX1-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4072 ; AVX1-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
4073 ; AVX1-ONLY-NEXT: # xmm1 = mem[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
4074 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
4075 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4076 ; AVX1-ONLY-NEXT: vmovaps 1408(%rdi), %xmm8
4077 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm8, %ymm0, %ymm0
4078 ; AVX1-ONLY-NEXT: vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4079 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[3],ymm0[2]
4080 ; AVX1-ONLY-NEXT: vmovdqa 1328(%rdi), %xmm10
4081 ; AVX1-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10, %xmm1 # 16-byte Folded Reload
4082 ; AVX1-ONLY-NEXT: # xmm1 = mem[8,9,10,11,12,13,14,15],xmm10[0,1,2,3,4,5,6,7]
4083 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
4084 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4085 ; AVX1-ONLY-NEXT: vmovaps 1728(%rdi), %xmm6
4086 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm0
4087 ; AVX1-ONLY-NEXT: vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4088 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[3],ymm0[2]
4089 ; AVX1-ONLY-NEXT: vmovdqa 1648(%rdi), %xmm7
4090 ; AVX1-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm1 # 16-byte Folded Reload
4091 ; AVX1-ONLY-NEXT: # xmm1 = mem[8,9,10,11,12,13,14,15],xmm7[0,1,2,3,4,5,6,7]
4092 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
4093 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4094 ; AVX1-ONLY-NEXT: vmovaps 2048(%rdi), %xmm4
4095 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
4096 ; AVX1-ONLY-NEXT: vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4097 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[3],ymm0[2]
4098 ; AVX1-ONLY-NEXT: vmovdqa 1968(%rdi), %xmm5
4099 ; AVX1-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm1 # 16-byte Folded Reload
4100 ; AVX1-ONLY-NEXT: # xmm1 = mem[8,9,10,11,12,13,14,15],xmm5[0,1,2,3,4,5,6,7]
4101 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
4102 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4103 ; AVX1-ONLY-NEXT: vmovaps 2368(%rdi), %xmm2
4104 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm1
4105 ; AVX1-ONLY-NEXT: vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4106 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[3],ymm1[2]
4107 ; AVX1-ONLY-NEXT: vmovdqa 2288(%rdi), %xmm3
4108 ; AVX1-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm9 # 16-byte Folded Reload
4109 ; AVX1-ONLY-NEXT: # xmm9 = mem[8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7]
4110 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm9[0,1],ymm0[2,3]
4111 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4112 ; AVX1-ONLY-NEXT: vmovaps 256(%rdi), %xmm0
4113 ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4114 ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
4115 ; AVX1-ONLY-NEXT: # xmm0 = xmm0[0,1],mem[2,3]
4116 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
4117 ; AVX1-ONLY-NEXT: vmovaps 176(%rdi), %xmm1
4118 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4119 ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm9 # 16-byte Folded Reload
4120 ; AVX1-ONLY-NEXT: # xmm9 = xmm1[0,1],mem[2,3]
4121 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm9[0,1,2,3],ymm0[4,5,6,7]
4122 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4123 ; AVX1-ONLY-NEXT: vmovaps 576(%rdi), %xmm0
4124 ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4125 ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
4126 ; AVX1-ONLY-NEXT: # xmm0 = xmm0[0,1],mem[2,3]
4127 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
4128 ; AVX1-ONLY-NEXT: vmovaps 496(%rdi), %xmm1
4129 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4130 ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm9 # 16-byte Folded Reload
4131 ; AVX1-ONLY-NEXT: # xmm9 = xmm1[0,1],mem[2,3]
4132 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm9[0,1,2,3],ymm0[4,5,6,7]
4133 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4134 ; AVX1-ONLY-NEXT: vmovaps 896(%rdi), %xmm0
4135 ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4136 ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
4137 ; AVX1-ONLY-NEXT: # xmm0 = xmm0[0,1],mem[2,3]
4138 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
4139 ; AVX1-ONLY-NEXT: vmovaps 816(%rdi), %xmm1
4140 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4141 ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm9 # 16-byte Folded Reload
4142 ; AVX1-ONLY-NEXT: # xmm9 = xmm1[0,1],mem[2,3]
4143 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm9[0,1,2,3],ymm0[4,5,6,7]
4144 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4145 ; AVX1-ONLY-NEXT: vmovaps 1216(%rdi), %xmm0
4146 ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4147 ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
4148 ; AVX1-ONLY-NEXT: # xmm0 = xmm0[0,1],mem[2,3]
4149 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
4150 ; AVX1-ONLY-NEXT: vmovaps 1136(%rdi), %xmm1
4151 ; AVX1-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4152 ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm9 # 16-byte Folded Reload
4153 ; AVX1-ONLY-NEXT: # xmm9 = xmm1[0,1],mem[2,3]
4154 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm9[0,1,2,3],ymm0[4,5,6,7]
4155 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4156 ; AVX1-ONLY-NEXT: vmovaps 1536(%rdi), %xmm0
4157 ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4158 ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
4159 ; AVX1-ONLY-NEXT: # xmm0 = xmm0[0,1],mem[2,3]
4160 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm1
4161 ; AVX1-ONLY-NEXT: vmovaps 1456(%rdi), %xmm0
4162 ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4163 ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm9 # 16-byte Folded Reload
4164 ; AVX1-ONLY-NEXT: # xmm9 = xmm0[0,1],mem[2,3]
4165 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm9[0,1,2,3],ymm1[4,5,6,7]
4166 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4167 ; AVX1-ONLY-NEXT: vmovaps 1856(%rdi), %xmm0
4168 ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4169 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm11[2,3]
4170 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
4171 ; AVX1-ONLY-NEXT: vmovdqa 1776(%rdi), %xmm9
4172 ; AVX1-ONLY-NEXT: vmovdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4173 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm9 = xmm9[0,1,2,3],xmm12[4,5,6,7]
4174 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm9[0,1,2,3],ymm0[4,5,6,7]
4175 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4176 ; AVX1-ONLY-NEXT: vmovaps 2176(%rdi), %xmm0
4177 ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4178 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm13[2,3]
4179 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
4180 ; AVX1-ONLY-NEXT: vmovdqa 2096(%rdi), %xmm9
4181 ; AVX1-ONLY-NEXT: vmovdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4182 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm9 = xmm9[0,1,2,3],xmm14[4,5,6,7]
4183 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm9[0,1,2,3],ymm0[4,5,6,7]
4184 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4185 ; AVX1-ONLY-NEXT: vmovaps 2496(%rdi), %xmm0
4186 ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4187 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm15[2,3]
4188 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
4189 ; AVX1-ONLY-NEXT: vmovaps 2416(%rdi), %xmm15
4190 ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15, %xmm9 # 16-byte Folded Reload
4191 ; AVX1-ONLY-NEXT: # xmm9 = xmm15[0,1],mem[2,3]
4192 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm9[0,1,2,3],ymm0[4,5,6,7]
4193 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4194 ; AVX1-ONLY-NEXT: vmovaps 2336(%rdi), %xmm0
4195 ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4196 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3]
4197 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
4198 ; AVX1-ONLY-NEXT: vmovdqa 2256(%rdi), %xmm2
4199 ; AVX1-ONLY-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4200 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm3[4,5,6,7]
4201 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
4202 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4203 ; AVX1-ONLY-NEXT: vmovaps 2016(%rdi), %xmm0
4204 ; AVX1-ONLY-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4205 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,3]
4206 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
4207 ; AVX1-ONLY-NEXT: vmovdqa 1936(%rdi), %xmm1
4208 ; AVX1-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4209 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm5[4,5,6,7]
4210 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
4211 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4212 ; AVX1-ONLY-NEXT: vmovaps 1696(%rdi), %xmm12
4213 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm12[0,1],xmm6[2,3]
4214 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
4215 ; AVX1-ONLY-NEXT: vmovdqa 1616(%rdi), %xmm1
4216 ; AVX1-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4217 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm7[4,5,6,7]
4218 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
4219 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4220 ; AVX1-ONLY-NEXT: vmovaps 1376(%rdi), %xmm2
4221 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} xmm0 = xmm2[0,1],xmm8[2,3]
4222 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm1
4223 ; AVX1-ONLY-NEXT: vmovdqa 1296(%rdi), %xmm0
4224 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm3 = xmm0[0,1,2,3],xmm10[4,5,6,7]
4225 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
4226 ; AVX1-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4227 ; AVX1-ONLY-NEXT: vmovaps 1056(%rdi), %xmm3
4228 ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm1 # 16-byte Folded Reload
4229 ; AVX1-ONLY-NEXT: # xmm1 = xmm3[0,1],mem[2,3]
4230 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm4
4231 ; AVX1-ONLY-NEXT: vmovaps 976(%rdi), %xmm1
4232 ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm5 # 16-byte Folded Reload
4233 ; AVX1-ONLY-NEXT: # xmm5 = xmm1[0,1],mem[2,3]
4234 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1,2,3],ymm4[4,5,6,7]
4235 ; AVX1-ONLY-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4236 ; AVX1-ONLY-NEXT: vmovaps 736(%rdi), %xmm5
4237 ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm4 # 16-byte Folded Reload
4238 ; AVX1-ONLY-NEXT: # xmm4 = xmm5[0,1],mem[2,3]
4239 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm6
4240 ; AVX1-ONLY-NEXT: vmovaps 656(%rdi), %xmm4
4241 ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm7 # 16-byte Folded Reload
4242 ; AVX1-ONLY-NEXT: # xmm7 = xmm4[0,1],mem[2,3]
4243 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5,6,7]
4244 ; AVX1-ONLY-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4245 ; AVX1-ONLY-NEXT: vmovaps 416(%rdi), %xmm6
4246 ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm7 # 16-byte Folded Reload
4247 ; AVX1-ONLY-NEXT: # xmm7 = xmm6[0,1],mem[2,3]
4248 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm7, %ymm0, %ymm7
4249 ; AVX1-ONLY-NEXT: vmovaps 336(%rdi), %xmm8
4250 ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm9 # 16-byte Folded Reload
4251 ; AVX1-ONLY-NEXT: # xmm9 = xmm8[0,1],mem[2,3]
4252 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm7 = ymm9[0,1,2,3],ymm7[4,5,6,7]
4253 ; AVX1-ONLY-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4254 ; AVX1-ONLY-NEXT: vmovaps 96(%rdi), %xmm7
4255 ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm9 # 16-byte Folded Reload
4256 ; AVX1-ONLY-NEXT: # xmm9 = xmm7[0,1],mem[2,3]
4257 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm9, %ymm0, %ymm9
4258 ; AVX1-ONLY-NEXT: vmovaps 16(%rdi), %xmm10
4259 ; AVX1-ONLY-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10, %xmm11 # 16-byte Folded Reload
4260 ; AVX1-ONLY-NEXT: # xmm11 = xmm10[0,1],mem[2,3]
4261 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm9 = ymm11[0,1,2,3],ymm9[4,5,6,7]
4262 ; AVX1-ONLY-NEXT: vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4263 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm7, %ymm0, %ymm7
4264 ; AVX1-ONLY-NEXT: vmovapd 128(%rdi), %ymm9
4265 ; AVX1-ONLY-NEXT: vmovupd %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4266 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm7 = ymm7[0],ymm9[0],ymm7[3],ymm9[2]
4267 ; AVX1-ONLY-NEXT: vmovdqa 64(%rdi), %xmm9
4268 ; AVX1-ONLY-NEXT: vmovdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4269 ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm9 = xmm10[8,9,10,11,12,13,14,15],xmm9[0,1,2,3,4,5,6,7]
4270 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm7 = ymm9[0,1],ymm7[2,3]
4271 ; AVX1-ONLY-NEXT: vmovupd %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4272 ; AVX1-ONLY-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm7 # 16-byte Folded Reload
4273 ; AVX1-ONLY-NEXT: vmovapd 288(%rdi), %ymm9
4274 ; AVX1-ONLY-NEXT: vmovupd %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4275 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm7 = ymm7[0],ymm9[0],ymm7[3],ymm9[2]
4276 ; AVX1-ONLY-NEXT: vmovdqa 224(%rdi), %xmm9
4277 ; AVX1-ONLY-NEXT: vmovdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4278 ; AVX1-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9, %xmm9 # 16-byte Folded Reload
4279 ; AVX1-ONLY-NEXT: # xmm9 = mem[8,9,10,11,12,13,14,15],xmm9[0,1,2,3,4,5,6,7]
4280 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm7 = ymm9[0,1],ymm7[2,3]
4281 ; AVX1-ONLY-NEXT: vmovupd %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4282 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm6
4283 ; AVX1-ONLY-NEXT: vmovapd 448(%rdi), %ymm7
4284 ; AVX1-ONLY-NEXT: vmovupd %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4285 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm6 = ymm6[0],ymm7[0],ymm6[3],ymm7[2]
4286 ; AVX1-ONLY-NEXT: vmovdqa 384(%rdi), %xmm7
4287 ; AVX1-ONLY-NEXT: vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4288 ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm7 = xmm8[8,9,10,11,12,13,14,15],xmm7[0,1,2,3,4,5,6,7]
4289 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,3]
4290 ; AVX1-ONLY-NEXT: vmovupd %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4291 ; AVX1-ONLY-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm6 # 16-byte Folded Reload
4292 ; AVX1-ONLY-NEXT: vmovapd 608(%rdi), %ymm7
4293 ; AVX1-ONLY-NEXT: vmovupd %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4294 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm6 = ymm6[0],ymm7[0],ymm6[3],ymm7[2]
4295 ; AVX1-ONLY-NEXT: vmovdqa 544(%rdi), %xmm7
4296 ; AVX1-ONLY-NEXT: vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4297 ; AVX1-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm7 # 16-byte Folded Reload
4298 ; AVX1-ONLY-NEXT: # xmm7 = mem[8,9,10,11,12,13,14,15],xmm7[0,1,2,3,4,5,6,7]
4299 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,3]
4300 ; AVX1-ONLY-NEXT: vmovupd %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4301 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm5
4302 ; AVX1-ONLY-NEXT: vmovapd 768(%rdi), %ymm6
4303 ; AVX1-ONLY-NEXT: vmovupd %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4304 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm5 = ymm5[0],ymm6[0],ymm5[3],ymm6[2]
4305 ; AVX1-ONLY-NEXT: vmovdqa 704(%rdi), %xmm6
4306 ; AVX1-ONLY-NEXT: vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4307 ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm4 = xmm4[8,9,10,11,12,13,14,15],xmm6[0,1,2,3,4,5,6,7]
4308 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm4 = ymm4[0,1],ymm5[2,3]
4309 ; AVX1-ONLY-NEXT: vmovupd %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4310 ; AVX1-ONLY-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm4 # 16-byte Folded Reload
4311 ; AVX1-ONLY-NEXT: vmovapd 928(%rdi), %ymm5
4312 ; AVX1-ONLY-NEXT: vmovupd %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4313 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm4 = ymm4[0],ymm5[0],ymm4[3],ymm5[2]
4314 ; AVX1-ONLY-NEXT: vmovdqa 864(%rdi), %xmm5
4315 ; AVX1-ONLY-NEXT: vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4316 ; AVX1-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm5 # 16-byte Folded Reload
4317 ; AVX1-ONLY-NEXT: # xmm5 = mem[8,9,10,11,12,13,14,15],xmm5[0,1,2,3,4,5,6,7]
4318 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm4 = ymm5[0,1],ymm4[2,3]
4319 ; AVX1-ONLY-NEXT: vmovupd %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4320 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
4321 ; AVX1-ONLY-NEXT: vmovapd 1088(%rdi), %ymm4
4322 ; AVX1-ONLY-NEXT: vmovupd %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4323 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm3 = ymm3[0],ymm4[0],ymm3[3],ymm4[2]
4324 ; AVX1-ONLY-NEXT: vmovdqa 1024(%rdi), %xmm4
4325 ; AVX1-ONLY-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4326 ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm1 = xmm1[8,9,10,11,12,13,14,15],xmm4[0,1,2,3,4,5,6,7]
4327 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0,1],ymm3[2,3]
4328 ; AVX1-ONLY-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4329 ; AVX1-ONLY-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 16-byte Folded Reload
4330 ; AVX1-ONLY-NEXT: vmovapd 1248(%rdi), %ymm3
4331 ; AVX1-ONLY-NEXT: vmovupd %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4332 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[0],ymm3[0],ymm1[3],ymm3[2]
4333 ; AVX1-ONLY-NEXT: vmovdqa 1184(%rdi), %xmm3
4334 ; AVX1-ONLY-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4335 ; AVX1-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
4336 ; AVX1-ONLY-NEXT: # xmm3 = mem[8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7]
4337 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm1 = ymm3[0,1],ymm1[2,3]
4338 ; AVX1-ONLY-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4339 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm1
4340 ; AVX1-ONLY-NEXT: vmovapd 1408(%rdi), %ymm2
4341 ; AVX1-ONLY-NEXT: vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4342 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[3],ymm2[2]
4343 ; AVX1-ONLY-NEXT: vmovdqa 1344(%rdi), %xmm2
4344 ; AVX1-ONLY-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
4345 ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7]
4346 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
4347 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4348 ; AVX1-ONLY-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
4349 ; AVX1-ONLY-NEXT: vmovapd 1568(%rdi), %ymm14
4350 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[0],ymm14[0],ymm0[3],ymm14[2]
4351 ; AVX1-ONLY-NEXT: vmovdqa 1504(%rdi), %xmm13
4352 ; AVX1-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13, %xmm1 # 16-byte Folded Reload
4353 ; AVX1-ONLY-NEXT: # xmm1 = mem[8,9,10,11,12,13,14,15],xmm13[0,1,2,3,4,5,6,7]
4354 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
4355 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4356 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm12, %ymm0, %ymm0
4357 ; AVX1-ONLY-NEXT: vmovapd 1728(%rdi), %ymm12
4358 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[0],ymm12[0],ymm0[3],ymm12[2]
4359 ; AVX1-ONLY-NEXT: vmovdqa 1664(%rdi), %xmm11
4360 ; AVX1-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11, %xmm1 # 16-byte Folded Reload
4361 ; AVX1-ONLY-NEXT: # xmm1 = mem[8,9,10,11,12,13,14,15],xmm11[0,1,2,3,4,5,6,7]
4362 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
4363 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4364 ; AVX1-ONLY-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
4365 ; AVX1-ONLY-NEXT: vmovapd 1888(%rdi), %ymm10
4366 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[0],ymm10[0],ymm0[3],ymm10[2]
4367 ; AVX1-ONLY-NEXT: vmovdqa 1824(%rdi), %xmm9
4368 ; AVX1-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9, %xmm1 # 16-byte Folded Reload
4369 ; AVX1-ONLY-NEXT: # xmm1 = mem[8,9,10,11,12,13,14,15],xmm9[0,1,2,3,4,5,6,7]
4370 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
4371 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4372 ; AVX1-ONLY-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
4373 ; AVX1-ONLY-NEXT: vmovapd 2048(%rdi), %ymm8
4374 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[0],ymm8[0],ymm0[3],ymm8[2]
4375 ; AVX1-ONLY-NEXT: vmovdqa 1984(%rdi), %xmm7
4376 ; AVX1-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm1 # 16-byte Folded Reload
4377 ; AVX1-ONLY-NEXT: # xmm1 = mem[8,9,10,11,12,13,14,15],xmm7[0,1,2,3,4,5,6,7]
4378 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
4379 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4380 ; AVX1-ONLY-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
4381 ; AVX1-ONLY-NEXT: vmovapd 2208(%rdi), %ymm6
4382 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[0],ymm6[0],ymm0[3],ymm6[2]
4383 ; AVX1-ONLY-NEXT: vmovdqa 2144(%rdi), %xmm5
4384 ; AVX1-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm1 # 16-byte Folded Reload
4385 ; AVX1-ONLY-NEXT: # xmm1 = mem[8,9,10,11,12,13,14,15],xmm5[0,1,2,3,4,5,6,7]
4386 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
4387 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4388 ; AVX1-ONLY-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
4389 ; AVX1-ONLY-NEXT: vmovapd 2368(%rdi), %ymm4
4390 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[0],ymm4[0],ymm0[3],ymm4[2]
4391 ; AVX1-ONLY-NEXT: vmovdqa 2304(%rdi), %xmm3
4392 ; AVX1-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm1 # 16-byte Folded Reload
4393 ; AVX1-ONLY-NEXT: # xmm1 = mem[8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7]
4394 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
4395 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4396 ; AVX1-ONLY-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
4397 ; AVX1-ONLY-NEXT: vmovapd 2528(%rdi), %ymm2
4398 ; AVX1-ONLY-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[0],ymm2[0],ymm0[3],ymm2[2]
4399 ; AVX1-ONLY-NEXT: vmovdqa 2464(%rdi), %xmm1
4400 ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm15 = xmm15[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
4401 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm15[0,1],ymm0[2,3]
4402 ; AVX1-ONLY-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4403 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4404 ; AVX1-ONLY-NEXT: vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
4405 ; AVX1-ONLY-NEXT: # ymm0 = mem[0,1,2,3,4,5],ymm0[6,7]
4406 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
4407 ; AVX1-ONLY-NEXT: vblendps $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15, %xmm15 # 16-byte Folded Reload
4408 ; AVX1-ONLY-NEXT: # xmm15 = mem[0,1],xmm15[2,3]
4409 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm15[0,1,2,3],ymm0[4,5,6,7]
4410 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4411 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4412 ; AVX1-ONLY-NEXT: vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
4413 ; AVX1-ONLY-NEXT: # ymm0 = mem[0,1,2,3,4,5],ymm0[6,7]
4414 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
4415 ; AVX1-ONLY-NEXT: vblendps $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15, %xmm15 # 16-byte Folded Reload
4416 ; AVX1-ONLY-NEXT: # xmm15 = mem[0,1],xmm15[2,3]
4417 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm15[0,1,2,3],ymm0[4,5,6,7]
4418 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4419 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4420 ; AVX1-ONLY-NEXT: vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
4421 ; AVX1-ONLY-NEXT: # ymm0 = mem[0,1,2,3,4,5],ymm0[6,7]
4422 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
4423 ; AVX1-ONLY-NEXT: vblendps $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15, %xmm15 # 16-byte Folded Reload
4424 ; AVX1-ONLY-NEXT: # xmm15 = mem[0,1],xmm15[2,3]
4425 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm15[0,1,2,3],ymm0[4,5,6,7]
4426 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4427 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4428 ; AVX1-ONLY-NEXT: vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
4429 ; AVX1-ONLY-NEXT: # ymm0 = mem[0,1,2,3,4,5],ymm0[6,7]
4430 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
4431 ; AVX1-ONLY-NEXT: vblendps $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15, %xmm15 # 16-byte Folded Reload
4432 ; AVX1-ONLY-NEXT: # xmm15 = mem[0,1],xmm15[2,3]
4433 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm15[0,1,2,3],ymm0[4,5,6,7]
4434 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4435 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4436 ; AVX1-ONLY-NEXT: vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
4437 ; AVX1-ONLY-NEXT: # ymm0 = mem[0,1,2,3,4,5],ymm0[6,7]
4438 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
4439 ; AVX1-ONLY-NEXT: vblendps $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15, %xmm15 # 16-byte Folded Reload
4440 ; AVX1-ONLY-NEXT: # xmm15 = mem[0,1],xmm15[2,3]
4441 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm15[0,1,2,3],ymm0[4,5,6,7]
4442 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4443 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4444 ; AVX1-ONLY-NEXT: vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
4445 ; AVX1-ONLY-NEXT: # ymm0 = mem[0,1,2,3,4,5],ymm0[6,7]
4446 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
4447 ; AVX1-ONLY-NEXT: vblendps $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15, %xmm15 # 16-byte Folded Reload
4448 ; AVX1-ONLY-NEXT: # xmm15 = mem[0,1],xmm15[2,3]
4449 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm15[0,1,2,3],ymm0[4,5,6,7]
4450 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4451 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4452 ; AVX1-ONLY-NEXT: vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
4453 ; AVX1-ONLY-NEXT: # ymm0 = mem[0,1,2,3,4,5],ymm0[6,7]
4454 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
4455 ; AVX1-ONLY-NEXT: vblendps $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15, %xmm15 # 16-byte Folded Reload
4456 ; AVX1-ONLY-NEXT: # xmm15 = mem[0,1],xmm15[2,3]
4457 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm15[0,1,2,3],ymm0[4,5,6,7]
4458 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4459 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4460 ; AVX1-ONLY-NEXT: vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
4461 ; AVX1-ONLY-NEXT: # ymm0 = mem[0,1,2,3,4,5],ymm0[6,7]
4462 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
4463 ; AVX1-ONLY-NEXT: vblendps $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15, %xmm15 # 16-byte Folded Reload
4464 ; AVX1-ONLY-NEXT: # xmm15 = mem[0,1],xmm15[2,3]
4465 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm15[0,1,2,3],ymm0[4,5,6,7]
4466 ; AVX1-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
4467 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4468 ; AVX1-ONLY-NEXT: vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
4469 ; AVX1-ONLY-NEXT: # ymm0 = mem[0,1,2,3,4,5],ymm0[6,7]
4470 ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
4471 ; AVX1-ONLY-NEXT: vblendps $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15, %xmm15 # 16-byte Folded Reload
4472 ; AVX1-ONLY-NEXT: # xmm15 = mem[0,1],xmm15[2,3]
4473 ; AVX1-ONLY-NEXT: vblendps {{.*#+}} ymm15 = ymm15[0,1,2,3],ymm0[4,5,6,7]
4474 ; AVX1-ONLY-NEXT: vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm0 # 32-byte Folded Reload
4475 ; AVX1-ONLY-NEXT: # ymm0 = mem[0,1,2],ymm14[3]
4476 ; AVX1-ONLY-NEXT: vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13, %xmm13 # 16-byte Folded Reload
4477 ; AVX1-ONLY-NEXT: # xmm13 = mem[0,1,2,3],xmm13[4,5,6,7]
4478 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm13 = ymm13[0,1],ymm0[2,3]
4479 ; AVX1-ONLY-NEXT: vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm0 # 32-byte Folded Reload
4480 ; AVX1-ONLY-NEXT: # ymm0 = mem[0,1,2],ymm12[3]
4481 ; AVX1-ONLY-NEXT: vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11, %xmm11 # 16-byte Folded Reload
4482 ; AVX1-ONLY-NEXT: # xmm11 = mem[0,1,2,3],xmm11[4,5,6,7]
4483 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm11 = ymm11[0,1],ymm0[2,3]
4484 ; AVX1-ONLY-NEXT: vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm0 # 32-byte Folded Reload
4485 ; AVX1-ONLY-NEXT: # ymm0 = mem[0,1,2],ymm10[3]
4486 ; AVX1-ONLY-NEXT: vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9, %xmm9 # 16-byte Folded Reload
4487 ; AVX1-ONLY-NEXT: # xmm9 = mem[0,1,2,3],xmm9[4,5,6,7]
4488 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm9 = ymm9[0,1],ymm0[2,3]
4489 ; AVX1-ONLY-NEXT: vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm0 # 32-byte Folded Reload
4490 ; AVX1-ONLY-NEXT: # ymm0 = mem[0,1,2],ymm8[3]
4491 ; AVX1-ONLY-NEXT: vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm7 # 16-byte Folded Reload
4492 ; AVX1-ONLY-NEXT: # xmm7 = mem[0,1,2,3],xmm7[4,5,6,7]
4493 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm7 = ymm7[0,1],ymm0[2,3]
4494 ; AVX1-ONLY-NEXT: vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm0 # 32-byte Folded Reload
4495 ; AVX1-ONLY-NEXT: # ymm0 = mem[0,1,2],ymm6[3]
4496 ; AVX1-ONLY-NEXT: vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm5 # 16-byte Folded Reload
4497 ; AVX1-ONLY-NEXT: # xmm5 = mem[0,1,2,3],xmm5[4,5,6,7]
4498 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm5 = ymm5[0,1],ymm0[2,3]
4499 ; AVX1-ONLY-NEXT: vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm0 # 32-byte Folded Reload
4500 ; AVX1-ONLY-NEXT: # ymm0 = mem[0,1,2],ymm4[3]
4501 ; AVX1-ONLY-NEXT: vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
4502 ; AVX1-ONLY-NEXT: # xmm3 = mem[0,1,2,3],xmm3[4,5,6,7]
4503 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm3 = ymm3[0,1],ymm0[2,3]
4504 ; AVX1-ONLY-NEXT: vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm0 # 32-byte Folded Reload
4505 ; AVX1-ONLY-NEXT: # ymm0 = mem[0,1,2],ymm2[3]
4506 ; AVX1-ONLY-NEXT: vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
4507 ; AVX1-ONLY-NEXT: # xmm1 = mem[0,1,2,3],xmm1[4,5,6,7]
4508 ; AVX1-ONLY-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
4509 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4510 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 448(%rsi)
4511 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4512 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 384(%rsi)
4513 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4514 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 320(%rsi)
4515 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4516 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 256(%rsi)
4517 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4518 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 192(%rsi)
4519 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4520 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 128(%rsi)
4521 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4522 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 64(%rsi)
4523 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4524 ; AVX1-ONLY-NEXT: vmovaps %ymm1, (%rsi)
4525 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4526 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 480(%rsi)
4527 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4528 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 416(%rsi)
4529 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4530 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 352(%rsi)
4531 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4532 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 288(%rsi)
4533 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4534 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 224(%rsi)
4535 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4536 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 160(%rsi)
4537 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4538 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 96(%rsi)
4539 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4540 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 32(%rsi)
4541 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4542 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 448(%rdx)
4543 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4544 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 384(%rdx)
4545 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4546 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 320(%rdx)
4547 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4548 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 256(%rdx)
4549 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4550 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 192(%rdx)
4551 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4552 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 128(%rdx)
4553 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4554 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 64(%rdx)
4555 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4556 ; AVX1-ONLY-NEXT: vmovaps %ymm1, (%rdx)
4557 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4558 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 480(%rdx)
4559 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4560 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 416(%rdx)
4561 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4562 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 352(%rdx)
4563 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4564 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 288(%rdx)
4565 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4566 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 224(%rdx)
4567 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4568 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 160(%rdx)
4569 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4570 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 96(%rdx)
4571 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4572 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 32(%rdx)
4573 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4574 ; AVX1-ONLY-NEXT: vmovaps %ymm1, (%rcx)
4575 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4576 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 64(%rcx)
4577 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4578 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 128(%rcx)
4579 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4580 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 192(%rcx)
4581 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4582 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 256(%rcx)
4583 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4584 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 320(%rcx)
4585 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4586 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 384(%rcx)
4587 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4588 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 448(%rcx)
4589 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4590 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 480(%rcx)
4591 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4592 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 416(%rcx)
4593 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4594 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 352(%rcx)
4595 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4596 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 288(%rcx)
4597 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4598 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 224(%rcx)
4599 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4600 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 160(%rcx)
4601 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4602 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 96(%rcx)
4603 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4604 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 32(%rcx)
4605 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4606 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 480(%r8)
4607 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4608 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 448(%r8)
4609 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4610 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 416(%r8)
4611 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4612 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 384(%r8)
4613 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4614 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 352(%r8)
4615 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4616 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 320(%r8)
4617 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4618 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 288(%r8)
4619 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4620 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 256(%r8)
4621 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4622 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 224(%r8)
4623 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4624 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 192(%r8)
4625 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4626 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 160(%r8)
4627 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4628 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 128(%r8)
4629 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4630 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 96(%r8)
4631 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4632 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 64(%r8)
4633 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4634 ; AVX1-ONLY-NEXT: vmovaps %ymm1, 32(%r8)
4635 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
4636 ; AVX1-ONLY-NEXT: vmovaps %ymm1, (%r8)
4637 ; AVX1-ONLY-NEXT: vmovapd %ymm0, 480(%r9)
4638 ; AVX1-ONLY-NEXT: vmovapd %ymm3, 448(%r9)
4639 ; AVX1-ONLY-NEXT: vmovapd %ymm5, 416(%r9)
4640 ; AVX1-ONLY-NEXT: vmovapd %ymm7, 384(%r9)
4641 ; AVX1-ONLY-NEXT: vmovapd %ymm9, 352(%r9)
4642 ; AVX1-ONLY-NEXT: vmovapd %ymm11, 320(%r9)
4643 ; AVX1-ONLY-NEXT: vmovapd %ymm13, 288(%r9)
4644 ; AVX1-ONLY-NEXT: vmovaps %ymm15, 256(%r9)
4645 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4646 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 224(%r9)
4647 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4648 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 192(%r9)
4649 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4650 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 160(%r9)
4651 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4652 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 128(%r9)
4653 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4654 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 96(%r9)
4655 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4656 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 64(%r9)
4657 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4658 ; AVX1-ONLY-NEXT: vmovaps %ymm0, 32(%r9)
4659 ; AVX1-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
4660 ; AVX1-ONLY-NEXT: vmovaps %ymm0, (%r9)
4661 ; AVX1-ONLY-NEXT: addq $3256, %rsp # imm = 0xCB8
4662 ; AVX1-ONLY-NEXT: vzeroupper
4663 ; AVX1-ONLY-NEXT: retq
; AVX2-ONLY-LABEL: load_i64_stride5_vf64:
; AVX2-ONLY: # %bb.0:
; AVX2-ONLY-NEXT: subq $3240, %rsp # imm = 0xCA8
; AVX2-ONLY-NEXT: vmovaps 896(%rdi), %ymm2
; AVX2-ONLY-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 864(%rdi), %ymm4
; AVX2-ONLY-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 576(%rdi), %ymm3
; AVX2-ONLY-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 544(%rdi), %ymm5
; AVX2-ONLY-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 256(%rdi), %ymm0
; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 224(%rdi), %ymm1
; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-ONLY-NEXT: vmovaps 192(%rdi), %xmm1
; AVX2-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-ONLY-NEXT: vmovaps 160(%rdi), %xmm6
; AVX2-ONLY-NEXT: vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm6[0,1],xmm1[2,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1,2,3,4,5],ymm3[6,7]
; AVX2-ONLY-NEXT: vmovaps 512(%rdi), %xmm1
; AVX2-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-ONLY-NEXT: vmovaps 480(%rdi), %xmm3
; AVX2-ONLY-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm3[0,1],xmm1[2,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm4[0,1,2,3,4,5],ymm2[6,7]
; AVX2-ONLY-NEXT: vmovaps 832(%rdi), %xmm1
; AVX2-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-ONLY-NEXT: vmovaps 800(%rdi), %xmm2
; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 1216(%rdi), %ymm0
; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 1184(%rdi), %ymm1
; AVX2-ONLY-NEXT: vmovups %ymm1, (%rsp) # 32-byte Spill
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-ONLY-NEXT: vmovaps 1152(%rdi), %xmm1
; AVX2-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-ONLY-NEXT: vmovaps 1120(%rdi), %xmm2
; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 1536(%rdi), %ymm0
; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 1504(%rdi), %ymm1
; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-ONLY-NEXT: vmovaps 1472(%rdi), %xmm1
; AVX2-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-ONLY-NEXT: vmovaps 1440(%rdi), %xmm2
; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 1856(%rdi), %ymm0
; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 1824(%rdi), %ymm1
; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-ONLY-NEXT: vmovaps 1792(%rdi), %xmm1
; AVX2-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-ONLY-NEXT: vmovaps 1760(%rdi), %xmm2
; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 2176(%rdi), %ymm0
; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 2144(%rdi), %ymm1
; AVX2-ONLY-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-ONLY-NEXT: vmovaps 2112(%rdi), %xmm1
; AVX2-ONLY-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-ONLY-NEXT: vmovaps 2080(%rdi), %xmm2
; AVX2-ONLY-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovdqa 2496(%rdi), %ymm0
; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovdqa 2464(%rdi), %ymm1
; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-ONLY-NEXT: vmovdqa 2432(%rdi), %xmm1
; AVX2-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-ONLY-NEXT: vmovdqa 2400(%rdi), %xmm13
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm1 = xmm13[0,1],xmm1[2,3]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovdqa 96(%rdi), %ymm0
; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovdqa 64(%rdi), %ymm1
; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-ONLY-NEXT: vmovdqa (%rdi), %xmm14
; AVX2-ONLY-NEXT: vmovdqa 32(%rdi), %xmm1
; AVX2-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm1 = xmm14[0,1],xmm1[2,3]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovdqa 416(%rdi), %ymm0
; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovdqa 384(%rdi), %ymm1
; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-ONLY-NEXT: vmovdqa 352(%rdi), %xmm1
; AVX2-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-ONLY-NEXT: vmovdqa 320(%rdi), %xmm8
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm1 = xmm8[0,1],xmm1[2,3]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovdqa 736(%rdi), %ymm0
; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovdqa 704(%rdi), %ymm1
; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-ONLY-NEXT: vmovdqa 672(%rdi), %xmm1
; AVX2-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-ONLY-NEXT: vmovdqa 640(%rdi), %xmm7
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm1 = xmm7[0,1],xmm1[2,3]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovdqa 1056(%rdi), %ymm0
; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovdqa 1024(%rdi), %ymm1
; AVX2-ONLY-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-ONLY-NEXT: vmovdqa 992(%rdi), %xmm1
; AVX2-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-ONLY-NEXT: vmovdqa 960(%rdi), %xmm5
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm1 = xmm5[0,1],xmm1[2,3]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovdqa 1376(%rdi), %ymm0
; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovdqa 1344(%rdi), %ymm12
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm12[0,1,2,3,4,5],ymm0[6,7]
; AVX2-ONLY-NEXT: vmovdqa 1312(%rdi), %xmm1
; AVX2-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-ONLY-NEXT: vmovdqa 1280(%rdi), %xmm4
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm1 = xmm4[0,1],xmm1[2,3]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovdqa 1696(%rdi), %ymm0
; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovdqa 1664(%rdi), %ymm11
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm11[0,1,2,3,4,5],ymm0[6,7]
; AVX2-ONLY-NEXT: vmovdqa 1632(%rdi), %xmm1
; AVX2-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-ONLY-NEXT: vmovdqa 1600(%rdi), %xmm3
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm1 = xmm3[0,1],xmm1[2,3]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovdqa 2016(%rdi), %ymm0
; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovdqa 1984(%rdi), %ymm10
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm10[0,1,2,3,4,5],ymm0[6,7]
; AVX2-ONLY-NEXT: vmovdqa 1952(%rdi), %xmm1
; AVX2-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-ONLY-NEXT: vmovdqa 1920(%rdi), %xmm9
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm6 = xmm9[0,1],xmm1[2,3]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm6[0,1,2,3],ymm0[4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovdqa 2336(%rdi), %ymm0
; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovdqa 2304(%rdi), %ymm6
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm6[0,1,2,3,4,5],ymm0[6,7]
; AVX2-ONLY-NEXT: vmovdqa 2272(%rdi), %xmm0
; AVX2-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-ONLY-NEXT: vmovdqa 2240(%rdi), %xmm1
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} xmm15 = xmm1[0,1],xmm0[2,3]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm15[0,1,2,3],ymm2[4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovdqa 208(%rdi), %xmm0
; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX2-ONLY-NEXT: # xmm0 = mem[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqa 288(%rdi), %ymm2
; AVX2-ONLY-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm15 # 32-byte Folded Reload
; AVX2-ONLY-NEXT: # ymm15 = mem[8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23]
; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm15 = ymm15[0,1,2,1]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm15[4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovdqa 528(%rdi), %xmm0
; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX2-ONLY-NEXT: # xmm0 = mem[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqa 608(%rdi), %ymm2
; AVX2-ONLY-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm15 # 32-byte Folded Reload
; AVX2-ONLY-NEXT: # ymm15 = mem[8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23]
; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm15 = ymm15[0,1,2,1]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm15[4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovdqa 848(%rdi), %xmm0
; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX2-ONLY-NEXT: # xmm0 = mem[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqa 928(%rdi), %ymm2
; AVX2-ONLY-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm15 # 32-byte Folded Reload
; AVX2-ONLY-NEXT: # ymm15 = mem[8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23]
; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm15 = ymm15[0,1,2,1]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm15[4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovdqa 1168(%rdi), %xmm0
; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX2-ONLY-NEXT: # xmm0 = mem[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqa 1248(%rdi), %ymm2
; AVX2-ONLY-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vpalignr $8, (%rsp), %ymm2, %ymm15 # 32-byte Folded Reload
; AVX2-ONLY-NEXT: # ymm15 = mem[8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23]
; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm15 = ymm15[0,1,2,1]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm15[4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovdqa 1488(%rdi), %xmm0
; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX2-ONLY-NEXT: # xmm0 = mem[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqa 1568(%rdi), %ymm2
; AVX2-ONLY-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm15 # 32-byte Folded Reload
; AVX2-ONLY-NEXT: # ymm15 = mem[8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23]
; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm15 = ymm15[0,1,2,1]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm15[4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovdqa 1808(%rdi), %xmm0
; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX2-ONLY-NEXT: # xmm0 = mem[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqa 1888(%rdi), %ymm2
; AVX2-ONLY-NEXT: vmovdqu %ymm2, (%rsp) # 32-byte Spill
; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm15 # 32-byte Folded Reload
; AVX2-ONLY-NEXT: # ymm15 = mem[8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23]
; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm15 = ymm15[0,1,2,1]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm15[4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovdqa 2128(%rdi), %xmm0
; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX2-ONLY-NEXT: # xmm0 = mem[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqa 2208(%rdi), %ymm2
; AVX2-ONLY-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm15 # 32-byte Folded Reload
; AVX2-ONLY-NEXT: # ymm15 = mem[8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23]
; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm15 = ymm15[0,1,2,1]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm15[4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovdqa 2448(%rdi), %xmm0
; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm0 = xmm13[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqa 2528(%rdi), %ymm2
; AVX2-ONLY-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm13 # 32-byte Folded Reload
; AVX2-ONLY-NEXT: # ymm13 = mem[8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23]
; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm13 = ymm13[0,1,2,1]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm13[4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovdqa 48(%rdi), %xmm0
; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm0 = xmm14[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqa 128(%rdi), %ymm2
; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm13 # 32-byte Folded Reload
; AVX2-ONLY-NEXT: # ymm13 = mem[8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23]
; AVX2-ONLY-NEXT: vmovdqa %ymm2, %ymm15
; AVX2-ONLY-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm13 = ymm13[0,1,2,1]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm13[4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovdqa 368(%rdi), %xmm0
; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm0 = xmm8[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqa 448(%rdi), %ymm2
; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm8 # 32-byte Folded Reload
; AVX2-ONLY-NEXT: # ymm8 = mem[8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23]
; AVX2-ONLY-NEXT: vmovdqa %ymm2, %ymm13
; AVX2-ONLY-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,1,2,1]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm8[4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovdqa 688(%rdi), %xmm0
; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm0 = xmm7[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqa 768(%rdi), %ymm2
; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm7 # 32-byte Folded Reload
; AVX2-ONLY-NEXT: # ymm7 = mem[8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23]
; AVX2-ONLY-NEXT: vmovdqa %ymm2, %ymm8
; AVX2-ONLY-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,1,2,1]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm7[4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovdqa 1008(%rdi), %xmm0
; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm0 = xmm5[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqa 1088(%rdi), %ymm2
; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm5 # 32-byte Folded Reload
; AVX2-ONLY-NEXT: # ymm5 = mem[8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23]
; AVX2-ONLY-NEXT: vmovdqa %ymm2, %ymm7
; AVX2-ONLY-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm5 = ymm5[0,1,2,1]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm5[4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovdqa 1328(%rdi), %xmm0
; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm0 = xmm4[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqa 1408(%rdi), %ymm2
; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm4 = ymm12[8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7],ymm12[24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23]
; AVX2-ONLY-NEXT: vmovdqa %ymm2, %ymm5
; AVX2-ONLY-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,1,2,1]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm4[4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovdqa 1648(%rdi), %xmm0
; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm0 = xmm3[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqa 1728(%rdi), %ymm2
; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm3 = ymm11[8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7],ymm11[24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23]
; AVX2-ONLY-NEXT: vmovdqa %ymm2, %ymm4
; AVX2-ONLY-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,1,2,1]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovdqa 1968(%rdi), %xmm0
; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm0 = xmm9[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqa 2048(%rdi), %ymm3
; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm2 = ymm10[8,9,10,11,12,13,14,15],ymm3[0,1,2,3,4,5,6,7],ymm10[24,25,26,27,28,29,30,31],ymm3[16,17,18,19,20,21,22,23]
; AVX2-ONLY-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,2,1]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovdqa 2288(%rdi), %xmm0
; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm0 = xmm1[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqa 2368(%rdi), %ymm2
; AVX2-ONLY-NEXT: vpalignr {{.*#+}} ymm1 = ymm6[8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7],ymm6[24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23]
; AVX2-ONLY-NEXT: vmovdqa %ymm2, %ymm14
; AVX2-ONLY-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,2,1]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 160(%rdi), %ymm0
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7]
; AVX2-ONLY-NEXT: vmovaps 256(%rdi), %xmm1
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
; AVX2-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 480(%rdi), %ymm0
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7]
; AVX2-ONLY-NEXT: vmovaps 576(%rdi), %xmm1
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
; AVX2-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 800(%rdi), %ymm0
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7]
; AVX2-ONLY-NEXT: vmovaps 896(%rdi), %xmm1
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
; AVX2-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 1120(%rdi), %ymm0
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7]
; AVX2-ONLY-NEXT: vmovaps 1216(%rdi), %xmm1
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
; AVX2-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 1440(%rdi), %ymm0
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7]
; AVX2-ONLY-NEXT: vmovaps 1536(%rdi), %xmm1
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
; AVX2-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 1760(%rdi), %ymm0
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7]
; AVX2-ONLY-NEXT: vmovaps 1856(%rdi), %xmm1
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
; AVX2-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 2080(%rdi), %ymm0
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7]
; AVX2-ONLY-NEXT: vmovaps 2176(%rdi), %xmm1
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
; AVX2-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 2400(%rdi), %ymm0
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7]
; AVX2-ONLY-NEXT: vmovaps 2496(%rdi), %xmm1
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
; AVX2-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 2240(%rdi), %ymm0
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7]
; AVX2-ONLY-NEXT: vmovaps 2336(%rdi), %xmm1
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
; AVX2-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 1920(%rdi), %ymm0
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7]
; AVX2-ONLY-NEXT: vmovaps 2016(%rdi), %xmm1
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
; AVX2-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 1600(%rdi), %ymm0
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7]
; AVX2-ONLY-NEXT: vmovaps 1696(%rdi), %xmm1
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
; AVX2-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 1280(%rdi), %ymm0
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7]
; AVX2-ONLY-NEXT: vmovaps 1376(%rdi), %xmm1
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
; AVX2-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 960(%rdi), %ymm0
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7]
; AVX2-ONLY-NEXT: vmovaps 1056(%rdi), %xmm1
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
; AVX2-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 640(%rdi), %ymm0
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7]
; AVX2-ONLY-NEXT: vmovaps 736(%rdi), %xmm1
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
; AVX2-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps 320(%rdi), %ymm0
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7]
; AVX2-ONLY-NEXT: vmovaps 416(%rdi), %xmm1
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
; AVX2-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovaps (%rdi), %ymm0
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7]
; AVX2-ONLY-NEXT: vmovaps 96(%rdi), %xmm1
; AVX2-ONLY-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
; AVX2-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX2-ONLY-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
; AVX2-ONLY-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm0 # 32-byte Folded Reload
; AVX2-ONLY-NEXT: # ymm0 = mem[8,9,10,11,12,13,14,15],ymm15[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm15[16,17,18,19,20,21,22,23]
; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,3]
; AVX2-ONLY-NEXT: vmovdqa 64(%rdi), %xmm1
; AVX2-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm1 = mem[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX2-ONLY-NEXT: # ymm0 = mem[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23]
; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,3]
; AVX2-ONLY-NEXT: vmovdqa 224(%rdi), %xmm1
; AVX2-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm1 = mem[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm0 # 32-byte Folded Reload
; AVX2-ONLY-NEXT: # ymm0 = mem[8,9,10,11,12,13,14,15],ymm13[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm13[16,17,18,19,20,21,22,23]
; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,3]
; AVX2-ONLY-NEXT: vmovdqa 384(%rdi), %xmm13
; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm1 = mem[8,9,10,11,12,13,14,15],xmm13[0,1,2,3,4,5,6,7]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX2-ONLY-NEXT: # ymm0 = mem[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23]
; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,3]
; AVX2-ONLY-NEXT: vmovdqa 544(%rdi), %xmm11
; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm1 = mem[8,9,10,11,12,13,14,15],xmm11[0,1,2,3,4,5,6,7]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm0 # 32-byte Folded Reload
; AVX2-ONLY-NEXT: # ymm0 = mem[8,9,10,11,12,13,14,15],ymm8[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm8[16,17,18,19,20,21,22,23]
; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,3]
; AVX2-ONLY-NEXT: vmovdqa 704(%rdi), %xmm12
; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm1 = mem[8,9,10,11,12,13,14,15],xmm12[0,1,2,3,4,5,6,7]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX2-ONLY-NEXT: # ymm0 = mem[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23]
; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,3]
; AVX2-ONLY-NEXT: vmovdqa 864(%rdi), %xmm10
; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm1 = mem[8,9,10,11,12,13,14,15],xmm10[0,1,2,3,4,5,6,7]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm0 # 32-byte Folded Reload
; AVX2-ONLY-NEXT: # ymm0 = mem[8,9,10,11,12,13,14,15],ymm7[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm7[16,17,18,19,20,21,22,23]
; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,3]
; AVX2-ONLY-NEXT: vmovdqa 1024(%rdi), %xmm9
; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm1 = mem[8,9,10,11,12,13,14,15],xmm9[0,1,2,3,4,5,6,7]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX2-ONLY-NEXT: # ymm0 = mem[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23]
; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,3]
; AVX2-ONLY-NEXT: vmovdqa 1184(%rdi), %xmm8
; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm1 = mem[8,9,10,11,12,13,14,15],xmm8[0,1,2,3,4,5,6,7]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm0 # 32-byte Folded Reload
; AVX2-ONLY-NEXT: # ymm0 = mem[8,9,10,11,12,13,14,15],ymm5[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm5[16,17,18,19,20,21,22,23]
; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,3]
; AVX2-ONLY-NEXT: vmovdqa 1344(%rdi), %xmm7
; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm1 = mem[8,9,10,11,12,13,14,15],xmm7[0,1,2,3,4,5,6,7]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX2-ONLY-NEXT: # ymm0 = mem[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23]
; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,3]
; AVX2-ONLY-NEXT: vmovdqa 1504(%rdi), %xmm6
; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm1 = mem[8,9,10,11,12,13,14,15],xmm6[0,1,2,3,4,5,6,7]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm0 # 32-byte Folded Reload
; AVX2-ONLY-NEXT: # ymm0 = mem[8,9,10,11,12,13,14,15],ymm4[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm4[16,17,18,19,20,21,22,23]
; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,3]
; AVX2-ONLY-NEXT: vmovdqa 1664(%rdi), %xmm5
; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm1 = mem[8,9,10,11,12,13,14,15],xmm5[0,1,2,3,4,5,6,7]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovdqu (%rsp), %ymm0 # 32-byte Reload
; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX2-ONLY-NEXT: # ymm0 = mem[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23]
; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,3]
; AVX2-ONLY-NEXT: vmovdqa 1824(%rdi), %xmm4
; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm1 = mem[8,9,10,11,12,13,14,15],xmm4[0,1,2,3,4,5,6,7]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm0 # 32-byte Folded Reload
; AVX2-ONLY-NEXT: # ymm0 = mem[8,9,10,11,12,13,14,15],ymm3[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm3[16,17,18,19,20,21,22,23]
; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,3]
; AVX2-ONLY-NEXT: vmovdqa 1984(%rdi), %xmm3
; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm1 = mem[8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX2-ONLY-NEXT: # ymm0 = mem[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23]
; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,3]
; AVX2-ONLY-NEXT: vmovdqa 2144(%rdi), %xmm2
; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm1 = mem[8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm0 # 32-byte Folded Reload
; AVX2-ONLY-NEXT: # ymm0 = mem[8,9,10,11,12,13,14,15],ymm14[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm14[16,17,18,19,20,21,22,23]
; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,3]
; AVX2-ONLY-NEXT: vmovdqa 2304(%rdi), %xmm1
; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm15 = mem[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm15[0,1,2,3],ymm0[4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-ONLY-NEXT: vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX2-ONLY-NEXT: # ymm0 = mem[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23]
; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm15 = ymm0[0,1,0,3]
; AVX2-ONLY-NEXT: vmovdqa 2464(%rdi), %xmm0
; AVX2-ONLY-NEXT: vpalignr {{.*#+}} xmm14 = mem[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm14 = ymm14[0,1,2,3],ymm15[4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
; AVX2-ONLY-NEXT: vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
; AVX2-ONLY-NEXT: # ymm14 = mem[0,1,2,3,4,5],ymm14[6,7]
; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
; AVX2-ONLY-NEXT: vblendps $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15, %xmm15 # 16-byte Folded Reload
; AVX2-ONLY-NEXT: # xmm15 = mem[0,1],xmm15[2,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0,1,2,3],ymm14[4,5,6,7]
; AVX2-ONLY-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
; AVX2-ONLY-NEXT: vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
; AVX2-ONLY-NEXT: # ymm14 = mem[0,1,2,3,4,5],ymm14[6,7]
; AVX2-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
; AVX2-ONLY-NEXT: vblendps $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15, %xmm15 # 16-byte Folded Reload
; AVX2-ONLY-NEXT: # xmm15 = mem[0,1],xmm15[2,3]
; AVX2-ONLY-NEXT: vblendps {{.*#+}} ymm15 = ymm15[0,1,2,3],ymm14[4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
; AVX2-ONLY-NEXT: vpblendd $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
; AVX2-ONLY-NEXT: # ymm14 = mem[0,1,2,3,4,5],ymm14[6,7]
; AVX2-ONLY-NEXT: vpblendd $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13, %xmm13 # 16-byte Folded Reload
; AVX2-ONLY-NEXT: # xmm13 = mem[0,1],xmm13[2,3]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm13 = ymm13[0,1,2,3],ymm14[4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
; AVX2-ONLY-NEXT: vpblendd $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm13 # 32-byte Folded Reload
; AVX2-ONLY-NEXT: # ymm13 = mem[0,1,2,3,4,5],ymm13[6,7]
; AVX2-ONLY-NEXT: vpblendd $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11, %xmm11 # 16-byte Folded Reload
; AVX2-ONLY-NEXT: # xmm11 = mem[0,1],xmm11[2,3]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3],ymm13[4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
; AVX2-ONLY-NEXT: vpblendd $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm11 # 32-byte Folded Reload
; AVX2-ONLY-NEXT: # ymm11 = mem[0,1,2,3,4,5],ymm11[6,7]
; AVX2-ONLY-NEXT: vpblendd $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12, %xmm12 # 16-byte Folded Reload
; AVX2-ONLY-NEXT: # xmm12 = mem[0,1],xmm12[2,3]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm11[4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
; AVX2-ONLY-NEXT: vpblendd $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm11 # 32-byte Folded Reload
; AVX2-ONLY-NEXT: # ymm11 = mem[0,1,2,3,4,5],ymm11[6,7]
; AVX2-ONLY-NEXT: vpblendd $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10, %xmm10 # 16-byte Folded Reload
; AVX2-ONLY-NEXT: # xmm10 = mem[0,1],xmm10[2,3]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm11 = ymm10[0,1,2,3],ymm11[4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
; AVX2-ONLY-NEXT: vpblendd $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm10 # 32-byte Folded Reload
; AVX2-ONLY-NEXT: # ymm10 = mem[0,1,2,3,4,5],ymm10[6,7]
; AVX2-ONLY-NEXT: vpblendd $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9, %xmm9 # 16-byte Folded Reload
; AVX2-ONLY-NEXT: # xmm9 = mem[0,1],xmm9[2,3]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm10[4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
; AVX2-ONLY-NEXT: vpblendd $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm10 # 32-byte Folded Reload
; AVX2-ONLY-NEXT: # ymm10 = mem[0,1,2,3,4,5],ymm10[6,7]
; AVX2-ONLY-NEXT: vpblendd $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm8 # 16-byte Folded Reload
; AVX2-ONLY-NEXT: # xmm8 = mem[0,1],xmm8[2,3]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm14 = ymm8[0,1,2,3],ymm10[4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
; AVX2-ONLY-NEXT: vpblendd $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm8 # 32-byte Folded Reload
; AVX2-ONLY-NEXT: # ymm8 = mem[0,1,2,3,4,5],ymm8[6,7]
; AVX2-ONLY-NEXT: vpblendd $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm7 # 16-byte Folded Reload
; AVX2-ONLY-NEXT: # xmm7 = mem[0,1],xmm7[2,3]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm10 = ymm7[0,1,2,3],ymm8[4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
; AVX2-ONLY-NEXT: vpblendd $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
; AVX2-ONLY-NEXT: # ymm7 = mem[0,1,2,3,4,5],ymm7[6,7]
; AVX2-ONLY-NEXT: vpblendd $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm6 # 16-byte Folded Reload
; AVX2-ONLY-NEXT: # xmm6 = mem[0,1],xmm6[2,3]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
; AVX2-ONLY-NEXT: vpblendd $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
; AVX2-ONLY-NEXT: # ymm7 = mem[0,1,2,3,4,5],ymm7[6,7]
; AVX2-ONLY-NEXT: vpblendd $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm5 # 16-byte Folded Reload
; AVX2-ONLY-NEXT: # xmm5 = mem[0,1],xmm5[2,3]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm13 = ymm5[0,1,2,3],ymm7[4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqu (%rsp), %ymm5 # 32-byte Reload
; AVX2-ONLY-NEXT: vpblendd $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload
; AVX2-ONLY-NEXT: # ymm5 = mem[0,1,2,3,4,5],ymm5[6,7]
; AVX2-ONLY-NEXT: vpblendd $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload
; AVX2-ONLY-NEXT: # xmm4 = mem[0,1],xmm4[2,3]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
; AVX2-ONLY-NEXT: vpblendd $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload
; AVX2-ONLY-NEXT: # ymm5 = mem[0,1,2,3,4,5],ymm5[6,7]
; AVX2-ONLY-NEXT: vpblendd $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
; AVX2-ONLY-NEXT: # xmm3 = mem[0,1],xmm3[2,3]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm7 = ymm3[0,1,2,3],ymm5[4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
; AVX2-ONLY-NEXT: vpblendd $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
; AVX2-ONLY-NEXT: # ymm3 = mem[0,1,2,3,4,5],ymm3[6,7]
; AVX2-ONLY-NEXT: vpblendd $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
; AVX2-ONLY-NEXT: # xmm2 = mem[0,1],xmm2[2,3]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm5 = ymm2[0,1,2,3],ymm3[4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
; AVX2-ONLY-NEXT: vpblendd $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
; AVX2-ONLY-NEXT: # ymm2 = mem[0,1,2,3,4,5],ymm2[6,7]
; AVX2-ONLY-NEXT: vpblendd $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
; AVX2-ONLY-NEXT: # xmm1 = mem[0,1],xmm1[2,3]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm2[4,5,6,7]
; AVX2-ONLY-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vpblendd $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
; AVX2-ONLY-NEXT: # ymm1 = mem[0,1,2,3,4,5],ymm1[6,7]
; AVX2-ONLY-NEXT: vpblendd $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX2-ONLY-NEXT: # xmm0 = mem[0,1],xmm0[2,3]
; AVX2-ONLY-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, 448(%rsi)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, 384(%rsi)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, 320(%rsi)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, 256(%rsi)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, 192(%rsi)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, 128(%rsi)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, 64(%rsi)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, (%rsi)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, 480(%rsi)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, 416(%rsi)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, 352(%rsi)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, 288(%rsi)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, 224(%rsi)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, 160(%rsi)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, 96(%rsi)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, 32(%rsi)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, 448(%rdx)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, 384(%rdx)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, 320(%rdx)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, 256(%rdx)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, 192(%rdx)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, 128(%rdx)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, 64(%rdx)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, (%rdx)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, 480(%rdx)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, 416(%rdx)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, 352(%rdx)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, 288(%rdx)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, 224(%rdx)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, 160(%rdx)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, 96(%rdx)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, 32(%rdx)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, (%rcx)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, 64(%rcx)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, 128(%rcx)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, 192(%rcx)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, 256(%rcx)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, 320(%rcx)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, 384(%rcx)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, 448(%rcx)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, 480(%rcx)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, 416(%rcx)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, 352(%rcx)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, 288(%rcx)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, 224(%rcx)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, 160(%rcx)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, 96(%rcx)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, 32(%rcx)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, 480(%r8)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, 448(%r8)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, 416(%r8)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, 384(%r8)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, 352(%r8)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, 320(%r8)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, 288(%r8)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, 256(%r8)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, 224(%r8)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, 192(%r8)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, 160(%r8)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, 128(%r8)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, 96(%r8)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, 64(%r8)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, 32(%r8)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm1, (%r8)
; AVX2-ONLY-NEXT: vmovdqa %ymm0, 480(%r9)
; AVX2-ONLY-NEXT: vmovdqa %ymm2, 448(%r9)
; AVX2-ONLY-NEXT: vmovdqa %ymm5, 416(%r9)
; AVX2-ONLY-NEXT: vmovdqa %ymm7, 384(%r9)
; AVX2-ONLY-NEXT: vmovdqa %ymm4, 352(%r9)
; AVX2-ONLY-NEXT: vmovdqa %ymm13, 320(%r9)
; AVX2-ONLY-NEXT: vmovdqa %ymm6, 288(%r9)
; AVX2-ONLY-NEXT: vmovdqa %ymm10, 256(%r9)
; AVX2-ONLY-NEXT: vmovdqa %ymm14, 224(%r9)
; AVX2-ONLY-NEXT: vmovdqa %ymm9, 192(%r9)
; AVX2-ONLY-NEXT: vmovdqa %ymm11, 160(%r9)
; AVX2-ONLY-NEXT: vmovdqa %ymm12, 128(%r9)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm0, 96(%r9)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm0, 64(%r9)
; AVX2-ONLY-NEXT: vmovaps %ymm15, 32(%r9)
; AVX2-ONLY-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-ONLY-NEXT: vmovaps %ymm0, (%r9)
; AVX2-ONLY-NEXT: addq $3240, %rsp # imm = 0xCA8
; AVX2-ONLY-NEXT: vzeroupper
; AVX2-ONLY-NEXT: retq
5487 ; AVX512F-LABEL: load_i64_stride5_vf64:
5489 ; AVX512F-NEXT: subq $3400, %rsp # imm = 0xD48
5490 ; AVX512F-NEXT: vmovdqa64 1728(%rdi), %zmm21
5491 ; AVX512F-NEXT: vmovdqa64 1792(%rdi), %zmm4
5492 ; AVX512F-NEXT: vmovdqa64 1408(%rdi), %zmm19
5493 ; AVX512F-NEXT: vmovdqa64 1088(%rdi), %zmm0
5494 ; AVX512F-NEXT: vmovdqa64 1152(%rdi), %zmm3
5495 ; AVX512F-NEXT: vmovdqa64 768(%rdi), %zmm26
5496 ; AVX512F-NEXT: vmovdqa64 832(%rdi), %zmm5
5497 ; AVX512F-NEXT: vmovdqa64 448(%rdi), %zmm1
5498 ; AVX512F-NEXT: vmovdqa64 512(%rdi), %zmm6
5499 ; AVX512F-NEXT: vmovdqa64 128(%rdi), %zmm2
5500 ; AVX512F-NEXT: vmovdqa64 192(%rdi), %zmm7
5501 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm15 = [12,1,6,0,12,1,6,0]
5502 ; AVX512F-NEXT: # zmm15 = mem[0,1,2,3,0,1,2,3]
5503 ; AVX512F-NEXT: vmovdqa64 %zmm7, %zmm16
5504 ; AVX512F-NEXT: vpermt2q %zmm2, %zmm15, %zmm16
5505 ; AVX512F-NEXT: vmovdqa64 %zmm6, %zmm8
5506 ; AVX512F-NEXT: vpermt2q %zmm1, %zmm15, %zmm8
5507 ; AVX512F-NEXT: vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5508 ; AVX512F-NEXT: vmovdqa64 %zmm5, %zmm8
5509 ; AVX512F-NEXT: vpermt2q %zmm26, %zmm15, %zmm8
5510 ; AVX512F-NEXT: vmovdqu64 %zmm8, (%rsp) # 64-byte Spill
5511 ; AVX512F-NEXT: vmovdqa64 %zmm3, %zmm9
5512 ; AVX512F-NEXT: vmovdqa64 %zmm3, %zmm8
5513 ; AVX512F-NEXT: vpermt2q %zmm0, %zmm15, %zmm9
5514 ; AVX512F-NEXT: vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5515 ; AVX512F-NEXT: vmovdqa64 %zmm4, %zmm3
5516 ; AVX512F-NEXT: vpermt2q %zmm21, %zmm15, %zmm3
5517 ; AVX512F-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5518 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm9 = [5,10,15,0,5,10,15,0]
5519 ; AVX512F-NEXT: # zmm9 = mem[0,1,2,3,0,1,2,3]
5520 ; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm3
5521 ; AVX512F-NEXT: vpermt2q %zmm7, %zmm9, %zmm3
5522 ; AVX512F-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5523 ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm3
5524 ; AVX512F-NEXT: vpermt2q %zmm6, %zmm9, %zmm3
5525 ; AVX512F-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5526 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm3
5527 ; AVX512F-NEXT: vpermt2q %zmm8, %zmm9, %zmm3
5528 ; AVX512F-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5529 ; AVX512F-NEXT: vmovdqa64 %zmm26, %zmm3
5530 ; AVX512F-NEXT: vpermt2q %zmm5, %zmm9, %zmm3
5531 ; AVX512F-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5532 ; AVX512F-NEXT: vmovdqa64 %zmm21, %zmm3
5533 ; AVX512F-NEXT: vpermt2q %zmm4, %zmm9, %zmm3
5534 ; AVX512F-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5535 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm10 = [6,11,0,1,6,11,0,1]
5536 ; AVX512F-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3]
5537 ; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm3
5538 ; AVX512F-NEXT: vpermt2q %zmm7, %zmm10, %zmm3
5539 ; AVX512F-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5540 ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm3
5541 ; AVX512F-NEXT: vpermt2q %zmm6, %zmm10, %zmm3
5542 ; AVX512F-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5543 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm3
5544 ; AVX512F-NEXT: vpermt2q %zmm8, %zmm10, %zmm3
5545 ; AVX512F-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5546 ; AVX512F-NEXT: vmovdqa64 %zmm26, %zmm3
5547 ; AVX512F-NEXT: vpermt2q %zmm5, %zmm10, %zmm3
5548 ; AVX512F-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5549 ; AVX512F-NEXT: vmovdqa64 %zmm21, %zmm3
5550 ; AVX512F-NEXT: vpermt2q %zmm4, %zmm10, %zmm3
5551 ; AVX512F-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5552 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm11 = [7,12,0,2,7,12,0,2]
5553 ; AVX512F-NEXT: # zmm11 = mem[0,1,2,3,0,1,2,3]
5554 ; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm3
5555 ; AVX512F-NEXT: vpermt2q %zmm7, %zmm11, %zmm3
5556 ; AVX512F-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5557 ; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm3 = [0,5,0,11,0,5,0,11]
5558 ; AVX512F-NEXT: # zmm3 = mem[0,1,2,3,0,1,2,3]
5559 ; AVX512F-NEXT: vpermt2q %zmm2, %zmm3, %zmm7
5560 ; AVX512F-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5561 ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm2
5562 ; AVX512F-NEXT: vpermt2q %zmm6, %zmm11, %zmm2
5563 ; AVX512F-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5564 ; AVX512F-NEXT: vpermt2q %zmm1, %zmm3, %zmm6
5565 ; AVX512F-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5566 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm1
5567 ; AVX512F-NEXT: vpermt2q %zmm8, %zmm11, %zmm1
5568 ; AVX512F-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5569 ; AVX512F-NEXT: vpermt2q %zmm0, %zmm3, %zmm8
5570 ; AVX512F-NEXT: vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5571 ; AVX512F-NEXT: vmovdqa64 %zmm26, %zmm0
5572 ; AVX512F-NEXT: vpermt2q %zmm5, %zmm11, %zmm0
5573 ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5574 ; AVX512F-NEXT: vpermt2q %zmm26, %zmm3, %zmm5
5575 ; AVX512F-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5576 ; AVX512F-NEXT: vmovdqa64 %zmm21, %zmm0
5577 ; AVX512F-NEXT: vpermt2q %zmm4, %zmm11, %zmm0
5578 ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5579 ; AVX512F-NEXT: vpermt2q %zmm21, %zmm3, %zmm4
5580 ; AVX512F-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5581 ; AVX512F-NEXT: vmovdqa64 1472(%rdi), %zmm1
5582 ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm17
5583 ; AVX512F-NEXT: vpermt2q %zmm19, %zmm15, %zmm17
5584 ; AVX512F-NEXT: vmovdqa64 %zmm19, %zmm0
5585 ; AVX512F-NEXT: vpermt2q %zmm1, %zmm9, %zmm0
5586 ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5587 ; AVX512F-NEXT: vmovdqa64 %zmm19, %zmm0
5588 ; AVX512F-NEXT: vpermt2q %zmm1, %zmm10, %zmm0
5589 ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5590 ; AVX512F-NEXT: vmovdqa64 %zmm19, %zmm0
5591 ; AVX512F-NEXT: vpermt2q %zmm1, %zmm11, %zmm0
5592 ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5593 ; AVX512F-NEXT: vpermt2q %zmm19, %zmm3, %zmm1
5594 ; AVX512F-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5595 ; AVX512F-NEXT: vmovdqa64 2048(%rdi), %zmm0
5596 ; AVX512F-NEXT: vmovdqa64 2112(%rdi), %zmm4
5597 ; AVX512F-NEXT: vmovdqa64 %zmm4, %zmm18
5598 ; AVX512F-NEXT: vpermt2q %zmm0, %zmm15, %zmm18
5599 ; AVX512F-NEXT: vmovdqa64 2368(%rdi), %zmm1
5600 ; AVX512F-NEXT: vmovdqa64 2432(%rdi), %zmm5
5601 ; AVX512F-NEXT: vpermi2q %zmm1, %zmm5, %zmm15
5602 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm2
5603 ; AVX512F-NEXT: vpermt2q %zmm4, %zmm9, %zmm2
5604 ; AVX512F-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5605 ; AVX512F-NEXT: vpermi2q %zmm5, %zmm1, %zmm9
5606 ; AVX512F-NEXT: vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5607 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm2
5608 ; AVX512F-NEXT: vpermt2q %zmm4, %zmm10, %zmm2
5609 ; AVX512F-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5610 ; AVX512F-NEXT: vpermi2q %zmm5, %zmm1, %zmm10
5611 ; AVX512F-NEXT: vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5612 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm2
5613 ; AVX512F-NEXT: vpermt2q %zmm4, %zmm11, %zmm2
5614 ; AVX512F-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5615 ; AVX512F-NEXT: vpermi2q %zmm5, %zmm1, %zmm11
5616 ; AVX512F-NEXT: vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5617 ; AVX512F-NEXT: vpermt2q %zmm1, %zmm3, %zmm5
5618 ; AVX512F-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5619 ; AVX512F-NEXT: vpermt2q %zmm0, %zmm3, %zmm4
5620 ; AVX512F-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5621 ; AVX512F-NEXT: vmovdqa64 (%rdi), %zmm2
5622 ; AVX512F-NEXT: vmovdqa64 64(%rdi), %zmm31
5623 ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm1 = [0,5,10,15]
5624 ; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm19
5625 ; AVX512F-NEXT: vpermt2q %zmm31, %zmm1, %zmm19
5626 ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = <1,6,11,u>
5627 ; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm0
5628 ; AVX512F-NEXT: vpermt2q %zmm31, %zmm4, %zmm0
5629 ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5630 ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm8 = <2,7,12,u>
5631 ; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm0
5632 ; AVX512F-NEXT: vpermt2q %zmm31, %zmm8, %zmm0
5633 ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5634 ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm10 = <11,0,5,u>
5635 ; AVX512F-NEXT: vmovdqa64 %zmm31, %zmm0
5636 ; AVX512F-NEXT: vpermt2q %zmm2, %zmm10, %zmm0
5637 ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5638 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} ymm25 = <12,1,6,u>
5639 ; AVX512F-NEXT: vpermt2q %zmm2, %zmm25, %zmm31
5640 ; AVX512F-NEXT: vmovdqa64 384(%rdi), %zmm26
5641 ; AVX512F-NEXT: vmovdqa64 320(%rdi), %zmm2
5642 ; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm30
5643 ; AVX512F-NEXT: vpermt2q %zmm26, %zmm1, %zmm30
5644 ; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm0
5645 ; AVX512F-NEXT: vpermt2q %zmm26, %zmm4, %zmm0
5646 ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5647 ; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm0
5648 ; AVX512F-NEXT: vpermt2q %zmm26, %zmm8, %zmm0
5649 ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5650 ; AVX512F-NEXT: vmovdqa64 %zmm26, %zmm0
5651 ; AVX512F-NEXT: vpermt2q %zmm2, %zmm10, %zmm0
5652 ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5653 ; AVX512F-NEXT: vpermt2q %zmm2, %zmm25, %zmm26
5654 ; AVX512F-NEXT: vmovdqa64 1024(%rdi), %zmm21
5655 ; AVX512F-NEXT: vmovdqa64 960(%rdi), %zmm2
5656 ; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm3
5657 ; AVX512F-NEXT: vpermt2q %zmm21, %zmm1, %zmm3
5658 ; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm0
5659 ; AVX512F-NEXT: vpermt2q %zmm21, %zmm4, %zmm0
5660 ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5661 ; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm0
5662 ; AVX512F-NEXT: vpermt2q %zmm21, %zmm8, %zmm0
5663 ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5664 ; AVX512F-NEXT: vmovdqa64 %zmm21, %zmm0
5665 ; AVX512F-NEXT: vpermt2q %zmm2, %zmm10, %zmm0
5666 ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5667 ; AVX512F-NEXT: vpermt2q %zmm2, %zmm25, %zmm21
5668 ; AVX512F-NEXT: vmovdqa64 704(%rdi), %zmm20
5669 ; AVX512F-NEXT: vmovdqa64 640(%rdi), %zmm2
5670 ; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm5
5671 ; AVX512F-NEXT: vpermt2q %zmm20, %zmm1, %zmm5
5672 ; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm0
5673 ; AVX512F-NEXT: vpermt2q %zmm20, %zmm4, %zmm0
5674 ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5675 ; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm0
5676 ; AVX512F-NEXT: vpermt2q %zmm20, %zmm8, %zmm0
5677 ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5678 ; AVX512F-NEXT: vmovdqa64 %zmm20, %zmm0
5679 ; AVX512F-NEXT: vpermt2q %zmm2, %zmm10, %zmm0
5680 ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5681 ; AVX512F-NEXT: vpermt2q %zmm2, %zmm25, %zmm20
5682 ; AVX512F-NEXT: vmovdqa64 1664(%rdi), %zmm22
5683 ; AVX512F-NEXT: vmovdqa64 1600(%rdi), %zmm2
5684 ; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm11
5685 ; AVX512F-NEXT: vpermt2q %zmm22, %zmm1, %zmm11
5686 ; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm0
5687 ; AVX512F-NEXT: vpermt2q %zmm22, %zmm4, %zmm0
5688 ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5689 ; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm0
5690 ; AVX512F-NEXT: vpermt2q %zmm22, %zmm8, %zmm0
5691 ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5692 ; AVX512F-NEXT: vmovdqa64 %zmm22, %zmm0
5693 ; AVX512F-NEXT: vpermt2q %zmm2, %zmm10, %zmm0
5694 ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5695 ; AVX512F-NEXT: vpermt2q %zmm2, %zmm25, %zmm22
5696 ; AVX512F-NEXT: vmovdqa64 1344(%rdi), %zmm12
5697 ; AVX512F-NEXT: vmovdqa64 1280(%rdi), %zmm2
5698 ; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm13
5699 ; AVX512F-NEXT: vpermt2q %zmm12, %zmm1, %zmm13
5700 ; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm24
5701 ; AVX512F-NEXT: vpermt2q %zmm12, %zmm4, %zmm24
5702 ; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm28
5703 ; AVX512F-NEXT: vpermt2q %zmm12, %zmm8, %zmm28
5704 ; AVX512F-NEXT: vmovdqa64 %zmm12, %zmm29
5705 ; AVX512F-NEXT: vpermt2q %zmm2, %zmm10, %zmm29
5706 ; AVX512F-NEXT: vpermt2q %zmm2, %zmm25, %zmm12
5707 ; AVX512F-NEXT: vmovdqa64 1984(%rdi), %zmm9
5708 ; AVX512F-NEXT: vmovdqa64 1920(%rdi), %zmm14
5709 ; AVX512F-NEXT: vmovdqa64 %zmm14, %zmm2
5710 ; AVX512F-NEXT: vpermt2q %zmm9, %zmm1, %zmm2
5711 ; AVX512F-NEXT: vmovdqa64 2304(%rdi), %zmm6
5712 ; AVX512F-NEXT: vmovdqa64 2240(%rdi), %zmm0
5713 ; AVX512F-NEXT: vpermi2q %zmm6, %zmm0, %zmm1
5714 ; AVX512F-NEXT: vmovdqa64 %zmm14, %zmm7
5715 ; AVX512F-NEXT: vpermt2q %zmm9, %zmm4, %zmm7
5716 ; AVX512F-NEXT: vpermi2q %zmm6, %zmm0, %zmm4
5717 ; AVX512F-NEXT: vmovdqa64 %zmm14, %zmm23
5718 ; AVX512F-NEXT: vpermt2q %zmm9, %zmm8, %zmm23
5719 ; AVX512F-NEXT: vpermi2q %zmm6, %zmm0, %zmm8
5720 ; AVX512F-NEXT: vmovdqa64 %zmm9, %zmm27
5721 ; AVX512F-NEXT: vpermt2q %zmm14, %zmm10, %zmm27
5722 ; AVX512F-NEXT: vpermi2q %zmm0, %zmm6, %zmm10
5723 ; AVX512F-NEXT: vpermt2q %zmm0, %zmm25, %zmm6
5724 ; AVX512F-NEXT: vpermt2q %zmm14, %zmm25, %zmm9
5725 ; AVX512F-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm19[0,1,2,3],zmm16[4,5,6,7]
5726 ; AVX512F-NEXT: vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm30, %zmm14 # 64-byte Folded Reload
5727 ; AVX512F-NEXT: # zmm14 = zmm30[0,1,2,3],mem[4,5,6,7]
5728 ; AVX512F-NEXT: vshufi64x2 $228, (%rsp), %zmm5, %zmm5 # 64-byte Folded Reload
5729 ; AVX512F-NEXT: # zmm5 = zmm5[0,1,2,3],mem[4,5,6,7]
5730 ; AVX512F-NEXT: vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm3 # 64-byte Folded Reload
5731 ; AVX512F-NEXT: # zmm3 = zmm3[0,1,2,3],mem[4,5,6,7]
5732 ; AVX512F-NEXT: vshufi64x2 {{.*#+}} zmm13 = zmm13[0,1,2,3],zmm17[4,5,6,7]
5733 ; AVX512F-NEXT: vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm11, %zmm19 # 64-byte Folded Reload
5734 ; AVX512F-NEXT: # zmm19 = zmm11[0,1,2,3],mem[4,5,6,7]
5735 ; AVX512F-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm2[0,1,2,3],zmm18[4,5,6,7]
5736 ; AVX512F-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm1[0,1,2,3],zmm15[4,5,6,7]
5737 ; AVX512F-NEXT: vmovdqa64 256(%rdi), %zmm16
5738 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm25 = [0,1,2,3,4,5,6,11]
5739 ; AVX512F-NEXT: vpermt2q %zmm16, %zmm25, %zmm0
5740 ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5741 ; AVX512F-NEXT: vmovdqa64 576(%rdi), %zmm15
5742 ; AVX512F-NEXT: vpermt2q %zmm15, %zmm25, %zmm14
5743 ; AVX512F-NEXT: vmovdqu64 %zmm14, (%rsp) # 64-byte Spill
5744 ; AVX512F-NEXT: vmovdqa64 896(%rdi), %zmm17
5745 ; AVX512F-NEXT: vpermt2q %zmm17, %zmm25, %zmm5
5746 ; AVX512F-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5747 ; AVX512F-NEXT: vmovdqa64 1216(%rdi), %zmm11
5748 ; AVX512F-NEXT: vpermt2q %zmm11, %zmm25, %zmm3
5749 ; AVX512F-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5750 ; AVX512F-NEXT: vmovdqa64 1536(%rdi), %zmm18
5751 ; AVX512F-NEXT: vpermt2q %zmm18, %zmm25, %zmm13
5752 ; AVX512F-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5753 ; AVX512F-NEXT: vmovdqa64 1856(%rdi), %zmm14
5754 ; AVX512F-NEXT: vpermt2q %zmm14, %zmm25, %zmm19
5755 ; AVX512F-NEXT: vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5756 ; AVX512F-NEXT: vmovdqa64 2176(%rdi), %zmm19
5757 ; AVX512F-NEXT: vpermt2q %zmm19, %zmm25, %zmm2
5758 ; AVX512F-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5759 ; AVX512F-NEXT: vmovdqa64 2496(%rdi), %zmm13
5760 ; AVX512F-NEXT: vpermt2q %zmm13, %zmm25, %zmm1
5761 ; AVX512F-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5762 ; AVX512F-NEXT: movb $7, %al
5763 ; AVX512F-NEXT: kmovw %eax, %k1
5764 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
5765 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
5766 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm5 {%k1}
5767 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
5768 ; AVX512F-NEXT: vmovdqa64 %zmm7, %zmm3 {%k1}
5769 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
5770 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
5771 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm25 {%k1}
5772 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
5773 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
5774 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm7 {%k1}
5775 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
5776 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
5777 ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm0 {%k1}
5778 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
5779 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
5780 ; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm1 {%k1}
5781 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
5782 ; AVX512F-NEXT: vmovdqa64 %zmm24, %zmm2 {%k1}
5783 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
5784 ; AVX512F-NEXT: vmovdqa64 %zmm4, %zmm24 {%k1}
5785 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm4 = [0,1,2,3,4,5,6,12]
5786 ; AVX512F-NEXT: vpermt2q %zmm16, %zmm4, %zmm5
5787 ; AVX512F-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5788 ; AVX512F-NEXT: vpermt2q %zmm19, %zmm4, %zmm3
5789 ; AVX512F-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5790 ; AVX512F-NEXT: vpermt2q %zmm15, %zmm4, %zmm25
5791 ; AVX512F-NEXT: vmovdqu64 %zmm25, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5792 ; AVX512F-NEXT: vpermt2q %zmm11, %zmm4, %zmm7
5793 ; AVX512F-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5794 ; AVX512F-NEXT: vpermt2q %zmm17, %zmm4, %zmm0
5795 ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5796 ; AVX512F-NEXT: vpermt2q %zmm14, %zmm4, %zmm1
5797 ; AVX512F-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5798 ; AVX512F-NEXT: vpermt2q %zmm18, %zmm4, %zmm2
5799 ; AVX512F-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5800 ; AVX512F-NEXT: vpermt2q %zmm13, %zmm4, %zmm24
5801 ; AVX512F-NEXT: vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5802 ; AVX512F-NEXT: movb $56, %al
5803 ; AVX512F-NEXT: kmovw %eax, %k1
5804 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
5805 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
5806 ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm2 {%k1}
5807 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
5808 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm23 {%k1}
5809 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
5810 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
5811 ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm0 {%k1}
5812 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
5813 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
5814 ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm30 {%k1}
5815 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
5816 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
5817 ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm25 {%k1}
5818 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
5819 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
5820 ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm24 {%k1}
5821 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
5822 ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm28 {%k1}
5823 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
5824 ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm8 {%k1}
5825 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,2,3,4,5,8,13]
5826 ; AVX512F-NEXT: vpermt2q %zmm16, %zmm3, %zmm2
5827 ; AVX512F-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5828 ; AVX512F-NEXT: vpermt2q %zmm19, %zmm3, %zmm23
5829 ; AVX512F-NEXT: vpermt2q %zmm15, %zmm3, %zmm0
5830 ; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5831 ; AVX512F-NEXT: vpermt2q %zmm11, %zmm3, %zmm30
5832 ; AVX512F-NEXT: vpermt2q %zmm17, %zmm3, %zmm25
5833 ; AVX512F-NEXT: vpermt2q %zmm14, %zmm3, %zmm24
5834 ; AVX512F-NEXT: vpermt2q %zmm18, %zmm3, %zmm28
5835 ; AVX512F-NEXT: vpermt2q %zmm13, %zmm3, %zmm8
5836 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
5837 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
5838 ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm7 {%k1}
5839 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
5840 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm27 {%k1}
5841 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
5842 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
5843 ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm0 {%k1}
5844 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
5845 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
5846 ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm3 {%k1}
5847 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
5848 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
5849 ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm4 {%k1}
5850 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
5851 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
5852 ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm5 {%k1}
5853 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
5854 ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm29 {%k1}
5855 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
5856 ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm10 {%k1}
5857 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm1 = [0,1,2,3,4,5,9,14]
5858 ; AVX512F-NEXT: vpermt2q %zmm16, %zmm1, %zmm7
5859 ; AVX512F-NEXT: vpermt2q %zmm19, %zmm1, %zmm27
5860 ; AVX512F-NEXT: vpermt2q %zmm15, %zmm1, %zmm0
5861 ; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm2
5862 ; AVX512F-NEXT: vpermt2q %zmm11, %zmm1, %zmm3
5863 ; AVX512F-NEXT: vmovdqa64 %zmm3, %zmm0
5864 ; AVX512F-NEXT: vpermt2q %zmm17, %zmm1, %zmm4
5865 ; AVX512F-NEXT: vpermt2q %zmm14, %zmm1, %zmm5
5866 ; AVX512F-NEXT: vpermt2q %zmm18, %zmm1, %zmm29
5867 ; AVX512F-NEXT: vpermt2q %zmm13, %zmm1, %zmm10
5868 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
5869 ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm31 {%k1}
5870 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,2,3,4,5,10,15]
5871 ; AVX512F-NEXT: vpermt2q %zmm16, %zmm3, %zmm31
5872 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
5873 ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm26 {%k1}
5874 ; AVX512F-NEXT: vpermt2q %zmm15, %zmm3, %zmm26
5875 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
5876 ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm21 {%k1}
5877 ; AVX512F-NEXT: vpermt2q %zmm11, %zmm3, %zmm21
5878 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
5879 ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm20 {%k1}
5880 ; AVX512F-NEXT: vpermt2q %zmm17, %zmm3, %zmm20
5881 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
5882 ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm22 {%k1}
5883 ; AVX512F-NEXT: vpermt2q %zmm14, %zmm3, %zmm22
5884 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
5885 ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm12 {%k1}
5886 ; AVX512F-NEXT: vpermt2q %zmm18, %zmm3, %zmm12
5887 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
5888 ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm6 {%k1}
5889 ; AVX512F-NEXT: vpermt2q %zmm13, %zmm3, %zmm6
5890 ; AVX512F-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
5891 ; AVX512F-NEXT: vmovdqa64 %zmm1, %zmm9 {%k1}
5892 ; AVX512F-NEXT: vpermt2q %zmm19, %zmm3, %zmm9
5893 ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
5894 ; AVX512F-NEXT: vmovaps %zmm1, 448(%rsi)
5895 ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
5896 ; AVX512F-NEXT: vmovaps %zmm1, 384(%rsi)
5897 ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
5898 ; AVX512F-NEXT: vmovaps %zmm1, 320(%rsi)
5899 ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
5900 ; AVX512F-NEXT: vmovaps %zmm1, 256(%rsi)
5901 ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
5902 ; AVX512F-NEXT: vmovaps %zmm1, 192(%rsi)
5903 ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
5904 ; AVX512F-NEXT: vmovaps %zmm1, 128(%rsi)
5905 ; AVX512F-NEXT: vmovups (%rsp), %zmm1 # 64-byte Reload
5906 ; AVX512F-NEXT: vmovaps %zmm1, 64(%rsi)
5907 ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
5908 ; AVX512F-NEXT: vmovaps %zmm1, (%rsi)
5909 ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
5910 ; AVX512F-NEXT: vmovaps %zmm1, 448(%rdx)
5911 ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
5912 ; AVX512F-NEXT: vmovaps %zmm1, 256(%rdx)
5913 ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
5914 ; AVX512F-NEXT: vmovaps %zmm1, 320(%rdx)
5915 ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
5916 ; AVX512F-NEXT: vmovaps %zmm1, 128(%rdx)
5917 ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
5918 ; AVX512F-NEXT: vmovaps %zmm1, 192(%rdx)
5919 ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
5920 ; AVX512F-NEXT: vmovaps %zmm1, (%rdx)
5921 ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
5922 ; AVX512F-NEXT: vmovaps %zmm1, 64(%rdx)
5923 ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
5924 ; AVX512F-NEXT: vmovaps %zmm1, 384(%rdx)
5925 ; AVX512F-NEXT: vmovdqa64 %zmm8, 448(%rcx)
5926 ; AVX512F-NEXT: vmovdqa64 %zmm28, 256(%rcx)
5927 ; AVX512F-NEXT: vmovdqa64 %zmm24, 320(%rcx)
5928 ; AVX512F-NEXT: vmovdqa64 %zmm25, 128(%rcx)
5929 ; AVX512F-NEXT: vmovdqa64 %zmm30, 192(%rcx)
5930 ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
5931 ; AVX512F-NEXT: vmovaps %zmm1, (%rcx)
5932 ; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
5933 ; AVX512F-NEXT: vmovaps %zmm1, 64(%rcx)
5934 ; AVX512F-NEXT: vmovdqa64 %zmm23, 384(%rcx)
5935 ; AVX512F-NEXT: vmovdqa64 %zmm10, 448(%r8)
5936 ; AVX512F-NEXT: vmovdqa64 %zmm29, 256(%r8)
5937 ; AVX512F-NEXT: vmovdqa64 %zmm5, 320(%r8)
5938 ; AVX512F-NEXT: vmovdqa64 %zmm4, 128(%r8)
5939 ; AVX512F-NEXT: vmovdqa64 %zmm0, 192(%r8)
5940 ; AVX512F-NEXT: vmovdqa64 %zmm7, (%r8)
5941 ; AVX512F-NEXT: vmovdqa64 %zmm2, 64(%r8)
5942 ; AVX512F-NEXT: vmovdqa64 %zmm27, 384(%r8)
5943 ; AVX512F-NEXT: vmovdqa64 %zmm9, 384(%r9)
5944 ; AVX512F-NEXT: vmovdqa64 %zmm6, 448(%r9)
5945 ; AVX512F-NEXT: vmovdqa64 %zmm12, 256(%r9)
5946 ; AVX512F-NEXT: vmovdqa64 %zmm22, 320(%r9)
5947 ; AVX512F-NEXT: vmovdqa64 %zmm20, 128(%r9)
5948 ; AVX512F-NEXT: vmovdqa64 %zmm21, 192(%r9)
5949 ; AVX512F-NEXT: vmovdqa64 %zmm31, (%r9)
5950 ; AVX512F-NEXT: vmovdqa64 %zmm26, 64(%r9)
5951 ; AVX512F-NEXT: addq $3400, %rsp # imm = 0xD48
5952 ; AVX512F-NEXT: vzeroupper
5953 ; AVX512F-NEXT: retq
5955 ; AVX512BW-LABEL: load_i64_stride5_vf64:
5956 ; AVX512BW: # %bb.0:
5957 ; AVX512BW-NEXT: subq $3400, %rsp # imm = 0xD48
5958 ; AVX512BW-NEXT: vmovdqa64 1728(%rdi), %zmm21
5959 ; AVX512BW-NEXT: vmovdqa64 1792(%rdi), %zmm4
5960 ; AVX512BW-NEXT: vmovdqa64 1408(%rdi), %zmm19
5961 ; AVX512BW-NEXT: vmovdqa64 1088(%rdi), %zmm0
5962 ; AVX512BW-NEXT: vmovdqa64 1152(%rdi), %zmm3
5963 ; AVX512BW-NEXT: vmovdqa64 768(%rdi), %zmm26
5964 ; AVX512BW-NEXT: vmovdqa64 832(%rdi), %zmm5
5965 ; AVX512BW-NEXT: vmovdqa64 448(%rdi), %zmm1
5966 ; AVX512BW-NEXT: vmovdqa64 512(%rdi), %zmm6
5967 ; AVX512BW-NEXT: vmovdqa64 128(%rdi), %zmm2
5968 ; AVX512BW-NEXT: vmovdqa64 192(%rdi), %zmm7
5969 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm15 = [12,1,6,0,12,1,6,0]
5970 ; AVX512BW-NEXT: # zmm15 = mem[0,1,2,3,0,1,2,3]
5971 ; AVX512BW-NEXT: vmovdqa64 %zmm7, %zmm16
5972 ; AVX512BW-NEXT: vpermt2q %zmm2, %zmm15, %zmm16
5973 ; AVX512BW-NEXT: vmovdqa64 %zmm6, %zmm8
5974 ; AVX512BW-NEXT: vpermt2q %zmm1, %zmm15, %zmm8
5975 ; AVX512BW-NEXT: vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5976 ; AVX512BW-NEXT: vmovdqa64 %zmm5, %zmm8
5977 ; AVX512BW-NEXT: vpermt2q %zmm26, %zmm15, %zmm8
5978 ; AVX512BW-NEXT: vmovdqu64 %zmm8, (%rsp) # 64-byte Spill
5979 ; AVX512BW-NEXT: vmovdqa64 %zmm3, %zmm9
5980 ; AVX512BW-NEXT: vmovdqa64 %zmm3, %zmm8
5981 ; AVX512BW-NEXT: vpermt2q %zmm0, %zmm15, %zmm9
5982 ; AVX512BW-NEXT: vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5983 ; AVX512BW-NEXT: vmovdqa64 %zmm4, %zmm3
5984 ; AVX512BW-NEXT: vpermt2q %zmm21, %zmm15, %zmm3
5985 ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5986 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm9 = [5,10,15,0,5,10,15,0]
5987 ; AVX512BW-NEXT: # zmm9 = mem[0,1,2,3,0,1,2,3]
5988 ; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm3
5989 ; AVX512BW-NEXT: vpermt2q %zmm7, %zmm9, %zmm3
5990 ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5991 ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm3
5992 ; AVX512BW-NEXT: vpermt2q %zmm6, %zmm9, %zmm3
5993 ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5994 ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm3
5995 ; AVX512BW-NEXT: vpermt2q %zmm8, %zmm9, %zmm3
5996 ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
5997 ; AVX512BW-NEXT: vmovdqa64 %zmm26, %zmm3
5998 ; AVX512BW-NEXT: vpermt2q %zmm5, %zmm9, %zmm3
5999 ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
6000 ; AVX512BW-NEXT: vmovdqa64 %zmm21, %zmm3
6001 ; AVX512BW-NEXT: vpermt2q %zmm4, %zmm9, %zmm3
6002 ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
6003 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm10 = [6,11,0,1,6,11,0,1]
6004 ; AVX512BW-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3]
6005 ; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm3
6006 ; AVX512BW-NEXT: vpermt2q %zmm7, %zmm10, %zmm3
6007 ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
6008 ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm3
6009 ; AVX512BW-NEXT: vpermt2q %zmm6, %zmm10, %zmm3
6010 ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
6011 ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm3
6012 ; AVX512BW-NEXT: vpermt2q %zmm8, %zmm10, %zmm3
6013 ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
6014 ; AVX512BW-NEXT: vmovdqa64 %zmm26, %zmm3
6015 ; AVX512BW-NEXT: vpermt2q %zmm5, %zmm10, %zmm3
6016 ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
6017 ; AVX512BW-NEXT: vmovdqa64 %zmm21, %zmm3
6018 ; AVX512BW-NEXT: vpermt2q %zmm4, %zmm10, %zmm3
6019 ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
6020 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm11 = [7,12,0,2,7,12,0,2]
6021 ; AVX512BW-NEXT: # zmm11 = mem[0,1,2,3,0,1,2,3]
6022 ; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm3
6023 ; AVX512BW-NEXT: vpermt2q %zmm7, %zmm11, %zmm3
6024 ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
6025 ; AVX512BW-NEXT: vbroadcasti64x4 {{.*#+}} zmm3 = [0,5,0,11,0,5,0,11]
6026 ; AVX512BW-NEXT: # zmm3 = mem[0,1,2,3,0,1,2,3]
6027 ; AVX512BW-NEXT: vpermt2q %zmm2, %zmm3, %zmm7
6028 ; AVX512BW-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
6029 ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm2
6030 ; AVX512BW-NEXT: vpermt2q %zmm6, %zmm11, %zmm2
6031 ; AVX512BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
6032 ; AVX512BW-NEXT: vpermt2q %zmm1, %zmm3, %zmm6
6033 ; AVX512BW-NEXT: vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
6034 ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm1
6035 ; AVX512BW-NEXT: vpermt2q %zmm8, %zmm11, %zmm1
6036 ; AVX512BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
6037 ; AVX512BW-NEXT: vpermt2q %zmm0, %zmm3, %zmm8
6038 ; AVX512BW-NEXT: vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
6039 ; AVX512BW-NEXT: vmovdqa64 %zmm26, %zmm0
6040 ; AVX512BW-NEXT: vpermt2q %zmm5, %zmm11, %zmm0
6041 ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
6042 ; AVX512BW-NEXT: vpermt2q %zmm26, %zmm3, %zmm5
6043 ; AVX512BW-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
6044 ; AVX512BW-NEXT: vmovdqa64 %zmm21, %zmm0
6045 ; AVX512BW-NEXT: vpermt2q %zmm4, %zmm11, %zmm0
6046 ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
6047 ; AVX512BW-NEXT: vpermt2q %zmm21, %zmm3, %zmm4
6048 ; AVX512BW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
6049 ; AVX512BW-NEXT: vmovdqa64 1472(%rdi), %zmm1
6050 ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm17
6051 ; AVX512BW-NEXT: vpermt2q %zmm19, %zmm15, %zmm17
6052 ; AVX512BW-NEXT: vmovdqa64 %zmm19, %zmm0
6053 ; AVX512BW-NEXT: vpermt2q %zmm1, %zmm9, %zmm0
6054 ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
6055 ; AVX512BW-NEXT: vmovdqa64 %zmm19, %zmm0
6056 ; AVX512BW-NEXT: vpermt2q %zmm1, %zmm10, %zmm0
6057 ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
6058 ; AVX512BW-NEXT: vmovdqa64 %zmm19, %zmm0
6059 ; AVX512BW-NEXT: vpermt2q %zmm1, %zmm11, %zmm0
6060 ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
6061 ; AVX512BW-NEXT: vpermt2q %zmm19, %zmm3, %zmm1
6062 ; AVX512BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
6063 ; AVX512BW-NEXT: vmovdqa64 2048(%rdi), %zmm0
6064 ; AVX512BW-NEXT: vmovdqa64 2112(%rdi), %zmm4
6065 ; AVX512BW-NEXT: vmovdqa64 %zmm4, %zmm18
6066 ; AVX512BW-NEXT: vpermt2q %zmm0, %zmm15, %zmm18
6067 ; AVX512BW-NEXT: vmovdqa64 2368(%rdi), %zmm1
6068 ; AVX512BW-NEXT: vmovdqa64 2432(%rdi), %zmm5
6069 ; AVX512BW-NEXT: vpermi2q %zmm1, %zmm5, %zmm15
6070 ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm2
6071 ; AVX512BW-NEXT: vpermt2q %zmm4, %zmm9, %zmm2
6072 ; AVX512BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
6073 ; AVX512BW-NEXT: vpermi2q %zmm5, %zmm1, %zmm9
6074 ; AVX512BW-NEXT: vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
6075 ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm2
6076 ; AVX512BW-NEXT: vpermt2q %zmm4, %zmm10, %zmm2
6077 ; AVX512BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
6078 ; AVX512BW-NEXT: vpermi2q %zmm5, %zmm1, %zmm10
6079 ; AVX512BW-NEXT: vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
6080 ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm2
6081 ; AVX512BW-NEXT: vpermt2q %zmm4, %zmm11, %zmm2
6082 ; AVX512BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
6083 ; AVX512BW-NEXT: vpermi2q %zmm5, %zmm1, %zmm11
6084 ; AVX512BW-NEXT: vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
6085 ; AVX512BW-NEXT: vpermt2q %zmm1, %zmm3, %zmm5
6086 ; AVX512BW-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
6087 ; AVX512BW-NEXT: vpermt2q %zmm0, %zmm3, %zmm4
6088 ; AVX512BW-NEXT: vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
6089 ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm2
6090 ; AVX512BW-NEXT: vmovdqa64 64(%rdi), %zmm31
6091 ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [0,5,10,15]
6092 ; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm19
6093 ; AVX512BW-NEXT: vpermt2q %zmm31, %zmm1, %zmm19
6094 ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm4 = <1,6,11,u>
6095 ; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm0
6096 ; AVX512BW-NEXT: vpermt2q %zmm31, %zmm4, %zmm0
6097 ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
6098 ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm8 = <2,7,12,u>
6099 ; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm0
6100 ; AVX512BW-NEXT: vpermt2q %zmm31, %zmm8, %zmm0
6101 ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
6102 ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm10 = <11,0,5,u>
6103 ; AVX512BW-NEXT: vmovdqa64 %zmm31, %zmm0
6104 ; AVX512BW-NEXT: vpermt2q %zmm2, %zmm10, %zmm0
6105 ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
6106 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} ymm25 = <12,1,6,u>
6107 ; AVX512BW-NEXT: vpermt2q %zmm2, %zmm25, %zmm31
6108 ; AVX512BW-NEXT: vmovdqa64 384(%rdi), %zmm26
6109 ; AVX512BW-NEXT: vmovdqa64 320(%rdi), %zmm2
6110 ; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm30
6111 ; AVX512BW-NEXT: vpermt2q %zmm26, %zmm1, %zmm30
6112 ; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm0
6113 ; AVX512BW-NEXT: vpermt2q %zmm26, %zmm4, %zmm0
6114 ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
6115 ; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm0
6116 ; AVX512BW-NEXT: vpermt2q %zmm26, %zmm8, %zmm0
6117 ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
6118 ; AVX512BW-NEXT: vmovdqa64 %zmm26, %zmm0
6119 ; AVX512BW-NEXT: vpermt2q %zmm2, %zmm10, %zmm0
6120 ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
6121 ; AVX512BW-NEXT: vpermt2q %zmm2, %zmm25, %zmm26
6122 ; AVX512BW-NEXT: vmovdqa64 1024(%rdi), %zmm21
6123 ; AVX512BW-NEXT: vmovdqa64 960(%rdi), %zmm2
6124 ; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm3
6125 ; AVX512BW-NEXT: vpermt2q %zmm21, %zmm1, %zmm3
6126 ; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm0
6127 ; AVX512BW-NEXT: vpermt2q %zmm21, %zmm4, %zmm0
6128 ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
6129 ; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm0
6130 ; AVX512BW-NEXT: vpermt2q %zmm21, %zmm8, %zmm0
6131 ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
6132 ; AVX512BW-NEXT: vmovdqa64 %zmm21, %zmm0
6133 ; AVX512BW-NEXT: vpermt2q %zmm2, %zmm10, %zmm0
6134 ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
6135 ; AVX512BW-NEXT: vpermt2q %zmm2, %zmm25, %zmm21
6136 ; AVX512BW-NEXT: vmovdqa64 704(%rdi), %zmm20
6137 ; AVX512BW-NEXT: vmovdqa64 640(%rdi), %zmm2
6138 ; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm5
6139 ; AVX512BW-NEXT: vpermt2q %zmm20, %zmm1, %zmm5
6140 ; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm0
6141 ; AVX512BW-NEXT: vpermt2q %zmm20, %zmm4, %zmm0
6142 ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
6143 ; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm0
6144 ; AVX512BW-NEXT: vpermt2q %zmm20, %zmm8, %zmm0
6145 ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
6146 ; AVX512BW-NEXT: vmovdqa64 %zmm20, %zmm0
6147 ; AVX512BW-NEXT: vpermt2q %zmm2, %zmm10, %zmm0
6148 ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
6149 ; AVX512BW-NEXT: vpermt2q %zmm2, %zmm25, %zmm20
6150 ; AVX512BW-NEXT: vmovdqa64 1664(%rdi), %zmm22
6151 ; AVX512BW-NEXT: vmovdqa64 1600(%rdi), %zmm2
6152 ; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm11
6153 ; AVX512BW-NEXT: vpermt2q %zmm22, %zmm1, %zmm11
6154 ; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm0
6155 ; AVX512BW-NEXT: vpermt2q %zmm22, %zmm4, %zmm0
6156 ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
6157 ; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm0
6158 ; AVX512BW-NEXT: vpermt2q %zmm22, %zmm8, %zmm0
6159 ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
6160 ; AVX512BW-NEXT: vmovdqa64 %zmm22, %zmm0
6161 ; AVX512BW-NEXT: vpermt2q %zmm2, %zmm10, %zmm0
6162 ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
6163 ; AVX512BW-NEXT: vpermt2q %zmm2, %zmm25, %zmm22
6164 ; AVX512BW-NEXT: vmovdqa64 1344(%rdi), %zmm12
6165 ; AVX512BW-NEXT: vmovdqa64 1280(%rdi), %zmm2
6166 ; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm13
6167 ; AVX512BW-NEXT: vpermt2q %zmm12, %zmm1, %zmm13
6168 ; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm24
6169 ; AVX512BW-NEXT: vpermt2q %zmm12, %zmm4, %zmm24
6170 ; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm28
6171 ; AVX512BW-NEXT: vpermt2q %zmm12, %zmm8, %zmm28
6172 ; AVX512BW-NEXT: vmovdqa64 %zmm12, %zmm29
6173 ; AVX512BW-NEXT: vpermt2q %zmm2, %zmm10, %zmm29
6174 ; AVX512BW-NEXT: vpermt2q %zmm2, %zmm25, %zmm12
6175 ; AVX512BW-NEXT: vmovdqa64 1984(%rdi), %zmm9
6176 ; AVX512BW-NEXT: vmovdqa64 1920(%rdi), %zmm14
6177 ; AVX512BW-NEXT: vmovdqa64 %zmm14, %zmm2
6178 ; AVX512BW-NEXT: vpermt2q %zmm9, %zmm1, %zmm2
6179 ; AVX512BW-NEXT: vmovdqa64 2304(%rdi), %zmm6
6180 ; AVX512BW-NEXT: vmovdqa64 2240(%rdi), %zmm0
6181 ; AVX512BW-NEXT: vpermi2q %zmm6, %zmm0, %zmm1
6182 ; AVX512BW-NEXT: vmovdqa64 %zmm14, %zmm7
6183 ; AVX512BW-NEXT: vpermt2q %zmm9, %zmm4, %zmm7
6184 ; AVX512BW-NEXT: vpermi2q %zmm6, %zmm0, %zmm4
6185 ; AVX512BW-NEXT: vmovdqa64 %zmm14, %zmm23
6186 ; AVX512BW-NEXT: vpermt2q %zmm9, %zmm8, %zmm23
6187 ; AVX512BW-NEXT: vpermi2q %zmm6, %zmm0, %zmm8
6188 ; AVX512BW-NEXT: vmovdqa64 %zmm9, %zmm27
6189 ; AVX512BW-NEXT: vpermt2q %zmm14, %zmm10, %zmm27
6190 ; AVX512BW-NEXT: vpermi2q %zmm0, %zmm6, %zmm10
6191 ; AVX512BW-NEXT: vpermt2q %zmm0, %zmm25, %zmm6
6192 ; AVX512BW-NEXT: vpermt2q %zmm14, %zmm25, %zmm9
6193 ; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm19[0,1,2,3],zmm16[4,5,6,7]
6194 ; AVX512BW-NEXT: vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm30, %zmm14 # 64-byte Folded Reload
6195 ; AVX512BW-NEXT: # zmm14 = zmm30[0,1,2,3],mem[4,5,6,7]
6196 ; AVX512BW-NEXT: vshufi64x2 $228, (%rsp), %zmm5, %zmm5 # 64-byte Folded Reload
6197 ; AVX512BW-NEXT: # zmm5 = zmm5[0,1,2,3],mem[4,5,6,7]
6198 ; AVX512BW-NEXT: vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm3 # 64-byte Folded Reload
6199 ; AVX512BW-NEXT: # zmm3 = zmm3[0,1,2,3],mem[4,5,6,7]
6200 ; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm13 = zmm13[0,1,2,3],zmm17[4,5,6,7]
6201 ; AVX512BW-NEXT: vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm11, %zmm19 # 64-byte Folded Reload
6202 ; AVX512BW-NEXT: # zmm19 = zmm11[0,1,2,3],mem[4,5,6,7]
6203 ; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm2[0,1,2,3],zmm18[4,5,6,7]
6204 ; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm1[0,1,2,3],zmm15[4,5,6,7]
6205 ; AVX512BW-NEXT: vmovdqa64 256(%rdi), %zmm16
6206 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm25 = [0,1,2,3,4,5,6,11]
6207 ; AVX512BW-NEXT: vpermt2q %zmm16, %zmm25, %zmm0
6208 ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
6209 ; AVX512BW-NEXT: vmovdqa64 576(%rdi), %zmm15
6210 ; AVX512BW-NEXT: vpermt2q %zmm15, %zmm25, %zmm14
6211 ; AVX512BW-NEXT: vmovdqu64 %zmm14, (%rsp) # 64-byte Spill
6212 ; AVX512BW-NEXT: vmovdqa64 896(%rdi), %zmm17
6213 ; AVX512BW-NEXT: vpermt2q %zmm17, %zmm25, %zmm5
6214 ; AVX512BW-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
6215 ; AVX512BW-NEXT: vmovdqa64 1216(%rdi), %zmm11
6216 ; AVX512BW-NEXT: vpermt2q %zmm11, %zmm25, %zmm3
6217 ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
6218 ; AVX512BW-NEXT: vmovdqa64 1536(%rdi), %zmm18
6219 ; AVX512BW-NEXT: vpermt2q %zmm18, %zmm25, %zmm13
6220 ; AVX512BW-NEXT: vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
6221 ; AVX512BW-NEXT: vmovdqa64 1856(%rdi), %zmm14
6222 ; AVX512BW-NEXT: vpermt2q %zmm14, %zmm25, %zmm19
6223 ; AVX512BW-NEXT: vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
6224 ; AVX512BW-NEXT: vmovdqa64 2176(%rdi), %zmm19
6225 ; AVX512BW-NEXT: vpermt2q %zmm19, %zmm25, %zmm2
6226 ; AVX512BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
6227 ; AVX512BW-NEXT: vmovdqa64 2496(%rdi), %zmm13
6228 ; AVX512BW-NEXT: vpermt2q %zmm13, %zmm25, %zmm1
6229 ; AVX512BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
6230 ; AVX512BW-NEXT: movb $7, %al
6231 ; AVX512BW-NEXT: kmovd %eax, %k1
6232 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
6233 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
6234 ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm5 {%k1}
6235 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
6236 ; AVX512BW-NEXT: vmovdqa64 %zmm7, %zmm3 {%k1}
6237 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
6238 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
6239 ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm25 {%k1}
6240 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
6241 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
6242 ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm7 {%k1}
6243 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
6244 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
6245 ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm0 {%k1}
6246 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
6247 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
6248 ; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm1 {%k1}
6249 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
6250 ; AVX512BW-NEXT: vmovdqa64 %zmm24, %zmm2 {%k1}
6251 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
6252 ; AVX512BW-NEXT: vmovdqa64 %zmm4, %zmm24 {%k1}
6253 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm4 = [0,1,2,3,4,5,6,12]
6254 ; AVX512BW-NEXT: vpermt2q %zmm16, %zmm4, %zmm5
6255 ; AVX512BW-NEXT: vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
6256 ; AVX512BW-NEXT: vpermt2q %zmm19, %zmm4, %zmm3
6257 ; AVX512BW-NEXT: vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
6258 ; AVX512BW-NEXT: vpermt2q %zmm15, %zmm4, %zmm25
6259 ; AVX512BW-NEXT: vmovdqu64 %zmm25, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
6260 ; AVX512BW-NEXT: vpermt2q %zmm11, %zmm4, %zmm7
6261 ; AVX512BW-NEXT: vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
6262 ; AVX512BW-NEXT: vpermt2q %zmm17, %zmm4, %zmm0
6263 ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
6264 ; AVX512BW-NEXT: vpermt2q %zmm14, %zmm4, %zmm1
6265 ; AVX512BW-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
6266 ; AVX512BW-NEXT: vpermt2q %zmm18, %zmm4, %zmm2
6267 ; AVX512BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
6268 ; AVX512BW-NEXT: vpermt2q %zmm13, %zmm4, %zmm24
6269 ; AVX512BW-NEXT: vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
6270 ; AVX512BW-NEXT: movb $56, %al
6271 ; AVX512BW-NEXT: kmovd %eax, %k1
6272 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
6273 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
6274 ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm2 {%k1}
6275 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
6276 ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm23 {%k1}
6277 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
6278 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
6279 ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm0 {%k1}
6280 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
6281 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
6282 ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm30 {%k1}
6283 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
6284 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
6285 ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm25 {%k1}
6286 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
6287 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
6288 ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm24 {%k1}
6289 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
6290 ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm28 {%k1}
6291 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
6292 ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm8 {%k1}
6293 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,2,3,4,5,8,13]
6294 ; AVX512BW-NEXT: vpermt2q %zmm16, %zmm3, %zmm2
6295 ; AVX512BW-NEXT: vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
6296 ; AVX512BW-NEXT: vpermt2q %zmm19, %zmm3, %zmm23
6297 ; AVX512BW-NEXT: vpermt2q %zmm15, %zmm3, %zmm0
6298 ; AVX512BW-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
6299 ; AVX512BW-NEXT: vpermt2q %zmm11, %zmm3, %zmm30
6300 ; AVX512BW-NEXT: vpermt2q %zmm17, %zmm3, %zmm25
6301 ; AVX512BW-NEXT: vpermt2q %zmm14, %zmm3, %zmm24
6302 ; AVX512BW-NEXT: vpermt2q %zmm18, %zmm3, %zmm28
6303 ; AVX512BW-NEXT: vpermt2q %zmm13, %zmm3, %zmm8
6304 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
6305 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
6306 ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm7 {%k1}
6307 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
6308 ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm27 {%k1}
6309 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
6310 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
6311 ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm0 {%k1}
6312 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
6313 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
6314 ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm3 {%k1}
6315 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
6316 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
6317 ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm4 {%k1}
6318 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
6319 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
6320 ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm5 {%k1}
6321 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
6322 ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm29 {%k1}
6323 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
6324 ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm10 {%k1}
6325 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [0,1,2,3,4,5,9,14]
6326 ; AVX512BW-NEXT: vpermt2q %zmm16, %zmm1, %zmm7
6327 ; AVX512BW-NEXT: vpermt2q %zmm19, %zmm1, %zmm27
6328 ; AVX512BW-NEXT: vpermt2q %zmm15, %zmm1, %zmm0
6329 ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm2
6330 ; AVX512BW-NEXT: vpermt2q %zmm11, %zmm1, %zmm3
6331 ; AVX512BW-NEXT: vmovdqa64 %zmm3, %zmm0
6332 ; AVX512BW-NEXT: vpermt2q %zmm17, %zmm1, %zmm4
6333 ; AVX512BW-NEXT: vpermt2q %zmm14, %zmm1, %zmm5
6334 ; AVX512BW-NEXT: vpermt2q %zmm18, %zmm1, %zmm29
6335 ; AVX512BW-NEXT: vpermt2q %zmm13, %zmm1, %zmm10
6336 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
6337 ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm31 {%k1}
6338 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,2,3,4,5,10,15]
6339 ; AVX512BW-NEXT: vpermt2q %zmm16, %zmm3, %zmm31
6340 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
6341 ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm26 {%k1}
6342 ; AVX512BW-NEXT: vpermt2q %zmm15, %zmm3, %zmm26
6343 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
6344 ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm21 {%k1}
6345 ; AVX512BW-NEXT: vpermt2q %zmm11, %zmm3, %zmm21
6346 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
6347 ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm20 {%k1}
6348 ; AVX512BW-NEXT: vpermt2q %zmm17, %zmm3, %zmm20
6349 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
6350 ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm22 {%k1}
6351 ; AVX512BW-NEXT: vpermt2q %zmm14, %zmm3, %zmm22
6352 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
6353 ; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm12 {%k1}
6354 ; AVX512BW-NEXT: vpermt2q %zmm18, %zmm3, %zmm12
6355 ; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm6 {%k1}
; AVX512BW-NEXT: vpermt2q %zmm13, %zmm3, %zmm6
; AVX512BW-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm9 {%k1}
; AVX512BW-NEXT: vpermt2q %zmm19, %zmm3, %zmm9
; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512BW-NEXT: vmovaps %zmm1, 448(%rsi)
; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512BW-NEXT: vmovaps %zmm1, 384(%rsi)
; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512BW-NEXT: vmovaps %zmm1, 320(%rsi)
; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512BW-NEXT: vmovaps %zmm1, 256(%rsi)
; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512BW-NEXT: vmovaps %zmm1, 192(%rsi)
; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512BW-NEXT: vmovaps %zmm1, 128(%rsi)
; AVX512BW-NEXT: vmovups (%rsp), %zmm1 # 64-byte Reload
; AVX512BW-NEXT: vmovaps %zmm1, 64(%rsi)
; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512BW-NEXT: vmovaps %zmm1, (%rsi)
; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512BW-NEXT: vmovaps %zmm1, 448(%rdx)
; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512BW-NEXT: vmovaps %zmm1, 256(%rdx)
; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512BW-NEXT: vmovaps %zmm1, 320(%rdx)
; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512BW-NEXT: vmovaps %zmm1, 128(%rdx)
; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512BW-NEXT: vmovaps %zmm1, 192(%rdx)
; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512BW-NEXT: vmovaps %zmm1, (%rdx)
; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512BW-NEXT: vmovaps %zmm1, 64(%rdx)
; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512BW-NEXT: vmovaps %zmm1, 384(%rdx)
; AVX512BW-NEXT: vmovdqa64 %zmm8, 448(%rcx)
; AVX512BW-NEXT: vmovdqa64 %zmm28, 256(%rcx)
; AVX512BW-NEXT: vmovdqa64 %zmm24, 320(%rcx)
; AVX512BW-NEXT: vmovdqa64 %zmm25, 128(%rcx)
; AVX512BW-NEXT: vmovdqa64 %zmm30, 192(%rcx)
; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512BW-NEXT: vmovaps %zmm1, (%rcx)
; AVX512BW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512BW-NEXT: vmovaps %zmm1, 64(%rcx)
; AVX512BW-NEXT: vmovdqa64 %zmm23, 384(%rcx)
; AVX512BW-NEXT: vmovdqa64 %zmm10, 448(%r8)
; AVX512BW-NEXT: vmovdqa64 %zmm29, 256(%r8)
; AVX512BW-NEXT: vmovdqa64 %zmm5, 320(%r8)
; AVX512BW-NEXT: vmovdqa64 %zmm4, 128(%r8)
; AVX512BW-NEXT: vmovdqa64 %zmm0, 192(%r8)
; AVX512BW-NEXT: vmovdqa64 %zmm7, (%r8)
; AVX512BW-NEXT: vmovdqa64 %zmm2, 64(%r8)
; AVX512BW-NEXT: vmovdqa64 %zmm27, 384(%r8)
; AVX512BW-NEXT: vmovdqa64 %zmm9, 384(%r9)
; AVX512BW-NEXT: vmovdqa64 %zmm6, 448(%r9)
; AVX512BW-NEXT: vmovdqa64 %zmm12, 256(%r9)
; AVX512BW-NEXT: vmovdqa64 %zmm22, 320(%r9)
; AVX512BW-NEXT: vmovdqa64 %zmm20, 128(%r9)
; AVX512BW-NEXT: vmovdqa64 %zmm21, 192(%r9)
; AVX512BW-NEXT: vmovdqa64 %zmm31, (%r9)
; AVX512BW-NEXT: vmovdqa64 %zmm26, 64(%r9)
; AVX512BW-NEXT: addq $3400, %rsp # imm = 0xD48
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
  %wide.vec = load <320 x i64>, ptr %in.vec, align 64
  %strided.vec0 = shufflevector <320 x i64> %wide.vec, <320 x i64> poison, <64 x i32> <i32 0, i32 5, i32 10, i32 15, i32 20, i32 25, i32 30, i32 35, i32 40, i32 45, i32 50, i32 55, i32 60, i32 65, i32 70, i32 75, i32 80, i32 85, i32 90, i32 95, i32 100, i32 105, i32 110, i32 115, i32 120, i32 125, i32 130, i32 135, i32 140, i32 145, i32 150, i32 155, i32 160, i32 165, i32 170, i32 175, i32 180, i32 185, i32 190, i32 195, i32 200, i32 205, i32 210, i32 215, i32 220, i32 225, i32 230, i32 235, i32 240, i32 245, i32 250, i32 255, i32 260, i32 265, i32 270, i32 275, i32 280, i32 285, i32 290, i32 295, i32 300, i32 305, i32 310, i32 315>
  %strided.vec1 = shufflevector <320 x i64> %wide.vec, <320 x i64> poison, <64 x i32> <i32 1, i32 6, i32 11, i32 16, i32 21, i32 26, i32 31, i32 36, i32 41, i32 46, i32 51, i32 56, i32 61, i32 66, i32 71, i32 76, i32 81, i32 86, i32 91, i32 96, i32 101, i32 106, i32 111, i32 116, i32 121, i32 126, i32 131, i32 136, i32 141, i32 146, i32 151, i32 156, i32 161, i32 166, i32 171, i32 176, i32 181, i32 186, i32 191, i32 196, i32 201, i32 206, i32 211, i32 216, i32 221, i32 226, i32 231, i32 236, i32 241, i32 246, i32 251, i32 256, i32 261, i32 266, i32 271, i32 276, i32 281, i32 286, i32 291, i32 296, i32 301, i32 306, i32 311, i32 316>
  %strided.vec2 = shufflevector <320 x i64> %wide.vec, <320 x i64> poison, <64 x i32> <i32 2, i32 7, i32 12, i32 17, i32 22, i32 27, i32 32, i32 37, i32 42, i32 47, i32 52, i32 57, i32 62, i32 67, i32 72, i32 77, i32 82, i32 87, i32 92, i32 97, i32 102, i32 107, i32 112, i32 117, i32 122, i32 127, i32 132, i32 137, i32 142, i32 147, i32 152, i32 157, i32 162, i32 167, i32 172, i32 177, i32 182, i32 187, i32 192, i32 197, i32 202, i32 207, i32 212, i32 217, i32 222, i32 227, i32 232, i32 237, i32 242, i32 247, i32 252, i32 257, i32 262, i32 267, i32 272, i32 277, i32 282, i32 287, i32 292, i32 297, i32 302, i32 307, i32 312, i32 317>
  %strided.vec3 = shufflevector <320 x i64> %wide.vec, <320 x i64> poison, <64 x i32> <i32 3, i32 8, i32 13, i32 18, i32 23, i32 28, i32 33, i32 38, i32 43, i32 48, i32 53, i32 58, i32 63, i32 68, i32 73, i32 78, i32 83, i32 88, i32 93, i32 98, i32 103, i32 108, i32 113, i32 118, i32 123, i32 128, i32 133, i32 138, i32 143, i32 148, i32 153, i32 158, i32 163, i32 168, i32 173, i32 178, i32 183, i32 188, i32 193, i32 198, i32 203, i32 208, i32 213, i32 218, i32 223, i32 228, i32 233, i32 238, i32 243, i32 248, i32 253, i32 258, i32 263, i32 268, i32 273, i32 278, i32 283, i32 288, i32 293, i32 298, i32 303, i32 308, i32 313, i32 318>
  %strided.vec4 = shufflevector <320 x i64> %wide.vec, <320 x i64> poison, <64 x i32> <i32 4, i32 9, i32 14, i32 19, i32 24, i32 29, i32 34, i32 39, i32 44, i32 49, i32 54, i32 59, i32 64, i32 69, i32 74, i32 79, i32 84, i32 89, i32 94, i32 99, i32 104, i32 109, i32 114, i32 119, i32 124, i32 129, i32 134, i32 139, i32 144, i32 149, i32 154, i32 159, i32 164, i32 169, i32 174, i32 179, i32 184, i32 189, i32 194, i32 199, i32 204, i32 209, i32 214, i32 219, i32 224, i32 229, i32 234, i32 239, i32 244, i32 249, i32 254, i32 259, i32 264, i32 269, i32 274, i32 279, i32 284, i32 289, i32 294, i32 299, i32 304, i32 309, i32 314, i32 319>
  store <64 x i64> %strided.vec0, ptr %out.vec0, align 64
  store <64 x i64> %strided.vec1, ptr %out.vec1, align 64
  store <64 x i64> %strided.vec2, ptr %out.vec2, align 64
  store <64 x i64> %strided.vec3, ptr %out.vec3, align 64
  store <64 x i64> %strided.vec4, ptr %out.vec4, align 64
  ret void
}
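; Each of the five <64 x i32> masks above selects indices k, k+5, k+10, ..., k+315
; (k = 0..4) from the 320-element load, i.e. one field of 64 consecutive stride-5
; records per output vector.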
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; AVX2-FAST-PERLANE: {{.*}}
; AVX512BW-FAST: {{.*}}
; AVX512BW-ONLY-FAST: {{.*}}
; AVX512BW-ONLY-SLOW: {{.*}}
; AVX512BW-SLOW: {{.*}}
; AVX512DQ-FAST: {{.*}}
; AVX512DQ-SLOW: {{.*}}
; AVX512DQBW-FAST: {{.*}}
; AVX512DQBW-SLOW: {{.*}}
; AVX512F-FAST: {{.*}}
; AVX512F-ONLY-FAST: {{.*}}
; AVX512F-ONLY-SLOW: {{.*}}
; AVX512F-SLOW: {{.*}}
; FALLBACK10: {{.*}}
; FALLBACK11: {{.*}}
; FALLBACK12: {{.*}}